De-abseil, part 2: mutex, locks, (most) time

- util::Mutex is now a std::shared_timed_mutex, which is capable of
  exclusive and shared locks.

- util::Lock is still present as a std::lock_guard<util::Mutex>.
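  As a rough sketch (the real types presumably also carry the clang
  thread-safety annotations mentioned below), these two names now
  amount to something like:

  ```C++
  #include <mutex>
  #include <shared_mutex>  // std::shared_timed_mutex is C++14

  namespace llarp { namespace util {
    // Hypothetical aliases for illustration; the actual definitions may
    // be thin annotated wrappers rather than bare aliases.
    using Mutex = std::shared_timed_mutex;   // exclusive *and* shared locking
    using Lock  = std::lock_guard<Mutex>;    // simple exclusive RAII lock
  }}  // namespace llarp::util

  // Typical exclusive-lock usage, as seen throughout this diff:
  //   util::Lock l(_mutex);  // holds _mutex exclusively until end of scope
  ```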

- the locking annotations are preserved, but updated to the latest
  supported by clang rather than using abseil's older/deprecated ones.
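  For example, using the newer macro names that appear throughout this
  diff (GUARDED_BY, EXCLUDES, REQUIRES_SHARED, presumably defined in
  util/thread/annotations.hpp), annotated code looks roughly like the
  following sketch:

  ```C++
  #include <cstddef>
  #include <vector>

  // Plain-clang spellings of the macros used below (other compilers get
  // empty definitions); this mirrors clang's documented thread-safety
  // annotation header, not lokinet's actual file.
  #if defined(__clang__)
  #define GUARDED_BY(x)        __attribute__((guarded_by(x)))
  #define EXCLUDES(...)        __attribute__((locks_excluded(__VA_ARGS__)))
  #define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
  #else
  #define GUARDED_BY(x)
  #define EXCLUDES(...)
  #define REQUIRES_SHARED(...)
  #endif

  struct Example
  {
    // For clang's -Wthread-safety analysis to fire, util::Mutex itself
    // must also be declared with the capability("mutex") attribute.
    mutable util::Mutex m_mutex;                  // protects m_data
    std::vector<int> m_data GUARDED_BY(m_mutex);

    std::size_t
    Size() const EXCLUDES(m_mutex)  // caller must *not* already hold m_mutex
    {
      util::Lock l(m_mutex);
      return m_data.size();
    }

    bool
    EmptyNoLock() const REQUIRES_SHARED(m_mutex)  // caller holds at least a shared lock
    {
      return m_data.empty();
    }
  };
  ```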

- ACQUIRE_LOCK macro is gone since we don't pass mutexes by pointer into
  locks anymore (WTF abseil).
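  The resulting mechanical change throughout the diff looks like:

  ```C++
  // before: abseil-style macro, mutex passed by pointer
  ACQUIRE_LOCK(Lock_t lock, m_PendingMutex);

  // after: plain RAII construction, mutex passed by reference
  Lock_t lock(m_PendingMutex);
  ```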

- ReleasableLock is gone.  Instead there are now some llarp::util helper
  methods to obtain unique and/or shared locks:
    - `auto lock = util::unique_lock(mutex);` gets an RAII-but-also
      unlockable object (std::unique_lock<T>, with T inferred from
      `mutex`).
    - `auto lock = util::shared_lock(mutex);` gets an RAII shared (i.e.
      "reader") lock of the mutex.
    - `auto lock = util::unique_locks(mutex1, mutex2, mutex3);` can be
      used to atomically lock multiple mutexes at once (returning a
      tuple of the locks).
  These are templated on the mutex type, which makes them a bit more flexible
  than using a concrete type: they can be used for any type of lockable
  mutex, not only util::Mutex.  (Some of the code here uses them for
  getting locks around a std::mutex).  Until C++17, using the RAII types
  is painfully verbose:

  ```C++
  // pre-C++17 - needing to figure out the mutex type here is annoying:
  std::unique_lock<util::Mutex> lock(mutex);
  // pre-C++17 and even more verbose (but at least the type isn't needed):
  std::unique_lock<decltype(mutex)> lock(mutex);
  // our compromise:
  auto lock = util::unique_lock(mutex);
  // C++17:
  std::unique_lock lock(mutex);
  ```

  All of these functions will also warn (under gcc or clang) if you
  discard the return value.  You can also do fancy things like
  `auto l = util::unique_lock(mutex, std::adopt_lock)` (which lets a
  lock take over an already-locked mutex).
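  For reference, here is a minimal sketch of how such helpers can be
  written in C++14 (illustrative only, not the exact lokinet code; the
  LOCK_NODISCARD macro is invented for this sketch):

  ```C++
  #include <mutex>
  #include <shared_mutex>
  #include <tuple>
  #include <utility>

  namespace llarp { namespace util {

  // gcc/clang-only "warn if the return value is discarded" attribute;
  // [[nodiscard]] would require C++17.
  #if defined(__GNUC__) || defined(__clang__)
  #define LOCK_NODISCARD __attribute__((warn_unused_result))
  #else
  #define LOCK_NODISCARD
  #endif

  // Exclusive RAII lock; extra arguments (e.g. std::adopt_lock) are
  // forwarded to std::unique_lock's constructor.
  template <typename Mutex, typename... Args>
  LOCK_NODISCARD std::unique_lock<Mutex>
  unique_lock(Mutex& m, Args&&... args)
  {
    return std::unique_lock<Mutex>(m, std::forward<Args>(args)...);
  }

  // Shared ("reader") RAII lock.
  template <typename Mutex, typename... Args>
  LOCK_NODISCARD std::shared_lock<Mutex>
  shared_lock(Mutex& m, Args&&... args)
  {
    return std::shared_lock<Mutex>(m, std::forward<Args>(args)...);
  }

  // Lock several mutexes at once without deadlocking (std::lock), then
  // hand each one to its own unique_lock via std::adopt_lock.
  template <typename... Mutex>
  LOCK_NODISCARD std::tuple<std::unique_lock<Mutex>...>
  unique_locks(Mutex&... m)
  {
    std::lock(m...);
    return std::make_tuple(std::unique_lock<Mutex>(m, std::adopt_lock)...);
  }

  }}  // namespace llarp::util
  ```

  Because the extra constructor arguments are simply forwarded, the
  `std::adopt_lock` usage above needs no special handling.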

- metrics code is gone, which also removes a big pile of code that was
  only used by metrics:
  - llarp::util::Scheduler
  - llarp::thread::TimerQueue
  - llarp::util::Stopwatch
pull/1122/head
Jason Rhinelander
parent 9c0f230dbf
commit b4440094b0

@ -3,7 +3,6 @@
#include <util/logging/logger.hpp>
#include <util/logging/ostream_logger.hpp>
#include <absl/synchronization/mutex.h>
#include <cxxopts.hpp>
#include <string>
#include <vector>
@ -110,10 +109,6 @@ namespace
int
main(int argc, char* argv[])
{
#ifdef LOKINET_DEBUG
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
#endif
// clang-format off
cxxopts::Options options(
"lokinetctl",

@ -102,10 +102,6 @@ main(int argc, char *argv[])
// SetUnhandledExceptionFilter(win32_signal_handler);
#endif
#ifdef LOKINET_DEBUG
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
#endif
// clang-format off
cxxopts::Options options(
"lokinet",

@ -23,38 +23,19 @@ namespace llarp
struct Config;
struct Crypto;
struct CryptoManager;
struct MetricsConfig;
struct RouterContact;
namespace thread
{
class ThreadPool;
}
namespace metrics
{
class DefaultManagerGuard;
class PublisherScheduler;
} // namespace metrics
namespace thread
{
class Scheduler;
}
struct Context
{
/// get context from main pointer
static Context *
Get(llarp_main *);
Context();
~Context();
// These come first, in this order.
// This ensures we get metric collection on shutdown
std::unique_ptr< thread::Scheduler > m_scheduler;
std::unique_ptr< metrics::DefaultManagerGuard > m_metricsManager;
std::unique_ptr< metrics::PublisherScheduler > m_metricsPublisher;
Context() = default;
std::unique_ptr< Crypto > crypto;
std::unique_ptr< CryptoManager > cryptoManager;
@ -123,9 +104,6 @@ namespace llarp
bool
ReloadConfig();
void
setupMetrics(const MetricsConfig &metricsConfig);
std::string configfile;
std::string pidfile;
std::unique_ptr< std::promise< void > > closeWaiter;

@ -36,25 +36,16 @@ set(LIB_UTIL_SRC
util/meta/object.cpp
util/meta/traits.cpp
util/meta/variant.cpp
util/metrics/core.cpp
util/metrics/json_publisher.cpp
util/metrics/metrics.cpp
util/metrics/metrictank_publisher.cpp
util/metrics/stream_publisher.cpp
util/metrics/types.cpp
util/printer.cpp
util/status.cpp
util/stopwatch.cpp
util/str.cpp
util/string_view.cpp
util/thread/logic.cpp
util/thread/queue_manager.cpp
util/thread/queue.cpp
util/thread/scheduler.cpp
util/thread/thread_pool.cpp
util/thread/threading.cpp
util/thread/threadpool.cpp
util/thread/timerqueue.cpp
util/time.cpp
util/types.cpp
)
@ -70,7 +61,7 @@ endif()
target_link_libraries(${UTIL_LIB} PUBLIC ${CRYPTOGRAPHY_LIB} ${LOG_LIB} ${CURL_LIBRARIES})
target_link_libraries(${UTIL_LIB} PUBLIC
absl::synchronization absl::flat_hash_map absl::container
absl::time absl::hash
nlohmann_json::nlohmann_json
ghc_filesystem
optional-lite
@ -78,7 +69,7 @@ target_link_libraries(${UTIL_LIB} PUBLIC
# cut back on fluff
if (NOT WIN32)
target_link_libraries(${UTIL_LIB} PUBLIC absl::variant absl::strings)
target_link_libraries(${UTIL_LIB} PUBLIC absl::strings)
endif(NOT WIN32)
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")

@ -9,7 +9,6 @@
#include <util/logging/logger_syslog.hpp>
#include <util/logging/logger.hpp>
#include <util/mem.hpp>
#include <util/meta/memfn.hpp>
#include <util/str.hpp>
#include <util/lokinet_init.h>

@ -11,12 +11,6 @@
#include <router/router.hpp>
#include <service/context.hpp>
#include <util/logging/logger.h>
#include <util/meta/memfn.hpp>
#include <util/metrics/json_publisher.hpp>
#include <util/metrics/metrics.hpp>
#include <util/metrics/metrictank_publisher.hpp>
#include <util/metrics/stream_publisher.hpp>
#include <util/thread/scheduler.hpp>
#include <absl/strings/str_split.h>
#include <cxxopts.hpp>
@ -28,14 +22,6 @@
namespace llarp
{
Context::Context() = default;
Context::~Context()
{
if(m_scheduler)
m_scheduler->stop();
}
bool
Context::CallSafe(std::function< void(void) > f)
{
@ -73,90 +59,9 @@ namespace llarp
nodedb_dir = config->netdb.nodedbDir();
if(!config->metrics.disableMetrics)
{
auto &metricsConfig = config->metrics;
auto &tags = metricsConfig.metricTags;
tags["netid"] = config->router.netId();
tags["nickname"] = config->router.nickname();
setupMetrics(metricsConfig);
if(!config->metrics.disableMetricLogs)
{
m_metricsManager->instance()->addGlobalPublisher(
std::make_shared< metrics::StreamPublisher >(std::cerr));
}
}
return true;
}
void
Context::setupMetrics(const MetricsConfig &metricsConfig)
{
if(!m_scheduler)
{
m_scheduler = std::make_unique< thread::Scheduler >();
}
if(!m_metricsManager)
{
m_metricsManager = std::make_unique< metrics::DefaultManagerGuard >();
}
if(!m_metricsPublisher)
{
m_metricsPublisher = std::make_unique< metrics::PublisherScheduler >(
*m_scheduler, m_metricsManager->instance());
}
if(!metricsConfig.jsonMetricsPath.native().empty())
{
m_metricsManager->instance()->addGlobalPublisher(
std::make_shared< metrics::JsonPublisher >(
std::bind(&metrics::JsonPublisher::directoryPublisher,
std::placeholders::_1, metricsConfig.jsonMetricsPath)));
}
if(!metricsConfig.metricTankHost.empty())
{
if(std::getenv("LOKINET_ENABLE_METRIC_TANK"))
{
static std::string WARNING = R"(
__ ___ ____ _ _ ___ _ _ ____
\ \ / / \ | _ \| \ | |_ _| \ | |/ ___|
\ \ /\ / / _ \ | |_) | \| || || \| | | _
\ V V / ___ \| _ <| |\ || || |\ | |_| |
\_/\_/_/ \_\_| \_\_| \_|___|_| \_|\____|
This Lokinet session is not private!!
Sending connection metrics to metrictank!!
__ ___ ____ _ _ ___ _ _ ____
\ \ / / \ | _ \| \ | |_ _| \ | |/ ___|
\ \ /\ / / _ \ | |_) | \| || || \| | | _
\ V V / ___ \| _ <| |\ || || |\ | |_| |
\_/\_/_/ \_\_| \_\_| \_|___|_| \_|\____|
)";
std::cerr << WARNING << '\n';
std::pair< std::string, std::string > split =
absl::StrSplit(metricsConfig.metricTankHost, ':');
m_metricsManager->instance()->addGlobalPublisher(
std::make_shared< metrics::MetricTankPublisher >(
metricsConfig.metricTags, split.first, stoi(split.second)));
}
else
{
std::cerr << "metrictank host specified, but "
"LOKINET_ENABLE_METRIC_TANK not set, skipping\n";
}
}
m_metricsPublisher->setDefault(absl::Seconds(30));
m_scheduler->start();
}
void
Context::SetPIDFile(const std::string &fname)
{

@ -2,7 +2,6 @@
#include <iwp/linklayer.hpp>
#include <memory>
#include <router/abstractrouter.hpp>
#include <util/meta/memfn.hpp>
namespace llarp
{

@ -48,7 +48,7 @@ namespace llarp
bool isNewSession = false;
if(itr == m_AuthedAddrs.end())
{
ACQUIRE_LOCK(Lock_t lock, m_PendingMutex);
Lock_t lock(m_PendingMutex);
if(m_Pending.count(from) == 0)
{
if(not permitInbound)
@ -60,7 +60,7 @@ namespace llarp
}
else
{
ACQUIRE_LOCK(Lock_t lock, m_AuthedLinksMutex);
Lock_t lock(m_AuthedLinksMutex);
auto range = m_AuthedLinks.equal_range(itr->second);
session = range.first->second;
}

@ -76,7 +76,7 @@ namespace llarp
void
LinkManager::AddLink(LinkLayer_ptr link, bool inbound)
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
if(inbound)
{
@ -128,7 +128,7 @@ namespace llarp
return;
}
util::Lock l(&_mutex);
util::Lock l(_mutex);
LogInfo("stopping links");
stopping = true;
@ -145,7 +145,7 @@ namespace llarp
if(stopping)
return;
util::Lock l(&_mutex);
util::Lock l(_mutex);
m_PersistingSessions[remote] =
std::max(until, m_PersistingSessions[remote]);
@ -297,7 +297,7 @@ namespace llarp
std::vector< RouterID > sessionsNeeded;
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
auto itr = m_PersistingSessions.begin();
while(itr != m_PersistingSessions.end())

@ -38,7 +38,7 @@ namespace llarp
bool
ILinkLayer::HasSessionTo(const RouterID& id)
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
return m_AuthedLinks.find(id) != m_AuthedLinks.end();
}
@ -48,7 +48,7 @@ namespace llarp
{
std::vector< std::shared_ptr< ILinkSession > > sessions;
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
if(m_AuthedLinks.size() == 0)
return;
const size_t sz = randint() % m_AuthedLinks.size();
@ -84,7 +84,7 @@ namespace llarp
{
std::shared_ptr< ILinkSession > session;
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
auto itr = m_AuthedLinks.find(pk);
if(itr == m_AuthedLinks.end())
return false;
@ -98,7 +98,7 @@ namespace llarp
{
std::vector< std::shared_ptr< ILinkSession > > sessions;
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
auto itr = m_AuthedLinks.begin();
while(itr != m_AuthedLinks.end())
{
@ -136,7 +136,7 @@ namespace llarp
std::vector< std::shared_ptr< ILinkSession > > closedPending;
auto _now = Now();
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
auto itr = m_AuthedLinks.begin();
while(itr != m_AuthedLinks.end())
{
@ -156,7 +156,7 @@ namespace llarp
}
}
{
ACQUIRE_LOCK(Lock_t l, m_PendingMutex);
Lock_t l(m_PendingMutex);
auto itr = m_Pending.begin();
while(itr != m_Pending.end())
@ -176,7 +176,7 @@ namespace llarp
}
}
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
for(const auto& r : closedSessions)
{
if(m_AuthedLinks.count(r) == 0)
@ -196,8 +196,8 @@ namespace llarp
bool
ILinkLayer::MapAddr(const RouterID& pk, ILinkSession* s)
{
ACQUIRE_LOCK(Lock_t l_authed, m_AuthedLinksMutex);
ACQUIRE_LOCK(Lock_t l_pending, m_PendingMutex);
Lock_t l_authed(m_AuthedLinksMutex);
Lock_t l_pending(m_PendingMutex);
llarp::Addr addr = s->GetRemoteEndpoint();
auto itr = m_Pending.find(addr);
if(itr != m_Pending.end())
@ -237,7 +237,7 @@ namespace llarp
std::vector< util::StatusObject > pending, established;
{
ACQUIRE_LOCK(Lock_t l, m_PendingMutex);
Lock_t l(m_PendingMutex);
std::transform(m_Pending.cbegin(), m_Pending.cend(),
std::back_inserter(pending),
[](const auto& item) -> util::StatusObject {
@ -245,7 +245,7 @@ namespace llarp
});
}
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
std::transform(m_AuthedLinks.cbegin(), m_AuthedLinks.cend(),
std::back_inserter(established),
[](const auto& item) -> util::StatusObject {
@ -265,7 +265,7 @@ namespace llarp
ILinkLayer::TryEstablishTo(RouterContact rc)
{
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
if(m_AuthedLinks.count(rc.pubkey) >= MaxSessionsPerKey)
return false;
}
@ -274,7 +274,7 @@ namespace llarp
return false;
const llarp::Addr addr(to);
{
ACQUIRE_LOCK(Lock_t l, m_PendingMutex);
Lock_t l(m_PendingMutex);
if(m_Pending.count(addr) >= MaxSessionsPerKey)
return false;
}
@ -301,7 +301,7 @@ namespace llarp
ILinkLayer::Tick(llarp_time_t now)
{
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
auto itr = m_AuthedLinks.begin();
while(itr != m_AuthedLinks.end())
{
@ -311,7 +311,7 @@ namespace llarp
}
{
ACQUIRE_LOCK(Lock_t l, m_PendingMutex);
Lock_t l(m_PendingMutex);
auto itr = m_Pending.begin();
while(itr != m_Pending.end())
{
@ -338,7 +338,7 @@ namespace llarp
if(m_Logic && tick_id)
m_Logic->remove_call(tick_id);
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
auto itr = m_AuthedLinks.begin();
while(itr != m_AuthedLinks.end())
{
@ -347,7 +347,7 @@ namespace llarp
}
}
{
ACQUIRE_LOCK(Lock_t l, m_PendingMutex);
Lock_t l(m_PendingMutex);
auto itr = m_Pending.begin();
while(itr != m_Pending.end())
{
@ -362,7 +362,7 @@ namespace llarp
{
static constexpr llarp_time_t CloseGraceWindow = 500;
const auto now = Now();
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
RouterID r = remote;
llarp::LogInfo("Closing all to ", r);
auto range = m_AuthedLinks.equal_range(r);
@ -379,7 +379,7 @@ namespace llarp
void
ILinkLayer::KeepAliveSessionTo(const RouterID& remote)
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
auto range = m_AuthedLinks.equal_range(remote);
auto itr = range.first;
while(itr != range.second)
@ -396,7 +396,7 @@ namespace llarp
{
std::shared_ptr< ILinkSession > s;
{
ACQUIRE_LOCK(Lock_t l, m_AuthedLinksMutex);
Lock_t l(m_AuthedLinksMutex);
auto range = m_AuthedLinks.equal_range(remote);
auto itr = range.first;
// pick lowest backlog session
@ -445,7 +445,7 @@ namespace llarp
ILinkLayer::PutSession(const std::shared_ptr< ILinkSession >& s)
{
static constexpr size_t MaxSessionsPerEndpoint = 5;
ACQUIRE_LOCK(Lock_t lock, m_PendingMutex);
Lock_t lock(m_PendingMutex);
llarp::Addr addr = s->GetRemoteEndpoint();
if(m_Pending.count(addr) >= MaxSessionsPerEndpoint)
return false;

@ -74,12 +74,11 @@ namespace llarp
void
ForEachSession(std::function< void(const ILinkSession*) > visit,
bool randomize = false) const
LOCKS_EXCLUDED(m_AuthedLinksMutex);
bool randomize = false) const EXCLUDES(m_AuthedLinksMutex);
void
ForEachSession(std::function< void(ILinkSession*) > visit)
LOCKS_EXCLUDED(m_AuthedLinksMutex);
EXCLUDES(m_AuthedLinksMutex);
static void
udp_tick(llarp_udp_io* udp);
@ -120,7 +119,7 @@ namespace llarp
Name() const = 0;
util::StatusObject
ExtractStatus() const LOCKS_EXCLUDED(m_AuthedLinksMutex);
ExtractStatus() const EXCLUDES(m_AuthedLinksMutex);
void
CloseSessionTo(const RouterID& remote);
@ -138,7 +137,7 @@ namespace llarp
bool
VisitSessionByPubkey(const RouterID& pk,
std::function< bool(ILinkSession*) > visit)
LOCKS_EXCLUDED(m_AuthedLinksMutex);
EXCLUDES(m_AuthedLinksMutex);
virtual uint16_t
Rank() const = 0;
@ -196,13 +195,13 @@ namespace llarp
/// called by link session to remove a pending session who is timed out
// void
// RemovePending(ILinkSession* s) LOCKS_EXCLUDED(m_PendingMutex);
// RemovePending(ILinkSession* s) EXCLUDES(m_PendingMutex);
/// count the number of sessions that are yet to be fully connected
size_t
NumberOfPendingSessions() const
{
ACQUIRE_LOCK(Lock_t lock, m_PendingMutex);
Lock_t lock(m_PendingMutex);
return m_Pending.size();
}

@ -10,7 +10,6 @@
#include <router_contact.hpp>
#include <util/buffer.hpp>
#include <util/logging/logger.hpp>
#include <util/metrics/metrics.hpp>
#include <memory>
@ -69,12 +68,10 @@ namespace llarp
}
// create the message to parse based off message type
llarp::LogDebug("inbound message ", *strbuf.cur);
bool isLIM = false;
switch(*strbuf.cur)
{
case 'i':
msg = &holder->i;
isLIM = true;
msg = &holder->i;
break;
case 'd':
msg = &holder->d;
@ -98,12 +95,6 @@ namespace llarp
return false;
}
if(!isLIM)
{
metrics::integerTick(msg->Name(), "RX", 1, "id",
RouterID(from->GetPubKey()).ToString());
}
msg->session = from;
firstkey = false;
return true;

@ -43,14 +43,14 @@ llarp_nodedb::Remove(const llarp::RouterID &pk)
void
llarp_nodedb::Clear()
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
entries.clear();
}
bool
llarp_nodedb::Get(const llarp::RouterID &pk, llarp::RouterContact &result)
{
llarp::util::Lock l(&access);
llarp::util::Lock l(access);
auto itr = entries.find(pk);
if(itr == entries.end())
return false;
@ -71,7 +71,7 @@ llarp_nodedb::RemoveIf(
{
std::set< std::string > files;
{
llarp::util::Lock l(&access);
llarp::util::Lock l(access);
auto itr = entries.begin();
while(itr != entries.end())
{
@ -91,7 +91,7 @@ llarp_nodedb::RemoveIf(
bool
llarp_nodedb::Has(const llarp::RouterID &pk)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
return entries.find(pk) != entries.end();
}
@ -118,7 +118,7 @@ std::vector< llarp::RouterContact >
llarp_nodedb::FindClosestTo(const llarp::dht::Key_t &location,
uint32_t numRouters)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
std::vector< const llarp::RouterContact * > all;
all.reserve(entries.size());
@ -180,7 +180,7 @@ llarp_nodedb::UpdateAsyncIfNewer(llarp::RouterContact rc,
std::shared_ptr< llarp::Logic > logic,
std::function< void(void) > completionHandler)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
auto itr = entries.find(rc.pubkey);
if(itr == entries.end() || itr->second.rc.OtherIsNewer(rc))
{
@ -201,7 +201,7 @@ llarp_nodedb::UpdateAsyncIfNewer(llarp::RouterContact rc,
bool
llarp_nodedb::Insert(const llarp::RouterContact &rc)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
auto itr = entries.find(rc.pubkey.as_array());
if(itr != entries.end())
entries.erase(itr);
@ -241,7 +241,7 @@ void
llarp_nodedb::SaveAll()
{
std::array< byte_t, MAX_RC_SIZE > tmp;
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
for(const auto &item : entries)
{
llarp_buffer_t buf(tmp);
@ -307,7 +307,7 @@ llarp_nodedb::loadfile(const fs::path &fpath)
return false;
}
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
entries.emplace(rc.pubkey.as_array(), rc);
}
return true;
@ -316,7 +316,7 @@ llarp_nodedb::loadfile(const fs::path &fpath)
void
llarp_nodedb::visit(std::function< bool(const llarp::RouterContact &) > visit)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
auto itr = entries.begin();
while(itr != entries.end())
{
@ -331,7 +331,7 @@ llarp_nodedb::VisitInsertedBefore(
std::function< void(const llarp::RouterContact &) > visit,
llarp_time_t insertedAfter)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
auto itr = entries.begin();
while(itr != entries.end())
{
@ -487,14 +487,14 @@ llarp_nodedb::LoadAll()
size_t
llarp_nodedb::num_loaded() const
{
absl::ReaderMutexLock l(&access);
auto l = llarp::util::shared_lock(access);
return entries.size();
}
bool
llarp_nodedb::select_random_exit(llarp::RouterContact &result)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
const auto sz = entries.size();
auto itr = entries.begin();
if(sz < 3)
@ -529,7 +529,7 @@ bool
llarp_nodedb::select_random_hop(const llarp::RouterContact &prev,
llarp::RouterContact &result, size_t N)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
/// checking for "guard" status for N = 0 is done by caller inside of
/// pathbuilder's scope
size_t sz = entries.size();
@ -575,7 +575,7 @@ bool
llarp_nodedb::select_random_hop_excluding(
llarp::RouterContact &result, const std::set< llarp::RouterID > &exclude)
{
llarp::util::Lock lock(&access);
llarp::util::Lock lock(access);
/// checking for "guard" status for N = 0 is done by caller inside of
/// pathbuilder's scope
const size_t sz = entries.size();

@ -6,10 +6,9 @@
#include <util/common.hpp>
#include <util/fs.hpp>
#include <util/thread/threading.hpp>
#include <util/thread/annotations.hpp>
#include <dht/key.hpp>
#include <absl/base/thread_annotations.h>
#include <set>
#include <utility>
@ -86,28 +85,27 @@ struct llarp_nodedb
ShouldSaveToDisk(llarp_time_t now = 0) const;
bool
Remove(const llarp::RouterID &pk) LOCKS_EXCLUDED(access);
Remove(const llarp::RouterID &pk) EXCLUDES(access);
void
RemoveIf(std::function< bool(const llarp::RouterContact &) > filter)
LOCKS_EXCLUDED(access);
EXCLUDES(access);
void
Clear() LOCKS_EXCLUDED(access);
Clear() EXCLUDES(access);
bool
Get(const llarp::RouterID &pk, llarp::RouterContact &result)
LOCKS_EXCLUDED(access);
Get(const llarp::RouterID &pk, llarp::RouterContact &result) EXCLUDES(access);
bool
Has(const llarp::RouterID &pk) LOCKS_EXCLUDED(access);
Has(const llarp::RouterID &pk) EXCLUDES(access);
std::string
getRCFilePath(const llarp::RouterID &pubkey) const;
/// insert without writing to disk
bool
Insert(const llarp::RouterContact &rc) LOCKS_EXCLUDED(access);
Insert(const llarp::RouterContact &rc) EXCLUDES(access);
/// invokes Insert() asynchronously with an optional completion
/// callback
@ -123,7 +121,7 @@ struct llarp_nodedb
UpdateAsyncIfNewer(llarp::RouterContact rc,
std::shared_ptr< llarp::Logic > l = nullptr,
std::function< void(void) > completionHandler = nullptr)
LOCKS_EXCLUDED(access);
EXCLUDES(access);
ssize_t
Load(const fs::path &path);
@ -135,11 +133,11 @@ struct llarp_nodedb
AsyncFlushToDisk();
bool
loadfile(const fs::path &fpath) LOCKS_EXCLUDED(access);
loadfile(const fs::path &fpath) EXCLUDES(access);
void
visit(std::function< bool(const llarp::RouterContact &) > visit)
LOCKS_EXCLUDED(access);
EXCLUDES(access);
void
set_dir(const char *dir);
@ -153,32 +151,31 @@ struct llarp_nodedb
/// visit all entries inserted into nodedb cache before a timestamp
void
VisitInsertedBefore(std::function< void(const llarp::RouterContact &) > visit,
llarp_time_t insertedAfter) LOCKS_EXCLUDED(access);
llarp_time_t insertedAfter) EXCLUDES(access);
void
RemoveStaleRCs(const std::set< llarp::RouterID > &keep, llarp_time_t cutoff);
size_t
num_loaded() const LOCKS_EXCLUDED(access);
num_loaded() const EXCLUDES(access);
bool
select_random_exit(llarp::RouterContact &rc) LOCKS_EXCLUDED(access);
select_random_exit(llarp::RouterContact &rc) EXCLUDES(access);
bool
select_random_hop(const llarp::RouterContact &prev,
llarp::RouterContact &result, size_t N)
LOCKS_EXCLUDED(access);
llarp::RouterContact &result, size_t N) EXCLUDES(access);
bool
select_random_hop_excluding(llarp::RouterContact &result,
const std::set< llarp::RouterID > &exclude)
LOCKS_EXCLUDED(access);
EXCLUDES(access);
static bool
ensure_dir(const char *dir);
void
SaveAll() LOCKS_EXCLUDED(access);
SaveAll() EXCLUDES(access);
};
/// struct for async rc verification

@ -105,7 +105,7 @@ namespace llarp
HopHandler_ptr
MapGet(Map_t& map, const Key_t& k, CheckValue_t check, GetFunc_t get)
{
Lock_t lock(&map.first);
Lock_t lock(map.first);
auto range = map.second.equal_range(k);
for(auto i = range.first; i != range.second; ++i)
{
@ -120,7 +120,7 @@ namespace llarp
bool
MapHas(Map_t& map, const Key_t& k, CheckValue_t check)
{
Lock_t lock(&map.first);
Lock_t lock(map.first);
auto range = map.second.equal_range(k);
for(auto i = range.first; i != range.second; ++i)
{
@ -135,7 +135,7 @@ namespace llarp
void
MapPut(Map_t& map, const Key_t& k, const Value_t& v)
{
Lock_t lock(&map.first);
Lock_t lock(map.first);
map.second.emplace(k, v);
}
@ -168,8 +168,8 @@ namespace llarp
PathContext::AddOwnPath(PathSet_ptr set, Path_ptr path)
{
set->AddPath(path);
MapPut< SyncOwnedPathsMap_t::Lock_t >(m_OurPaths, path->TXID(), path);
MapPut< SyncOwnedPathsMap_t::Lock_t >(m_OurPaths, path->RXID(), path);
MapPut< util::Lock >(m_OurPaths, path->TXID(), path);
MapPut< util::Lock >(m_OurPaths, path->RXID(), path);
}
bool
@ -185,7 +185,7 @@ namespace llarp
HopHandler_ptr
PathContext::GetByUpstream(const RouterID& remote, const PathID_t& id)
{
auto own = MapGet< SyncOwnedPathsMap_t::Lock_t >(
auto own = MapGet< util::Lock >(
m_OurPaths, id,
[](const Path_ptr) -> bool {
// TODO: is this right?
@ -209,7 +209,7 @@ namespace llarp
PathContext::TransitHopPreviousIsRouter(const PathID_t& path,
const RouterID& otherRouter)
{
SyncTransitMap_t::Lock_t lock(&m_TransitPaths.first);
SyncTransitMap_t::Lock_t lock(m_TransitPaths.first);
auto itr = m_TransitPaths.second.find(path);
if(itr == m_TransitPaths.second.end())
return false;
@ -233,7 +233,7 @@ namespace llarp
PathContext::GetLocalPathSet(const PathID_t& id)
{
auto& map = m_OurPaths;
SyncOwnedPathsMap_t::Lock_t lock(&map.first);
util::Lock lock(map.first);
auto itr = map.second.find(id);
if(itr != map.second.end())
{
@ -260,7 +260,7 @@ namespace llarp
const RouterID us(OurRouterID());
auto& map = m_TransitPaths;
{
SyncTransitMap_t::Lock_t lock(&map.first);
SyncTransitMap_t::Lock_t lock(map.first);
auto range = map.second.equal_range(id);
for(auto i = range.first; i != range.second; ++i)
{
@ -300,7 +300,7 @@ namespace llarp
m_PathLimits.Decay(now);
{
SyncTransitMap_t::Lock_t lock(&m_TransitPaths.first);
SyncTransitMap_t::Lock_t lock(m_TransitPaths.first);
auto& map = m_TransitPaths.second;
auto itr = map.begin();
while(itr != map.end())
@ -315,7 +315,7 @@ namespace llarp
}
}
{
SyncOwnedPathsMap_t::Lock_t lock(&m_OurPaths.first);
util::Lock lock(m_OurPaths.first);
auto& map = m_OurPaths.second;
auto itr = map.begin();
while(itr != map.end())
@ -344,7 +344,7 @@ namespace llarp
const RouterID us(OurRouterID());
auto& map = m_TransitPaths;
{
SyncTransitMap_t::Lock_t lock(&map.first);
SyncTransitMap_t::Lock_t lock(map.first);
auto range = map.second.equal_range(id);
for(auto i = range.first; i != range.second; ++i)
{

@ -122,9 +122,9 @@ namespace llarp
void
ForEach(std::function< void(const TransitHop_ptr&) > visit)
LOCKS_EXCLUDED(first)
EXCLUDES(first)
{
Lock_t lock(&first);
Lock_t lock(first);
for(const auto& item : second)
visit(item.second);
}
@ -136,15 +136,13 @@ namespace llarp
struct SyncOwnedPathsMap_t
{
using Mutex_t = util::Mutex;
using Lock_t = util::Lock;
Mutex_t first; // protects second
util::Mutex first; // protects second
OwnedPathsMap_t second GUARDED_BY(first);
void
ForEach(std::function< void(const Path_ptr&) > visit)
{
Lock_t lock(&first);
util::Lock lock(first);
for(const auto& item : second)
visit(item.second);
}

@ -7,7 +7,6 @@
#include <profiling.hpp>
#include <router/abstractrouter.hpp>
#include <util/buffer.hpp>
#include <util/meta/memfn.hpp>
#include <util/thread/logic.hpp>
#include <functional>

@ -27,7 +27,7 @@ namespace llarp
bool
PathSet::ShouldBuildMoreForRoles(llarp_time_t now, PathRole roles) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
const size_t required = MinRequiredForRoles(roles);
size_t has = 0;
for(const auto& item : m_Paths)
@ -52,7 +52,7 @@ namespace llarp
PathSet::NumPathsExistingAt(llarp_time_t futureTime) const
{
size_t num = 0;
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
for(const auto& item : m_Paths)
{
if(item.second->IsReady() && !item.second->Expired(futureTime))
@ -65,7 +65,7 @@ namespace llarp
PathSet::TickPaths(AbstractRouter* r)
{
const auto now = llarp::time_now_ms();
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
for(auto& item : m_Paths)
{
item.second->Tick(now, r);
@ -75,7 +75,7 @@ namespace llarp
void
PathSet::ExpirePaths(llarp_time_t now, AbstractRouter* router)
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
if(m_Paths.size() == 0)
return;
auto itr = m_Paths.begin();
@ -95,7 +95,7 @@ namespace llarp
Path_ptr
PathSet::GetEstablishedPathClosestTo(RouterID id, PathRole roles) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
Path_ptr path = nullptr;
AlignedBuffer< 32 > dist;
AlignedBuffer< 32 > to = id;
@ -119,7 +119,7 @@ namespace llarp
Path_ptr
PathSet::GetNewestPathByRouter(RouterID id, PathRole roles) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
Path_ptr chosen = nullptr;
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
@ -142,7 +142,7 @@ namespace llarp
Path_ptr
PathSet::GetPathByRouter(RouterID id, PathRole roles) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
Path_ptr chosen = nullptr;
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
@ -165,7 +165,7 @@ namespace llarp
Path_ptr
PathSet::GetByEndpointWithID(RouterID ep, PathID_t id) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
{
@ -181,7 +181,7 @@ namespace llarp
Path_ptr
PathSet::GetPathByID(PathID_t id) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
{
@ -195,7 +195,7 @@ namespace llarp
size_t
PathSet::AvailablePaths(PathRole roles) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
size_t count = 0;
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
@ -211,7 +211,7 @@ namespace llarp
size_t
PathSet::NumInStatus(PathStatus st) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
size_t count = 0;
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
@ -226,7 +226,7 @@ namespace llarp
void
PathSet::AddPath(Path_ptr path)
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
const auto upstream = path->Upstream(); // RouterID
const auto RXID = path->RXID(); // PathID
if(not m_Paths.emplace(std::make_pair(upstream, RXID), path).second)
@ -240,14 +240,14 @@ namespace llarp
void
PathSet::RemovePath(Path_ptr path)
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
m_Paths.erase({path->Upstream(), path->RXID()});
}
Path_ptr
PathSet::GetByUpstream(RouterID remote, PathID_t rxid) const
{
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
auto itr = m_Paths.find({remote, rxid});
if(itr == m_Paths.end())
return nullptr;
@ -261,7 +261,7 @@ namespace llarp
{
intros.clear();
size_t count = 0;
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
{
@ -281,7 +281,7 @@ namespace llarp
{
intros.clear();
size_t count = 0;
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
{
@ -350,7 +350,7 @@ namespace llarp
{
intro.Clear();
bool found = false;
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
{
@ -369,7 +369,7 @@ namespace llarp
PathSet::PickRandomEstablishedPath(PathRole roles) const
{
std::vector< Path_ptr > established;
Lock_t l(&m_PathsMutex);
Lock_t l(m_PathsMutex);
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
{

@ -266,7 +266,7 @@ namespace llarp
void
ForEachPath(std::function< void(const Path_ptr&) > visit) const
{
Lock_t lock(&m_PathsMutex);
Lock_t lock(m_PathsMutex);
auto itr = m_Paths.begin();
while(itr != m_Paths.end())
{

@ -118,7 +118,7 @@ namespace llarp
{
if(m_DisableProfiling.load())
return false;
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
auto itr = m_Profiles.find(r);
if(itr == m_Profiles.end())
return false;
@ -130,7 +130,7 @@ namespace llarp
{
if(m_DisableProfiling.load())
return false;
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
auto itr = m_Profiles.find(r);
if(itr == m_Profiles.end())
return false;
@ -142,7 +142,7 @@ namespace llarp
{
if(m_DisableProfiling.load())
return false;
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
auto itr = m_Profiles.find(r);
if(itr == m_Profiles.end())
return false;
@ -152,7 +152,7 @@ namespace llarp
void
Profiling::Tick()
{
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
std::for_each(m_Profiles.begin(), m_Profiles.end(),
[](auto& item) { item.second.Tick(); });
}
@ -160,7 +160,7 @@ namespace llarp
void
Profiling::MarkConnectTimeout(const RouterID& r)
{
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
m_Profiles[r].connectTimeoutCount += 1;
m_Profiles[r].lastUpdated = llarp::time_now_ms();
}
@ -168,7 +168,7 @@ namespace llarp
void
Profiling::MarkConnectSuccess(const RouterID& r)
{
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
m_Profiles[r].connectGoodCount += 1;
m_Profiles[r].lastUpdated = llarp::time_now_ms();
}
@ -176,14 +176,14 @@ namespace llarp
void
Profiling::ClearProfile(const RouterID& r)
{
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
m_Profiles.erase(r);
}
void
Profiling::MarkHopFail(const RouterID& r)
{
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
m_Profiles[r].pathFailCount += 1;
m_Profiles[r].lastUpdated = llarp::time_now_ms();
}
@ -191,7 +191,7 @@ namespace llarp
void
Profiling::MarkPathFail(path::Path* p)
{
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
size_t idx = 0;
for(const auto& hop : p->hops)
{
@ -208,7 +208,7 @@ namespace llarp
void
Profiling::MarkPathSuccess(path::Path* p)
{
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
const auto sz = p->hops.size();
for(const auto& hop : p->hops)
{
@ -220,7 +220,7 @@ namespace llarp
bool
Profiling::Save(const char* fname)
{
absl::ReaderMutexLock lock(&m_ProfilesMutex);
auto lock = util::shared_lock(m_ProfilesMutex);
size_t sz = (m_Profiles.size() * (RouterProfile::MaxSize + 32 + 8)) + 8;
std::vector< byte_t > tmp(sz, 0);
@ -247,7 +247,7 @@ namespace llarp
bool
Profiling::BEncode(llarp_buffer_t* buf) const
{
absl::ReaderMutexLock lock(&m_ProfilesMutex);
auto lock = util::shared_lock(m_ProfilesMutex);
return BEncodeNoLock(buf);
}
@ -284,7 +284,7 @@ namespace llarp
bool
Profiling::Load(const char* fname)
{
lock_t lock(&m_ProfilesMutex);
util::Lock lock(m_ProfilesMutex);
m_Profiles.clear();
if(!BDecodeReadFromFile(fname, *this))
{

@ -52,8 +52,7 @@ namespace llarp
/// generic variant
bool
IsBad(const RouterID& r, uint64_t chances = 8)
LOCKS_EXCLUDED(m_ProfilesMutex);
IsBad(const RouterID& r, uint64_t chances = 8) EXCLUDES(m_ProfilesMutex);
/// check if this router should have paths built over it
bool
@ -63,31 +62,31 @@ namespace llarp
/// check if this router should be connected directly to
bool
IsBadForConnect(const RouterID& r, uint64_t chances = 8)
LOCKS_EXCLUDED(m_ProfilesMutex);
EXCLUDES(m_ProfilesMutex);
void
MarkConnectTimeout(const RouterID& r) LOCKS_EXCLUDED(m_ProfilesMutex);
MarkConnectTimeout(const RouterID& r) EXCLUDES(m_ProfilesMutex);
void
MarkConnectSuccess(const RouterID& r) LOCKS_EXCLUDED(m_ProfilesMutex);
MarkConnectSuccess(const RouterID& r) EXCLUDES(m_ProfilesMutex);
void
MarkPathFail(path::Path* p) LOCKS_EXCLUDED(m_ProfilesMutex);
MarkPathFail(path::Path* p) EXCLUDES(m_ProfilesMutex);
void
MarkPathSuccess(path::Path* p) LOCKS_EXCLUDED(m_ProfilesMutex);
MarkPathSuccess(path::Path* p) EXCLUDES(m_ProfilesMutex);
void
MarkHopFail(const RouterID& r) LOCKS_EXCLUDED(m_ProfilesMutex);
MarkHopFail(const RouterID& r) EXCLUDES(m_ProfilesMutex);
void
ClearProfile(const RouterID& r) LOCKS_EXCLUDED(m_ProfilesMutex);
ClearProfile(const RouterID& r) EXCLUDES(m_ProfilesMutex);
void
Tick() LOCKS_EXCLUDED(m_ProfilesMutex);
Tick() EXCLUDES(m_ProfilesMutex);
bool
BEncode(llarp_buffer_t* buf) const LOCKS_EXCLUDED(m_ProfilesMutex);
BEncode(llarp_buffer_t* buf) const EXCLUDES(m_ProfilesMutex);
bool
DecodeKey(const llarp_buffer_t& k,
@ -95,10 +94,10 @@ namespace llarp
// disabled because we do load -> bencode::BDecodeReadFromFile -> DecodeKey
bool
Load(const char* fname) LOCKS_EXCLUDED(m_ProfilesMutex);
Load(const char* fname) EXCLUDES(m_ProfilesMutex);
bool
Save(const char* fname) LOCKS_EXCLUDED(m_ProfilesMutex);
Save(const char* fname) EXCLUDES(m_ProfilesMutex);
bool
ShouldSave(llarp_time_t now) const;
@ -111,9 +110,7 @@ namespace llarp
private:
bool
BEncodeNoLock(llarp_buffer_t* buf) const
SHARED_LOCKS_REQUIRED(m_ProfilesMutex);
using lock_t = util::Lock;
BEncodeNoLock(llarp_buffer_t* buf) const REQUIRES_SHARED(m_ProfilesMutex);
mutable util::Mutex m_ProfilesMutex; // protects m_Profiles
std::map< RouterID, RouterProfile > m_Profiles GUARDED_BY(m_ProfilesMutex);
llarp_time_t m_LastSave = 0;

@ -47,7 +47,7 @@ namespace llarp
bool shouldCreateSession = false;
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
// create queue for <remote> if it doesn't exist, and get iterator
auto itr_pair =
@ -404,7 +404,7 @@ namespace llarp
{
MessageQueue movedMessages;
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
auto itr = pendingSessionMessageQueues.find(router);
if(itr == pendingSessionMessageQueues.end())

@ -30,7 +30,7 @@ namespace llarp
bool
QueueMessage(const RouterID &remote, const ILinkMessage *msg,
SendStatusHandler callback) override LOCKS_EXCLUDED(_mutex);
SendStatusHandler callback) override EXCLUDES(_mutex);
void
Tick() override;
@ -122,7 +122,7 @@ namespace llarp
void
FinalizeSessionRequest(const RouterID &router, SendStatus status)
LOCKS_EXCLUDED(_mutex);
EXCLUDES(_mutex);
llarp::thread::Queue< MessageQueueEntry > outboundQueue;
llarp::thread::Queue< PathID_t > removedPaths;

@ -68,7 +68,7 @@ namespace llarp
{
if(on_result)
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
auto itr_pair = pendingCallbacks.emplace(router, CallbacksQueue{});
itr_pair.first->second.push_back(on_result);
@ -94,7 +94,7 @@ namespace llarp
{
if(on_result)
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
auto itr_pair = pendingCallbacks.emplace(rc.pubkey, CallbacksQueue{});
itr_pair.first->second.push_back(on_result);
@ -112,7 +112,7 @@ namespace llarp
bool
OutboundSessionMaker::HavePendingSessionTo(const RouterID &router) const
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
return pendingSessions.find(router) != pendingSessions.end();
}
@ -169,7 +169,7 @@ namespace llarp
void
OutboundSessionMaker::DoEstablish(const RouterID &router)
{
util::ReleasableLock l(&_mutex);
auto l = util::unique_lock(_mutex);
auto itr = pendingSessions.find(router);
@ -183,7 +183,7 @@ namespace llarp
{
// TODO: maybe different failure type?
l.Release();
l.unlock();
FinalizeRequest(router, SessionResult::NoLink);
}
}
@ -193,7 +193,7 @@ namespace llarp
const RouterContact &rc)
{
{
util::ReleasableLock l(&_mutex);
auto l = util::unique_lock(_mutex);
// in case other request found RC for this router after this request was
// made
@ -207,7 +207,7 @@ namespace llarp
if(!link)
{
l.Release();
l.unlock();
FinalizeRequest(router, SessionResult::NoLink);
return;
}
@ -230,7 +230,7 @@ namespace llarp
return false;
size_t numPending = 0;
{
util::Lock lock(&_mutex);
util::Lock lock(_mutex);
if(pendingSessions.find(router) == pendingSessions.end())
numPending += pendingSessions.size();
}
@ -300,7 +300,7 @@ namespace llarp
void
OutboundSessionMaker::CreatePendingSession(const RouterID &router)
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
pendingSessions.emplace(router, nullptr);
}
@ -310,7 +310,7 @@ namespace llarp
{
CallbacksQueue movedCallbacks;
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
if(type == SessionResult::Establish)
{
@ -338,7 +338,7 @@ namespace llarp
}
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
pendingSessions.erase(router);
}
}

@ -38,15 +38,15 @@ namespace llarp
void
CreateSessionTo(const RouterID &router, RouterCallback on_result) override
LOCKS_EXCLUDED(_mutex);
EXCLUDES(_mutex);
void
CreateSessionTo(const RouterContact &rc, RouterCallback on_result) override
LOCKS_EXCLUDED(_mutex);
EXCLUDES(_mutex);
bool
HavePendingSessionTo(const RouterID &router) const override
LOCKS_EXCLUDED(_mutex);
EXCLUDES(_mutex);
void
ConnectToRandomRouters(int numDesired) override;
@ -55,8 +55,7 @@ namespace llarp
ExtractStatus() const override;
bool
ShouldConnectTo(const RouterID &router) const override
LOCKS_EXCLUDED(_mutex);
ShouldConnectTo(const RouterID &router) const override EXCLUDES(_mutex);
void
Init(ILinkManager *linkManager, I_RCLookupHandler *rcLookup,
@ -77,11 +76,11 @@ namespace llarp
private:
void
DoEstablish(const RouterID &router) LOCKS_EXCLUDED(_mutex);
DoEstablish(const RouterID &router) EXCLUDES(_mutex);
void
GotRouterContact(const RouterID &router, const RouterContact &rc)
LOCKS_EXCLUDED(_mutex);
EXCLUDES(_mutex);
void
InvalidRouter(const RouterID &router);
@ -97,11 +96,11 @@ namespace llarp
VerifyRC(const RouterContact rc);
void
CreatePendingSession(const RouterID &router) LOCKS_EXCLUDED(_mutex);
CreatePendingSession(const RouterID &router) EXCLUDES(_mutex);
void
FinalizeRequest(const RouterID &router, const SessionResult type)
LOCKS_EXCLUDED(_mutex);
EXCLUDES(_mutex);
mutable util::Mutex _mutex; // protects pendingSessions, pendingCallbacks

@ -6,7 +6,6 @@
#include <crypto/crypto.hpp>
#include <service/context.hpp>
#include <router_contact.hpp>
#include <util/meta/memfn.hpp>
#include <util/types.hpp>
#include <util/thread/threading.hpp>
#include <nodedb.hpp>
@ -24,14 +23,14 @@ namespace llarp
void
RCLookupHandler::AddValidRouter(const RouterID &router)
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
whitelistRouters.insert(router);
}
void
RCLookupHandler::RemoveValidRouter(const RouterID &router)
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
whitelistRouters.erase(router);
}
@ -40,7 +39,7 @@ namespace llarp
{
if(routers.empty())
return;
util::Lock l(&_mutex);
util::Lock l(_mutex);
whitelistRouters.clear();
for(auto &router : routers)
@ -55,7 +54,7 @@ namespace llarp
bool
RCLookupHandler::HaveReceivedWhitelist()
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
return not whitelistRouters.empty();
}
@ -79,7 +78,7 @@ namespace llarp
bool shouldDoLookup = false;
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
auto itr_pair = pendingCallbacks.emplace(router, CallbacksQueue{});
@ -132,7 +131,7 @@ namespace llarp
return false;
}
util::Lock l(&_mutex);
util::Lock l(_mutex);
if(useWhitelist && whitelistRouters.find(remote) == whitelistRouters.end())
{
@ -178,7 +177,7 @@ namespace llarp
bool
RCLookupHandler::GetRandomWhitelistRouter(RouterID &router) const
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
const auto sz = whitelistRouters.size();
auto itr = whitelistRouters.begin();
@ -266,7 +265,7 @@ namespace llarp
{
// if we are using a whitelist look up a few routers we don't have
util::Lock l(&_mutex);
util::Lock l(_mutex);
for(const auto &r : whitelistRouters)
{
if(now > _routerLookupTimes[r] + RerequestInterval
@ -359,7 +358,7 @@ namespace llarp
bool
RCLookupHandler::HavePendingLookup(RouterID remote) const
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
return pendingCallbacks.find(remote) != pendingCallbacks.end();
}
@ -383,7 +382,7 @@ namespace llarp
{
CallbacksQueue movedCallbacks;
{
util::Lock l(&_mutex);
util::Lock l(_mutex);
auto itr = pendingCallbacks.find(router);

@ -32,32 +32,30 @@ namespace llarp
~RCLookupHandler() override = default;
void
AddValidRouter(const RouterID &router) override LOCKS_EXCLUDED(_mutex);
AddValidRouter(const RouterID &router) override EXCLUDES(_mutex);
void
RemoveValidRouter(const RouterID &router) override LOCKS_EXCLUDED(_mutex);
RemoveValidRouter(const RouterID &router) override EXCLUDES(_mutex);
void
SetRouterWhitelist(const std::vector< RouterID > &routers) override
LOCKS_EXCLUDED(_mutex);
EXCLUDES(_mutex);
bool
HaveReceivedWhitelist();
void
GetRC(const RouterID &router, RCRequestCallback callback,
bool forceLookup = false) override LOCKS_EXCLUDED(_mutex);
bool forceLookup = false) override EXCLUDES(_mutex);
bool
RemoteIsAllowed(const RouterID &remote) const override
LOCKS_EXCLUDED(_mutex);
RemoteIsAllowed(const RouterID &remote) const override EXCLUDES(_mutex);
bool
CheckRC(const RouterContact &rc) const override;
bool
GetRandomWhitelistRouter(RouterID &router) const override
LOCKS_EXCLUDED(_mutex);
GetRandomWhitelistRouter(RouterID &router) const override EXCLUDES(_mutex);
bool
CheckRenegotiateValid(RouterContact newrc, RouterContact oldrc) override;
@ -85,14 +83,14 @@ namespace llarp
const std::vector< RouterContact > &results);
bool
HavePendingLookup(RouterID remote) const LOCKS_EXCLUDED(_mutex);
HavePendingLookup(RouterID remote) const EXCLUDES(_mutex);
bool
RemoteInBootstrap(const RouterID &remote) const;
void
FinalizeRequest(const RouterID &router, const RouterContact *const rc,
RCRequestResult result) LOCKS_EXCLUDED(_mutex);
RCRequestResult result) EXCLUDES(_mutex);
mutable util::Mutex _mutex; // protects pendingCallbacks, whitelistRouters

@ -20,7 +20,6 @@
#include <util/logging/logger_syslog.hpp>
#include <util/logging/logger.hpp>
#include <util/meta/memfn.hpp>
#include <util/metrics/metrics.hpp>
#include <util/str.hpp>
#include <ev/ev.hpp>
@ -141,7 +140,7 @@ namespace llarp
return true;
};
absl::ReaderMutexLock l(&nodedb()->access);
auto l = util::shared_lock(nodedb()->access);
return pick_router(nodedb()->entries);
}
@ -780,7 +779,6 @@ namespace llarp
bool
Router::Sign(Signature &sig, const llarp_buffer_t &buf) const
{
metrics::TimerGuard t("Router", "Sign");
return CryptoManager::instance()->sign(sig, identity(), buf);
}

@ -853,7 +853,7 @@ namespace llarp
{
if(msg->proto == eProtocolTrafficV4 || msg->proto == eProtocolTrafficV6)
{
util::Lock l(&m_state->m_InboundTrafficQueueMutex);
util::Lock l(m_state->m_InboundTrafficQueueMutex);
m_state->m_InboundTrafficQueue.emplace(msg);
return true;
}
@ -903,7 +903,7 @@ namespace llarp
return false;
{
LogWarn("invalidating convotag T=", frame.T);
util::Lock lock(&m_state->m_SendQueueMutex);
util::Lock lock(m_state->m_SendQueueMutex);
m_state->m_SendQueue.emplace_back(
std::make_shared< const routing::PathTransferMessage >(f,
frame.F),
@ -1080,7 +1080,7 @@ namespace llarp
for(const auto& item : sessions)
item.second.first->FlushDownstream();
// send downstream traffic to user for hidden service
util::Lock lock(&m_state->m_InboundTrafficQueueMutex);
util::Lock lock(m_state->m_InboundTrafficQueueMutex);
while(not queue.empty())
{
const auto& msg = queue.top();
@ -1106,7 +1106,7 @@ namespace llarp
for(const auto& item : sessions)
item.second.first->FlushUpstream();
{
util::Lock lock(&m_state->m_SendQueueMutex);
util::Lock lock(m_state->m_SendQueueMutex);
// send outbound traffic
for(const auto& item : m_state->m_SendQueue)
{
@ -1206,7 +1206,7 @@ namespace llarp
return;
}
util::Lock lock(&self->m_state->m_SendQueueMutex);
util::Lock lock(self->m_state->m_SendQueueMutex);
self->m_state->m_SendQueue.emplace_back(transfer, p);
});
}

@ -28,8 +28,9 @@ namespace llarp
template < typename T, typename GetTime, typename PutTime, typename Compare,
typename GetNow = GetNowSyscall, typename Mutex_t = util::Mutex,
typename Lock_t = util::Lock, llarp_time_t dropMs = 5,
llarp_time_t initialIntervalMs = 100, size_t MaxSize = 1024 >
typename Lock_t = std::lock_guard< Mutex_t >,
llarp_time_t dropMs = 5, llarp_time_t initialIntervalMs = 100,
size_t MaxSize = 1024 >
struct CoDelQueue
{
CoDelQueue(std::string name, PutTime put, GetNow now)
@ -41,7 +42,7 @@ namespace llarp
}
size_t
Size() LOCKS_EXCLUDED(m_QueueMutex)
Size() EXCLUDES(m_QueueMutex)
{
Lock_t lock(m_QueueMutex);
return m_QueueIdx;
@ -50,9 +51,9 @@ namespace llarp
template < typename... Args >
bool
EmplaceIf(std::function< bool(T&) > pred, Args&&... args)
LOCKS_EXCLUDED(m_QueueMutex)
EXCLUDES(m_QueueMutex)
{
Lock_t lock(&m_QueueMutex);
Lock_t lock(m_QueueMutex);
if(m_QueueIdx == MaxSize)
return false;
T* t = &m_Queue[m_QueueIdx];
@ -73,9 +74,9 @@ namespace llarp
template < typename... Args >
void
Emplace(Args&&... args) LOCKS_EXCLUDED(m_QueueMutex)
Emplace(Args&&... args) EXCLUDES(m_QueueMutex)
{
Lock_t lock(&m_QueueMutex);
Lock_t lock(m_QueueMutex);
if(m_QueueIdx == MaxSize)
return;
T* t = &m_Queue[m_QueueIdx];
@ -95,13 +96,13 @@ namespace llarp
template < typename Visit, typename Filter >
void
Process(Visit visitor, Filter f) LOCKS_EXCLUDED(m_QueueMutex)
Process(Visit visitor, Filter f) EXCLUDES(m_QueueMutex)
{
llarp_time_t lowest = std::numeric_limits< llarp_time_t >::max();
if(_getNow() < nextTickAt)
return;
// llarp::LogInfo("CoDelQueue::Process - start at ", start);
Lock_t lock(&m_QueueMutex);
Lock_t lock(m_QueueMutex);
auto start = firstPut;
if(m_QueueIdx == 1)

@ -2,6 +2,7 @@
#define LLARP_OBJECT_HPP
#include <util/thread/threading.hpp>
#include <atomic>
#include <nonstd/optional.hpp>
#include <vector>
@ -197,7 +198,7 @@ namespace llarp
}
Node*
findNode(int32_t handle) const SHARED_LOCKS_REQUIRED(m_mutex)
findNode(int32_t handle) const REQUIRES_SHARED(m_mutex)
{
int32_t index = handle & INDEX_MASK;
@ -226,7 +227,7 @@ namespace llarp
add(const Value& value)
{
int32_t handle;
absl::WriterMutexLock l(&m_mutex);
util::Lock l(m_mutex);
CatalogCleaner< Value > guard(this);
Node* node;
@ -264,7 +265,7 @@ namespace llarp
bool
remove(int32_t handle, Value* value = nullptr)
{
absl::WriterMutexLock l(&m_mutex);
util::Lock l(m_mutex);
Node* node = findNode(handle);
if(!node)
@ -289,7 +290,7 @@ namespace llarp
void
removeAll(std::vector< Value >* output = nullptr)
{
absl::WriterMutexLock l(&m_mutex);
util::Lock l(m_mutex);
for(Node* node : m_nodes)
{
@ -314,7 +315,7 @@ namespace llarp
bool
replace(const Value& newValue, int32_t handle)
{
absl::WriterMutexLock l(&m_mutex);
util::Lock l(m_mutex);
Node* node = findNode(handle);
if(!node)
@ -333,7 +334,7 @@ namespace llarp
nonstd::optional< Value >
find(int32_t handle)
{
absl::ReaderMutexLock l(&m_mutex);
auto l = util::shared_lock(m_mutex);
Node* node = findNode(handle);
if(!node)
@ -356,29 +357,25 @@ namespace llarp
};
template < typename Value >
class SCOPED_LOCKABLE CatalogIterator
class SCOPED_CAPABILITY CatalogIterator
{
const Catalog< Value >* m_catalog;
size_t m_index;
std::shared_lock<util::Mutex> lock;
CatalogIterator(const CatalogIterator&) = delete;
CatalogIterator&
operator=(const CatalogIterator&) = delete;
public:
explicit CatalogIterator(const Catalog< Value >* catalog)
SHARED_LOCK_FUNCTION(m_catalog->m_mutex)
: m_catalog(catalog), m_index(-1)
ACQUIRE_SHARED(catalog->m_mutex)
: m_catalog(catalog), m_index(-1), lock(m_catalog->m_mutex)
{
m_catalog->m_mutex.ReaderLock();
operator++();
}
~CatalogIterator() UNLOCK_FUNCTION()
{
m_catalog->m_mutex.ReaderUnlock();
}
void
operator++() NO_THREAD_SAFETY_ANALYSIS
{
@ -425,7 +422,7 @@ namespace llarp
bool
Catalog< Value >::verify() const
{
absl::WriterMutexLock l(&m_mutex);
util::Lock l(m_mutex);
if(m_nodes.size() < m_size)
{

@ -1,753 +0,0 @@
#include <util/metrics/core.hpp>
#include <iostream>
namespace llarp
{
namespace metrics
{
std::pair< Id, bool >
Registry::insert(string_view category, string_view name)
{
// avoid life time issues, putting strings in the stringmem set
string_view cStr = m_stringmem.emplace(category).first->c_str();
string_view nStr = m_stringmem.emplace(name).first->c_str();
NamedCategory namedCategory(cStr, nStr);
const auto it = m_metrics.find(namedCategory);
if(it != m_metrics.end())
{
return {Id(it->second.get()), false};
}
auto cIt = m_categories.find(cStr);
if(cIt == m_categories.end())
{
auto ptr = std::make_shared< Category >(cStr, m_defaultEnabled);
cIt = m_categories.emplace(cStr, ptr).first;
}
const auto mPtr =
std::make_shared< Description >(cIt->second.get(), nStr);
m_metrics.emplace(namedCategory, mPtr);
return {Id(mPtr.get()), true};
}
Id
Registry::add(string_view category, string_view name)
{
absl::WriterMutexLock l(&m_mutex);
auto result = insert(category, name);
return std::get< 1 >(result) ? std::get< 0 >(result) : Id();
}
Id
Registry::get(string_view category, string_view name)
{
Id result = findId(category, name);
if(result)
{
return result;
}
absl::WriterMutexLock l(&m_mutex);
return std::get< 0 >(insert(category, name));
}
const Category *
Registry::add(string_view category)
{
absl::WriterMutexLock l(&m_mutex);
string_view cStr = m_stringmem.emplace(category).first->c_str();
auto it = m_categories.find(cStr);
if(it == m_categories.end())
{
auto ptr = std::make_shared< Category >(cStr, m_defaultEnabled);
it = m_categories.emplace(cStr, ptr).first;
return it->second.get();
}
return nullptr;
}
const Category *
Registry::get(string_view category)
{
const Category *cPtr = findCategory(category);
if(cPtr)
{
return cPtr;
}
absl::WriterMutexLock l(&m_mutex);
string_view cStr = m_stringmem.emplace(category).first->c_str();
auto it = m_categories.find(cStr);
if(it == m_categories.end())
{
auto ptr = std::make_shared< Category >(cStr, m_defaultEnabled);
it = m_categories.emplace(cStr, ptr).first;
}
return it->second.get();
}
void
Registry::enable(const Category *category, bool value)
{
absl::WriterMutexLock l(&m_mutex);
const_cast< Category * >(category)->enabled(value);
}
void
Registry::enableAll(bool value)
{
absl::WriterMutexLock l(&m_mutex);
if(value == m_defaultEnabled)
{
return;
}
m_defaultEnabled = value;
std::for_each(m_categories.begin(), m_categories.end(),
[&](auto &x) { x.second->enabled(value); });
}
void
Registry::registerContainer(const Category *category,
CategoryContainer &container)
{
absl::WriterMutexLock l(&m_mutex);
if(container.m_category == nullptr)
{
const_cast< Category * >(category)->registerContainer(&container);
}
}
void
Registry::publicationType(const Id &id, Publication::Type type)
{
const_cast< Description * >(id.description())->type(type);
}
void
Registry::setFormat(const Id &id, const Format &format)
{
auto *description = const_cast< Description * >(id.description());
absl::WriterMutexLock l(&m_mutex);
auto fmtPtr = std::make_shared< Format >(format);
for(byte_t i = 0; i < Publication::MaxSize; ++i)
{
auto type = static_cast< Publication::Type >(i);
const FormatSpec *spec = format.specFor(type);
if(spec != nullptr)
{
string_view fmt = m_stringmem.emplace(spec->m_format).first->c_str();
fmtPtr->setSpec(type, FormatSpec(spec->m_scale, fmt));
}
}
description->format(fmtPtr);
}
const Category *
Registry::findCategory(string_view category) const
{
absl::ReaderMutexLock l(&m_mutex);
auto it = m_categories.find(category);
return it == m_categories.end() ? nullptr : it->second.get();
}
Id
Registry::findId(string_view category, string_view name) const
{
absl::ReaderMutexLock l(&m_mutex);
auto it = m_metrics.find(std::make_tuple(category, name));
return it == m_metrics.end() ? Id() : Id(it->second.get());
}
std::vector< const Category * >
Registry::getAll() const
{
absl::ReaderMutexLock l(&m_mutex);
std::vector< const Category * > result;
result.reserve(m_categories.size());
std::transform(m_categories.begin(), m_categories.end(),
std::back_inserter(result),
[](const auto &x) { return x.second.get(); });
return result;
}
struct PublisherHelper
{
using SampleCache = std::map< std::shared_ptr< Publisher >, Sample >;
static void
updateSampleCache(SampleCache &cache,
const std::shared_ptr< Publisher > &publisher,
const SampleGroup< double > &doubleGroup,
const SampleGroup< int > &intGroup,
const absl::Time &timeStamp)
{
auto it = cache.find(publisher);
if(it == cache.end())
{
Sample sample;
sample.sampleTime(timeStamp);
it = cache.emplace(publisher, sample).first;
}
it->second.pushGroup(doubleGroup);
it->second.pushGroup(intGroup);
}
struct CollectResult
{
Records records;
absl::Duration samplePeriod;
};
static CollectResult
collect(Manager &manager, const Category *category,
const absl::Duration &now, bool clear)
EXCLUSIVE_LOCKS_REQUIRED(manager.m_mutex)
{
// Collect records from the repo.
const Records result = clear
? Records(manager.m_doubleRepo.collectAndClear(category),
manager.m_intRepo.collectAndClear(category))
: Records(manager.m_doubleRepo.collect(category),
manager.m_intRepo.collect(category));
// Get the time since last reset, and clear if needed.
auto it = manager.m_resetTimes.find(category);
if(it == manager.m_resetTimes.end())
{
if(clear)
{
manager.m_resetTimes.emplace(category, now);
}
return {result, now - manager.m_createTime};
}
auto tmp = now - it->second;
if(clear)
{
it->second = now;
}
return {result, tmp};
}
template < typename Type >
using RecordBuffer = std::vector<
std::shared_ptr< std::vector< TaggedRecords< Type > > > >;
template < typename CategoryIterator >
static void
publish(Manager &manager, const CategoryIterator &categoriesBegin,
const CategoryIterator &categoriesEnd, bool clear)
{
if(categoriesBegin == categoriesEnd)
{
return;
}
RecordBuffer< double > doubleRecordBuffer;
RecordBuffer< int > intRecordBuffer;
SampleCache sampleCache;
absl::Time timeStamp = absl::Now();
absl::Duration now = absl::Now() - absl::UnixEpoch();
{
// 1.
absl::WriterMutexLock publishGuard(&manager.m_publishLock);
// 2.
absl::WriterMutexLock propertiesGuard(&manager.m_mutex);
// Build the 'sampleCache' by iterating over the categories and
// collecting records for those categories.
for(CategoryIterator catIt = categoriesBegin; catIt != categoriesEnd;
++catIt)
{
if(!(*catIt)->enabled())
{
continue;
}
// Collect the metrics.
auto result = collect(manager, *catIt, now, clear);
const auto &records = result.records;
// If there are no collected records then this category can be
// ignored.
if(records.doubleRecords.empty() && records.intRecords.empty())
{
continue;
}
if(result.samplePeriod == absl::Duration())
{
std::cerr << "Invalid elapsed time interval of 0 for "
"published metrics.";
result.samplePeriod += absl::Nanoseconds(1);
}
// Append the collected records to the buffer of records.
auto dRecords =
std::make_shared< DoubleRecords >(records.doubleRecords);
doubleRecordBuffer.push_back(dRecords);
SampleGroup< double > doubleGroup(
absl::Span< const TaggedRecords< double > >(*dRecords),
result.samplePeriod);
auto iRecords = std::make_shared< IntRecords >(records.intRecords);
intRecordBuffer.push_back(iRecords);
SampleGroup< int > intGroup(
absl::Span< const TaggedRecords< int > >(*iRecords),
result.samplePeriod);
std::for_each(manager.m_publishers.globalBegin(),
manager.m_publishers.globalEnd(),
[&](const auto &ptr) {
updateSampleCache(sampleCache, ptr, doubleGroup,
intGroup, timeStamp);
});
std::for_each(manager.m_publishers.lowerBound(*catIt),
manager.m_publishers.upperBound(*catIt),
[&](const auto &val) {
updateSampleCache(sampleCache, val.second,
doubleGroup, intGroup, timeStamp);
});
}
}
for(auto &entry : sampleCache)
{
Publisher *publisher = entry.first.get();
publisher->publish(entry.second);
}
}
};
Sample
Manager::collectSample(Records &records,
absl::Span< const Category * > categories,
bool clear)
{
absl::Time timeStamp = absl::Now();
absl::Duration now = timeStamp - absl::UnixEpoch();
Sample sample;
sample.sampleTime(timeStamp);
// Use a tuple to hold 'references' to the collected records
using SampleDescription = std::tuple< size_t, size_t, absl::Duration >;
std::vector< SampleDescription > dSamples;
std::vector< SampleDescription > iSamples;
dSamples.reserve(categories.size());
iSamples.reserve(categories.size());
// 1
absl::WriterMutexLock publishGuard(&m_publishLock);
// 2
absl::WriterMutexLock propertiesGuard(&m_mutex);
for(const Category *const category : categories)
{
if(!category->enabled())
{
continue;
}
size_t dBeginIndex = records.doubleRecords.size();
size_t iBeginIndex = records.intRecords.size();
// Collect the metrics.
auto collectRes = PublisherHelper::collect(*this, category, now, clear);
DoubleRecords catDRecords = collectRes.records.doubleRecords;
IntRecords catIRecords = collectRes.records.intRecords;
absl::Duration elapsedTime = collectRes.samplePeriod;
records.doubleRecords.insert(records.doubleRecords.end(),
catDRecords.begin(), catDRecords.end());
records.intRecords.insert(records.intRecords.end(), catIRecords.begin(),
catIRecords.end());
size_t dSize = records.doubleRecords.size() - dBeginIndex;
size_t iSize = records.intRecords.size() - iBeginIndex;
// If there are no collected records then this category can be ignored.
if(dSize != 0)
{
dSamples.emplace_back(dBeginIndex, dSize, elapsedTime);
}
if(iSize != 0)
{
iSamples.emplace_back(iBeginIndex, iSize, elapsedTime);
}
}
// Now that we have all the records, we can build our sample
for(const SampleDescription &s : dSamples)
{
sample.pushGroup(&records.doubleRecords[std::get< 0 >(s)],
std::get< 1 >(s), std::get< 2 >(s));
}
for(const SampleDescription &s : iSamples)
{
sample.pushGroup(&records.intRecords[std::get< 0 >(s)],
std::get< 1 >(s), std::get< 2 >(s));
}
return sample;
}
void
Manager::publish(absl::Span< const Category * > categories, bool clear)
{
PublisherHelper::publish(*this, categories.begin(), categories.end(),
clear);
}
void
Manager::publish(const std::set< const Category * > &categories, bool clear)
{
PublisherHelper::publish(*this, categories.begin(), categories.end(),
clear);
}
Manager *DefaultManager::m_manager = nullptr;
struct PublisherSchedulerData
{
util::Mutex m_mutex;
thread::Scheduler::Handle m_handle GUARDED_BY(m_mutex);
std::set< const Category * > m_categories GUARDED_BY(m_mutex);
bool m_default GUARDED_BY(m_mutex){false};
std::set< const Category * > m_nonDefaultCategories GUARDED_BY(m_mutex);
PublisherSchedulerData() : m_handle(thread::Scheduler::INVALID_HANDLE)
{
}
};
// Reverts a publisher scheduler back to its default state
class PublisherSchedulerGuard
{
PublisherScheduler *m_scheduler;
public:
PublisherSchedulerGuard(PublisherScheduler *scheduler)
: m_scheduler(scheduler)
{
}
~PublisherSchedulerGuard()
{
if(m_scheduler != nullptr)
{
for(auto &repeat : m_scheduler->m_repeaters)
{
if(repeat.second->m_handle != thread::Scheduler::INVALID_HANDLE)
{
m_scheduler->m_scheduler.cancelRepeat(repeat.second->m_handle);
}
}
m_scheduler->m_defaultInterval = absl::Duration();
m_scheduler->m_repeaters.clear();
m_scheduler->m_categories.clear();
}
}
void
release()
{
m_scheduler = nullptr;
}
};
void
PublisherScheduler::publish(
const std::shared_ptr< PublisherSchedulerData > &data) const
{
util::Lock l(&data->m_mutex);
if(data->m_default)
{
m_manager->publishAllExcluding(data->m_nonDefaultCategories);
}
else if(!data->m_categories.empty())
{
m_manager->publish(data->m_categories);
}
}
void
PublisherScheduler::cancel(Categories::iterator it)
{
assert(it != m_categories.end());
auto repeatIt = m_repeaters.find(it->second);
assert(repeatIt != m_repeaters.end());
const Category *category = it->first;
m_categories.erase(it);
auto data = repeatIt->second;
util::Lock l(&data->m_mutex);
assert(data->m_categories.find(category) != data->m_categories.end());
data->m_categories.erase(category);
if(!data->m_default)
{
if(data->m_categories.empty())
{
m_scheduler.cancelRepeat(data->m_handle);
m_repeaters.erase(repeatIt);
}
if(m_defaultInterval != absl::Duration())
{
auto defaultIntervalIt = m_repeaters.find(m_defaultInterval);
assert(defaultIntervalIt != m_repeaters.end());
auto &defaultRepeater = defaultIntervalIt->second;
util::Lock lock(&defaultRepeater->m_mutex);
defaultRepeater->m_nonDefaultCategories.erase(category);
}
}
}
bool
PublisherScheduler::cancelDefault()
{
if(m_defaultInterval == absl::Duration())
{
return false;
}
absl::Duration interval = m_defaultInterval;
m_defaultInterval = absl::Duration();
auto repeatIt = m_repeaters.find(interval);
assert(repeatIt != m_repeaters.end());
auto data = repeatIt->second;
util::Lock l(&data->m_mutex);
if(data->m_categories.empty())
{
assert(data->m_handle != thread::Scheduler::INVALID_HANDLE);
m_scheduler.cancelRepeat(data->m_handle);
m_repeaters.erase(repeatIt);
}
else
{
data->m_default = false;
data->m_nonDefaultCategories.clear();
}
return true;
}
void
PublisherScheduler::schedule(const Category *category,
absl::Duration interval)
{
assert(absl::Seconds(0) < interval);
util::Lock l(&m_mutex);
auto catIt = m_categories.find(category);
if(catIt != m_categories.end())
{
if(catIt->second == interval)
{
return;
}
cancel(catIt);
}
// Make a guard, so if something throws, the scheduler is reset to a
// somewhat "sane" state (no metrics).
PublisherSchedulerGuard guard(this);
m_categories.emplace(category, interval);
auto repeatIt = m_repeaters.find(interval);
std::shared_ptr< PublisherSchedulerData > data;
// Create a new 'PublisherSchedulerData' object if one does not exist for
// the 'interval', otherwise update the existing 'data'.
if(repeatIt == m_repeaters.end())
{
data = std::make_shared< PublisherSchedulerData >();
util::Lock lock(&data->m_mutex);
data->m_categories.insert(category);
m_repeaters.emplace(interval, data);
data->m_handle = m_scheduler.scheduleRepeat(
interval, std::bind(&PublisherScheduler::publish, this, data));
}
else
{
data = repeatIt->second;
util::Lock lock(&data->m_mutex);
data->m_categories.insert(category);
}
// If this isn't being added to the default schedule, then add to the set
// of non-default categories in the default schedule.
util::Lock dataLock(&data->m_mutex);
if(!data->m_default && m_defaultInterval != absl::Duration())
{
auto defaultIntervalIt = m_repeaters.find(m_defaultInterval);
assert(defaultIntervalIt != m_repeaters.end());
auto &defaultInterval = defaultIntervalIt->second;
util::Lock lock(&defaultInterval->m_mutex);
defaultInterval->m_nonDefaultCategories.insert(category);
}
guard.release();
}
void
PublisherScheduler::setDefault(absl::Duration interval)
{
assert(absl::Seconds(0) < interval);
util::Lock l(&m_mutex);
// If it's already this interval, return early.
if(interval == m_defaultInterval)
{
return;
}
cancelDefault();
m_defaultInterval = interval;
// Make a guard, so if something throws, the scheduler is reset to a
// somewhat "sane" state (no metrics).
PublisherSchedulerGuard guard(this);
std::shared_ptr< PublisherSchedulerData > data;
auto repeatIt = m_repeaters.find(interval);
if(repeatIt == m_repeaters.end())
{
data = std::make_shared< PublisherSchedulerData >();
m_repeaters.emplace(interval, data);
}
else
{
data = repeatIt->second;
}
util::Lock lock(&data->m_mutex);
data->m_default = true;
auto cIt = m_categories.begin();
for(; cIt != m_categories.end(); ++cIt)
{
if(cIt->second != interval)
{
data->m_nonDefaultCategories.insert(cIt->first);
}
}
if(data->m_handle == thread::Scheduler::INVALID_HANDLE)
{
data->m_handle = m_scheduler.scheduleRepeat(
interval, std::bind(&PublisherScheduler::publish, this, data));
}
guard.release();
}
bool
PublisherScheduler::cancel(const Category *category)
{
util::Lock l(&m_mutex);
auto it = m_categories.find(category);
if(it == m_categories.end())
{
// This category has no specific schedule.
return false;
}
cancel(it);
return true;
}
bool
PublisherScheduler::clearDefault()
{
util::Lock l(&m_mutex);
return cancelDefault();
}
void
PublisherScheduler::cancelAll()
{
util::Lock l(&m_mutex);
for(auto &repeat : m_repeaters)
{
util::Lock dataLock(&repeat.second->m_mutex);
m_scheduler.cancelRepeat(repeat.second->m_handle, true);
}
m_defaultInterval = absl::Duration();
m_repeaters.clear();
m_categories.clear();
}
nonstd::optional< absl::Duration >
PublisherScheduler::find(const Category *category) const
{
util::Lock l(&m_mutex);
auto it = m_categories.find(category);
if(it == m_categories.end())
{
return {};
}
return it->second;
}
nonstd::optional< absl::Duration >
PublisherScheduler::getDefault() const
{
util::Lock l(&m_mutex);
if(m_defaultInterval == absl::Duration())
{
return {};
}
return m_defaultInterval;
}
std::vector< std::pair< const Category *, absl::Duration > >
PublisherScheduler::getAll() const
{
util::Lock l(&m_mutex);
std::vector< std::pair< const Category *, absl::Duration > > result;
result.reserve(m_categories.size());
std::copy(m_categories.begin(), m_categories.end(),
std::back_inserter(result));
return result;
}
} // namespace metrics
} // namespace llarp

File diff suppressed because it is too large

@ -1,178 +0,0 @@
#include <util/metrics/json_publisher.hpp>
#include <fstream>
#include <iomanip>
#include <iostream>
namespace llarp
{
namespace metrics
{
namespace
{
nlohmann::json
tagsToJson(const Tags &tags)
{
nlohmann::json result;
std::for_each(tags.begin(), tags.end(), [&](const auto &tag) {
absl::visit([&](const auto &t) { result[tag.first] = t; },
tag.second);
});
return result;
}
template < typename Value >
nlohmann::json
formatValue(const Record< Value > &record, const Tags &tags,
double elapsedTime, Publication::Type publicationType)
{
switch(publicationType)
{
case Publication::Type::Unspecified:
{
assert(false && "Invalid publication type");
}
break;
case Publication::Type::Total:
{
return {{"tags", tagsToJson(tags)}, {"total", record.total()}};
}
break;
case Publication::Type::Count:
{
return {{"tags", tagsToJson(tags)}, {"count", record.count()}};
}
break;
case Publication::Type::Min:
{
return {{"tags", tagsToJson(tags)}, {"min", record.min()}};
}
break;
case Publication::Type::Max:
{
return {{"tags", tagsToJson(tags)}, {"max", record.max()}};
}
break;
case Publication::Type::Avg:
{
return {{"tags", tagsToJson(tags)},
{"avg", record.total() / record.count()}};
}
break;
case Publication::Type::Rate:
{
return {{"tags", tagsToJson(tags)},
{"rate", record.total() / elapsedTime}};
}
break;
case Publication::Type::RateCount:
{
return {{"tags", tagsToJson(tags)},
{"rateCount", record.count() / elapsedTime}};
}
break;
}
return {};
}
template < typename Value >
nlohmann::json
recordToJson(const TaggedRecords< Value > &taggedRecord,
double elapsedTime)
{
nlohmann::json result;
result["id"] = taggedRecord.id.toString();
auto publicationType = taggedRecord.id.description()->type();
for(const auto &rec : taggedRecord.data)
{
const auto &record = rec.second;
if(publicationType != Publication::Type::Unspecified)
{
result["publicationType"] = Publication::repr(publicationType);
result["metrics"].push_back(
formatValue(record, rec.first, elapsedTime, publicationType));
}
else
{
nlohmann::json tmp;
tmp["tags"] = tagsToJson(rec.first);
tmp["count"] = record.count();
tmp["total"] = record.total();
if(Record< Value >::DEFAULT_MIN() != record.min())
{
tmp["min"] = record.min();
}
if(Record< Value >::DEFAULT_MAX() != record.max())
{
tmp["max"] = record.max();
}
result["metrics"].push_back(tmp);
}
}
return result;
}
} // namespace
void
JsonPublisher::publish(const Sample &values)
{
if(values.recordCount() == 0)
{
// nothing to publish
return;
}
nlohmann::json result;
result["sampleTime"] = absl::UnparseFlag(values.sampleTime());
result["recordCount"] = values.recordCount();
auto gIt = values.begin();
auto prev = values.begin();
for(; gIt != values.end(); ++gIt)
{
const double elapsedTime = absl::ToDoubleSeconds(samplePeriod(*gIt));
if(gIt == prev || samplePeriod(*gIt) != samplePeriod(*prev))
{
result["elapsedTime"] = elapsedTime;
}
absl::visit(
[&](const auto &x) -> void {
for(const auto &record : x)
{
result["record"].emplace_back(
recordToJson(record, elapsedTime));
}
},
*gIt);
prev = gIt;
}
m_publish(result);
}
void
JsonPublisher::directoryPublisher(const nlohmann::json &result,
const fs::path &path)
{
std::ofstream fstream(path.string(), std::ios_base::app);
if(!fstream)
{
std::cerr << "Skipping metrics publish, " << path << " is not a file\n";
return;
}
fstream << std::setw(0) << result << '\n';
fstream.close();
}
} // namespace metrics
} // namespace llarp

@ -1,40 +0,0 @@
#ifndef LLARP_METRICS_JSON_PUBLISHER_HPP
#define LLARP_METRICS_JSON_PUBLISHER_HPP
#include <util/fs.hpp>
#include <util/metrics/core.hpp>
#include <nlohmann/json.hpp>
#include <functional>
#include <iosfwd>
#include <utility>
namespace llarp
{
namespace metrics
{
class JsonPublisher final : public Publisher
{
public:
using PublishFunction = std::function< void(const nlohmann::json&) >;
private:
PublishFunction m_publish;
public:
JsonPublisher(PublishFunction publish) : m_publish(std::move(publish))
{
}
~JsonPublisher() override = default;
void
publish(const Sample& values) override;
static void
directoryPublisher(const nlohmann::json& result, const fs::path& path);
};
} // namespace metrics
} // namespace llarp
#endif

@ -1 +0,0 @@
#include <util/metrics/metrics.hpp>

@ -1,80 +0,0 @@
#ifndef LLARP_METRICS_HPP
#define LLARP_METRICS_HPP
#include <util/metrics/types.hpp>
#include <util/metrics/core.hpp>
#include <util/string_view.hpp>
namespace llarp
{
namespace metrics
{
struct MetricsHelper
{
static void
initContainer(CategoryContainer& container, const char* category)
{
Manager* manager = DefaultManager::instance();
Registry& registry = manager->registry();
registry.registerContainer(registry.get(category), container);
}
static void
setType(const Id& id, Publication::Type type)
{
Manager* manager = DefaultManager::instance();
return manager->registry().publicationType(id, type);
}
};
template < typename... TagVals >
void
integerTick(string_view category, string_view metric, int val,
TagVals&&... tags)
{
if(DefaultManager::instance())
{
CollectorRepo< int >& repository =
DefaultManager::instance()->intCollectorRepo();
IntCollector* collector = repository.defaultCollector(category, metric);
if(collector->id().category()->enabled())
{
collector->tick(val, tags...);
}
}
}
} // namespace metrics
} // namespace llarp
// Some MSVC flags mess with __LINE__, but __COUNTER__ is better anyway
#ifdef _MSC_VER
#define METRICS_UNIQ_NUMBER __COUNTER__
#else
#define METRICS_UNIQ_NUMBER __LINE__
#endif
// Use a level of indirection to force the preprocessor to expand args first.
#define METRICS_NAME_CAT_IMP(X, Y) X##Y
#define METRICS_NAME_CAT(X, Y) METRICS_NAME_CAT_IMP(X, Y)
#define METRICS_UNIQUE_NAME(X) METRICS_NAME_CAT(X, METRICS_UNIQ_NUMBER)
// For when the category/metric may change during the program run
#define METRICS_DYNAMIC_UPDATE(CAT, METRIC, ...) \
do \
{ \
using namespace llarp::metrics; \
if(DefaultManager::instance()) \
{ \
CollectorRepo< double >& repository = \
DefaultManager::instance()->doubleCollectorRepo(); \
DoubleCollector* collector = \
repository.defaultCollector((CAT), (METRIC)); \
if(collector->id().category()->enabled()) \
{ \
collector->tick(__VA_ARGS__); \
} \
} \
} while(false)
#endif

@ -1,430 +0,0 @@
#include <util/metrics/metrictank_publisher.hpp>
#include <util/logging/logger.hpp>
#include <util/meta/variant.hpp>
#include <cstdio>
#include <absl/strings/str_cat.h>
#include <absl/strings/str_join.h>
#ifndef _WIN32
#include <netdb.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
// bzero and friends graduated from /usr/ucb*
// not too long ago
#include <strings.h>
#else
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <winsock2.h>
#include <ws2tcpip.h>
#include <wspiapi.h>
#include <lmcons.h>
#endif
namespace llarp
{
namespace metrics
{
namespace
{
nonstd::optional< std::string >
makeStr(double d)
{
if(std::isnan(d) || std::isinf(d))
{
return {};
}
return std::to_string(d);
}
nonstd::optional< std::string >
makeStr(int i)
{
if(i == std::numeric_limits< int >::min()
|| i == std::numeric_limits< int >::max())
{
return {};
}
return std::to_string(i);
}
template < typename Value >
nonstd::optional< std::string >
formatValue(const Record< Value > &record, double elapsedTime,
Publication::Type publicationType)
{
switch(publicationType)
{
case Publication::Type::Unspecified:
{
assert(false && "Invalid publication type");
}
break;
case Publication::Type::Total:
{
return makeStr(record.total());
}
break;
case Publication::Type::Count:
{
return std::to_string(record.count());
}
break;
case Publication::Type::Min:
{
return makeStr(record.min());
}
break;
case Publication::Type::Max:
{
return makeStr(record.max());
}
break;
case Publication::Type::Avg:
{
return makeStr(static_cast< double >(record.total())
/ static_cast< double >(record.count()));
}
break;
case Publication::Type::Rate:
{
return makeStr(record.total() / elapsedTime);
}
break;
case Publication::Type::RateCount:
{
return makeStr(record.count() / elapsedTime);
}
break;
}
assert(false && "Invalid publication type");
return {};
}
std::string
makeTagStr(const Tags &tags)
{
std::string tagStr;
auto overloaded = util::overloaded(
[](const std::string &str) { return str; },
[](double d) { return std::to_string(d); },
[](const std::int64_t i) { return std::to_string(i); });
for(const auto &tag : tags)
{
absl::StrAppend(&tagStr, ";", tag.first, "=",
absl::visit(overloaded, tag.second));
}
if(!tags.empty())
{
absl::StrAppend(&tagStr, ";");
}
return tagStr;
}
std::string
addName(string_view id, string_view name, const Tags &tags,
string_view suffix)
{
return absl::StrCat(id, ".", name, makeTagStr(tags), suffix);
}
constexpr bool
isValid(int val)
{
return val != std::numeric_limits< int >::min()
&& val != std::numeric_limits< int >::max();
}
constexpr bool
isValid(double val)
{
return Record< double >::DEFAULT_MIN() != val
&& Record< double >::DEFAULT_MAX() != val && !std::isnan(val)
&& !std::isinf(val);
}
template < typename Value >
std::vector< MetricTankPublisherInterface::PublishData >
recordToData(const TaggedRecords< Value > &taggedRecords, absl::Time time,
double elapsedTime, string_view suffix)
{
std::vector< MetricTankPublisherInterface::PublishData > result;
std::string id = taggedRecords.id.toString();
auto publicationType = taggedRecords.id.description()->type();
for(const auto &record : taggedRecords.data)
{
const auto &tags = record.first;
const auto &rec = record.second;
if(publicationType != Publication::Type::Unspecified)
{
auto val = formatValue(rec, elapsedTime, publicationType);
if(val)
{
result.emplace_back(
addName(id, Publication::repr(publicationType), tags, suffix),
val.value(), time);
}
}
else
{
result.emplace_back(addName(id, "count", tags, suffix),
std::to_string(rec.count()), time);
result.emplace_back(addName(id, "total", tags, suffix),
std::to_string(rec.total()), time);
if(isValid(rec.min()))
{
result.emplace_back(addName(id, "min", tags, suffix),
std::to_string(rec.min()), time);
}
if(isValid(rec.max()))
{
result.emplace_back(addName(id, "max", tags, suffix),
std::to_string(rec.max()), time);
}
}
}
return result;
}
#ifndef _WIN32
void
publishData(const std::vector< std::string > &toSend,
const std::string &host, short port)
{
struct addrinfo hints;
struct addrinfo *addrs;
bzero(&hints, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
const std::string portAsStr = std::to_string(port);
if(getaddrinfo(host.c_str(), portAsStr.c_str(), &hints, &addrs) != 0)
{
LogError("Failed to get address info");
return;
}
int sock =
::socket(addrs->ai_family, addrs->ai_socktype, addrs->ai_protocol);
if(sock < 0)
{
LogError("Failed to open socket");
freeaddrinfo(addrs);
return;
}
if(connect(sock, addrs->ai_addr, addrs->ai_addrlen) < 0)
{
LogError("Failed to connect to metrictank");
close(sock);
freeaddrinfo(addrs);
return;
}
freeaddrinfo(addrs);
for(const std::string &val : toSend)
{
ssize_t sentLen = 0;
do
{
sentLen =
::send(sock, val.c_str() + sentLen, val.size() - sentLen, 0);
if(sentLen == -1)
{
LogError("Error ", strerror(errno));
}
} while((0 <= sentLen)
&& (static_cast< size_t >(sentLen) < val.size()));
}
LogInfo("Sent ", toSend.size(), " metrics to metrictank");
shutdown(sock, SHUT_RDWR);
close(sock);
}
#else
void
publishData(const std::vector< std::string > &toSend,
const std::string &host, short port)
{
struct addrinfo *addrs = NULL, hints;
ZeroMemory(&hints, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
const std::string portAsStr = std::to_string(port);
if(getaddrinfo(host.c_str(), portAsStr.c_str(), &hints, &addrs) != 0)
{
LogError("Failed to get address info");
return;
}
SOCKET sock =
::socket(addrs->ai_family, addrs->ai_socktype, addrs->ai_protocol);
if(sock == INVALID_SOCKET)
{
LogError("Failed to open socket");
freeaddrinfo(addrs);
return;
}
if(connect(sock, addrs->ai_addr, addrs->ai_addrlen) == SOCKET_ERROR)
{
LogError("Failed to connect to metrictank");
closesocket(sock);
freeaddrinfo(addrs);
return;
}
freeaddrinfo(addrs);
for(const std::string &val : toSend)
{
int sentLen = 0;
do
{
sentLen =
::send(sock, val.c_str() + sentLen, val.size() - sentLen, 0);
if(sentLen == SOCKET_ERROR)
{
LogError("Error ", strerror(errno));
}
} while((0 <= sentLen)
&& (static_cast< size_t >(sentLen) < val.size()));
}
shutdown(sock, SD_SEND);
closesocket(sock);
}
#endif
MetricTankPublisherInterface::Tags
updateTags(MetricTankPublisherInterface::Tags tags)
{
if(tags.count("system") == 0)
{
#if defined(_WIN32) || defined(_WIN64) || defined(__NT__)
tags["system"] = "windows";
#elif defined(__APPLE__)
tags["system"] = "macos";
#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
tags["system"] = "bsd";
#elif defined(__sun)
tags["system"] = "solaris";
#elif defined(__linux__)
tags["system"] = "linux";
#else
tags["system"] = "unknown";
#endif
}
return tags;
}
} // namespace
std::string
MetricTankPublisherInterface::makeSuffix(const Tags &tags)
{
return absl::StrJoin(updateTags(tags), ";", absl::PairFormatter("="));
}
void
MetricTankPublisherInterface::publish(const Sample &values)
{
if(values.recordCount() == 0)
{
// nothing to publish
return;
}
absl::Time sampleTime = values.sampleTime();
std::vector< PublishData > result;
result.reserve(values.recordCount());
auto gIt = values.begin();
auto prev = values.begin();
for(; gIt != values.end(); ++gIt)
{
const double elapsedTime = absl::ToDoubleSeconds(samplePeriod(*gIt));
absl::visit(
[&](const auto &d) {
for(const auto &record : d)
{
auto partial =
recordToData(record, sampleTime, elapsedTime, m_suffix);
result.insert(result.end(), partial.begin(), partial.end());
}
},
*gIt);
prev = gIt;
}
publish(result);
}
void
MetricTankPublisher::publish(const std::vector< PublishData > &data)
{
if(m_queue.tryPushBack(data) == thread::QueueReturn::QueueFull)
{
LogWarn("Dropping metrictank logs!");
}
}
void
MetricTankPublisher::work()
{
while(true)
{
auto data = m_queue.popFront(); // block until we get something
// Finish
if(absl::holds_alternative< StopStruct >(data))
{
return;
}
assert(absl::holds_alternative< std::vector< PublishData > >(data));
auto vec = absl::get< std::vector< PublishData > >(data);
std::vector< std::string > toSend;
toSend.reserve(vec.size());
std::transform(vec.begin(), vec.end(), std::back_inserter(toSend),
[](const PublishData &d) -> std::string {
return absl::StrCat(
std::get< 0 >(d), " ", std::get< 1 >(d), " ",
absl::ToUnixSeconds(std::get< 2 >(d)), "\n");
});
publishData(toSend, m_host, m_port);
}
}
} // namespace metrics
} // namespace llarp

@ -1,90 +0,0 @@
#ifndef LLARP_METRICS_METRICTANK_PUBLISHER_HPP
#define LLARP_METRICS_METRICTANK_PUBLISHER_HPP
#include <util/metrics/core.hpp>
#include <util/thread/queue.hpp>
#include <absl/types/variant.h>
#include <string>
#include <thread>
#include <tuple>
#include <utility>
#include <vector>
namespace llarp
{
namespace metrics
{
class MetricTankPublisherInterface : public Publisher
{
public:
// Format for metrictank is <metric path, value, seconds since epoch>
// Metric path = metrics.namespaces.metric;key=value;key1=value2
using PublishData = std::tuple< std::string, std::string, absl::Time >;
using Tags = std::map< std::string, std::string >;
private:
const std::string m_suffix; // tags to send to metric tank
public:
MetricTankPublisherInterface(const Tags& tags)
: m_suffix(makeSuffix(tags))
{
}
~MetricTankPublisherInterface() override = default;
static std::string
makeSuffix(const Tags& tags);
void
publish(const Sample& values) override;
virtual void
publish(const std::vector< PublishData >& data) = 0;
};
class MetricTankPublisher final : public MetricTankPublisherInterface
{
private:
const std::string m_host;
const short m_port;
struct StopStruct
{
};
using Queue = thread::Queue<
absl::variant< std::vector< PublishData >, StopStruct > >;
Queue m_queue; // queue of things to publish
std::thread m_worker; // worker thread
void
work();
public:
MetricTankPublisher(const Tags& tags, std::string host, short port)
: MetricTankPublisherInterface(tags)
, m_host(std::move(host))
, m_port(port)
, m_queue(100)
, m_worker(&MetricTankPublisher::work, this)
{
}
~MetricTankPublisher() override
{
// Push back a signal value onto the queue
m_queue.pushBack(StopStruct());
}
void
publish(const std::vector< PublishData >& data) override;
};
} // namespace metrics
} // namespace llarp
#endif

@ -1,196 +0,0 @@
#include <util/metrics/stream_publisher.hpp>
#include <fstream>
#include <iostream>
#include <iomanip>
namespace llarp
{
namespace metrics
{
namespace
{
template < typename Value >
void
formatValue(std::ostream &stream, Value value,
const FormatSpec *formatSpec)
{
if(formatSpec)
{
FormatSpec::format(stream, static_cast< double >(value), *formatSpec);
}
else
{
stream << value;
}
}
template < typename Value >
void
formatValue(std::ostream &stream, const Record< Value > &record,
double elapsedTime, Publication::Type publicationType,
const FormatSpec *formatSpec)
{
switch(publicationType)
{
case Publication::Type::Unspecified:
{
assert(false && "Invalid publication type");
}
break;
case Publication::Type::Total:
{
formatValue(stream, record.total(), formatSpec);
}
break;
case Publication::Type::Count:
{
formatValue(stream, record.count(), formatSpec);
}
break;
case Publication::Type::Min:
{
formatValue(stream, record.min(), formatSpec);
}
break;
case Publication::Type::Max:
{
formatValue(stream, record.max(), formatSpec);
}
break;
case Publication::Type::Avg:
{
formatValue(stream, record.total() / record.count(), formatSpec);
}
break;
case Publication::Type::Rate:
{
formatValue(stream, record.total() / elapsedTime, formatSpec);
}
break;
case Publication::Type::RateCount:
{
formatValue(stream, record.count() / elapsedTime, formatSpec);
}
break;
}
}
template < typename Value >
void
publishRecord(std::ostream &stream,
const TaggedRecords< Value > &taggedRecords,
double elapsedTime)
{
auto publicationType = taggedRecords.id.description()->type();
std::shared_ptr< const Format > format =
taggedRecords.id.description()->format();
if(taggedRecords.data.empty())
{
return;
}
stream << "\t\t" << taggedRecords.id << " [";
for(const auto &rec : taggedRecords.data)
{
stream << "\n\t\t\t";
const auto &tags = rec.first;
const auto &record = rec.second;
{
Printer printer(stream, -1, -1);
printer.printValue(tags);
}
stream << " ";
if(publicationType != Publication::Type::Unspecified)
{
stream << Publication::repr(publicationType) << " = ";
const FormatSpec *formatSpec =
format ? format->specFor(publicationType) : nullptr;
formatValue(stream, record, elapsedTime, publicationType,
formatSpec);
}
else
{
const FormatSpec *countSpec = nullptr;
const FormatSpec *totalSpec = nullptr;
const FormatSpec *minSpec = nullptr;
const FormatSpec *maxSpec = nullptr;
if(format)
{
countSpec = format->specFor(Publication::Type::Count);
totalSpec = format->specFor(Publication::Type::Total);
minSpec = format->specFor(Publication::Type::Min);
maxSpec = format->specFor(Publication::Type::Max);
}
stream << "count = ";
formatValue(stream, record.count(), countSpec);
stream << ", total = ";
formatValue(stream, record.total(), totalSpec);
if(Record< Value >::DEFAULT_MIN() == record.min())
{
stream << ", min = undefined";
}
else
{
stream << ", min = ";
formatValue(stream, record.min(), minSpec);
}
if(Record< Value >::DEFAULT_MAX() == record.max())
{
stream << ", max = undefined";
}
else
{
stream << ", max = ";
formatValue(stream, record.max(), maxSpec);
}
}
}
stream << "\n\t\t]\n";
}
} // namespace
void
StreamPublisher::publish(const Sample &values)
{
if(values.recordCount() == 0)
{
// nothing to publish
return;
}
m_stream << values.sampleTime() << " " << values.recordCount()
<< " Records\n";
auto gIt = values.begin();
auto prev = values.begin();
for(; gIt != values.end(); ++gIt)
{
const double elapsedTime = absl::ToDoubleSeconds(samplePeriod(*gIt));
if(gIt == prev || samplePeriod(*gIt) != samplePeriod(*prev))
{
m_stream << "\tElapsed Time: " << elapsedTime << "s\n";
}
absl::visit(
[&](const auto &x) {
for(const auto &record : x)
{
publishRecord(m_stream, record, elapsedTime);
}
},
*gIt);
prev = gIt;
}
}
} // namespace metrics
} // namespace llarp

@ -1,30 +0,0 @@
#ifndef LLARP_METRICS_STREAM_PUBLISHER_HPP
#define LLARP_METRICS_STREAM_PUBLISHER_HPP
#include <util/metrics/core.hpp>
#include <iosfwd>
namespace llarp
{
namespace metrics
{
class StreamPublisher final : public Publisher
{
std::ostream& m_stream;
public:
StreamPublisher(std::ostream& stream) : m_stream(stream)
{
}
~StreamPublisher() override = default;
void
publish(const Sample& values) override;
};
} // namespace metrics
} // namespace llarp
#endif

@ -1,145 +0,0 @@
#include <util/metrics/types.hpp>
#include <util/printer.hpp>
#include <absl/strings/str_join.h>
namespace llarp
{
namespace metrics
{
std::ostream &
FormatSpec::format(std::ostream &stream, double data,
const FormatSpec &format)
{
static constexpr size_t INIT_SIZE = 32;
char buf[INIT_SIZE] = {0};
int rc = snprintf(buf, INIT_SIZE, format.m_format.data(),
data * format.m_scale);
if(rc < 0)
{
stream << "Bad format " << format.m_format << " applied to " << data;
return stream;
}
if(static_cast< size_t >(rc) < INIT_SIZE)
{
stream << buf;
return stream;
}
std::vector< char > vec(rc + 1);
rc = snprintf(vec.data(), vec.size(), format.m_format.data(),
data * format.m_scale);
if(static_cast< size_t >(rc) > vec.size())
{
stream << "Bad format " << format.m_format << " applied to " << data;
return stream;
}
stream << vec.data();
return stream;
}
string_view
Publication::repr(Type val)
{
switch(val)
{
case Type::Unspecified:
return "Unspecified";
case Type::Total:
return "Total";
case Type::Count:
return "Count";
case Type::Min:
return "Min";
case Type::Max:
return "Max";
case Type::Avg:
return "Avg";
case Type::Rate:
return "Rate";
case Type::RateCount:
return "RateCount";
default:
return "???";
}
}
std::ostream &
Publication::print(std::ostream &stream, Type val)
{
stream << repr(val);
return stream;
}
Category::~Category()
{
while(m_container)
{
auto next = m_container->m_nextCategory;
m_container->clear();
m_container = next;
}
}
void
Category::enabled(bool val)
{
// sync point
if(m_enabled != val)
{
auto cont = m_container;
while(cont)
{
cont->m_enabled = val;
cont = cont->m_nextCategory;
}
m_enabled = val;
}
}
void
Category::registerContainer(CategoryContainer *container)
{
container->m_enabled = m_enabled;
container->m_category = this;
container->m_nextCategory = m_container;
m_container = container;
}
std::ostream &
Category::print(std::ostream &stream, int level, int spaces) const
{
Printer printer(stream, level, spaces);
printer.printAttribute("name", m_name);
printer.printAttribute("enabled",
m_enabled.load(std::memory_order_relaxed));
return stream;
}
std::string
Description::toString() const
{
util::Lock l(&m_mutex);
return absl::StrCat(m_category->name(), ".", m_name);
}
std::ostream &
Description::print(std::ostream &stream) const
{
util::Lock l(&m_mutex);
stream << m_category->name() << '.' << m_name;
return stream;
}
} // namespace metrics
} // namespace llarp

@ -1,675 +0,0 @@
#ifndef LLARP_METRICS_TYPES_HPP
#define LLARP_METRICS_TYPES_HPP
#include <util/printer.hpp>
#include <util/string_view.hpp>
#include <util/thread/threading.hpp>
#include <util/types.hpp>
#include <util/meta/variant.hpp>
#include <absl/container/flat_hash_map.h>
#include <absl/container/flat_hash_set.h>
#include <absl/hash/hash.h>
#include <nonstd/optional.hpp>
#include <absl/types/span.h>
#include <absl/types/variant.h>
#include <cstring>
#include <iosfwd>
#include <memory>
#include <set>
#include <vector>
namespace llarp
{
namespace metrics
{
struct Publication
{
enum class Type : byte_t
{
Unspecified = 0, // no associated metric type
Total, // sum of seen values in the measurement period
Count, // count of seen events
Min, // Minimum value
Max, // Max value
Avg, // total / count
Rate, // total per second
RateCount // count per second
};
enum
{
MaxSize = static_cast< byte_t >(Type::RateCount) + 1
};
static string_view
repr(Type val);
static std::ostream &
print(std::ostream &stream, Type val);
};
struct FormatSpec
{
float m_scale{1.0};
string_view m_format;
static constexpr char DEFAULT_FORMAT[] = "%f";
constexpr FormatSpec() : m_format(DEFAULT_FORMAT)
{
}
constexpr FormatSpec(float scale, string_view format)
: m_scale(scale), m_format(format)
{
}
static std::ostream &
format(std::ostream &stream, double data, const FormatSpec &spec);
};
inline bool
operator==(const FormatSpec &lhs, const FormatSpec &rhs)
{
return std::make_tuple(lhs.m_scale, lhs.m_format)
== std::make_tuple(rhs.m_scale, rhs.m_format);
}
struct Format
{
using Spec = nonstd::optional< FormatSpec >;
std::array< Spec, Publication::MaxSize > m_specs;
constexpr Format() : m_specs()
{
}
void
setSpec(Publication::Type pub, const FormatSpec &spec)
{
m_specs[static_cast< size_t >(pub)].emplace(spec);
}
void
clear()
{
for(auto &s : m_specs)
s.reset();
}
constexpr const FormatSpec *
specFor(Publication::Type val) const
{
const auto &spec = m_specs[static_cast< size_t >(val)];
return spec ? &spec.value() : nullptr;
}
};
inline bool
operator==(const Format &lhs, const Format &rhs)
{
return lhs.m_specs == rhs.m_specs;
}
struct CategoryContainer;
/// Represents a category of grouped metrics
class Category
{
string_view m_name;
std::atomic_bool m_enabled;
CategoryContainer *m_container;
public:
Category(string_view name, bool enabled = true)
: m_name(name), m_enabled(enabled), m_container(nullptr)
{
}
~Category();
void
enabled(bool flag);
void
registerContainer(CategoryContainer *container);
const std::atomic_bool &
enabledRaw() const
{
return m_enabled;
}
string_view
name() const
{
return m_name;
}
bool
enabled() const
{
return m_enabled;
}
std::ostream &
print(std::ostream &stream, int level, int spaces) const;
};
inline std::ostream &
operator<<(std::ostream &stream, const Category &c)
{
return c.print(stream, -1, -1);
}
struct CategoryContainer
{
bool m_enabled;
const Category *m_category;
CategoryContainer *m_nextCategory;
constexpr void
clear()
{
m_enabled = false;
m_category = nullptr;
m_nextCategory = nullptr;
}
};
class Description
{
mutable util::Mutex m_mutex;
const Category *m_category GUARDED_BY(m_mutex);
string_view m_name GUARDED_BY(m_mutex);
Publication::Type m_type GUARDED_BY(m_mutex);
std::shared_ptr< Format > m_format GUARDED_BY(m_mutex);
Description(const Description &) = delete;
Description &
operator=(const Description &) = delete;
public:
Description(const Category *category, string_view name)
: m_category(category)
, m_name(name)
, m_type(Publication::Type::Unspecified)
{
}
void
category(const Category *c) LOCKS_EXCLUDED(m_mutex)
{
util::Lock l(&m_mutex);
m_category = c;
}
const Category *
category() const LOCKS_EXCLUDED(m_mutex)
{
util::Lock l(&m_mutex);
return m_category;
}
void
name(string_view n) LOCKS_EXCLUDED(m_mutex)
{
util::Lock l(&m_mutex);
m_name = n;
}
string_view
name() const LOCKS_EXCLUDED(m_mutex)
{
util::Lock l(&m_mutex);
return m_name;
}
void
type(Publication::Type t) LOCKS_EXCLUDED(m_mutex)
{
util::Lock l(&m_mutex);
m_type = t;
}
Publication::Type
type() const LOCKS_EXCLUDED(m_mutex)
{
util::Lock l(&m_mutex);
return m_type;
}
void
format(const std::shared_ptr< Format > &f) LOCKS_EXCLUDED(m_mutex)
{
util::Lock l(&m_mutex);
m_format = f;
}
std::shared_ptr< Format >
format() const LOCKS_EXCLUDED(m_mutex)
{
util::Lock l(&m_mutex);
return m_format;
}
std::string
toString() const;
std::ostream &
print(std::ostream &stream) const;
};
inline std::ostream &
operator<<(std::ostream &stream, const Description &d)
{
return d.print(stream);
}
/// A metric id is what we will actually deal with in terms of metrics, in
/// order to make things like static initialisation cleaner.
class Id
{
const Description *m_description{nullptr};
public:
constexpr Id() = default;
constexpr Id(const Description *description) : m_description(description)
{
}
constexpr const Description *&
description()
{
return m_description;
}
constexpr const Description *const &
description() const
{
return m_description;
}
bool
valid() const noexcept
{
return m_description != nullptr;
}
explicit operator bool() const noexcept
{
return valid();
}
const Category *
category() const
{
assert(valid());
return m_description->category();
}
string_view
categoryName() const
{
assert(valid());
return m_description->category()->name();
}
string_view
metricName() const
{
assert(valid());
return m_description->name();
}
std::string
toString() const
{
if(m_description)
{
return m_description->toString();
}
return "INVALID_METRIC";
}
std::ostream &
print(std::ostream &stream, int, int) const
{
if(m_description)
{
stream << *m_description;
}
else
{
stream << "INVALID_METRIC";
}
return stream;
}
};
inline bool
operator==(const Id &lhs, const Id &rhs)
{
return lhs.description() == rhs.description();
}
inline bool
operator<(const Id &lhs, const Id &rhs)
{
return lhs.description() < rhs.description();
}
inline std::ostream &
operator<<(std::ostream &stream, const Id &id)
{
return id.print(stream, -1, -1);
}
// clang-format off
// Forwarding class to specialise for metric types
template<typename Type>
struct RecordMax {
};
template<>
struct RecordMax<double> {
static constexpr double min() { return std::numeric_limits< double >::infinity(); }
static constexpr double max() { return -std::numeric_limits< double >::infinity(); }
};
template<>
struct RecordMax<int> {
static constexpr int min() { return std::numeric_limits< int >::max(); }
static constexpr int max() { return std::numeric_limits< int >::min(); }
};
// clang-format on
template < typename Type >
class Record
{
size_t m_count{0};
Type m_total;
Type m_min;
Type m_max;
public:
// clang-format off
static constexpr Type DEFAULT_MIN() { return RecordMax<Type>::min(); };
static constexpr Type DEFAULT_MAX() { return RecordMax<Type>::max(); };
// clang-format on
Record() : m_total(0.0), m_min(DEFAULT_MIN()), m_max(DEFAULT_MAX())
{
}
Record(size_t count, double total, double min, double max)
: m_count(count), m_total(total), m_min(min), m_max(max)
{
}
// clang-format off
size_t count() const { return m_count; }
size_t& count() { return m_count; }
Type total() const { return m_total; }
Type& total() { return m_total; }
Type min() const { return m_min; }
Type& min() { return m_min; }
Type max() const { return m_max; }
Type& max() { return m_max; }
// clang-format on
void
clear()
{
m_count = 0;
m_total = 0;
m_min = DEFAULT_MIN();
m_max = DEFAULT_MAX();
}
std::ostream &
print(std::ostream &stream, int level, int spaces) const
{
Printer printer(stream, level, spaces);
printer.printAttribute("count", m_count);
printer.printAttribute("total", m_total);
printer.printAttribute("min", m_min);
printer.printAttribute("max", m_max);
return stream;
}
};
template < typename Type >
inline std::ostream &
operator<<(std::ostream &stream, const Record< Type > &rec)
{
return rec.print(stream, -1, -1);
}
template < typename Type >
inline bool
operator==(const Record< Type > &lhs, const Record< Type > &rhs)
{
return std::make_tuple(lhs.count(), lhs.total(), lhs.min(), lhs.max())
== std::make_tuple(rhs.count(), rhs.total(), rhs.min(), rhs.max());
}
template < typename Type >
inline bool
operator!=(const Record< Type > &lhs, const Record< Type > &rhs)
{
return !(lhs == rhs);
}
using Tag = std::string;
using TagValue = absl::variant< std::string, double, std::int64_t >;
using Tags = std::set< std::pair< Tag, TagValue > >;
template < typename Type >
using TaggedRecordsData = absl::flat_hash_map< Tags, Record< Type > >;
template < typename Type >
struct TaggedRecords
{
Id id;
TaggedRecordsData< Type > data;
explicit TaggedRecords(const Id &_id) : id(_id)
{
}
TaggedRecords(const Id &_id, const TaggedRecordsData< Type > &_data)
: id(_id), data(_data)
{
}
std::ostream &
print(std::ostream &stream, int level, int spaces) const
{
Printer printer(stream, level, spaces);
printer.printAttribute("id", id);
printer.printAttribute("data", data);
return stream;
}
};
template < typename Value >
bool
operator==(const TaggedRecords< Value > &lhs,
const TaggedRecords< Value > &rhs)
{
return std::tie(lhs.id, lhs.data) == std::tie(rhs.id, rhs.data);
}
template < typename Value >
std::ostream &
operator<<(std::ostream &stream, const TaggedRecords< Value > &rec)
{
return rec.print(stream, -1, -1);
}
template < typename Type >
class SampleGroup
{
public:
using RecordType = TaggedRecords< Type >;
using const_iterator =
typename absl::Span< const RecordType >::const_iterator;
private:
absl::Span< const RecordType > m_records;
absl::Duration m_samplePeriod;
public:
SampleGroup() : m_records(), m_samplePeriod()
{
}
SampleGroup(const RecordType *records, size_t size,
absl::Duration samplePeriod)
: m_records(records, size), m_samplePeriod(samplePeriod)
{
}
SampleGroup(const absl::Span< const RecordType > &records,
absl::Duration samplePeriod)
: m_records(records), m_samplePeriod(samplePeriod)
{
}
// clang-format off
void samplePeriod(absl::Duration duration) { m_samplePeriod = duration; }
absl::Duration samplePeriod() const { return m_samplePeriod; }
void records(absl::Span<const RecordType> recs) { m_records = recs; }
absl::Span<const RecordType> records() const { return m_records; }
bool empty() const { return m_records.empty(); }
size_t size() const { return m_records.size(); }
const_iterator begin() const { return m_records.begin(); }
const_iterator end() const { return m_records.end(); }
// clang-format on
std::ostream &
print(std::ostream &stream, int level, int spaces) const
{
Printer::PrintFunction< absl::Duration > durationPrinter =
[](std::ostream &os, const absl::Duration &duration, int,
int) -> std::ostream & {
os << duration;
return os;
};
Printer printer(stream, level, spaces);
printer.printAttribute("records", m_records);
printer.printForeignAttribute("samplePeriod", m_samplePeriod,
durationPrinter);
return stream;
}
};
template < typename Type >
inline std::ostream &
operator<<(std::ostream &stream, const SampleGroup< Type > &group)
{
return group.print(stream, -1, -1);
}
template < typename Type >
inline bool
operator==(const SampleGroup< Type > &lhs, const SampleGroup< Type > &rhs)
{
return lhs.records() == rhs.records()
&& lhs.samplePeriod() == rhs.samplePeriod();
}
class Sample
{
absl::Time m_sampleTime;
std::vector< absl::variant< SampleGroup< double >, SampleGroup< int > > >
m_samples;
size_t m_recordCount{0};
public:
using const_iterator = typename decltype(m_samples)::const_iterator;
Sample() : m_sampleTime()
{
}
// clang-format off
void sampleTime(const absl::Time& time) { m_sampleTime = time; }
absl::Time sampleTime() const { return m_sampleTime; }
template<typename Type>
void pushGroup(const SampleGroup<Type>& group) {
if (!group.empty()) {
m_samples.emplace_back(group);
m_recordCount += group.size();
}
}
template<typename Type>
void pushGroup(const TaggedRecords< Type > *records, size_t size, absl::Duration duration) {
if (size != 0) {
m_samples.emplace_back(SampleGroup<Type>(records, size, duration));
m_recordCount += size;
}
}
template<typename Type>
void pushGroup(const absl::Span< const TaggedRecords< Type > > &records,absl::Duration duration) {
if (!records.empty()) {
m_samples.emplace_back(SampleGroup<Type>(records, duration));
m_recordCount += records.size();
}
}
void clear() {
m_samples.clear();
m_recordCount = 0;
}
const absl::variant<SampleGroup<double>, SampleGroup<int> >& group(size_t index) {
assert(index < m_samples.size());
return m_samples[index];
}
const_iterator begin() const { return m_samples.begin(); }
const_iterator end() const { return m_samples.end(); }
size_t groupCount() const { return m_samples.size(); }
size_t recordCount() const { return m_recordCount; }
// clang-format on
};
inline absl::Duration
samplePeriod(
const absl::variant< SampleGroup< double >, SampleGroup< int > > &group)
{
return absl::visit([](const auto &x) { return x.samplePeriod(); }, group);
}
inline size_t
sampleSize(
const absl::variant< SampleGroup< double >, SampleGroup< int > > &group)
{
return absl::visit([](const auto &x) { return x.size(); }, group);
}
} // namespace metrics
} // namespace llarp
#endif

@ -5,7 +5,6 @@
#include <util/meta/traits.hpp>
#include <util/meta/variant.hpp>
#include <absl/types/variant.h>
#include <functional>
#include <iostream>
#include <cassert>
@ -197,11 +196,6 @@ namespace llarp
printType(std::ostream& stream, const std::tuple< Types... >& value,
int level, int spaces, traits::select::Case<>);
template < typename... Types >
static void
printType(std::ostream& stream, const absl::variant< Types... >& value,
int level, int spaces, traits::select::Case<>);
// Default type
template < typename Type >
static void
@ -493,17 +487,6 @@ namespace llarp
[&](const auto& x) { print.printValue(x); });
}
template < typename... Types >
inline void
PrintHelper::printType(std::ostream& stream,
const absl::variant< Types... >& value, int level,
int spaces, traits::select::Case<>)
{
Printer print(stream, level, spaces);
absl::visit([&](const auto& x) { print.printValue(x); }, value);
}
template < typename Type >
inline void
PrintHelper::printType(std::ostream& stream, const Type& value, int level,

@ -1 +0,0 @@
#include <util/stopwatch.hpp>

@ -1,53 +0,0 @@
#ifndef LLARP_STOPWATCH_HPP
#define LLARP_STOPWATCH_HPP
#include <nonstd/optional.hpp>
#include <absl/time/clock.h>
namespace llarp
{
namespace util
{
class Stopwatch
{
nonstd::optional< absl::Time > m_start;
nonstd::optional< absl::Time > m_stop;
public:
Stopwatch() = default;
void
start()
{
assert(!m_start);
assert(!m_stop);
m_start.emplace(absl::Now());
}
void
stop()
{
assert(m_start);
assert(!m_stop);
m_stop.emplace(absl::Now());
}
bool
done() const
{
return m_start && m_stop;
}
absl::Duration
time() const
{
assert(m_start);
assert(m_stop);
return m_stop.value() - m_start.value();
}
};
} // namespace util
} // namespace llarp
#endif

@ -0,0 +1,61 @@
#pragma once
// Clang thread safety analysis macros. Does nothing under non-clang compilers.
// Enable thread safety attributes only with clang.
// The attributes can be safely erased when compiling with other compilers.
#if defined(__clang__) && (!defined(SWIG))
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define REQUIRES(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
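
These are the standard clang thread-safety macros; to give a feel for how they get used, here is a minimal, hypothetical sketch (the `AnnotatedMutex` and `Counter` types are invented for illustration and are not part of this diff; it assumes the header above is included). Under non-clang compilers the attributes expand to nothing, so annotated code compiles unchanged.

```C++
// Hypothetical usage sketch, not part of this commit.  Clang's
// -Wthread-safety analysis needs the mutex type itself to be marked as a
// capability, so we wrap std::mutex here purely for illustration.
#include <mutex>

class CAPABILITY("mutex") AnnotatedMutex
{
  std::mutex m_impl;

 public:
  void
  lock() ACQUIRE()
  {
    m_impl.lock();
  }
  void
  unlock() RELEASE()
  {
    m_impl.unlock();
  }
};

class Counter
{
  mutable AnnotatedMutex m_mutex;
  int m_count GUARDED_BY(m_mutex) = 0;

 public:
  void
  increment() EXCLUDES(m_mutex)
  {
    m_mutex.lock();
    ++m_count;  // fine: the analysis sees m_mutex held here
    m_mutex.unlock();
    // incrementing m_count here, without the lock, would trigger a warning
  }
};
```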

@ -0,0 +1,46 @@
#pragma once
#include <mutex>
#include <condition_variable>
namespace llarp
{
namespace util
{
/// Barrier class that blocks all threads until the high water mark of
/// threads (set during construction) is reached, then releases them all.
class Barrier
{
std::mutex mutex;
std::condition_variable cv;
unsigned pending;
public:
Barrier(unsigned threads) : pending{threads}
{
}
/// Returns true if *this* call to Block() is the one that releases all of
/// the waiting threads; returns false (i.e. after being unblocked) if some
/// other thread triggered the release.
bool
Block()
{
std::unique_lock< std::mutex > lock{mutex};
if(pending == 1)
{
pending = 0;
lock.unlock();
cv.notify_all();
return true;
}
else if(pending > 1)
{
pending--;
}
cv.wait(lock, [this] { return !pending; });
return false;
}
};
} // namespace util
} // namespace llarp
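
To illustrate the intended use, a hedged sketch (the worker lambda and the thread count are made up for the example; it assumes the barrier header above is included). Exactly one Block() call returns true, which is convenient when a single thread should do some one-off work after everyone has arrived.

```C++
// Hypothetical usage sketch of llarp::util::Barrier, not part of this diff.
// Three workers block in Block() until the last one arrives; exactly one of
// them gets `true` back.
#include <iostream>
#include <thread>
#include <vector>

int
main()
{
  llarp::util::Barrier barrier{3};
  std::vector< std::thread > workers;
  for(unsigned i = 0; i < 3; ++i)
  {
    workers.emplace_back([&barrier, i] {
      // ...per-thread setup would go here...
      if(barrier.Block())  // blocks until all three threads have arrived
        std::cout << "worker " << i << " released the barrier\n";
    });
  }
  for(auto &t : workers)
    t.join();
  return 0;
}
```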

@ -1,7 +1,6 @@
#include <util/thread/logic.hpp>
#include <util/logging/logger.hpp>
#include <util/mem.h>
#include <util/metrics/metrics.hpp>
#include <future>
@ -53,28 +52,10 @@ namespace llarp
Logic::_traceLogicCall(std::function< void(void) > func, const char* tag,
int line)
{
#define TAG (tag ? tag : LOG_TAG)
#define LINE (line ? line : __LINE__)
// wrap the function so that we ensure that it's always calling stuff one at
// a time
#if defined(LOKINET_DEBUG)
#define METRIC(action) \
metrics::integerTick("logic", action, 1, "tag", TAG, "line", \
std::to_string(LINE))
#else
#define METRIC(action) \
do \
{ \
} while(false)
#endif
METRIC("queue");
auto f = [self = this, func, tag, line]() {
#if defined(LOKINET_DEBUG)
metrics::TimerGuard g("logic",
std::string(TAG) + ":" + std::to_string(LINE));
#endif
auto f = [self = this, func]() {
if(self->m_Queue)
{
func();
@ -86,7 +67,6 @@ namespace llarp
};
if(can_flush())
{
METRIC("fired");
f();
return true;
}
@ -97,21 +77,16 @@ namespace llarp
}
if(m_Thread->LooksFull(5))
{
LogErrorExplicit(TAG, LINE,
LogErrorExplicit(tag ? tag : LOG_TAG, line ? line : __LINE__,
"holy crap, we are trying to queue a job "
"onto the logic thread but it looks full");
METRIC("full");
std::abort();
}
auto ret = llarp_threadpool_queue_job(m_Thread, f);
if(not ret)
{
METRIC("dropped");
}
return ret;
#undef TAG
#undef LINE
#undef METRIC
}
void

@ -1,425 +0,0 @@
#include <util/thread/scheduler.hpp>
#include <utility>
namespace llarp
{
namespace thread
{
const Scheduler::Handle Scheduler::INVALID_HANDLE = -1;
void
Scheduler::dispatch()
{
using PendingRepeatItem = TimerQueueItem< RepeatDataPtr >;
std::vector< PendingRepeatItem > pendingRepeats;
while(true)
{
{
util::Lock l(&m_mutex);
if(!m_running.load(std::memory_order_relaxed))
{
return;
}
m_iterationCount++;
size_t newRepeatSize = 0, newEventSize = 0;
absl::Time now = m_clock();
static constexpr size_t MAX_PENDING_REPEAT = 64;
static constexpr size_t MAX_PENDING_EVENTS = 64;
absl::Time minRepeat, minEvent;
m_repeatQueue.popLess(now, MAX_PENDING_REPEAT, &pendingRepeats,
&newRepeatSize, &minRepeat);
m_eventQueue.popLess(now, MAX_PENDING_EVENTS, &m_events,
&newEventSize, &minEvent);
// If there are no pending events to process...
if(pendingRepeats.empty() && m_events.empty())
{
// if there are none in the queue *at all* block until woken
if(newRepeatSize == 0 && newEventSize == 0)
{
m_condition.Wait(&m_mutex);
}
else
{
absl::Time minTime;
if(newRepeatSize == 0)
{
minTime = minEvent;
}
else if(newEventSize == 0)
{
minTime = minRepeat;
}
else
{
minTime = std::min(minRepeat, minEvent);
}
m_condition.WaitWithDeadline(&m_mutex, minTime);
}
continue;
}
}
auto repeatIt = pendingRepeats.begin();
m_eventIt = m_events.begin();
while(repeatIt != pendingRepeats.end() && m_eventIt != m_events.end())
{
auto repeatTime = repeatIt->time();
auto eventTime = m_eventIt->time();
if(repeatTime < eventTime)
{
auto data = repeatIt->value();
if(!data->m_isCancelled)
{
m_dispatcher(data->m_callback);
if(!data->m_isCancelled)
{
data->m_handle =
m_repeatQueue.add(repeatTime + data->m_period, data);
}
}
repeatIt++;
}
else
{
m_eventCount--;
m_dispatcher(m_eventIt->value());
m_eventIt++;
}
}
// We've eaten one of the queues.
while(repeatIt != pendingRepeats.end())
{
auto repeatTime = repeatIt->time();
auto data = repeatIt->value();
if(!data->m_isCancelled)
{
m_dispatcher(data->m_callback);
if(!data->m_isCancelled)
{
data->m_handle =
m_repeatQueue.add(repeatTime + data->m_period, data);
}
}
repeatIt++;
}
while(m_eventIt != m_events.end())
{
m_eventCount--;
m_dispatcher(m_eventIt->value());
m_eventIt++;
}
pendingRepeats.clear();
m_events.clear();
}
}
void
Scheduler::yield()
{
if(m_running.load(std::memory_order_relaxed))
{
if(std::this_thread::get_id() != m_thread.get_id())
{
size_t iterations = m_iterationCount.load(std::memory_order_relaxed);
while(iterations == m_iterationCount.load(std::memory_order_relaxed)
&& m_running.load(std::memory_order_relaxed))
{
m_condition.Signal();
std::this_thread::yield();
}
}
}
}
Scheduler::Scheduler(EventDispatcher dispatcher, Clock clock)
: m_clock(std::move(clock))
, m_dispatcher(std::move(dispatcher))
, m_running(false)
, m_iterationCount(0)
, m_eventIt()
, m_repeatCount(0)
, m_eventCount(0)
{
}
Scheduler::~Scheduler()
{
stop();
}
bool
Scheduler::start()
{
util::Lock threadLock(&m_threadMutex);
util::Lock lock(&m_mutex);
if(m_running.load(std::memory_order_relaxed))
{
return true;
}
m_thread = std::thread(&Scheduler::dispatch, this);
m_running = true;
return true;
}
void
Scheduler::stop()
{
util::Lock threadLock(&m_threadMutex);
// Can't join holding the lock. <_<
{
util::Lock lock(&m_mutex);
if(!m_running.load(std::memory_order_relaxed))
{
return;
}
m_running = false;
m_condition.Signal();
}
m_thread.join();
}
Scheduler::Handle
Scheduler::schedule(absl::Time time,
const std::function< void() >& callback,
const EventKey& key)
{
Handle handle;
{
util::Lock lock(&m_mutex);
bool isAtHead = false;
handle = m_eventQueue.add(time, callback, key, &isAtHead);
if(handle == -1)
{
return INVALID_HANDLE;
}
m_eventCount++;
// If we have an event at the top of the queue, wake the dispatcher.
if(isAtHead)
{
m_condition.Signal();
}
}
return handle;
}
bool
Scheduler::reschedule(Handle handle, absl::Time time, bool wait)
{
bool result = false;
{
util::Lock lock(&m_mutex);
bool isAtHead = false;
result = m_eventQueue.update(handle, time, &isAtHead);
if(isAtHead)
{
m_condition.Signal();
}
}
if(result && wait)
{
yield();
}
return result;
}
bool
Scheduler::reschedule(Handle handle, const EventKey& key, absl::Time time,
bool wait)
{
bool result = false;
{
util::Lock lock(&m_mutex);
bool isAtHead = false;
result = m_eventQueue.update(handle, key, time, &isAtHead);
if(isAtHead)
{
m_condition.Signal();
}
}
if(result && wait)
{
yield();
}
return result;
}
bool
Scheduler::cancel(Handle handle, const EventKey& key, bool wait)
{
if(m_eventQueue.remove(handle, key))
{
m_eventCount--;
return true;
}
// Optimise for the dispatcher thread cancelling a pending event.
// On the dispatch thread, so we don't have to lock.
if(std::this_thread::get_id() == m_thread.get_id())
{
for(auto it = m_events.begin() + m_eventCount; it != m_events.end();
++it)
{
if(it->handle() == handle && it->key() == key)
{
m_eventCount--;
m_events.erase(it);
return true;
}
}
// We didn't find it.
return false;
}
if(handle != INVALID_HANDLE && wait)
{
yield();
}
return false;
}
void
Scheduler::cancelAll(bool wait)
{
std::vector< EventItem > events;
m_eventQueue.removeAll(&events);
m_eventCount -= events.size();
if(wait)
{
yield();
}
}
Scheduler::Handle
Scheduler::scheduleRepeat(absl::Duration interval,
const std::function< void() >& callback,
absl::Time startTime)
{
// Assert that we're not giving an empty duration
assert(interval != absl::Duration());
if(startTime == absl::Time())
{
startTime = interval + m_clock();
}
auto repeatData = std::make_shared< RepeatData >(callback, interval);
{
util::Lock l(&m_mutex);
bool isAtHead = false;
repeatData->m_handle =
m_repeatQueue.add(startTime, repeatData, &isAtHead);
if(repeatData->m_handle == -1)
{
return INVALID_HANDLE;
}
m_repeatCount++;
if(isAtHead)
{
m_condition.Signal();
}
}
return m_repeats.add(repeatData);
}
bool
Scheduler::cancelRepeat(Handle handle, bool wait)
{
RepeatDataPtr data;
if(!m_repeats.remove(handle, &data))
{
return false;
}
m_repeatCount--;
if(!m_repeatQueue.remove(data->m_handle))
{
data->m_isCancelled = true;
if(wait)
{
yield();
}
}
return true;
}
void
Scheduler::cancelAllRepeats(bool wait)
{
std::vector< RepeatDataPtr > repeats;
m_repeats.removeAll(&repeats);
m_repeatCount -= repeats.size();
for(auto& repeat : repeats)
{
repeat->m_isCancelled = true;
}
// if we fail to remove something, we *may* have a pending repeat event in
// the dispatcher
bool somethingFailed = false;
for(auto& repeat : repeats)
{
if(!m_repeatQueue.remove(repeat->m_handle))
{
somethingFailed = true;
}
}
if(wait && somethingFailed)
{
yield();
}
}
} // namespace thread
} // namespace llarp

@ -1,231 +0,0 @@
#ifndef LLARP_SCHEDULER_HPP
#define LLARP_SCHEDULER_HPP
#include <util/meta/object.hpp>
#include <util/thread/timerqueue.hpp>
#include <absl/time/time.h>
#include <atomic>
#include <functional>
#include <thread>
#include <utility>
#include <vector>
namespace llarp
{
namespace thread
{
/// This is a general purpose event scheduler, supporting both one-off and
/// repeated events.
///
/// Notes:
/// - Events will not be started before their scheduled time
/// - Events may start an arbitrary amount of time after they are scheduled,
///   if a previous long-running event is still executing.
class Scheduler
{
public:
using Callback = std::function< void() >;
using Handle = int;
static const Handle INVALID_HANDLE;
// Define our own clock so we can test more easily
using Clock = std::function< absl::Time() >;
private:
/// struct for repeated events
struct RepeatData
{
Callback m_callback;
absl::Duration m_period;
std::atomic_bool m_isCancelled;
Handle m_handle;
RepeatData(Callback callback, absl::Duration period)
: m_callback(std::move(callback))
, m_period(period)
, m_isCancelled(false)
, m_handle(0)
{
}
};
using RepeatDataPtr = std::shared_ptr< RepeatData >;
using RepeatQueue = TimerQueue< RepeatDataPtr >;
// Just for naming purposes.
using Event = Callback;
using EventQueue = TimerQueue< Event >;
using EventItem = TimerQueueItem< Event >;
public:
// Looks more horrible than it is.
using EventDispatcher = std::function< void(const Callback&) >;
using EventKey = EventQueue::Key;
private:
Clock m_clock;
EventQueue m_eventQueue;
RepeatQueue m_repeatQueue;
object::Catalog< RepeatDataPtr > m_repeats;
util::Mutex m_threadMutex ACQUIRED_BEFORE(m_mutex); // protects running
util::Mutex m_mutex ACQUIRED_AFTER(m_threadMutex); // master mutex
absl::CondVar m_condition;
EventDispatcher m_dispatcher;
std::thread m_thread;
std::atomic_bool m_running;
std::atomic_size_t m_iterationCount;
std::vector< EventItem > m_events;
std::vector< EventItem >::iterator m_eventIt;
std::atomic_size_t m_repeatCount;
std::atomic_size_t m_eventCount;
Scheduler(const Scheduler&) = delete;
Scheduler&
operator=(const Scheduler&) = delete;
friend class DispatcherImpl;
friend class Tardis;
/// Dispatch thread function
void
dispatch();
/// Yield to the dispatch thread
void
yield();
public:
/// Return the epoch from which to create `Durations`.
static absl::Time
epoch()
{
return absl::UnixEpoch();
}
static EventDispatcher
defaultDispatcher()
{
return [](const Callback& callback) { callback(); };
}
static Clock
defaultClock()
{
return &absl::Now;
}
Scheduler() : Scheduler(defaultDispatcher(), defaultClock())
{
}
explicit Scheduler(const EventDispatcher& dispatcher)
: Scheduler(dispatcher, defaultClock())
{
}
explicit Scheduler(const Clock& clock)
: Scheduler(defaultDispatcher(), clock)
{
}
Scheduler(EventDispatcher dispatcher, Clock clock);
~Scheduler();
/// Start the scheduler
/// Note that currently this cannot actually fail and return `false`: if
/// thread spawning fails, an exception is thrown instead.
bool
start();
void
stop();
Handle
schedule(absl::Time time, const Callback& callback,
const EventKey& key = EventKey(nullptr));
bool
reschedule(Handle handle, absl::Time time, bool wait = false);
bool
reschedule(Handle handle, const EventKey& key, absl::Time time,
bool wait = false);
bool
cancel(Handle handle, bool wait = false)
{
return cancel(handle, EventKey(nullptr), wait);
}
bool
cancel(Handle handle, const EventKey& key, bool wait = false);
void
cancelAll(bool wait = false);
Handle
scheduleRepeat(absl::Duration interval, const Callback& callback,
absl::Time startTime = absl::Time());
bool
cancelRepeat(Handle handle, bool wait = false);
void
cancelAllRepeats(bool wait = false);
size_t
repeatCount() const
{
return m_repeatCount;
}
size_t
eventCount() const
{
return m_eventCount;
}
};
class Tardis
{
mutable util::Mutex m_mutex;
absl::Time m_time;
Scheduler& m_scheduler;
public:
Tardis(Scheduler& scheduler) : m_time(absl::Now()), m_scheduler(scheduler)
{
m_scheduler.m_clock = std::bind(&Tardis::now, this);
}
void
advanceTime(absl::Duration duration)
{
{
absl::WriterMutexLock l(&m_mutex);
m_time += duration;
}
{
absl::WriterMutexLock l(&m_scheduler.m_mutex);
m_scheduler.m_condition.Signal();
}
}
absl::Time
now() const
{
absl::ReaderMutexLock l(&m_mutex);
return m_time;
}
};
} // namespace thread
} // namespace llarp
#endif

@ -64,22 +64,25 @@ namespace llarp
void
ThreadPool::waitThreads()
{
util::Lock lock(&m_gateMutex);
m_gateMutex.Await(absl::Condition(this, &ThreadPool::allThreadsReady));
std::unique_lock< std::mutex > lock(m_gateMutex);
m_numThreadsCV.wait(lock, [this] { return allThreadsReady(); });
}
void
ThreadPool::releaseThreads()
{
util::Lock lock(&m_gateMutex);
m_numThreadsReady = 0;
++m_gateCount;
{
std::lock_guard< std::mutex > lock(m_gateMutex);
m_numThreadsReady = 0;
++m_gateCount;
}
m_gateCV.notify_all();
}
void
ThreadPool::interrupt()
{
util::Lock lock(&m_gateMutex);
std::lock_guard< std::mutex > lock(m_gateMutex);
size_t count = m_idleThreads;
@ -93,23 +96,19 @@ namespace llarp
ThreadPool::worker()
{
// The temporary lock is held until the end of the full expression
size_t gateCount = (absl::ReaderMutexLock(&m_gateMutex), m_gateCount);
size_t gateCount =
(std::lock_guard< std::mutex >(m_gateMutex), m_gateCount);
util::SetThreadName(m_name);
for(;;)
{
{
util::Lock lock(&m_gateMutex);
std::unique_lock< std::mutex > lock(m_gateMutex);
++m_numThreadsReady;
m_numThreadsCV.notify_one();
using CondArg = std::pair< size_t, ThreadPool* >;
CondArg args(gateCount, this);
m_gateMutex.Await(absl::Condition(
+[](CondArg* x) SHARED_LOCKS_REQUIRED(x->second->m_gateMutex) {
return x->first != x->second->m_gateCount;
},
&args));
m_gateCV.wait(lock, [&] { return gateCount != m_gateCount; });
gateCount = m_gateCount;
}
@ -236,7 +235,7 @@ namespace llarp
void
ThreadPool::drain()
{
util::Lock lock(&m_mutex);
util::Lock lock(m_mutex);
if(m_status.load(std::memory_order_relaxed) == Status::Run)
{
@ -254,7 +253,7 @@ namespace llarp
void
ThreadPool::shutdown()
{
util::Lock lock(&m_mutex);
util::Lock lock(m_mutex);
if(m_status.load(std::memory_order_relaxed) == Status::Run)
{
@ -271,7 +270,7 @@ namespace llarp
bool
ThreadPool::start()
{
util::Lock lock(&m_mutex);
util::Lock lock(m_mutex);
if(m_status.load(std::memory_order_relaxed) != Status::Stop)
{
@ -307,7 +306,7 @@ namespace llarp
void
ThreadPool::stop()
{
util::Lock lock(&m_mutex);
util::Lock lock(m_mutex);
if(m_status.load(std::memory_order_relaxed) == Status::Run)
{

@ -46,7 +46,9 @@ namespace llarp
size_t m_numThreadsReady
GUARDED_BY(m_gateMutex); // Threads ready to go through the gate.
util::Mutex m_gateMutex;
std::mutex m_gateMutex;
std::condition_variable m_gateCV;
std::condition_variable m_numThreadsCV;
std::string m_name;
std::vector< std::thread > m_threads;
@ -77,7 +79,7 @@ namespace llarp
spawn();
bool
allThreadsReady() const SHARED_LOCKS_REQUIRED(m_gateMutex)
allThreadsReady() const REQUIRES_SHARED(m_gateMutex)
{
return m_numThreadsReady == m_threads.size();
}
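The gate logic in the hunks above is the clearest instance of the recurring conversion in this patch: `absl::Mutex::Await(absl::Condition(...))` becomes an explicit predicate wait on a `std::condition_variable`, and the code that mutates the guarded state must now wake the waiters itself. A minimal sketch of that pattern, with the names `gate`, `ready`, `cv`, `waiter` and `signaller` invented purely for illustration (they are not from the patch):

```C++
#include <condition_variable>
#include <mutex>

std::mutex gate;
std::condition_variable cv;
bool ready = false;

void
waiter()
{
  // absl: gate.Await(absl::Condition(&ready));
  std::unique_lock< std::mutex > lock(gate);
  cv.wait(lock, [] { return ready; });  // predicate re-checked on every wakeup
}

void
signaller()
{
  // absl::Mutex re-evaluates Await conditions whenever the mutex is released;
  // a std::condition_variable must be notified explicitly after the guarded
  // state changes -- hence the new notify_one()/notify_all() calls above.
  {
    std::lock_guard< std::mutex > lock(gate);
    ready = true;
  }
  cv.notify_all();
}
```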

@ -1,10 +1,12 @@
#ifndef LLARP_THREADING_HPP
#define LLARP_THREADING_HPP
#include <absl/synchronization/barrier.h>
#include <absl/synchronization/mutex.h>
#include <thread>
#include <shared_mutex>
#include <mutex>
#include <nonstd/optional.hpp>
#include <absl/time/time.h>
#include "annotations.hpp"
#include <iostream>
#include <thread>
@ -20,10 +22,8 @@ using pid_t = int;
#ifdef TRACY_ENABLE
#include "Tracy.hpp"
#define DECLARE_LOCK(type, var, ...) TracyLockable(type, var)
#define ACQUIRE_LOCK(lock, mtx) lock(mtx)
#else
#define DECLARE_LOCK(type, var, ...) type var __VA_ARGS__
#define ACQUIRE_LOCK(lock, mtx) lock(&mtx)
#endif
namespace llarp
@ -39,7 +39,7 @@ namespace llarp
///
/// the idea is to "turn off" the mutexes and see where they are actually
/// needed.
struct LOCKABLE NullMutex
struct CAPABILITY("mutex") NullMutex
{
#ifdef LOKINET_DEBUG
/// in debug mode, we implement lock() to enforce that any lock is only
@ -70,39 +70,81 @@ namespace llarp
{
}
#endif
// Does nothing; once locked the mutex belongs to that thread forever
void
unlock() const
{
}
};
/// a lock that does nothing
struct SCOPED_LOCKABLE NullLock
struct SCOPED_CAPABILITY NullLock
{
NullLock(const NullMutex* mtx) EXCLUSIVE_LOCK_FUNCTION(mtx)
NullLock(NullMutex& mtx) ACQUIRE(mtx)
{
mtx->lock();
mtx.lock();
}
~NullLock() UNLOCK_FUNCTION()
~NullLock() RELEASE()
{
(void)this; // trick clang-tidy
}
};
using Mutex = absl::Mutex;
using Lock = absl::MutexLock;
/// Default mutex type, supporting shared and exclusive locks.
using Mutex = std::shared_timed_mutex;
/// Basic RAII lock type for the default mutex type.
using Lock = std::lock_guard< Mutex >;
/// Returns a unique lock around the given lockable (typically a mutex)
/// which gives exclusive control and is unlockable/relockable. Any extra
/// argument (e.g. std::defer_lock) is forwarded to the unique_lock
/// constructor.
template < typename Mutex, typename... Args >
#ifdef __GNUG__
[[gnu::warn_unused_result]]
#endif
std::unique_lock< Mutex >
unique_lock(Mutex& lockable, Args&&... args)
{
return std::unique_lock< Mutex >(lockable, std::forward< Args >(args)...);
}
/// Returns a shared lock around the given lockable (typically a mutex)
/// which gives "reader" access (i.e. one that can be shared with other reader
/// locks but not unique locks). Any extra argument (e.g. std::defer_lock)
/// is forwarded to the std::shared_lock constructor.
template < typename Mutex, typename... Args >
#ifdef __GNUG__
[[gnu::warn_unused_result]]
#endif
std::shared_lock< Mutex >
shared_lock(Mutex& lockable, Args&&... args)
{
return std::shared_lock< Mutex >(lockable, std::forward< Args >(args)...);
}
using ReleasableLock = absl::ReleasableMutexLock;
using Condition = absl::CondVar;
/// Obtains multiple unique locks simultaneously and atomically. Returns a
/// tuple of all the held locks.
template < typename... Mutex >
#ifdef __GNUG__
[[gnu::warn_unused_result]]
#endif
std::tuple< std::unique_lock< Mutex >... >
unique_locks(Mutex&... lockables)
{
std::lock(lockables...);
return std::make_tuple(
std::unique_lock< Mutex >(lockables, std::adopt_lock)...);
}
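For reference, a hedged usage sketch of the tuple that `unique_locks` returns (the mutex and function names below are invented for illustration, not taken from the patch): the mutexes stay locked for as long as the tuple lives, and each element is an ordinary `std::unique_lock` that can be released early if needed.

```C++
#include <mutex>
#include <tuple>
#include <util/thread/threading.hpp>

std::mutex a;
llarp::util::Mutex b;

void
example()
{
  auto locks = llarp::util::unique_locks(a, b);  // locks a and b atomically
  // ... touch state protected by both mutexes ...
  std::get< 0 >(locks).unlock();  // release a early; b stays held
}  // b is released when `locks` goes out of scope
```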
class Semaphore
{
private:
Mutex m_mutex; // protects m_count
std::mutex m_mutex; // protects m_count
size_t m_count GUARDED_BY(m_mutex);
bool
ready() const SHARED_LOCKS_REQUIRED(m_mutex)
{
return m_count > 0;
}
std::condition_variable m_cv;
public:
Semaphore(size_t count) : m_count(count)
@ -110,39 +152,35 @@ namespace llarp
}
void
notify() LOCKS_EXCLUDED(m_mutex)
notify() EXCLUDES(m_mutex)
{
Lock lock(&m_mutex);
m_count++;
{
std::lock_guard< std::mutex > lock(m_mutex);
m_count++;
}
m_cv.notify_one();
}
void
wait() LOCKS_EXCLUDED(m_mutex)
wait() EXCLUDES(m_mutex)
{
Lock lock(&m_mutex);
m_mutex.Await(absl::Condition(this, &Semaphore::ready));
auto lock = unique_lock(m_mutex);
m_cv.wait(lock, [this] { return m_count > 0; });
m_count--;
}
bool
waitFor(absl::Duration timeout) LOCKS_EXCLUDED(m_mutex)
waitFor(std::chrono::microseconds timeout) EXCLUDES(m_mutex)
{
Lock lock(&m_mutex);
if(!m_mutex.AwaitWithTimeout(absl::Condition(this, &Semaphore::ready),
timeout))
{
auto lock = unique_lock(m_mutex);
if(!m_cv.wait_for(lock, timeout, [this] { return m_count > 0; }))
return false;
}
m_count--;
return true;
}
};
using Barrier = absl::Barrier;
void
SetThreadName(const std::string& name);
@ -163,11 +201,11 @@ namespace llarp
void
TryAccess(F visit) const
#if defined(LOKINET_DEBUG)
LOCKS_EXCLUDED(_access)
EXCLUDES(_access)
#endif
{
#if defined(LOKINET_DEBUG)
NullLock lock(&_access);
NullLock lock(_access);
#endif
visit();
}
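Putting the threading.hpp pieces together, here is a hedged sketch of how `util::Mutex`, the `util::Lock` alias, the `shared_lock` helper and the clang thread-safety annotations are meant to combine; the `NodeCache` type and its members are invented for illustration and are not part of the patch:

```C++
#include <util/thread/threading.hpp>
#include <string>

struct NodeCache
{
  mutable llarp::util::Mutex m_mutex;  // std::shared_timed_mutex underneath
  std::string m_value GUARDED_BY(m_mutex);

  void
  Set(std::string v) EXCLUDES(m_mutex)
  {
    // exclusive ("writer") lock via the lock_guard alias
    llarp::util::Lock lock(m_mutex);
    m_value = std::move(v);
  }

  std::string
  Get() const EXCLUDES(m_mutex)
  {
    // shared ("reader") lock: many readers may hold this at once, but
    // never concurrently with a writer holding util::Lock
    auto lock = llarp::util::shared_lock(m_mutex);
    return m_value;
  }
};
```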

@ -5,9 +5,9 @@
#include <util/thread/queue.hpp>
#include <util/thread/thread_pool.hpp>
#include <util/thread/threading.hpp>
#include <util/thread/annotations.hpp>
#include <util/types.hpp>
#include <absl/base/thread_annotations.h>
#include <memory>
#include <queue>

@ -1 +0,0 @@
#include <util/thread/timerqueue.hpp>

@ -1,750 +0,0 @@
#ifndef LLARP_UTIL_TIMERQUEUE_HPP
#define LLARP_UTIL_TIMERQUEUE_HPP
#include <util/meta/object.hpp>
#include <util/thread/threading.hpp>
#include <atomic>
#include <absl/time/time.h>
#include <nonstd/optional.hpp>
#include <map>
#include <utility>
namespace llarp
{
namespace thread
{
template < typename Value >
class TimerQueueItem;
template < typename Value >
class TimerQueue
{
static constexpr int INDEX_BITS_MIN = 8;
static constexpr int INDEX_BITS_MAX = 24;
static constexpr int INDEX_BITS_DEFAULT = 17;
public:
using Handle = int;
static constexpr Handle INVALID_HANDLE = -1;
class Key
{
const void* m_key;
public:
explicit Key(const void* key) : m_key(key)
{
}
explicit Key(int value) : m_key(reinterpret_cast< const void* >(value))
{
}
bool
operator==(const Key& other) const
{
return m_key == other.m_key;
}
bool
operator!=(const Key& other) const
{
return m_key != other.m_key;
}
};
private:
struct Node
{
int m_index{0};
absl::Time m_time;
Key m_key;
Node* m_prev;
Node* m_next;
object::Buffer< Value > m_value;
Node()
: m_time()
, m_key(nullptr)
, m_prev(nullptr)
, m_next(nullptr)
, m_value()
{
}
explicit Node(const absl::Time& time)
: m_time(time)
, m_key(nullptr)
, m_prev(nullptr)
, m_next(nullptr)
, m_value()
{
}
};
using NodeMap = std::map< absl::Time, Node* >;
using MapIterator = typename NodeMap::iterator;
const int m_indexMask;
const int m_indexIterationMask;
const int m_indexIterationInc;
mutable util::Mutex m_mutex;
std::vector< Node* > m_nodes GUARDED_BY(m_mutex);
std::atomic< Node* > m_nextNode;
NodeMap m_nodeMap GUARDED_BY(m_mutex);
std::atomic_size_t m_size;
void
freeNode(Node* node)
{
node->m_index =
((node->m_index + m_indexIterationInc) & m_indexIterationMask)
| (node->m_index & m_indexMask);
if(!(node->m_index & m_indexIterationMask))
{
node->m_index += m_indexIterationInc;
}
node->m_prev = nullptr;
}
void
putFreeNode(Node* node)
{
// destroy in place
node->m_value.value().~Value();
Node* nextFreeNode = m_nextNode;
node->m_next = nextFreeNode;
while(!m_nextNode.compare_exchange_strong(nextFreeNode, node))
{
nextFreeNode = m_nextNode;
node->m_next = nextFreeNode;
}
}
void
putFreeNodeList(Node* node)
{
if(node)
{
node->m_value.value().~Value();
Node* end = node;
while(end->m_next)
{
end = end->m_next;
end->m_value.value().~Value();
}
Node* nextFreeNode = m_nextNode;
end->m_next = nextFreeNode;
while(!m_nextNode.compare_exchange_strong(nextFreeNode, node))
{
nextFreeNode = m_nextNode;
end->m_next = nextFreeNode;
}
}
}
TimerQueue(const TimerQueue&) = delete;
TimerQueue&
operator=(const TimerQueue&) = delete;
public:
TimerQueue()
: m_indexMask((1 << INDEX_BITS_DEFAULT) - 1)
, m_indexIterationMask(~m_indexMask)
, m_indexIterationInc(m_indexMask + 1)
, m_nextNode(nullptr)
, m_size(0)
{
}
explicit TimerQueue(int indexBits)
: m_indexMask((1 << indexBits) - 1)
, m_indexIterationMask(~m_indexMask)
, m_indexIterationInc(m_indexMask + 1)
, m_nextNode(nullptr)
, m_size(0)
{
assert(INDEX_BITS_MIN <= indexBits && indexBits <= INDEX_BITS_MAX);
}
~TimerQueue()
{
removeAll();
for(Node* node : m_nodes)
{
delete node;
}
}
/// Add a new `value` to the queue, scheduled for `time`. If not null:
/// - set `isAtHead` to true if the new item is at the front of the
///     queue (i.e. the item with the lowest `time` value).
/// - set `newSize` to be the length of the new queue.
Handle
add(absl::Time time, const Value& value, bool* isAtHead = nullptr,
size_t* newSize = nullptr)
{
return add(time, value, Key(nullptr), isAtHead, newSize);
}
Handle
add(absl::Time time, const Value& value, const Key& key,
bool* isAtHead = nullptr, size_t* newSize = nullptr);
Handle
add(const TimerQueueItem< Value >& value, bool* isAtHead = nullptr,
size_t* newSize = nullptr);
/// Pop the front of the queue into `item` (if not null).
bool
popFront(TimerQueueItem< Value >* item = nullptr,
size_t* newSize = nullptr, absl::Time* newMinTime = nullptr);
/// Pop all records scheduled at or before `time`, appending them to `items` (if not null).
void
popLess(absl::Time time,
std::vector< TimerQueueItem< Value > >* items = nullptr,
size_t* newSize = nullptr, absl::Time* newMinTime = nullptr);
void
popLess(absl::Time time, size_t maxItems,
std::vector< TimerQueueItem< Value > >* items = nullptr,
size_t* newSize = nullptr, absl::Time* newMinTime = nullptr);
bool
remove(Handle handle, TimerQueueItem< Value >* item = nullptr,
size_t* newSize = nullptr, absl::Time* newMinTime = nullptr)
{
return remove(handle, Key(nullptr), item, newSize, newMinTime);
}
bool
remove(Handle handle, const Key& key,
TimerQueueItem< Value >* item = nullptr, size_t* newSize = nullptr,
absl::Time* newMinTime = nullptr);
void
removeAll(std::vector< TimerQueueItem< Value > >* items = nullptr);
/// Update the `time` for the item referred to by the handle
bool
update(Handle handle, absl::Time time, bool* isNewTop = nullptr)
{
return update(handle, Key(nullptr), time, isNewTop);
}
bool
update(Handle handle, const Key& key, absl::Time time,
bool* isNewTop = nullptr);
size_t
size() const
{
return m_size;
}
bool
isValid(Handle handle) const
{
return isValid(handle, Key(nullptr));
}
bool
isValid(Handle handle, const Key& key) const
{
absl::ReaderMutexLock lock(&m_mutex);
int index = (handle & m_indexMask) - 1;
if(0 > index || index >= static_cast< int >(m_nodes.size()))
{
return false;
}
Node* node = m_nodes[index];
if(node->m_index != handle || node->m_key != key)
{
return false;
}
return true;
}
nonstd::optional< absl::Time >
nextTime() const
{
absl::ReaderMutexLock lock(&m_mutex);
if(m_nodeMap.empty())
{
return {};
}
return m_nodeMap.begin()->first;
}
};
template < typename Value >
class TimerQueueItem
{
public:
using Handle = typename TimerQueue< Value >::Handle;
using Key = typename TimerQueue< Value >::Key;
private:
absl::Time m_time;
Value m_value;
Handle m_handle;
Key m_key;
public:
TimerQueueItem() : m_time(), m_value(), m_handle(0), m_key(nullptr)
{
}
TimerQueueItem(absl::Time time, const Value& value, Handle handle)
: m_time(time), m_value(value), m_handle(handle), m_key(nullptr)
{
}
TimerQueueItem(absl::Time time, Value value, Handle handle,
const Key& key)
: m_time(time)
, m_value(std::move(value))
, m_handle(handle)
, m_key(key)
{
}
// clang-format off
absl::Time& time() { return m_time; }
absl::Time time() const { return m_time; }
Value& value() { return m_value; }
const Value& value() const { return m_value; }
Handle& handle() { return m_handle; }
Handle handle() const { return m_handle; }
Key& key() { return m_key; }
const Key& key() const { return m_key; }
// clang-format on
};
template < typename Value >
typename TimerQueue< Value >::Handle
TimerQueue< Value >::add(absl::Time time, const Value& value,
const Key& key, bool* isAtHead, size_t* newSize)
{
absl::WriterMutexLock lock(&m_mutex);
Node* node;
if(m_nextNode)
{
// Even though we lock, other threads might be freeing nodes
node = m_nextNode;
Node* next = node->m_next;
while(!m_nextNode.compare_exchange_strong(node, next))
{
node = m_nextNode;
next = node->m_next;
}
}
else
{
// The number of nodes cannot grow to a size larger than the range of
// available indices.
if((int)m_nodes.size() >= m_indexMask - 1)
{
return INVALID_HANDLE;
}
node = new Node;
m_nodes.push_back(node);
node->m_index =
static_cast< int >(m_nodes.size()) | m_indexIterationInc;
}
node->m_time = time;
node->m_key = key;
new(node->m_value.buffer()) Value(value);
{
auto it = m_nodeMap.find(time);
if(m_nodeMap.end() == it)
{
node->m_prev = node;
node->m_next = node;
m_nodeMap[time] = node;
}
else
{
node->m_prev = it->second->m_prev;
it->second->m_prev->m_next = node;
node->m_next = it->second;
it->second->m_prev = node;
}
}
++m_size;
if(isAtHead)
{
*isAtHead = m_nodeMap.begin()->second == node && node->m_prev == node;
}
if(newSize)
{
*newSize = m_size;
}
assert(-1 != node->m_index);
return node->m_index;
}
template < typename Value >
typename TimerQueue< Value >::Handle
TimerQueue< Value >::add(const TimerQueueItem< Value >& value,
bool* isAtHead, size_t* newSize)
{
return add(value.time(), value.value(), value.key(), isAtHead, newSize);
}
template < typename Value >
bool
TimerQueue< Value >::popFront(TimerQueueItem< Value >* item,
size_t* newSize, absl::Time* newMinTime)
{
Node* node = nullptr;
{
absl::WriterMutexLock lock(&m_mutex);
auto it = m_nodeMap.begin();
if(m_nodeMap.end() == it)
{
return false;
}
node = it->second;
if(item)
{
item->time() = node->m_time;
item->value() = node->m_value.value();
item->handle() = node->m_index;
item->key() = node->m_key;
}
if(node->m_next != node)
{
node->m_prev->m_next = node->m_next;
node->m_next->m_prev = node->m_prev;
if(it->second == node)
{
it->second = node->m_next;
}
}
else
{
m_nodeMap.erase(it);
}
freeNode(node);
--m_size;
if(m_size && newMinTime && !m_nodeMap.empty())
{
*newMinTime = m_nodeMap.begin()->first;
}
if(newSize)
{
*newSize = m_size;
}
}
putFreeNode(node);
return true;
}
template < typename Value >
void
TimerQueue< Value >::popLess(absl::Time time,
std::vector< TimerQueueItem< Value > >* items,
size_t* newSize, absl::Time* newMinTime)
{
Node* begin = nullptr;
{
absl::WriterMutexLock lock(&m_mutex);
auto it = m_nodeMap.begin();
while(m_nodeMap.end() != it && it->first <= time)
{
Node* const first = it->second;
Node* const last = first->m_prev;
Node* node = first;
do
{
if(items)
{
items->emplace_back(it->first, node->m_value.value(),
node->m_index, node->m_key);
}
freeNode(node);
node = node->m_next;
--m_size;
} while(node != first);
last->m_next = begin;
begin = first;
auto condemned = it;
++it;
m_nodeMap.erase(condemned);
}
if(newSize)
{
*newSize = m_size;
}
if(m_nodeMap.end() != it && newMinTime)
{
*newMinTime = it->first;
}
}
putFreeNodeList(begin);
}
template < typename Value >
void
TimerQueue< Value >::popLess(absl::Time time, size_t maxItems,
std::vector< TimerQueueItem< Value > >* items,
size_t* newSize, absl::Time* newMinTime)
{
Node* begin = nullptr;
{
absl::WriterMutexLock lock(&m_mutex);
auto it = m_nodeMap.begin();
while(m_nodeMap.end() != it && it->first <= time && 0 < maxItems)
{
Node* const first = it->second;
Node* const last = first->m_prev;
Node* node = first;
Node* prevNode = first->m_prev;
do
{
if(items)
{
items->emplace_back(it->first, node->m_value.value(),
node->m_index, node->m_key);
}
freeNode(node);
prevNode = node;
node = node->m_next;
--m_size;
--maxItems;
} while(0 < maxItems && node != first);
prevNode->m_next = begin;
begin = first;
if(node == first)
{
auto condemned = it;
++it;
m_nodeMap.erase(condemned);
}
else
{
node->m_prev = last;
last->m_next = node;
it->second = node;
break;
}
}
if(newSize)
{
*newSize = m_size;
}
if(m_nodeMap.end() != it && newMinTime)
{
*newMinTime = it->first;
}
}
putFreeNodeList(begin);
}
template < typename Value >
bool
TimerQueue< Value >::remove(Handle handle, const Key& key,
TimerQueueItem< Value >* item, size_t* newSize,
absl::Time* newMinTime)
{
Node* node = nullptr;
{
absl::WriterMutexLock lock(&m_mutex);
int index = (handle & m_indexMask) - 1;
if(index < 0 || index >= (int)m_nodes.size())
{
return false;
}
node = m_nodes[index];
if(node->m_index != (int)handle || node->m_key != key
|| nullptr == node->m_prev)
{
return false;
}
if(item)
{
item->time() = node->m_time;
item->value() = node->m_value.value();
item->handle() = node->m_index;
item->key() = node->m_key;
}
if(node->m_next != node)
{
node->m_prev->m_next = node->m_next;
node->m_next->m_prev = node->m_prev;
auto it = m_nodeMap.find(node->m_time);
if(it->second == node)
{
it->second = node->m_next;
}
}
else
{
m_nodeMap.erase(node->m_time);
}
freeNode(node);
--m_size;
if(newSize)
{
*newSize = m_size;
}
if(m_size && newMinTime)
{
assert(!m_nodeMap.empty());
*newMinTime = m_nodeMap.begin()->first;
}
}
putFreeNode(node);
return true;
}
template < typename Value >
void
TimerQueue< Value >::removeAll(
std::vector< TimerQueueItem< Value > >* items)
{
Node* begin = nullptr;
{
absl::WriterMutexLock lock(&m_mutex);
auto it = m_nodeMap.begin();
while(m_nodeMap.end() != it)
{
Node* const first = it->second;
Node* const last = first->m_prev;
Node* node = first;
do
{
if(items)
{
items->emplace_back(it->first, node->m_value.value(),
node->m_index, node->m_key);
}
freeNode(node);
node = node->m_next;
--m_size;
} while(node != first);
last->m_next = begin;
begin = first;
auto condemned = it;
++it;
m_nodeMap.erase(condemned);
}
}
putFreeNodeList(begin);
}
template < typename Value >
bool
TimerQueue< Value >::update(Handle handle, const Key& key, absl::Time time,
bool* isNewTop)
{
absl::WriterMutexLock lock(&m_mutex);
int index = (handle & m_indexMask) - 1;
if(index < 0 || index >= (int)m_nodes.size())
{
return false;
}
Node* node = m_nodes[index];
if(node->m_index != handle || node->m_key != key)
{
return false;
}
if(node->m_prev != node)
{
node->m_prev->m_next = node->m_next;
node->m_next->m_prev = node->m_prev;
auto it = m_nodeMap.find(node->m_time);
if(it->second == node)
{
it->second = node->m_next;
}
}
else
{
m_nodeMap.erase(node->m_time);
}
node->m_time = time;
auto it = m_nodeMap.find(time);
if(m_nodeMap.end() == it)
{
node->m_prev = node;
node->m_next = node;
m_nodeMap[time] = node;
}
else
{
node->m_prev = it->second->m_prev;
it->second->m_prev->m_next = node;
node->m_next = it->second;
it->second->m_prev = node;
}
if(isNewTop)
{
*isNewTop = m_nodeMap.begin()->second == node && node->m_prev == node;
}
return true;
}
} // namespace thread
} // namespace llarp
#endif

@ -34,10 +34,6 @@ list(APPEND TEST_SRC
util/meta/test_llarp_util_memfn.cpp
util/meta/test_llarp_util_object.cpp
util/meta/test_llarp_util_traits.cpp
util/metrics/test_llarp_metrics_metricktank.cpp
util/metrics/test_llarp_metrics_publisher.cpp
util/metrics/test_llarp_util_metrics_core.cpp
util/metrics/test_llarp_util_metrics_types.cpp
util/test_llarp_util_aligned.cpp
util/test_llarp_util_bencode.cpp
util/test_llarp_util_bits.cpp
@ -49,8 +45,6 @@ list(APPEND TEST_SRC
util/thread/test_llarp_util_queue_manager.cpp
util/thread/test_llarp_util_queue.cpp
util/thread/test_llarp_util_thread_pool.cpp
util/thread/test_llarp_util_timerqueue.cpp
util/thread/test_llarp_utils_scheduler.cpp
)
add_executable(${TEST_EXE}

@ -1,7 +1,5 @@
#include <gtest/gtest.h>
#include <absl/synchronization/mutex.h>
#ifdef _WIN32
#include <winsock2.h>
int
@ -27,7 +25,6 @@ main(int argc, char** argv)
return -1;
#endif
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
::testing::InitGoogleTest(&argc, argv);
int r = RUN_ALL_TESTS();
#ifdef _WIN32

@ -1,4 +1,5 @@
#include <util/meta/object.hpp>
#include <util/thread/barrier.hpp>
#include <array>
#include <thread>

@ -1,21 +0,0 @@
#include <util/metrics/metrictank_publisher.hpp>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
using namespace llarp;
using namespace ::testing;
using Interface = metrics::MetricTankPublisherInterface;
TEST(MetricTank, maketags)
{
Interface::Tags tags;
std::string result = Interface::makeSuffix(tags);
ASSERT_THAT(result, Not(IsEmpty()));
tags["user"] = "Thanos";
result = Interface::makeSuffix(tags);
ASSERT_THAT(result, HasSubstr(";user=Thanos"));
}

@ -1,37 +0,0 @@
#include <util/metrics/stream_publisher.hpp>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
using namespace llarp;
using namespace metrics;
TEST(MetricsPublisher, StreamPublisher)
{
Category myCategory("MyCategory");
Description descA(&myCategory, "MetricA");
Description descB(&myCategory, "MetricB");
Id metricA(&descA);
Id metricB(&descB);
std::stringstream stream;
StreamPublisher myPublisher(stream);
std::vector< TaggedRecords< double > > records;
records.emplace_back(
metricA,
TaggedRecordsData< double >{{{}, Record< double >(5, 25.0, 6.0, 25.0)}});
records.emplace_back(
metricB,
TaggedRecordsData< double >{{{}, Record< double >(2, 7.0, 3.0, 11.0)}});
Sample sample;
sample.sampleTime(absl::Now());
sample.pushGroup(records.data(), records.size(), absl::Seconds(5));
myPublisher.publish(sample);
std::cout << stream.str();
}

@ -1,877 +0,0 @@
#include <util/metrics/core.hpp>
#include <array>
#include <thread>
#include <test_util.hpp>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
using namespace llarp;
using namespace metrics;
using namespace ::testing;
MATCHER(IsValid, "")
{
return arg.valid();
}
static const Category STAT_CAT("A", true);
static const Description desc_A(&STAT_CAT, "A");
static const Description *DESC_A = &desc_A;
static const Description desc_B(&STAT_CAT, "B");
static const Description *DESC_B = &desc_B;
static const Id METRIC_A(DESC_A);
static const Id METRIC_B(DESC_B);
template < typename T >
class CollectorTest : public ::testing::Test
{
};
TYPED_TEST_SUITE_P(CollectorTest);
TYPED_TEST_P(CollectorTest, Collector)
{
TypeParam collector1(METRIC_A);
TypeParam collector2(METRIC_B);
ASSERT_EQ(METRIC_A, collector1.id().description());
ASSERT_EQ(METRIC_B, collector2.id().description());
auto record1 = collector1.load();
ASSERT_EQ(METRIC_A, record1.id.description());
ASSERT_THAT(record1.data, IsEmpty());
auto record2 = collector2.load();
ASSERT_EQ(METRIC_B, record2.id.description());
ASSERT_THAT(record2.data, IsEmpty());
const Tags tags;
collector1.tick(1);
record1 = collector1.load();
ASSERT_EQ(METRIC_A, record1.id.description());
ASSERT_THAT(record1.data, Contains(Key(tags)));
ASSERT_EQ(1, record1.data.at(tags).count());
ASSERT_EQ(1, record1.data.at(tags).total());
ASSERT_EQ(1, record1.data.at(tags).min());
ASSERT_EQ(1, record1.data.at(tags).max());
collector1.tick(2);
record1 = collector1.load();
ASSERT_EQ(METRIC_A, record1.id.description());
ASSERT_THAT(record1.data, Contains(Key(tags)));
ASSERT_EQ(2, record1.data.at(tags).count());
ASSERT_EQ(3, record1.data.at(tags).total());
ASSERT_EQ(1, record1.data.at(tags).min());
ASSERT_EQ(2, record1.data.at(tags).max());
collector1.tick(-5);
record1 = collector1.load();
ASSERT_EQ(METRIC_A, record1.id.description());
ASSERT_THAT(record1.data, Contains(Key(tags)));
ASSERT_EQ(3, record1.data.at(tags).count());
ASSERT_EQ(-2, record1.data.at(tags).total());
ASSERT_EQ(-5, record1.data.at(tags).min());
ASSERT_EQ(2, record1.data.at(tags).max());
collector1.clear();
record1 = collector1.load();
ASSERT_EQ(METRIC_A, record1.id.description());
ASSERT_THAT(record1.data, IsEmpty());
collector1.tick(3);
record1 = collector1.loadAndClear();
ASSERT_EQ(METRIC_A, record1.id.description());
ASSERT_THAT(record1.data, Contains(Key(tags)));
ASSERT_EQ(1, record1.data.at(tags).count());
ASSERT_EQ(3, record1.data.at(tags).total());
ASSERT_EQ(3, record1.data.at(tags).min());
ASSERT_EQ(3, record1.data.at(tags).max());
record1 = collector1.load();
ASSERT_EQ(METRIC_A, record1.id.description());
ASSERT_THAT(record1.data, IsEmpty());
}
REGISTER_TYPED_TEST_SUITE_P(CollectorTest, Collector);
using CollectorTestTypes = ::testing::Types< DoubleCollector, IntCollector >;
INSTANTIATE_TYPED_TEST_SUITE_P(MetricsCore, CollectorTest, CollectorTestTypes);
TEST(MetricsCore, Registry)
{
Registry registry;
Id idA = registry.add("MyCategory", "MetricA");
Id invalidId = registry.add("MyCategory", "MetricA");
ASSERT_THAT(invalidId, Not(IsValid()));
Id idA_copy1 = registry.get("MyCategory", "MetricA");
ASSERT_THAT(idA_copy1, IsValid());
ASSERT_EQ(idA_copy1, idA);
Id idA_copy2 = registry.findId("MyCategory", "MetricA");
ASSERT_THAT(idA_copy2, IsValid());
ASSERT_EQ(idA_copy2, idA);
Id idB = registry.get("MyCategory", "MetricB");
ASSERT_THAT(idB, IsValid());
ASSERT_EQ(idB, registry.get("MyCategory", "MetricB"));
ASSERT_EQ(idB, registry.findId("MyCategory", "MetricB"));
ASSERT_THAT(registry.add("MyCategory", "MetricB"), Not(IsValid()));
const Category *myCategory = registry.get("MyCategory");
ASSERT_EQ(myCategory, idA.category());
ASSERT_EQ(myCategory, idB.category());
ASSERT_TRUE(myCategory->enabled());
registry.enable(myCategory, false);
ASSERT_FALSE(myCategory->enabled());
}
TEST(MetricsCore, RegistryAddr)
{
Registry registry;
const Category *CAT_A = registry.add("A");
const Category *CAT_B = registry.get("B");
Id METRIC_AA = registry.add("A", "A");
Id METRIC_AB = registry.add("A", "B");
Id METRIC_AC = registry.add("A", "C");
Id METRIC_BA = registry.get("B", "A");
Id METRIC_BB = registry.get("B", "B");
Id METRIC_BD = registry.get("B", "D");
const Category *CAT_C = registry.add("C");
const Category *CAT_D = registry.add("D");
Id METRIC_EE = registry.add("E", "E");
Id METRIC_FF = registry.get("F", "F");
ASSERT_EQ(CAT_A->name(), METRIC_AA.metricName());
ASSERT_EQ(CAT_B->name(), METRIC_AB.metricName());
ASSERT_EQ(CAT_A->name(), METRIC_BA.metricName());
ASSERT_EQ(CAT_B->name(), METRIC_BB.metricName());
ASSERT_EQ(CAT_C->name(), METRIC_AC.metricName());
ASSERT_EQ(CAT_D->name(), METRIC_BD.metricName());
ASSERT_EQ(METRIC_EE.metricName(), METRIC_EE.categoryName());
ASSERT_EQ(METRIC_FF.metricName(), METRIC_FF.categoryName());
}
TEST(MetricsCore, RegistryOps)
{
struct
{
const char *d_category;
const char *d_name;
} METRICS[] = {
{
"",
"",
},
{"C0", "M0"},
{"C0", "M1"},
{"C1", "M2"},
{"C3", "M3"},
};
const size_t NUM_METRICS = sizeof METRICS / sizeof *METRICS;
{
std::set< std::string > categoryNames;
Registry registry;
for(size_t i = 0; i < NUM_METRICS; ++i)
{
const char *CATEGORY = METRICS[i].d_category;
const char *NAME = METRICS[i].d_name;
categoryNames.insert(CATEGORY);
// Add a new id and verify the returned properties.
Id id = registry.add(CATEGORY, NAME);
ASSERT_TRUE(id.valid()) << id;
ASSERT_NE(nullptr, id.description());
ASSERT_NE(nullptr, id.category());
ASSERT_EQ(id.metricName(), NAME);
ASSERT_EQ(id.categoryName(), CATEGORY);
ASSERT_TRUE(id.category()->enabled());
// Attempt to find the id.
Id foundId = registry.findId(CATEGORY, NAME);
ASSERT_TRUE(foundId.valid());
ASSERT_EQ(foundId, id);
// Attempt to add the id a second time
Id invalidId = registry.add(CATEGORY, NAME);
ASSERT_FALSE(invalidId.valid());
// Attempt to find the category.
const Category *foundCat = registry.findCategory(CATEGORY);
ASSERT_EQ(id.category(), foundCat);
ASSERT_EQ(nullptr, registry.add(CATEGORY));
ASSERT_EQ(i + 1, registry.metricCount());
ASSERT_EQ(categoryNames.size(), registry.categoryCount());
}
ASSERT_EQ(NUM_METRICS, registry.metricCount());
ASSERT_EQ(categoryNames.size(), registry.categoryCount());
const Category *NEW_CAT = registry.add("NewCategory");
ASSERT_NE(nullptr, NEW_CAT);
ASSERT_EQ("NewCategory", NEW_CAT->name());
ASSERT_TRUE(NEW_CAT->enabled());
}
const char *CATEGORIES[] = {"", "A", "B", "CAT_A", "CAT_B", "name"};
const size_t NUM_CATEGORIES = sizeof CATEGORIES / sizeof *CATEGORIES;
{
Registry registry;
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
const char *CATEGORY = CATEGORIES[i];
const Category *cat = registry.add(CATEGORY);
ASSERT_NE(nullptr, cat);
ASSERT_EQ(cat->name(), CATEGORY);
ASSERT_TRUE(cat->enabled());
ASSERT_EQ(nullptr, registry.add(CATEGORY));
ASSERT_EQ(cat, registry.findCategory(CATEGORY));
Id id = registry.add(CATEGORY, "Metric");
ASSERT_TRUE(id.valid());
ASSERT_EQ(cat, id.category());
ASSERT_EQ(id.categoryName(), CATEGORY);
ASSERT_EQ(id.metricName(), "Metric");
ASSERT_EQ(i + 1, registry.metricCount());
ASSERT_EQ(i + 1, registry.categoryCount());
}
}
}
MATCHER_P6(RecordEq, category, name, count, total, min, max, "")
{
// clang-format off
return (
arg.id.categoryName() == std::string(category) &&
arg.id.metricName() == std::string(name) &&
arg.data.find(Tags()) != arg.data.end() &&
arg.data.at(Tags()).count() == count &&
arg.data.at(Tags()).total() == total &&
arg.data.at(Tags()).min() == min &&
arg.data.at(Tags()).max() == max
);
// clang-format on
}
MATCHER_P5(RecordEq, id, count, total, min, max, "")
{
// clang-format off
return (
arg.id == id &&
arg.data.at(Tags()).count() == count &&
arg.data.at(Tags()).total() == total &&
arg.data.at(Tags()).min() == min &&
arg.data.at(Tags()).max() == max
);
// clang-format on
}
MATCHER_P4(RecordEq, count, total, min, max, "")
{
// clang-format off
return (
arg.data.at(Tags()).count() == count &&
arg.data.at(Tags()).total() == total &&
arg.data.at(Tags()).min() == min &&
arg.data.at(Tags()).max() == max
);
// clang-format on
}
MATCHER_P5(RecordCatEq, category, count, total, min, max, "")
{
// clang-format off
return (
arg.id.categoryName() == std::string(category) &&
arg.data.at(Tags()).count() == count &&
arg.data.at(Tags()).total() == total &&
arg.data.at(Tags()).min() == min &&
arg.data.at(Tags()).max() == max
);
// clang-format on
}
TEST(MetricsCore, RepoBasic)
{
Registry registry;
CollectorRepo< double > repo(&registry);
DoubleCollector *collector1 = repo.defaultCollector("Test", "C1");
DoubleCollector *collector2 = repo.defaultCollector("Test", "C2");
ASSERT_NE(collector1, collector2);
ASSERT_EQ(collector1, repo.defaultCollector("Test", "C1"));
collector1->tick(1.0);
collector1->tick(2.0);
collector2->tick(4.0);
std::vector< TaggedRecords< double > > records =
repo.collectAndClear(registry.get("Test"));
EXPECT_THAT(records, SizeIs(2));
// clang-format off
EXPECT_THAT(
records,
ElementsAre(
RecordEq("Test", "C1", 2u, 3, 1, 2),
RecordEq("Test", "C2", 1u, 4, 4, 4)
)
);
// clang-format on
for(const auto &rec : records)
{
std::cout << rec << std::endl;
}
}
TEST(MetricsCore, RepoCollect)
{
Registry registry;
std::array< const char *, 3 > CATEGORIES = {"A", "B", "C"};
std::array< const char *, 3 > METRICS = {"A", "B", "C"};
const int NUM_COLS = 3;
for(int i = 0; i < static_cast< int >(CATEGORIES.size()); ++i)
{
CollectorRepo< int > repo(&registry);
for(int j = 0; j < static_cast< int >(CATEGORIES.size()); ++j)
{
const char *CATEGORY = CATEGORIES[j];
for(int k = 0; k < static_cast< int >(METRICS.size()); ++k)
{
Id metric = registry.get(CATEGORY, METRICS[k]);
for(int l = 0; l < NUM_COLS; ++l)
{
IntCollector *iCol = repo.addCollector(metric).get();
if(i == j)
{
iCol->set(k, 2 * k, -k, k);
}
else
{
iCol->set(100, 100, 100, 100);
}
}
}
}
// Collect records for the metrics we're testing
{
const char *CATEGORY = CATEGORIES[i];
const Category *category = registry.get(CATEGORY);
std::vector< TaggedRecords< int > > records = repo.collect(category);
ASSERT_THAT(records, SizeIs(METRICS.size()));
// clang-format off
ASSERT_THAT(
records,
UnorderedElementsAre(
RecordEq(CATEGORY, "A", 0u, 0, 0, 0),
RecordEq(CATEGORY, "B", 3u, 6, -1, 1),
RecordEq(CATEGORY, "C", 6u, 12, -2, 2)
)
);
// clang-format on
// Validate initial values.
for(int j = 0; j < static_cast< int >(METRICS.size()); ++j)
{
Id metric = registry.get(CATEGORY, METRICS[j]);
auto collectors = repo.allCollectors(metric);
for(int k = 0; k < static_cast< int >(collectors.size()); ++k)
{
TaggedRecords< int > EI(metric);
EI.data[Tags()] = Record< int >(j, 2 * j, -j, j);
TaggedRecords< int > record = collectors[k]->load();
ASSERT_EQ(record, EI);
}
}
}
// Verify the collectors for other categories haven't changed.
for(int j = 0; j < static_cast< int >(CATEGORIES.size()); ++j)
{
if(i == j)
{
continue;
}
const char *CATEGORY = CATEGORIES[j];
for(int k = 0; k < static_cast< int >(METRICS.size()); ++k)
{
Id metric = registry.get(CATEGORY, METRICS[j]);
auto collectors = repo.allCollectors(metric);
for(int l = 0; l < static_cast< int >(collectors.size()); ++l)
{
TaggedRecords< int > record = collectors[k]->load();
ASSERT_THAT(record, RecordEq(metric, 100u, 100, 100, 100));
}
}
}
}
}
MATCHER_P2(WithinWindow, expectedTime, window, "")
{
auto begin = expectedTime - window;
auto end = expectedTime + window;
return (begin < arg && arg < end);
}
const Category *
firstCategory(
const absl::variant< SampleGroup< double >, SampleGroup< int > > &g)
{
return absl::visit(
[](const auto &group) -> const Category * {
EXPECT_THAT(group, Not(IsEmpty()));
const Category *value = group.begin()->id.category();
for(const auto &record : group.records())
{
EXPECT_EQ(value, record.id.category());
}
return value;
},
g);
}
TEST(MetricsCore, ManagerCollectSample1)
{
const char *CATEGORIES[] = {"A", "B", "C", "Test", "12312category"};
const int NUM_CATEGORIES = sizeof(CATEGORIES) / sizeof(*CATEGORIES);
const char *METRICS[] = {"A", "B", "C", "MyMetric", "90123metric"};
const int NUM_METRICS = sizeof(METRICS) / sizeof(*METRICS);
Manager manager;
CollectorRepo< double > &rep = manager.doubleCollectorRepo();
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
for(int j = 0; j < NUM_METRICS; ++j)
{
rep.defaultCollector(CATEGORIES[i], METRICS[j])->tick(1);
}
}
absl::Time start = absl::Now();
std::this_thread::sleep_for(std::chrono::microseconds(100000));
Records records;
Sample sample = manager.collectSample(records, false);
absl::Duration window = absl::Now() - start;
absl::Time now = absl::Now();
ASSERT_EQ(NUM_CATEGORIES * NUM_METRICS, records.doubleRecords.size());
ASSERT_EQ(NUM_CATEGORIES * NUM_METRICS, sample.recordCount());
ASSERT_EQ(NUM_CATEGORIES, sample.groupCount());
ASSERT_THAT(sample.sampleTime(), WithinWindow(now, absl::Milliseconds(10)));
for(size_t i = 0; i < sample.groupCount(); ++i)
{
const SampleGroup< double > &group =
absl::get< SampleGroup< double > >(sample.group(i));
ASSERT_EQ(NUM_METRICS, group.size());
ASSERT_THAT(group.samplePeriod(),
WithinWindow(window, absl::Milliseconds(10)))
<< group;
string_view name = group.records()[0].id.categoryName();
for(const auto &record : group.records())
{
ASSERT_THAT(record, RecordCatEq(name, 1u, 1, 1, 1));
}
}
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
for(size_t j = 0; j < NUM_METRICS; ++j)
{
DoubleCollector *col = rep.defaultCollector(CATEGORIES[i], METRICS[j]);
auto record = col->load();
ASSERT_THAT(record, RecordEq(1u, 1, 1, 1));
}
}
records.doubleRecords.clear();
records.intRecords.clear();
sample = manager.collectSample(records, true);
ASSERT_EQ(NUM_CATEGORIES * NUM_METRICS, records.doubleRecords.size());
ASSERT_EQ(NUM_CATEGORIES * NUM_METRICS, sample.recordCount());
ASSERT_EQ(NUM_CATEGORIES, sample.groupCount());
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
for(size_t j = 0; j < NUM_METRICS; ++j)
{
DoubleCollector *col = rep.defaultCollector(CATEGORIES[i], METRICS[j]);
auto record = col->load();
ASSERT_EQ(TaggedRecords< double >(record.id), record);
}
}
}
TEST(MetricsCore, ManagerCollectSample2)
{
const char *CATEGORIES[] = {"A", "B", "C", "Test", "12312category"};
const int NUM_CATEGORIES = sizeof(CATEGORIES) / sizeof(*CATEGORIES);
const char *METRICS[] = {"A", "B", "C", "MyMetric", "90123metric"};
const int NUM_METRICS = sizeof(METRICS) / sizeof(*METRICS);
Manager manager;
std::vector< const Category * > allCategories;
CollectorRepo< double > &rep = manager.doubleCollectorRepo();
Registry &reg = manager.registry();
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
const Category *cat = reg.get(CATEGORIES[i]);
ASSERT_NE(nullptr, cat);
allCategories.push_back(cat);
}
test::CombinationIterator< const Category * > combIt{allCategories};
do
{
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
for(size_t j = 0; j < NUM_METRICS; ++j)
{
DoubleCollector *col = rep.defaultCollector(CATEGORIES[i], METRICS[j]);
col->clear();
col->tick(1);
}
}
// Test without a reset.
std::vector< const Category * > cats = combIt.currentCombo;
Records records;
Sample sample = manager.collectSample(
records, absl::Span< const Category * >{cats}, false);
ASSERT_EQ(NUM_METRICS * cats.size(), sample.recordCount());
ASSERT_EQ(cats.size(), sample.groupCount());
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
// Verify the correct categories are in the sample (once)
const Category *CATEGORY = allCategories[i];
bool found = false;
for(size_t j = 0; j < sample.groupCount(); ++j)
{
if(CATEGORY == firstCategory(sample.group(j)))
{
found = true;
}
}
ASSERT_EQ(found, combIt.includesElement(i));
}
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
for(size_t j = 0; j < NUM_METRICS; ++j)
{
DoubleCollector *col = rep.defaultCollector(CATEGORIES[i], METRICS[j]);
TaggedRecords< double > record = col->load();
ASSERT_THAT(record, RecordEq(1u, 1, 1, 1));
}
}
Records records2;
// Test with a reset.
sample = manager.collectSample(records2,
absl::Span< const Category * >{cats}, true);
ASSERT_EQ(NUM_METRICS * cats.size(), sample.recordCount());
ASSERT_EQ(cats.size(), sample.groupCount());
ASSERT_EQ(records, records2);
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
// Verify the correct categories are in the sample
const Category *CATEGORY = allCategories[i];
bool found = false;
for(size_t j = 0; j < sample.groupCount(); ++j)
{
if(CATEGORY == firstCategory(sample.group(j)))
{
found = true;
}
}
ASSERT_EQ(found, combIt.includesElement(i));
}
for(size_t i = 0; i < NUM_CATEGORIES; ++i)
{
for(size_t j = 0; j < NUM_METRICS; ++j)
{
DoubleCollector *col = rep.defaultCollector(CATEGORIES[i], METRICS[j]);
TaggedRecords< double > record = col->load();
if(combIt.includesElement(i))
{
ASSERT_EQ(TaggedRecords< double >(record.id), record);
}
else
{
ASSERT_THAT(record, RecordEq(1u, 1, 1, 1));
}
}
}
} while(combIt.next());
}
struct MockPublisher : public Publisher
{
std::atomic_int invocations;
std::vector< TaggedRecords< double > > recordBuffer;
std::vector< TaggedRecords< double > > sortedRecords;
Sample m_sample;
std::set< absl::Duration > times;
void
publish(const Sample &sample) override
{
invocations++;
m_sample.clear();
recordBuffer.clear();
sortedRecords.clear();
times.clear();
m_sample.sampleTime(sample.sampleTime());
if(sample.recordCount() == 0)
{
return;
}
recordBuffer.reserve(sample.recordCount());
for(const auto &_s : sample)
{
ASSERT_TRUE(absl::holds_alternative< SampleGroup< double > >(_s));
const auto &s = absl::get< SampleGroup< double > >(_s);
auto git = s.begin();
ASSERT_NE(git, s.end());
recordBuffer.push_back(*git);
TaggedRecords< double > *head = &recordBuffer.back();
for(++git; git != s.end(); ++git)
{
recordBuffer.push_back(*git);
}
m_sample.pushGroup(head, s.size(), samplePeriod(s));
times.insert(s.samplePeriod());
}
sortedRecords = recordBuffer;
std::sort(sortedRecords.begin(), sortedRecords.end(),
[](const auto &lhs, const auto &rhs) { return lhs.id < rhs.id; });
}
void
reset()
{
invocations = 0;
m_sample.clear();
recordBuffer.clear();
sortedRecords.clear();
times.clear();
}
int
indexOf(const Id &id)
{
TaggedRecords< double > searchRecord(id);
auto it = std::lower_bound(
sortedRecords.begin(), sortedRecords.end(), searchRecord,
[](const auto &lhs, const auto &rhs) { return lhs.id < rhs.id; });
if(it == sortedRecords.end())
{
return -1;
}
return (it->id == id) ? it - sortedRecords.begin() : -1;
}
bool
contains(const Id &id)
{
return indexOf(id) != -1;
}
};
TEST(MetricsCore, ManagerAddCatPub)
{
const char *CATEGORIES[] = {"A", "B", "C", "Test", "12312category"};
const int NUM_CATEGORIES = sizeof(CATEGORIES) / sizeof(*CATEGORIES);
const int NUM_PUBLISHERS = 4;
std::multimap< const char *, std::shared_ptr< Publisher > > publishers;
Manager manager;
Registry &registry = manager.registry();
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
for(int j = 0; j < NUM_PUBLISHERS; ++j)
{
auto globalPub = std::make_shared< MockPublisher >();
manager.addPublisher(CATEGORIES[i], globalPub);
publishers.emplace(CATEGORIES[i], globalPub);
}
}
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
const char *CATEGORY = CATEGORIES[i];
const Category *CAT = registry.get(CATEGORY);
std::vector< Publisher * > results = manager.publishersForCategory(CAT);
ASSERT_EQ(NUM_PUBLISHERS, results.size());
auto it = publishers.lower_bound(CATEGORY);
for(const auto &pub : results)
{
ASSERT_EQ(pub, it->second.get());
++it;
}
}
}
TEST(MetricsCore, ManagerEnableAll)
{
const char *CATEGORIES[] = {"A", "B", "C", "Test", "12312category"};
const int NUM_CATEGORIES = sizeof(CATEGORIES) / sizeof(*CATEGORIES);
Manager manager;
Registry &registry = manager.registry();
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
const Category *CAT = registry.get(CATEGORIES[i]);
ASSERT_TRUE(CAT->enabled());
manager.enableCategory(CAT, false);
ASSERT_FALSE(CAT->enabled());
manager.enableCategory(CAT, true);
ASSERT_TRUE(CAT->enabled());
manager.enableCategory(CATEGORIES[i], false);
ASSERT_FALSE(CAT->enabled());
manager.enableCategory(CATEGORIES[i], true);
ASSERT_TRUE(CAT->enabled());
}
manager.enableAll(false);
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
ASSERT_FALSE(registry.get(CATEGORIES[i])->enabled());
}
manager.enableAll(true);
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
ASSERT_TRUE(registry.get(CATEGORIES[i])->enabled());
}
}
TEST(MetricsCore, PublishAll)
{
const char *CATEGORIES[] = {"A", "B", "C", "Test", "12312category"};
const int NUM_CATEGORIES = sizeof(CATEGORIES) / sizeof(*CATEGORIES);
const char *METRICS[] = {"A", "B", "C", "MyMetric", "903metric"};
const int NUM_METRICS = sizeof(METRICS) / sizeof(*METRICS);
Manager manager;
Registry &registry = manager.registry();
CollectorRepo< double > &repository = manager.doubleCollectorRepo();
auto globalPub = std::make_shared< MockPublisher >();
manager.addGlobalPublisher(globalPub);
std::vector< const Category * > allCategories;
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
const Category *CAT = registry.get(CATEGORIES[i]);
auto mockPubCat = std::make_shared< MockPublisher >();
manager.addPublisher(CAT, mockPubCat);
allCategories.push_back(CAT);
}
test::CombinationIterator< const Category * > combIt(allCategories);
do
{
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
for(int j = 0; j < NUM_METRICS; ++j)
{
DoubleCollector *col =
repository.defaultCollector(CATEGORIES[i], METRICS[j]);
col->clear();
col->tick(1);
}
}
std::set< const Category * > excludedSet;
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
if(!combIt.includesElement(i))
{
excludedSet.insert(allCategories[i]);
}
}
ASSERT_EQ(allCategories.size(),
excludedSet.size() + combIt.currentCombo.size());
// Publish the records.
absl::Time tmStamp = absl::Now();
manager.publishAllExcluding(excludedSet);
if(combIt.currentCombo.empty())
{
ASSERT_EQ(0, globalPub->invocations.load());
}
else
{
ASSERT_EQ(1, globalPub->invocations.load());
ASSERT_THAT(globalPub->m_sample.sampleTime(),
WithinWindow(tmStamp, absl::Milliseconds(10)));
ASSERT_EQ(combIt.currentCombo.size(), globalPub->m_sample.groupCount());
}
// Verify the correct "specific" publishers have been invoked.
for(int i = 0; i < NUM_CATEGORIES; ++i)
{
for(int j = 0; j < NUM_METRICS; ++j)
{
Id id = registry.get(CATEGORIES[i], METRICS[j]);
ASSERT_EQ(combIt.includesElement(i), globalPub->contains(id));
}
const int EXP_INV = combIt.includesElement(i) ? 1 : 0;
std::vector< Publisher * > pubs =
manager.publishersForCategory(allCategories[i]);
MockPublisher *specPub = (MockPublisher *)pubs.front();
ASSERT_EQ(EXP_INV, specPub->invocations.load());
specPub->reset();
}
globalPub->reset();
} while(combIt.next());
}

@ -1,376 +0,0 @@
#include <util/metrics/types.hpp>
#include <array>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
using namespace llarp;
using namespace metrics;
using namespace ::testing;
using RecordT = metrics::Record< double >;
using TagRecordT = metrics::TaggedRecords< double >;
using SampleGroupT = metrics::SampleGroup< double >;
struct MetricFormatSpecTestData
{
float m_scale;
const char *m_spec;
double m_value;
const char *m_expected;
};
struct MetricFormatSpecTest : public TestWithParam< MetricFormatSpecTestData >
{
};
TEST_P(MetricFormatSpecTest, print)
{
auto d = GetParam();
metrics::FormatSpec spec(d.m_scale, d.m_spec);
std::ostringstream stream;
metrics::FormatSpec::format(stream, d.m_value, spec);
ASSERT_EQ(d.m_expected, stream.str());
}
MetricFormatSpecTestData metricFormatTestData[] = {
MetricFormatSpecTestData{0.0, "", 1.5, ""},
MetricFormatSpecTestData{1.0, "%.4f", 1.5, "1.5000"},
MetricFormatSpecTestData{1.0, "%.0f", 2.0, "2"},
MetricFormatSpecTestData{1.0, "%.0f", 1.1, "1"},
MetricFormatSpecTestData{1.0, "%.0f", 1.5, "2"},
MetricFormatSpecTestData{1.0, "%.0f", 1.7, "2"},
MetricFormatSpecTestData{1.0, "%.0f", 3.0, "3"},
MetricFormatSpecTestData{2.0, "%.0f", 3.0, "6"},
MetricFormatSpecTestData{2.0, "%.1f", 1.1, "2.2"}};
INSTANTIATE_TEST_SUITE_P(MetricsTypes, MetricFormatSpecTest,
ValuesIn(metricFormatTestData));
TEST(MetricsTypes, Format)
{
metrics::Format format;
format.setSpec(metrics::Publication::Type::Max,
metrics::FormatSpec(1.0, "%0.2f"));
format.setSpec(metrics::Publication::Type::Total,
metrics::FormatSpec(2.0, "%0.3f"));
ASSERT_EQ(nullptr, format.specFor(metrics::Publication::Type::Avg));
auto ptr = format.specFor(metrics::Publication::Type::Total);
ASSERT_NE(nullptr, ptr);
ASSERT_EQ("%0.3f", ptr->m_format);
ASSERT_DOUBLE_EQ(2.0, ptr->m_scale);
ptr = format.specFor(metrics::Publication::Type::Max);
ASSERT_NE(nullptr, ptr);
ASSERT_EQ("%0.2f", ptr->m_format);
ASSERT_DOUBLE_EQ(1.0, ptr->m_scale);
format.clear();
ASSERT_EQ(nullptr, format.specFor(metrics::Publication::Type::Total));
ASSERT_EQ(nullptr, format.specFor(metrics::Publication::Type::Max));
}
TEST(MetricsTypes, CatContainer)
{
std::array< metrics::CategoryContainer, 10 > containers;
{
metrics::Category c("A");
for(size_t i = 0; i < containers.size(); ++i)
{
c.registerContainer(&containers[i]);
metrics::CategoryContainer *next = (0 == i) ? 0 : &containers[i - 1];
ASSERT_EQ(&c, containers[i].m_category);
ASSERT_TRUE(containers[i].m_enabled);
ASSERT_EQ(next, containers[i].m_nextCategory);
}
for(size_t i = 0; i < containers.size(); ++i)
{
metrics::CategoryContainer *next = (0 == i) ? 0 : &containers[i - 1];
ASSERT_EQ(&c, containers[i].m_category);
ASSERT_TRUE(containers[i].m_enabled);
ASSERT_EQ(next, containers[i].m_nextCategory);
}
const std::atomic_bool *enabled = &c.enabledRaw();
c.enabled(false);
ASSERT_FALSE(*enabled);
ASSERT_EQ(&c.enabledRaw(), enabled);
for(size_t i = 0; i < containers.size(); ++i)
{
metrics::CategoryContainer *next = (0 == i) ? 0 : &containers[i - 1];
ASSERT_EQ(&c, containers[i].m_category);
ASSERT_FALSE(containers[i].m_enabled);
ASSERT_EQ(next, containers[i].m_nextCategory);
}
c.enabled(true);
ASSERT_TRUE(*enabled);
ASSERT_EQ(&c.enabledRaw(), enabled);
for(size_t i = 0; i < containers.size(); ++i)
{
metrics::CategoryContainer *next = (0 == i) ? 0 : &containers[i - 1];
ASSERT_EQ(&c, containers[i].m_category);
ASSERT_TRUE(containers[i].m_enabled);
ASSERT_EQ(next, containers[i].m_nextCategory);
}
}
for(const auto &container : containers)
{
ASSERT_THAT(container.m_category, IsNull());
ASSERT_FALSE(container.m_enabled);
ASSERT_THAT(container.m_nextCategory, IsNull());
}
}
TEST(MetricsTypes, Record)
{
RecordT r;
ASSERT_GT(r.min(), r.max());
}
TEST(MetricsTypes, Sample)
{
metrics::Category myCategory("MyCategory");
metrics::Description descA(&myCategory, "MetricA");
metrics::Description descB(&myCategory, "MetricB");
metrics::Description descC(&myCategory, "MetricC");
metrics::Id metricA(&descA);
metrics::Id metricB(&descB);
metrics::Id metricC(&descC);
absl::Time timeStamp = absl::Now();
RecordT recordA(0, 0, 0, 0);
RecordT recordB(1, 2, 3, 4);
RecordT recordC(4, 3, 2, 1);
TagRecordT tagRecordA(metricA, {{{}, recordA}});
TagRecordT tagRecordB(metricB, {{{}, recordB}});
TagRecordT tagRecordC(metricC, {{{}, recordC}});
TagRecordT buffer1[] = {tagRecordA, tagRecordB};
std::vector< TagRecordT > buffer2;
buffer2.push_back(tagRecordC);
metrics::Sample sample;
sample.sampleTime(timeStamp);
sample.pushGroup(buffer1, sizeof(buffer1) / sizeof(*buffer1),
absl::Seconds(1.0));
sample.pushGroup(buffer2.data(), buffer2.size(), absl::Seconds(2.0));
ASSERT_EQ(timeStamp, sample.sampleTime());
ASSERT_EQ(2u, sample.groupCount());
ASSERT_EQ(3u, sample.recordCount());
ASSERT_TRUE(absl::holds_alternative< SampleGroupT >(sample.group(0)));
ASSERT_TRUE(absl::holds_alternative< SampleGroupT >(sample.group(1)));
const SampleGroupT s0 = absl::get< SampleGroupT >(sample.group(0));
const SampleGroupT s1 = absl::get< SampleGroupT >(sample.group(1));
ASSERT_EQ(absl::Seconds(1), s0.samplePeriod());
ASSERT_EQ(buffer1, s0.records().data());
ASSERT_EQ(2, s0.size());
ASSERT_EQ(absl::Seconds(2), s1.samplePeriod());
ASSERT_EQ(buffer2.data(), s1.records().data());
ASSERT_EQ(1, s1.size());
for(auto sampleIt = sample.begin(); sampleIt != sample.end(); ++sampleIt)
{
const auto &s = absl::get< SampleGroupT >(*sampleIt);
for(auto groupIt = s.begin(); groupIt != s.end(); ++groupIt)
{
std::cout << *groupIt << std::endl;
}
}
}
struct SampleTest
: public ::testing::TestWithParam< std::pair< absl::Time, std::string > >
{
metrics::Category cat_A;
metrics::Description DESC_A;
metrics::Description DESC_B;
metrics::Description DESC_C;
metrics::Description DESC_D;
metrics::Description DESC_E;
metrics::Description DESC_F;
metrics::Description DESC_G;
metrics::Id id_A;
metrics::Id id_B;
metrics::Id id_C;
metrics::Id id_D;
metrics::Id id_E;
metrics::Id id_F;
metrics::Id id_G;
std::vector< TagRecordT > recordBuffer;
SampleTest()
: cat_A("A", true)
, DESC_A(&cat_A, "A")
, DESC_B(&cat_A, "B")
, DESC_C(&cat_A, "C")
, DESC_D(&cat_A, "D")
, DESC_E(&cat_A, "E")
, DESC_F(&cat_A, "F")
, DESC_G(&cat_A, "G")
, id_A(&DESC_A)
, id_B(&DESC_B)
, id_C(&DESC_C)
, id_D(&DESC_D)
, id_E(&DESC_E)
, id_F(&DESC_F)
, id_G(&DESC_G)
{
recordBuffer.emplace_back(
metrics::Id(0),
TaggedRecordsData< double >{{metrics::Tags(), RecordT(1, 1, 1, 1)}});
recordBuffer.emplace_back(
id_A,
TaggedRecordsData< double >{{metrics::Tags(), RecordT(2, 2, 2, 2)}});
recordBuffer.emplace_back(
id_B,
TaggedRecordsData< double >{{metrics::Tags(), RecordT(3, 3, 3, 3)}});
recordBuffer.emplace_back(
id_C,
TaggedRecordsData< double >{{metrics::Tags(), RecordT(4, 4, 4, 4)}});
recordBuffer.emplace_back(
id_D,
TaggedRecordsData< double >{{metrics::Tags(), RecordT(5, 5, 5, 5)}});
recordBuffer.emplace_back(
id_E,
TaggedRecordsData< double >{{metrics::Tags(), RecordT(6, 6, 6, 6)}});
recordBuffer.emplace_back(
id_F,
TaggedRecordsData< double >{{metrics::Tags(), RecordT(7, 7, 7, 7)}});
recordBuffer.emplace_back(
id_G,
TaggedRecordsData< double >{{metrics::Tags(), RecordT(8, 8, 8, 8)}});
recordBuffer.emplace_back(
id_A,
TaggedRecordsData< double >{{metrics::Tags(), RecordT(9, 9, 9, 9)}});
}
};
std::pair< std::vector< metrics::SampleGroup< double > >, size_t >
generate(const std::string &specification,
const std::vector< TagRecordT > &recordBuffer)
{
const char *c = specification.c_str();
std::vector< metrics::SampleGroup< double > > groups;
size_t size = 0;
const TagRecordT *head = recordBuffer.data();
const TagRecordT *current = head;
while(*c)
{
int numRecords = *(c + 1) - '0';
int elapsedTime = *(c + 3) - '0';
if(head + recordBuffer.size() < current + numRecords)
{
current = head;
}
groups.emplace_back(current, numRecords, absl::Seconds(elapsedTime));
size += numRecords;
current += numRecords;
c += 4;
}
return {groups, size};
}
TEST_P(SampleTest, basics)
{
absl::Time timestamp;
std::string spec;
std::tie(timestamp, spec) = GetParam();
std::vector< metrics::SampleGroup< double > > groups;
size_t size;
std::tie(groups, size) = generate(spec, recordBuffer);
// Create the sample.
metrics::Sample sample;
sample.sampleTime(timestamp);
for(size_t j = 0; j < groups.size(); ++j)
{
sample.pushGroup(groups[j]);
}
// Test the sample.
ASSERT_EQ(timestamp, sample.sampleTime());
ASSERT_EQ(groups.size(), sample.groupCount());
ASSERT_EQ(size, sample.recordCount());
for(size_t j = 0; j < sample.groupCount(); ++j)
{
ASSERT_EQ(groups[j],
absl::get< metrics::SampleGroup< double > >(sample.group(j)));
}
}
TEST_P(SampleTest, append)
{
absl::Time timestamp;
std::string spec;
std::tie(timestamp, spec) = GetParam();
std::vector< metrics::SampleGroup< double > > groups;
size_t size;
std::tie(groups, size) = generate(spec, recordBuffer);
// Create the sample.
metrics::Sample sample;
sample.sampleTime(timestamp);
std::for_each(groups.begin(), groups.end(), [&](const auto &group) {
sample.pushGroup(group.records(), group.samplePeriod());
});
// Test the sample.
ASSERT_EQ(timestamp, sample.sampleTime());
ASSERT_EQ(groups.size(), sample.groupCount());
ASSERT_EQ(size, sample.recordCount());
for(size_t j = 0; j < sample.groupCount(); ++j)
{
ASSERT_EQ(groups[j],
absl::get< metrics::SampleGroup< double > >(sample.group(j)));
}
}
absl::Time
fromYYMMDD(int year, int month, int day)
{
return absl::FromCivil(absl::CivilDay(year, month, day), absl::UTCTimeZone());
}
std::pair< absl::Time, std::string > sampleTestData[] = {
{fromYYMMDD(1900, 1, 1), ""},
{fromYYMMDD(1999, 1, 1), "R1E1"},
{fromYYMMDD(1999, 2, 1), "R2E2"},
{fromYYMMDD(2001, 9, 9), "R1E1R2E2"},
{fromYYMMDD(2001, 9, 9), "R3E3R3E3"},
{fromYYMMDD(2009, 9, 9), "R2E4R1E1"},
{fromYYMMDD(2001, 9, 9), "R1E1R2E2R3E3"},
{fromYYMMDD(2001, 9, 9), "R4E1R3E2R2E3R1E4"},
{fromYYMMDD(2001, 9, 9), "R1E1R2E2R1E1R2E2R1E1R2E1R1E2"}};
INSTANTIATE_TEST_SUITE_P(MetricsTypes, SampleTest,
::testing::ValuesIn(sampleTestData));

@ -1,5 +1,6 @@
#include <util/thread/queue.hpp>
#include <util/thread/threading.hpp>
#include <util/thread/barrier.hpp>
#include <array>
#include <condition_variable>
@ -11,7 +12,9 @@
using namespace llarp;
using namespace llarp::thread;
using LockGuard = absl::MutexLock;
using namespace std::literals;
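// std::unique_lock rather than std::lock_guard so the lock can be handed to condition_variable::wait.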
using LockGuard = std::unique_lock<std::mutex>;
class Element
{
@ -50,7 +53,8 @@ class Args
public:
std::condition_variable startCond;
std::condition_variable runCond;
absl::Mutex mutex;
std::mutex mutex;
std::condition_variable cv;
ObjQueue queue;
@ -80,21 +84,13 @@ class Args
}
};
using CondArgs = std::pair< Args*, size_t >;
bool
waitFunc(CondArgs* a)
{
return a->first->count != a->second;
}
void
popFrontTester(Args& args)
{
{
LockGuard guard(&args.mutex);
LockGuard lock(args.mutex);
args.count++;
args.mutex.Await(absl::Condition(&args, &Args::signal));
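    // std::mutex has no Await(); wait on the condition variable with the same predicate instead.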
args.cv.wait(lock, [&] { return args.signal(); });
}
for(;;)
@ -111,10 +107,9 @@ void
pushBackTester(Args& args)
{
{
LockGuard guard(&args.mutex);
LockGuard lock(args.mutex);
args.count++;
args.mutex.Await(absl::Condition(&args, &Args::signal));
args.cv.wait(lock, [&] { return args.signal(); });
}
for(size_t i = 0; i < args.iterations; ++i)
@ -176,11 +171,9 @@ struct ExceptionTester
std::atomic< std::thread::id > ExceptionTester::throwFrom = {std::thread::id()};
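// sleepNWait takes a std::chrono duration now, so callers can pass literals such as 100us.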
void
sleepNWait(size_t microseconds, util::Barrier& barrier)
sleepNWait(std::chrono::microseconds microseconds, util::Barrier& barrier)
{
std::this_thread::sleep_for(
std::chrono::duration< double, std::micro >(microseconds));
std::this_thread::sleep_for(microseconds);
barrier.Block();
}
@ -285,14 +278,12 @@ TEST(TestQueue, singleProducerManyConsumer)
Args args{iterations};
{
LockGuard lock(&args.mutex);
LockGuard lock(args.mutex);
for(size_t i = 0; i < threads.size(); ++i)
{
threads[i] = std::thread(std::bind(&popFrontTester, std::ref(args)));
CondArgs cArgs(&args, i + 1);
args.mutex.Await(absl::Condition(&waitFunc, &cArgs));
      args.cv.wait(lock, [&] { return args.count != i + 1; });
}
args.runSignal++;
@ -328,23 +319,19 @@ TEST(TestQueue, manyProducerManyConsumer)
Args args{iterations};
{
LockGuard lock(&args.mutex);
LockGuard lock(args.mutex);
for(size_t i = 0; i < numThreads; ++i)
{
threads[i] = std::thread(std::bind(&popFrontTester, std::ref(args)));
CondArgs cArgs(&args, i + 1);
args.mutex.Await(absl::Condition(+waitFunc, &cArgs));
      args.cv.wait(lock, [&] { return args.count != i + 1; });
}
for(size_t i = 0; i < numThreads; ++i)
{
threads[i + numThreads] =
std::thread(std::bind(&pushBackTester, std::ref(args)));
CondArgs cArgs(&args, numThreads + i + 1);
args.mutex.Await(absl::Condition(+waitFunc, &cArgs));
      args.cv.wait(lock, [&] { return args.count != numThreads + i + 1; });
}
args.runSignal++;
@ -386,12 +373,13 @@ TEST(TestQueue, ABAEmpty)
nextValue[j] = block + (numValues * j);
lastValue[j] = block + (numValues * (j + 1)) - 1;
threads[j] = std::thread(std::bind(&abaThread, nextValue[j], lastValue[j],
std::ref(queue), std::ref(barrier)));
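    // Capture the per-thread value pointers by value; the queue and barrier by reference.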
    threads[j] = std::thread([&, n = nextValue[j], l = lastValue[j]] {
      abaThread(n, l, queue, barrier);
    });
}
threads[numThreads] =
std::thread(std::bind(&sleepNWait, 100, std::ref(barrier)));
threads[numThreads] = std::thread([&] {
std::this_thread::sleep_for(100us);
barrier.Block();
});
for(size_t j = 0; j < numEntries; ++j)
{
@ -448,12 +436,13 @@ TEST(TestQueue, generationCount)
nextValue[j] = block + (numValues * j);
lastValue[j] = block + (numValues * (j + 1)) - 1;
threads[j] = std::thread(std::bind(&abaThread, nextValue[j], lastValue[j],
std::ref(queue), std::ref(barrier)));
    threads[j] = std::thread([&, n = nextValue[j], l = lastValue[j]] {
      abaThread(n, l, queue, barrier);
    });
}
threads[numThreads] =
std::thread(std::bind(&sleepNWait, 100, std::ref(barrier)));
threads[numThreads] = std::thread([&] {
std::this_thread::sleep_for(100ms);
barrier.Block();
});
for(size_t j = 0; j < numEntries; ++j)
{
@ -516,15 +505,16 @@ TEST(TestQueue, exceptionSafety)
ASSERT_THROW({ (void)queue.popFront(); }, Exception);
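  // waitFor() takes a std::chrono duration now; the 1s literal needs std::literals in scope.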
using namespace std::literals;
// Now the queue is not full, and the producer thread can start adding items.
ASSERT_TRUE(semaphore.waitFor(absl::Seconds(1)));
ASSERT_TRUE(semaphore.waitFor(1s));
ASSERT_EQ(queueSize, queue.size());
ASSERT_THROW({ (void)queue.popFront(); }, Exception);
// Now the queue is not full, and the producer thread can start adding items.
ASSERT_TRUE(semaphore.waitFor(absl::Seconds(1)));
ASSERT_TRUE(semaphore.waitFor(1s));
ASSERT_EQ(queueSize, queue.size());
@ -534,7 +524,7 @@ TEST(TestQueue, exceptionSafety)
// pop an item to unblock the pusher
(void)queue.popFront();
ASSERT_TRUE(semaphore.waitFor(absl::Seconds(1)));
ASSERT_TRUE(semaphore.waitFor(1s));
ASSERT_EQ(1u, caught);

@ -1,5 +1,6 @@
#include <util/thread/thread_pool.hpp>
#include <util/thread/threading.hpp>
#include <util/thread/barrier.hpp>
#include <condition_variable>
#include <mutex>

@ -1,339 +0,0 @@
#include <util/thread/timerqueue.hpp>
#include <thread>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
using CharQueue = llarp::thread::TimerQueue< const char* >;
using CharItem = llarp::thread::TimerQueueItem< const char* >;
TEST(TimerQueue, smoke)
{
CharQueue queue;
const absl::Time TA = absl::Time();
const absl::Time TB = TA + absl::Seconds(1);
const absl::Time TC = TB + absl::Seconds(1);
const absl::Time TD = TC + absl::Seconds(1);
const absl::Time TE = TD + absl::Seconds(1);
const char* VA = "hello";
const char* VB = "world,";
const char* VC = "how";
const char* VD = "are";
const char* VE = "you";
int HA = queue.add(TA, VA);
int HB = queue.add(TB, VB);
int HC = queue.add(TC, VC);
int HD = queue.add(TD, VD);
int HE = queue.add(TE, VE);
CharItem tItem;
absl::Time newMinTime;
size_t newSize;
ASSERT_TRUE(queue.popFront(&tItem));
ASSERT_EQ(VA, tItem.value());
ASSERT_EQ(TA, tItem.time());
ASSERT_EQ(HA, tItem.handle());
ASSERT_TRUE(queue.popFront(&tItem, &newSize, &newMinTime));
ASSERT_EQ(3, newSize);
ASSERT_EQ(TC, newMinTime);
ASSERT_EQ(TB, tItem.time());
ASSERT_EQ(VB, tItem.value());
ASSERT_EQ(HB, tItem.handle());
std::vector< CharItem > a1;
queue.popLess(TD, &a1, &newSize, &newMinTime);
ASSERT_EQ(2, a1.size());
ASSERT_EQ(1, newSize);
ASSERT_EQ(TE, newMinTime);
ASSERT_EQ(TC, a1[0].time());
ASSERT_EQ(VC, a1[0].value());
ASSERT_EQ(HC, a1[0].handle());
ASSERT_EQ(TD, a1[1].time());
ASSERT_EQ(VD, a1[1].value());
ASSERT_EQ(HD, a1[1].handle());
std::vector< CharItem > a2;
queue.popLess(TD, &a2, &newSize, &newMinTime);
ASSERT_EQ(0, a2.size());
ASSERT_EQ(1, newSize);
ASSERT_EQ(TE, newMinTime);
std::vector< CharItem > a3;
queue.popLess(TE, &a3, &newSize, &newMinTime);
ASSERT_EQ(1, a3.size());
ASSERT_EQ(0, newSize);
ASSERT_EQ(TE, a3[0].time());
ASSERT_EQ(VE, a3[0].value());
ASSERT_EQ(HE, a3[0].handle());
}
TEST(TimerQueue, KeySmoke)
{
CharQueue x1;
const absl::Time TA = absl::Time();
const absl::Time TB = TA + absl::Seconds(1);
const absl::Time TC = TB + absl::Seconds(1);
const absl::Time TD = TC + absl::Seconds(1);
const absl::Time TE = TD + absl::Seconds(1);
const char* VA = "hello";
const char* VB = "world,";
const char* VC = "how";
const char* VD = "are";
const char* VE = "you";
typedef CharQueue::Key Key;
const Key KA = Key(&TA);
const Key KB = Key(&TB);
const Key KC = Key(382);
const Key KD = Key(123);
const Key KE = Key(&VE);
int HA = x1.add(TA, VA, KA);
int HB = x1.add(TB, VB, KB);
int HC = x1.add(TC, VC, KC);
int HD = x1.add(TD, VD, KD);
int HE = x1.add(TE, VE, KE);
ASSERT_FALSE(x1.remove(HA, KB));
ASSERT_TRUE(x1.isValid(HA, KA));
ASSERT_FALSE(x1.update(HC, KD, TE));
CharItem tItem;
absl::Time newMinTime;
size_t newSize;
ASSERT_TRUE(x1.popFront(&tItem));
ASSERT_EQ(VA, tItem.value());
ASSERT_EQ(TA, tItem.time());
ASSERT_EQ(HA, tItem.handle());
ASSERT_EQ(KA, tItem.key());
ASSERT_TRUE(x1.popFront(&tItem, &newSize, &newMinTime));
ASSERT_EQ(3, newSize);
ASSERT_EQ(TC, newMinTime);
ASSERT_EQ(TB, tItem.time());
ASSERT_EQ(VB, tItem.value());
ASSERT_EQ(HB, tItem.handle());
ASSERT_EQ(KB, tItem.key());
std::vector< CharItem > a1;
x1.popLess(TD, &a1, &newSize, &newMinTime);
ASSERT_EQ(2, a1.size());
ASSERT_EQ(1, newSize);
ASSERT_EQ(TE, newMinTime);
ASSERT_EQ(TC, a1[0].time());
ASSERT_EQ(VC, a1[0].value());
ASSERT_EQ(HC, a1[0].handle());
ASSERT_EQ(KC, a1[0].key());
ASSERT_EQ(TD, a1[1].time());
ASSERT_EQ(VD, a1[1].value());
ASSERT_EQ(HD, a1[1].handle());
ASSERT_EQ(KD, a1[1].key());
std::vector< CharItem > a2;
x1.popLess(TD, &a2, &newSize, &newMinTime);
ASSERT_EQ(0, a2.size());
ASSERT_EQ(1, newSize);
ASSERT_EQ(TE, newMinTime);
std::vector< CharItem > a3;
x1.popLess(TE, &a3, &newSize, &newMinTime);
ASSERT_EQ(1, a3.size());
ASSERT_EQ(0, newSize);
ASSERT_EQ(TE, a3[0].time());
ASSERT_EQ(VE, a3[0].value());
ASSERT_EQ(HE, a3[0].handle());
ASSERT_EQ(KE, a3[0].key());
}
TEST(TimerQueue, Update)
{
const char VA[] = "A";
const char VB[] = "B";
const char VC[] = "C";
const char VD[] = "D";
const char VE[] = "E";
// clang-format off
static const struct
{
int m_secs;
int m_nsecs;
const char* m_value;
int m_updsecs;
int m_updnsecs;
bool m_isNewTop;
} VALUES[] = {
{2, 1000000, VA, 0, 1000000, false},
{2, 1000000, VB, 3, 1000000, false},
{2, 1000000, VC, 0, 4000, false},
{2, 1000001, VB, 0, 3999, true},
{1, 9999998, VC, 4, 9999998, false},
{1, 9999999, VD, 0, 0, true},
{0, 4000, VE, 10, 4000, false}};
// clang-format on
static const int POP_ORDER[] = {5, 3, 2, 0, 1, 4, 6};
const int NUM_VALUES = sizeof VALUES / sizeof *VALUES;
int handles[NUM_VALUES];
CharQueue queue;
{
CharItem item;
ASSERT_FALSE(queue.popFront(&item));
}
for(int i = 0; i < NUM_VALUES; ++i)
{
const char* VAL = VALUES[i].m_value;
const int SECS = VALUES[i].m_secs;
const int NSECS = VALUES[i].m_nsecs;
absl::Time TIME =
absl::Time() + absl::Seconds(SECS) + absl::Nanoseconds(NSECS);
handles[i] = queue.add(TIME, VAL);
ASSERT_EQ(i + 1, queue.size());
ASSERT_TRUE(queue.isValid(handles[i]));
}
for(int i = 0; i < NUM_VALUES; ++i)
{
const int UPDSECS = VALUES[i].m_updsecs;
const bool EXPNEWTOP = VALUES[i].m_isNewTop;
const int UPDNSECS = VALUES[i].m_updnsecs;
absl::Time UPDTIME =
absl::Time() + absl::Seconds(UPDSECS) + absl::Nanoseconds(UPDNSECS);
bool isNewTop;
CharItem item;
ASSERT_TRUE(queue.isValid(handles[i])) << i;
ASSERT_TRUE(queue.update(handles[i], UPDTIME, &isNewTop)) << i;
EXPECT_EQ(EXPNEWTOP, isNewTop) << i;
ASSERT_TRUE(queue.isValid(handles[i])) << i;
}
for(int i = 0; i < NUM_VALUES; ++i)
{
const int I = POP_ORDER[i];
const char* EXPVAL = VALUES[I].m_value;
const int EXPSECS = VALUES[I].m_updsecs;
const int EXPNSECS = VALUES[I].m_updnsecs;
absl::Time EXPTIME =
absl::Time() + absl::Seconds(EXPSECS) + absl::Nanoseconds(EXPNSECS);
CharItem item;
ASSERT_TRUE(queue.isValid(handles[I]));
ASSERT_TRUE(queue.popFront(&item));
ASSERT_EQ(EXPTIME, item.time());
ASSERT_EQ(EXPVAL, item.value());
ASSERT_FALSE(queue.isValid(handles[I]));
}
}
TEST(TimerQueue, ThreadSafety)
{
using Data = std::string;
using StringQueue = llarp::thread::TimerQueue< std::string >;
using StringItem = llarp::thread::TimerQueueItem< std::string >;
using Info = std::pair< int, std::vector< StringItem >* >;
static constexpr size_t NUM_THREADS = 10;
static constexpr size_t NUM_ITERATIONS = 1000;
static constexpr size_t NUM_REMOVE_ALL = NUM_ITERATIONS / 2;
Info info[NUM_THREADS];
std::thread threads[NUM_THREADS + 1];
std::vector< StringItem > items[NUM_THREADS];
absl::Barrier barrier(NUM_THREADS + 1);
StringQueue queue;
for(size_t i = 0; i < NUM_THREADS; ++i)
{
info[i].first = i;
info[i].second = &items[i];
threads[i] = std::thread(
[](Info* nfo, absl::Barrier* b, StringQueue* q) {
const int THREAD_ID = nfo->first;
std::vector< StringItem >* vPtr = nfo->second;
// We stagger the removeAll steps among the threads.
const unsigned int STEP_REMOVE_ALL =
THREAD_ID * NUM_REMOVE_ALL / NUM_THREADS;
std::ostringstream oss;
oss << THREAD_ID;
Data V(oss.str());
b->Block();
size_t newSize;
absl::Time newMinTime;
StringItem item;
for(size_t j = 0; j < NUM_ITERATIONS; ++j)
{
const absl::Time TIME =
absl::Time() + absl::Seconds((j * (j + 3)) % NUM_ITERATIONS);
int h = q->add(TIME, V);
q->update(h, TIME);
if(q->popFront(&item, &newSize, &newMinTime))
{
vPtr->push_back(item);
}
h = q->add(newMinTime, V);
q->popLess(newMinTime, vPtr);
if(q->remove(h, &item, &newSize, &newMinTime))
{
vPtr->push_back(item);
}
if(j % NUM_REMOVE_ALL == STEP_REMOVE_ALL)
{
q->removeAll(vPtr);
}
}
},
&info[i], &barrier, &queue);
}
threads[NUM_THREADS] = std::thread(
[](absl::Barrier* b, StringQueue* q) {
b->Block();
for(size_t i = 0; i < NUM_ITERATIONS; ++i)
{
size_t size = q->size();
ASSERT_GE(size, 0);
ASSERT_LE(size, NUM_THREADS);
}
},
&barrier, &queue);
size_t size = 0;
for(size_t i = 0; i < NUM_THREADS; ++i)
{
threads[i].join();
size += static_cast< int >(items[i].size());
}
threads[NUM_THREADS].join();
ASSERT_EQ(0, queue.size());
ASSERT_EQ(1000 * NUM_THREADS * 2, size);
}

@ -1,194 +0,0 @@
#include <util/thread/scheduler.hpp>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
using namespace llarp;
using thread::Scheduler;
using thread::Tardis;
TEST(SchedulerTest, smoke)
{
Scheduler scheduler;
ASSERT_TRUE(scheduler.start());
scheduler.stop();
}
struct TestCallback
{
std::atomic_size_t m_startCount;
std::atomic_size_t m_execCount;
absl::Duration executeTime;
TestCallback() : m_startCount(0), m_execCount(0), executeTime()
{
}
void
callback()
{
m_startCount++;
if(executeTime != absl::Duration())
{
std::this_thread::sleep_for(absl::ToChronoSeconds(executeTime));
}
m_execCount++;
}
void
waitFor(absl::Duration duration, size_t attemptCount,
size_t executeCount) const
{
for(size_t i = 0; i < attemptCount; ++i)
{
if(executeCount + 1 <= m_execCount)
{
return;
}
std::this_thread::sleep_until(absl::ToChronoTime(absl::Now() + duration));
std::this_thread::yield();
}
}
};
TEST(SchedulerTest, fakeTime)
{
// Just test we can mock out Time itself
Scheduler scheduler;
Tardis time{scheduler};
absl::Time now = time.now();
TestCallback callback1, callback2;
Scheduler::Handle handle = scheduler.schedule(
now + absl::Seconds(30), std::bind(&TestCallback::callback, &callback1));
ASSERT_NE(Scheduler::INVALID_HANDLE, handle);
handle = scheduler.scheduleRepeat(
absl::Seconds(60), std::bind(&TestCallback::callback, &callback2));
ASSERT_NE(Scheduler::INVALID_HANDLE, handle);
scheduler.start();
time.advanceTime(absl::Seconds(35));
ASSERT_EQ(time.now(), now + absl::Seconds(35));
callback1.waitFor(absl::Milliseconds(10), 100, 0);
ASSERT_EQ(1u, callback1.m_execCount);
ASSERT_EQ(0u, callback2.m_execCount);
// jump forward another 30 seconds, the repeat event should kick off
time.advanceTime(absl::Seconds(30));
ASSERT_EQ(time.now(), now + absl::Seconds(65));
callback2.waitFor(absl::Milliseconds(10), 100, 0);
ASSERT_EQ(1u, callback1.m_execCount);
ASSERT_EQ(1u, callback2.m_execCount);
// jump forward another minute, the repeat event should have run again
time.advanceTime(absl::Seconds(60));
callback2.waitFor(absl::Milliseconds(10), 100, 1);
ASSERT_EQ(1u, callback1.m_execCount);
ASSERT_EQ(2u, callback2.m_execCount);
scheduler.stop();
}
TEST(SchedulerTest, func1)
{
Scheduler scheduler;
scheduler.start();
TestCallback callback1, callback2;
absl::Time now = absl::Now();
scheduler.scheduleRepeat(absl::Milliseconds(30),
std::bind(&TestCallback::callback, &callback1));
scheduler.schedule(now + absl::Milliseconds(60),
std::bind(&TestCallback::callback, &callback2));
std::this_thread::yield();
std::this_thread::sleep_for(absl::ToChronoSeconds(absl::Milliseconds(40)));
callback1.waitFor(absl::Milliseconds(10), 100, 0);
scheduler.stop();
absl::Duration elapsed = absl::Now() - now;
size_t count1 = callback1.m_execCount;
size_t count2 = callback2.m_execCount;
if(elapsed < absl::Milliseconds(60))
{
ASSERT_EQ(1u, count1);
ASSERT_EQ(0u, count2);
}
else
{
ASSERT_LE(1u, count1);
}
callback1.waitFor(absl::Milliseconds(10), 100, 0);
size_t count = callback1.m_execCount;
ASSERT_EQ(count1, count);
count = callback2.m_execCount;
ASSERT_EQ(count2, count);
if(count2 == 0)
{
// callback2 not executed
scheduler.start();
std::this_thread::yield();
std::this_thread::sleep_for(absl::ToChronoSeconds(absl::Milliseconds(40)));
callback2.waitFor(absl::Milliseconds(10), 100, count2);
count = callback2.m_execCount;
ASSERT_LE(count2 + 1, count);
}
else
{
ASSERT_LT(absl::Milliseconds(60), elapsed);
}
}
TEST(SchedulerTest, cancelAllRepeats)
{
Scheduler scheduler;
scheduler.start();
TestCallback callback1, callback2;
const Scheduler::Handle handle1 = scheduler.scheduleRepeat(
absl::Milliseconds(30), std::bind(&TestCallback::callback, &callback1));
const Scheduler::Handle handle2 = scheduler.scheduleRepeat(
absl::Milliseconds(30), std::bind(&TestCallback::callback, &callback2));
scheduler.cancelAllRepeats();
ASSERT_FALSE(scheduler.cancelRepeat(handle1));
ASSERT_FALSE(scheduler.cancelRepeat(handle2));
const size_t count1 = callback1.m_execCount;
const size_t count2 = callback2.m_execCount;
std::this_thread::yield();
std::this_thread::sleep_for(absl::ToChronoSeconds(absl::Milliseconds(100)));
size_t count = callback1.m_execCount;
ASSERT_EQ(count1, count);
count = callback2.m_execCount;
ASSERT_EQ(count2, count);
scheduler.stop();
}