more docs

more logging
pull/1/head
Jeff Becker 6 years ago
parent d8c42ff244
commit a00a0622d1
No known key found for this signature in database
GPG Key ID: F357B3B42F6F9B05

@ -6,16 +6,32 @@ H(x) is 512 bit blake2b digest of x
HS(x) is 256 bit blake2b digest of x
MD(x, k) is 512 bit blake2b hmac of x with secret value k
MDS(x, k) is 256 bit blake2b hmac of x with secret value k
NE(k, x) is sntrup4591761 encrypt data x to public key k
ND(k, x) is sntrup4591761 decrypt data x with private key k
SE(k, n, x) is chacha20 encrypt data x using symmetric key k and nonce n
SD(k, n, x) is chacha20 decrypt data x using symmetric key k and nonce n
S(k, x) is sign x with ed25519 using seed k
ECKG() is generate ec keypair (p, s) public key p, seed s, both 32 bytes
S(k, x) is sign x with ed25519 using secret key k
EDKG() is generate ec keypair (p, s) public key p (32 bytes), secret key s (643 bytes)
V(k, x, sig) is verify x data using signature sig using public key k
DH(x, y) is a ecdh key exchange using ed25519 scalarmult between public keys x
and y
KE(x, y) is a ecdh key exchange using HS(x + y + DH(x, y))
PKE(x, y, n) is a path key exchange using MDS(n, KE(x, y))
TKE(x, y, n) is a transport key exchange using MDS(n, KE(x, y))
EDDH(a, b) is curve25519 scalar multiplication of a and b
HKE(a, b, x) is hashed key exchange between a and b using a secret key x HS(a + b + EDDH(x, b))
TKE(a, b, sk, n) is a transport shared secret kdf using MDS(n, HKE(a, b, sk))
when A is client and B is server where n is a 32 bytes shared random
client computes TKE(A.pk, B.pk, A.sk, n)
server computes TKE(A.pk, B.pk, B.sk, n)
PDH(a, b, x) is path shared secret generation HS(a + b + curve41417_scalar_mult(x, b))
PKE(a, b, x, n) is a path shared secret kdf using MDS(n, PDH(a, b, x))
given A is the path creator and B is a hop in the path and n is 32 bytes shared random
A computes PKE(A.pk, B.pk, A.sk, n) as S_a
B computes PKE(A.pk, B.pk, B.sk, n) as S_b
S_a is equal to S_b
RAND(n) is n random bytes

@ -120,7 +120,8 @@ router's full identity
{
a: [ one, or, many, AI, here ... ],
k: "<32 bytes public signing/encryption identity key>",
k: "<32 bytes public long term identity signing key>",
p: "<32 bytes public path encryption key>",
u: last_updated_seconds_since_epoch_uint64,
v: 0,
x: [ Exit, Infos ],
@ -140,7 +141,6 @@ if x is included it MUST be less than or equal to 16 bytes, any larger and it is
considered invalid.
{
n: "<optional claimed name>",
s: "<32 bytes public signing key>",
v: 0,
x: "<optional nonce for vanity>"
@ -163,7 +163,7 @@ v is the protocol version
x is the timestamp seconds since epoch that this introducer expires at
{
i: "<32 bytes public key of router>",
i: "<32 bytes public identity key of router>",
p: path_id_uint64,
v: 0,
x: time_expires_seconds_since_epoch_uint64
@ -181,7 +181,7 @@ service's signing key.
{
a: SI,
e: "<1218 bytes ntru public encryption key>",
e: "<52 bytes curve41417 public encryption key>",
i: [ I, I, I, ... ],
v: 0,
z: "<64 bytes signature using service info signing key>"

@ -133,69 +133,62 @@ namespace llarp
void
Context::SigINT()
{
if(logic)
llarp_logic_stop(logic);
if(mainloop)
llarp_ev_loop_stop(mainloop);
for(auto &t : netio_threads)
{
t.join();
}
netio_threads.clear();
Close();
}
void
Context::Close()
{
progress();
if(mainloop)
llarp_ev_loop_stop(mainloop);
llarp::Debug(__FILE__, "stop router");
if(router)
llarp_stop_router(router);
progress();
llarp::Debug(__FILE__, "stop workers");
if(worker)
llarp_threadpool_stop(worker);
progress();
llarp::Debug(__FILE__, "join workers");
if(worker)
llarp_threadpool_join(worker);
progress();
llarp::Debug(__FILE__, "stop logic");
if(logic)
llarp_logic_stop(logic);
progress();
if(router)
llarp_stop_router(router);
progress();
llarp_free_router(&router);
progress();
llarp::Debug(__FILE__, "free config");
llarp_free_config(&config);
progress();
llarp_ev_loop_free(&mainloop);
progress();
llarp::Debug(__FILE__, "free workers");
llarp_free_threadpool(&worker);
progress();
llarp::Debug(__FILE__, "free nodedb");
llarp_nodedb_free(&nodedb);
llarp_free_logic(&logic);
for(size_t i = 0; i < netio_threads.size(); ++i)
{
if(mainloop)
{
llarp::Debug(__FILE__, "stopping event loop thread ", i);
llarp_ev_loop_stop(mainloop);
}
}
progress();
llarp_nodedb_free(&nodedb);
llarp::Debug(__FILE__, "free router");
llarp_free_router(&router);
llarp::Debug(__FILE__, "free logic");
llarp_free_logic(&logic);
for(auto &t : netio_threads)
{
progress();
llarp::Debug(__FILE__, "join netio thread");
t.join();
}
progress();
netio_threads.clear();
out << std::endl << "done" << std::endl;
llarp::Debug(__FILE__, "free mainloop");
llarp_ev_loop_free(&mainloop);
}
bool
@ -217,7 +210,7 @@ struct llarp_main *
llarp_main_init(const char *fname)
{
if(!fname)
return nullptr;
fname = "daemon.ini";
llarp_main *m = new llarp_main;
m->ctx.reset(new llarp::Context(std::cout));
@ -245,7 +238,6 @@ llarp_main_run(struct llarp_main *ptr)
void
llarp_main_free(struct llarp_main *ptr)
{
ptr->ctx->Close();
delete ptr;
}
}

@ -2,6 +2,7 @@
#define EV_EPOLL_HPP
#include <llarp/buffer.h>
#include <llarp/net.h>
#include <signal.h>
#include <sys/epoll.h>
#include <unistd.h>
#include <cstdio>
@ -60,13 +61,23 @@ namespace llarp
struct llarp_epoll_loop : public llarp_ev_loop
{
int epollfd;
int pipefds[2];
llarp_epoll_loop() : epollfd(-1)
{
pipefds[0] = -1;
pipefds[1] = -1;
}
~llarp_epoll_loop()
{
if(pipefds[0] != -1)
close(pipefds[0]);
if(pipefds[1] != -1)
close(pipefds[1]);
if(epollfd != -1)
close(epollfd);
}
bool
@ -74,7 +85,17 @@ struct llarp_epoll_loop : public llarp_ev_loop
{
if(epollfd == -1)
epollfd = epoll_create1(EPOLL_CLOEXEC);
return epollfd != -1;
if(epollfd != -1)
{
if(pipe(pipefds) == -1)
return false;
epoll_event sig_ev;
sig_ev.data.fd = pipefds[0];
sig_ev.events = EPOLLIN;
return epoll_ctl(epollfd, EPOLL_CTL_ADD, pipefds[0], &sig_ev) != -1;
}
return false;
}
int
@ -91,6 +112,12 @@ struct llarp_epoll_loop : public llarp_ev_loop
int idx = 0;
while(idx < result)
{
// handle signalfd
if(events[idx].data.fd == pipefds[0])
{
llarp::Debug(__FILE__, "exiting epoll loop");
return 0;
}
llarp::ev_io* ev = static_cast< llarp::ev_io* >(events[idx].data.ptr);
if(events[idx].events & EPOLLIN)
{
@ -104,7 +131,7 @@ struct llarp_epoll_loop : public llarp_ev_loop
++idx;
}
}
} while(result != -1);
} while(epollfd != -1);
return result;
}
@ -199,10 +226,9 @@ struct llarp_epoll_loop : public llarp_ev_loop
void
stop()
{
if(epollfd != -1)
::close(epollfd);
epollfd = -1;
int i = 1;
auto val = write(pipefds[1], &i, sizeof(i));
(void)val;
}
};

@ -29,6 +29,8 @@ namespace iwp
// session activity timeout is 10s
constexpr llarp_time_t SESSION_TIMEOUT = 10000;
constexpr size_t MAX_PAD = 256;
enum msgtype
{
eALIV = 0x00,
@ -500,7 +502,6 @@ namespace iwp
struct session
{
llarp_alloc *mem;
llarp_udp_io *udp;
llarp_crypto *crypto;
llarp_async_iwp *iwp;
@ -528,7 +529,7 @@ namespace iwp
frame_state frame;
byte_t token[32];
byte_t workbuf[256];
byte_t workbuf[MAX_PAD + 128];
enum State
{
@ -545,10 +546,9 @@ namespace iwp
State state;
session(llarp_alloc *m, llarp_udp_io *u, llarp_async_iwp *i,
llarp_crypto *c, llarp_logic *l, const byte_t *seckey,
const llarp::Addr &a)
: mem(m), udp(u), crypto(c), iwp(i), logic(l), addr(a), state(eInitial)
session(llarp_udp_io *u, llarp_async_iwp *i, llarp_crypto *c,
llarp_logic *l, const byte_t *seckey, const llarp::Addr &a)
: udp(u), crypto(c), iwp(i), logic(l), addr(a), state(eInitial)
{
if(seckey)
memcpy(eph_seckey, seckey, sizeof(llarp_seckey_t));
@ -680,7 +680,7 @@ namespace iwp
{
session *self = static_cast< session * >(user);
// all zeros means keepalive
byte_t tmp[64] = {0};
byte_t tmp[MAX_PAD + 8] = {0};
// 8 bytes iwp header overhead
int padsz = rand() % (sizeof(tmp) - 8);
auto buf = llarp::StackBuffer< decltype(tmp) >(tmp);
@ -807,7 +807,7 @@ namespace iwp
void
session_start()
{
size_t w2sz = rand() % 32;
size_t w2sz = rand() % MAX_PAD;
start.buf = workbuf;
start.sz = w2sz + (32 * 3);
start.nonce = workbuf + 32;
@ -922,7 +922,7 @@ namespace iwp
void
intro_ack()
{
uint16_t w1sz = rand() % 32;
uint16_t w1sz = rand() % MAX_PAD;
introack.buf = workbuf;
introack.sz = (32 * 3) + w1sz;
// randomize padding
@ -1039,7 +1039,7 @@ namespace iwp
{
memcpy(remote, pub, 32);
intro.buf = workbuf;
size_t w0sz = (rand() % 32);
size_t w0sz = (rand() % MAX_PAD);
intro.sz = (32 * 3) + w0sz;
// randomize w0
if(w0sz)
@ -1076,7 +1076,6 @@ namespace iwp
typedef std::lock_guard< mtx_t > lock_t;
llarp_router *router;
llarp_alloc *mem;
llarp_logic *logic;
llarp_crypto *crypto;
llarp_ev_loop *netloop;
@ -1111,7 +1110,7 @@ namespace iwp
session *
create_session(const llarp::Addr &src, const byte_t *seckey)
{
return new session(mem, &udp, iwp, crypto, logic, seckey, src);
return new session(&udp, iwp, crypto, logic, seckey, src);
}
bool
@ -1269,8 +1268,7 @@ namespace iwp
const void *buf, ssize_t sz)
{
server *link = static_cast< server * >(udp->user);
llarp::Addr addr(*saddr);
session *s = link->ensure_session(addr);
session *s = link->ensure_session(*saddr);
s->recv(buf, sz);
}
@ -1477,6 +1475,8 @@ namespace iwp
{
server *link = static_cast< server * >(l->impl);
link->cancel_timer();
llarp_ev_close_udp(&link->udp);
link->clear_sessions();
return true;
}
@ -1524,8 +1524,6 @@ namespace iwp
link_free(struct llarp_link *l)
{
server *link = static_cast< server * >(l->impl);
llarp_ev_close_udp(&link->udp);
link->clear_sessions();
delete link;
}
}

@ -1,5 +1,6 @@
#include <llarp/logic.h>
#include <llarp/mem.h>
#include "logger.hpp"
struct llarp_logic
{
@ -26,19 +27,26 @@ llarp_free_logic(struct llarp_logic** logic)
{
if(*logic)
{
llarp_free_threadpool(&(*logic)->thread);
llarp_free_timer(&(*logic)->timer);
// llarp_free_timer(&(*logic)->timer);
delete *logic;
*logic = nullptr;
}
*logic = nullptr;
}
void
llarp_logic_stop(struct llarp_logic* logic)
{
llarp_timer_stop(logic->timer);
llarp_threadpool_stop(logic->thread);
llarp_threadpool_join(logic->thread);
llarp::Debug(__FILE__, "logic thread stop");
if(logic->thread)
{
llarp_threadpool_stop(logic->thread);
llarp_threadpool_join(logic->thread);
}
llarp_free_threadpool(&logic->thread);
llarp::Debug(__FILE__, "logic timer stop");
if(logic->timer)
llarp_timer_stop(logic->timer);
}
void

@ -116,10 +116,11 @@ llarp_nodedb_free(struct llarp_nodedb **n)
{
if(*n)
{
(*n)->Clear();
delete *n;
auto i = *n;
*n = nullptr;
i->Clear();
delete i;
}
*n = nullptr;
}
bool

@ -144,10 +144,13 @@ llarp_router::SaveRC()
void
llarp_router::Close()
{
for(auto link : links)
for(auto &link : links)
{
link->stop_link(link);
link->free_impl(link);
delete link;
}
links.clear();
}
void
@ -423,11 +426,6 @@ llarp_free_router(struct llarp_router **router)
{
if(*router)
{
for(auto &link : (*router)->links)
{
link->free_impl(link);
delete link;
}
delete *router;
}
*router = nullptr;

@ -1,6 +1,7 @@
#include "threadpool.hpp"
#include <pthread.h>
#include <cstring>
#include "logger.hpp"
namespace llarp
{
@ -45,7 +46,6 @@ namespace llarp
stop = true;
}
condition.notify_all();
done.notify_all();
}
void
@ -54,6 +54,7 @@ namespace llarp
for(auto &t : threads)
t.join();
threads.clear();
done.notify_all();
}
void
@ -97,6 +98,7 @@ llarp_init_threadpool(int workers, const char *name)
void
llarp_threadpool_join(struct llarp_threadpool *pool)
{
llarp::Debug(__FILE__, "threadpool join");
pool->impl.Join();
}
@ -108,6 +110,7 @@ llarp_threadpool_start(struct llarp_threadpool *pool)
void
llarp_threadpool_stop(struct llarp_threadpool *pool)
{
llarp::Debug(__FILE__, "threadpool stop");
pool->impl.Stop();
}
@ -115,6 +118,7 @@ void
llarp_threadpool_wait(struct llarp_threadpool *pool)
{
std::mutex mtx;
llarp::Debug(__FILE__, "threadpool wait");
{
std::unique_lock< std::mutex > lock(mtx);
pool->impl.done.wait(lock);
@ -133,7 +137,6 @@ llarp_free_threadpool(struct llarp_threadpool **pool)
{
if(*pool)
{
(*pool)->impl.Join();
delete *pool;
}
*pool = nullptr;

@ -4,6 +4,8 @@
#include <list>
#include <map>
#include "logger.hpp"
namespace llarp
{
struct timer
@ -35,6 +37,16 @@ namespace llarp
{
}
timer(const timer& other)
{
parent = other.parent;
user = other.user;
started = other.started;
timeout = other.timeout;
func = other.func;
id = other.id;
}
void
exec();
@ -59,29 +71,34 @@ struct llarp_timer_context
std::condition_variable ticker;
std::chrono::milliseconds nextTickLen = std::chrono::milliseconds(10);
uint32_t ids = 0;
std::atomic< bool > _run = true;
uint32_t ids = 0;
bool _run = true;
bool
run()
{
return _run.load();
return _run;
}
void
stop()
{
_run.store(false);
_run = false;
}
void
cancel(uint32_t id)
{
auto itr = timers.find(id);
if(itr != timers.end())
llarp::timer t;
{
itr->second.exec();
std::unique_lock< std::mutex > lock(timersMutex);
auto itr = timers.find(id);
if(itr == timers.end())
return;
t = itr->second;
timers.erase(itr);
}
t.exec();
}
void
@ -98,7 +115,7 @@ struct llarp_timer_context
{
std::unique_lock< std::mutex > lock(timersMutex);
uint32_t id = ++ids;
timers[id] = llarp::timer(this, timeout_ms, user, func, id);
timers.emplace(id, llarp::timer(this, timeout_ms, user, func, id));
return id;
}
@ -155,18 +172,13 @@ llarp_timer_remove_job(struct llarp_timer_context* t, uint32_t id)
void
llarp_timer_stop(struct llarp_timer_context* t)
{
// destroy all timers
// don't call callbacks on timers
llarp::Debug(__FILE__, "clear timers");
t->timers.clear();
t->stop();
{
std::unique_lock< std::mutex > lock(t->timersMutex);
// destroy all timers
// don't call callbacks on timers
auto itr = t->timers.begin();
while(itr != t->timers.end())
{
itr = t->timers.erase(itr);
}
}
llarp::Debug(__FILE__, "stop timers");
t->ticker.notify_all();
}
void
@ -181,19 +193,23 @@ llarp_timer_run(struct llarp_timer_context* t, struct llarp_threadpool* pool)
t->threadpool = pool;
while(t->run())
{
std::unique_lock< std::mutex > lock(t->timersMutex);
t->ticker.wait_for(lock, t->nextTickLen);
// we woke up
auto now = llarp::timer::now();
auto itr = t->timers.begin();
while(itr != t->timers.end())
{
if(now - itr->second.started >= itr->second.timeout)
std::unique_lock< std::mutex > lock(t->timersMutex);
t->ticker.wait_for(lock, t->nextTickLen,
[t]() -> bool { return t->timers.size() == 0; });
// we woke up
auto now = llarp::timer::now();
auto itr = t->timers.begin();
while(itr != t->timers.end())
{
// timer hit
llarp_threadpool_queue_job(pool, itr->second);
if(now - itr->second.started >= itr->second.timeout)
{
// timer hit
llarp_threadpool_queue_job(pool, itr->second);
}
++itr;
}
++itr;
}
}
}

Loading…
Cancel
Save