From c2106b674ca632f7c0bffd7cab4b1940f74d353c Mon Sep 17 00:00:00 2001 From: "Ondrej Zajicek (work)" Date: Thu, 11 Feb 2016 21:53:55 +0100 Subject: Unix: Fix bug in syslog name handling Pointer to current_log_name has to be changed even if the name is the same, because the old one will be invalid/freed after reconfiguration. --- sysdep/unix/log.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'sysdep') diff --git a/sysdep/unix/log.c b/sysdep/unix/log.c index 7cb26360..b90bbbd2 100644 --- a/sysdep/unix/log.c +++ b/sysdep/unix/log.c @@ -284,17 +284,18 @@ log_switch(int debug, list *l, char *new_syslog_name) current_log_list = l; #ifdef HAVE_SYSLOG - if (current_syslog_name && new_syslog_name && - !strcmp(current_syslog_name, new_syslog_name)) + char *old_syslog_name = current_syslog_name; + current_syslog_name = new_syslog_name; + + if (old_syslog_name && new_syslog_name && + !strcmp(old_syslog_name, new_syslog_name)) return; - if (current_syslog_name) + if (old_syslog_name) closelog(); if (new_syslog_name) openlog(new_syslog_name, LOG_CONS | LOG_NDELAY, LOG_DAEMON); - - current_syslog_name = new_syslog_name; #endif } -- cgit v1.2.3 From e1c13a5a7b86f2ba09178300bad960224658c833 Mon Sep 17 00:00:00 2001 From: Jan Moskyto Matejka Date: Wed, 9 Mar 2016 12:12:02 +0100 Subject: Unix: Rework of select-loop to poll-loop This should lift the limit of FD_SETSIZE and allow more than 1024 fd's. FD_SETSIZE limit doesn't matter now when creating new sockets. --- sysdep/unix/io.c | 101 ++++++++++++++++++++++++++----------------------------- 1 file changed, 47 insertions(+), 54 deletions(-) (limited to 'sysdep') diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c index b636e799..bc212de1 100644 --- a/sysdep/unix/io.c +++ b/sysdep/unix/io.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -41,12 +42,12 @@ #include "lib/sysio.h" /* Maximum number of calls of tx handler for one socket in one - * select iteration. Should be small enough to not monopolize CPU by + * poll iteration. Should be small enough to not monopolize CPU by * one protocol instance. */ #define MAX_STEPS 4 -/* Maximum number of calls of rx handler for all sockets in one select +/* Maximum number of calls of rx handler for all sockets in one poll iteration. RX callbacks are often much more costly so we limit this to gen small latencies */ #define MAX_RX_STEPS 4 @@ -1022,7 +1023,6 @@ sk_log_error(sock *s, const char *p) static list sock_list; static struct birdsock *current_sock; static struct birdsock *stored_sock; -static int sock_recalc_fdsets_p; static inline sock * sk_next(sock *s) @@ -1078,7 +1078,6 @@ sk_free(resource *r) if (s == stored_sock) stored_sock = sk_next(s); rem_node(&s->n); - sock_recalc_fdsets_p = 1; } } @@ -1276,7 +1275,6 @@ static void sk_insert(sock *s) { add_tail(&sock_list, &s->n); - sock_recalc_fdsets_p = 1; } static void @@ -1328,18 +1326,6 @@ sk_passive_connected(sock *s, int type) log(L_WARN "SOCK: Cannot get remote IP address for TCP<"); } - if (fd >= FD_SETSIZE) - { - /* FIXME: Call err_hook instead ? */ - log(L_ERR "SOCK: Incoming connection from %I%J (port %d) %s", - t->daddr, ipa_is_link_local(t->daddr) ? t->iface : NULL, - t->dport, "rejected due to FD_SETSIZE limit"); - close(fd); - t->fd = -1; - rfree(t); - return 1; - } - if (sk_setup(t) < 0) { /* FIXME: Call err_hook instead ? 
*/ @@ -1416,9 +1402,6 @@ sk_open(sock *s) if (fd < 0) ERR("socket"); - if (fd >= FD_SETSIZE) - ERR2("FD_SETSIZE limit reached"); - s->af = af; s->fd = fd; @@ -2062,15 +2045,15 @@ static int short_loops = 0; void io_loop(void) { - fd_set rd, wr; - struct timeval timo; + int poll_tout; time_t tout; - int hi, events; + int nfds, events; sock *s; node *n; + int fdmax = 256; + struct pollfd *pfd = xmalloc(fdmax * sizeof(struct pollfd)); watchdog_start1(); - sock_recalc_fdsets_p = 1; for(;;) { events = ev_run_list(&global_event_list); @@ -2081,43 +2064,43 @@ io_loop(void) tm_shot(); continue; } - timo.tv_sec = events ? 0 : MIN(tout - now, 3); - timo.tv_usec = 0; + poll_tout = (events ? 0 : MIN(tout - now, 3)) * 1000; /* Time in milliseconds */ io_close_event(); - if (sock_recalc_fdsets_p) - { - sock_recalc_fdsets_p = 0; - FD_ZERO(&rd); - FD_ZERO(&wr); - } - - hi = 0; + nfds = 0; WALK_LIST(n, sock_list) { + pfd[nfds] = (struct pollfd) { .fd = -1 }; /* everything other set to 0 by this */ s = SKIP_BACK(sock, n, n); if (s->rx_hook) { - FD_SET(s->fd, &rd); - if (s->fd > hi) - hi = s->fd; + pfd[nfds].fd = s->fd; + pfd[nfds].events |= POLLIN; } - else - FD_CLR(s->fd, &rd); if (s->tx_hook && s->ttx != s->tpos) { - FD_SET(s->fd, &wr); - if (s->fd > hi) - hi = s->fd; + pfd[nfds].fd = s->fd; + pfd[nfds].events |= POLLOUT; + } + if (pfd[nfds].fd != -1) + { + s->index = nfds; + nfds++; } else - FD_CLR(s->fd, &wr); + s->index = -1; + + if (nfds >= fdmax) + { + fdmax *= 2; + pfd = xrealloc(pfd, fdmax * sizeof(struct pollfd)); + } } /* * Yes, this is racy. But even if the signal comes before this test - * and entering select(), it gets caught on the next timer tick. + * and entering poll(), it gets caught on the next timer tick. */ if (async_config_flag) @@ -2142,18 +2125,18 @@ io_loop(void) continue; } - /* And finally enter select() to find active sockets */ + /* And finally enter poll() to find active sockets */ watchdog_stop(); - hi = select(hi+1, &rd, &wr, NULL, &timo); + events = poll(pfd, nfds, poll_tout); watchdog_start(); - if (hi < 0) + if (events < 0) { if (errno == EINTR || errno == EAGAIN) continue; - die("select: %m"); + die("poll: %m"); } - if (hi) + if (events) { /* guaranteed to be non-empty */ current_sock = SKIP_BACK(sock, n, HEAD(sock_list)); @@ -2161,11 +2144,17 @@ io_loop(void) while (current_sock) { sock *s = current_sock; + if (s->index == -1) + { + current_sock = sk_next(s); + goto next; + } + int e; int steps; steps = MAX_STEPS; - if ((s->type >= SK_MAGIC) && FD_ISSET(s->fd, &rd) && s->rx_hook) + if ((s->type >= SK_MAGIC) && (pfd[s->index].revents & (POLLIN | POLLHUP | POLLERR)) && s->rx_hook) do { steps--; @@ -2177,7 +2166,7 @@ io_loop(void) while (e && s->rx_hook && steps); steps = MAX_STEPS; - if (FD_ISSET(s->fd, &wr)) + if (pfd[s->index].revents & POLLOUT) do { steps--; @@ -2204,13 +2193,17 @@ io_loop(void) while (current_sock && count < MAX_RX_STEPS) { sock *s = current_sock; - int e UNUSED; + if (s->index == -1) + { + current_sock = sk_next(s); + goto next2; + } - if ((s->type < SK_MAGIC) && FD_ISSET(s->fd, &rd) && s->rx_hook) + if ((s->type < SK_MAGIC) && (pfd[s->index].revents & (POLLIN | POLLHUP | POLLERR)) && s->rx_hook) { count++; io_log_event(s->rx_hook, s->data); - e = sk_read(s); + sk_read(s); if (s != current_sock) goto next2; } -- cgit v1.2.3 From fd926ed4eea319b94bd0e09e093b90846bcb169b Mon Sep 17 00:00:00 2001 From: Jan Moskyto Matejka Date: Tue, 15 Mar 2016 14:57:49 +0100 Subject: Poll: Prevent the improbable case of EAGAIN after POLLIN --- proto/bfd/io.c | 4 ++-- 
sysdep/unix/io.c | 11 ++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'sysdep') diff --git a/proto/bfd/io.c b/proto/bfd/io.c index fb150040..79ed9af7 100644 --- a/proto/bfd/io.c +++ b/proto/bfd/io.c @@ -576,7 +576,7 @@ sockets_close_fds(struct birdloop *loop) loop->close_scheduled = 0; } -int sk_read(sock *s); +int sk_read(sock *s, int revents); int sk_write(sock *s); static void @@ -605,7 +605,7 @@ sockets_fire(struct birdloop *loop) if (pfd->revents & POLLIN) while (e && *psk && (*psk)->rx_hook) - e = sk_read(*psk); + e = sk_read(*psk, 0); e = 1; if (pfd->revents & POLLOUT) diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c index bc212de1..b769de58 100644 --- a/sysdep/unix/io.c +++ b/sysdep/unix/io.c @@ -1760,7 +1760,7 @@ sk_send_full(sock *s, unsigned len, struct iface *ifa, /* sk_read() and sk_write() are called from BFD's event loop */ int -sk_read(sock *s) +sk_read(sock *s, int revents) { switch (s->type) { @@ -1779,6 +1779,11 @@ sk_read(sock *s) { if (errno != EINTR && errno != EAGAIN) s->err_hook(s, errno); + else if (errno == EAGAIN && !(revents & POLLIN)) + { + log(L_ERR "Got EAGAIN from read when revents=%x (without POLLIN)", revents); + s->err_hook(s, 0); + } } else if (!c) s->err_hook(s, 0); @@ -2159,7 +2164,7 @@ io_loop(void) { steps--; io_log_event(s->rx_hook, s->data); - e = sk_read(s); + e = sk_read(s, pfd[s->index].revents); if (s != current_sock) goto next; } @@ -2203,7 +2208,7 @@ io_loop(void) { count++; io_log_event(s->rx_hook, s->data); - sk_read(s); + sk_read(s, pfd[s->index].revents); if (s != current_sock) goto next2; } -- cgit v1.2.3 From 9c92f69272de3795f7289969e815d99a93d0d9b3 Mon Sep 17 00:00:00 2001 From: Jan Moskyto Matejka Date: Fri, 18 Mar 2016 11:44:28 +0100 Subject: Unix: Substituted select -> poll also in congestion checker It does strange things when even one fd larger than FD_SETSIZE is passed to select(). --- sysdep/unix/io.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'sysdep') diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c index b769de58..eb1c1cad 100644 --- a/sysdep/unix/io.c +++ b/sysdep/unix/io.c @@ -1679,19 +1679,12 @@ sk_maybe_write(sock *s) int sk_rx_ready(sock *s) { - fd_set rd, wr; - struct timeval timo; int rv; - - FD_ZERO(&rd); - FD_ZERO(&wr); - FD_SET(s->fd, &rd); - - timo.tv_sec = 0; - timo.tv_usec = 0; + struct pollfd pfd = { .fd = s->fd }; + pfd.events |= POLLIN; redo: - rv = select(s->fd+1, &rd, &wr, NULL, &timo); + rv = poll(&pfd, 1, 0); if ((rv < 0) && (errno == EINTR || errno == EAGAIN)) goto redo; -- cgit v1.2.3 From ea0a8be2ff5afb8385a69cc0df70984e0fd3a570 Mon Sep 17 00:00:00 2001 From: Jan Moskyto Matejka Date: Wed, 30 Mar 2016 16:21:32 +0200 Subject: IO/Poll: fix mistaken variable merge The events variable is used in the short loop decision. The reasons are not much clear, keeping this to keep the former behaviour. 
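The distinction matters because poll() returns the number of descriptors with non-zero revents, while the events flag records whether ev_run_list() executed anything; merging the two would silently change the later short-loop decision whenever the values differ. A standalone sketch of the difference (the pipe and the variable names events/pout are illustrative stand-ins, not BIRD code):

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
  int p[2];
  if (pipe(p) < 0)
    return 1;

  /* Make the read end readable so poll() reports one ready descriptor. */
  (void) write(p[1], "x", 1);

  struct pollfd pfd = { .fd = p[0], .events = POLLIN };

  int events = 1;                /* pretend the event run executed something */
  int pout = poll(&pfd, 1, 0);   /* number of ready descriptors, here 1 */

  /* Keeping the two results in separate variables preserves the original
     short-loop behaviour even when they disagree. */
  printf("events=%d pout=%d revents=%#x\n", events, pout, (unsigned) pfd.revents);

  close(p[0]);
  close(p[1]);
  return 0;
}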
--- sysdep/unix/io.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'sysdep') diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c index eb1c1cad..112ebe91 100644 --- a/sysdep/unix/io.c +++ b/sysdep/unix/io.c @@ -2045,7 +2045,7 @@ io_loop(void) { int poll_tout; time_t tout; - int nfds, events; + int nfds, events, pout; sock *s; node *n; int fdmax = 256; @@ -2125,16 +2125,16 @@ io_loop(void) /* And finally enter poll() to find active sockets */ watchdog_stop(); - events = poll(pfd, nfds, poll_tout); + pout = poll(pfd, nfds, poll_tout); watchdog_start(); - if (events < 0) + if (pout < 0) { if (errno == EINTR || errno == EAGAIN) continue; die("poll: %m"); } - if (events) + if (pout) { /* guaranteed to be non-empty */ current_sock = SKIP_BACK(sock, n, HEAD(sock_list)); -- cgit v1.2.3 From e86cfd41d975122cc944db68383aef4028da9575 Mon Sep 17 00:00:00 2001 From: "Ondrej Zajicek (work)" Date: Wed, 23 Mar 2016 18:25:15 +0100 Subject: KRT: Fix route learn scan when route changed When a kernel route changed, function krt_learn_scan() noticed that and replaced the route in internal kernel FIB, but after that, function krt_learn_prune() failed to propagate the new route to the nest, because it confused the new route with the (removed) old best route and decided that the best route did not changed. Wow, the original code (and the bug) is almost 17 years old. --- nest/route.h | 2 +- sysdep/bsd/krt-sock.c | 5 ++--- sysdep/linux/netlink.c | 3 ++- sysdep/unix/krt.c | 46 +++++++++++++++++++++++++++++----------------- 4 files changed, 34 insertions(+), 22 deletions(-) (limited to 'sysdep') diff --git a/nest/route.h b/nest/route.h index c435b9e0..9368808f 100644 --- a/nest/route.h +++ b/nest/route.h @@ -223,8 +223,8 @@ typedef struct rte { struct { /* Routes generated by krt sync (both temporary and inherited ones) */ s8 src; /* Alleged route source (see krt.h) */ u8 proto; /* Kernel source protocol ID */ - u8 type; /* Kernel route type */ u8 seen; /* Seen during last scan */ + u8 best; /* Best route in network, propagated to core */ u32 metric; /* Kernel metric */ } krt; } u; diff --git a/sysdep/bsd/krt-sock.c b/sysdep/bsd/krt-sock.c index 29203d1b..6ff3b2b7 100644 --- a/sysdep/bsd/krt-sock.c +++ b/sysdep/bsd/krt-sock.c @@ -493,9 +493,8 @@ krt_read_route(struct ks_msg *msg, struct krt_proto *p, int scan) e->net = net; e->u.krt.src = src; e->u.krt.proto = src2; - - /* These are probably too Linux-specific */ - e->u.krt.type = 0; + e->u.krt.seen = 0; + e->u.krt.best = 0; e->u.krt.metric = 0; if (scan) diff --git a/sysdep/linux/netlink.c b/sysdep/linux/netlink.c index 640d1877..1ffdff07 100644 --- a/sysdep/linux/netlink.c +++ b/sysdep/linux/netlink.c @@ -1102,7 +1102,8 @@ nl_parse_route(struct nlmsghdr *h, int scan) e->net = net; e->u.krt.src = src; e->u.krt.proto = i->rtm_protocol; - e->u.krt.type = i->rtm_type; + e->u.krt.seen = 0; + e->u.krt.best = 0; e->u.krt.metric = 0; if (a[RTA_PRIORITY]) diff --git a/sysdep/unix/krt.c b/sysdep/unix/krt.c index 5e78586b..f5dee877 100644 --- a/sysdep/unix/krt.c +++ b/sysdep/unix/krt.c @@ -417,46 +417,58 @@ again: net *n = (net *) f; rte *e, **ee, *best, **pbest, *old_best; - old_best = n->routes; + /* + * Note that old_best may be NULL even if there was an old best route in + * the previous step, because it might be replaced in krt_learn_scan(). + * But in that case there is a new valid best route. 
+ */ + + old_best = NULL; best = NULL; pbest = NULL; ee = &n->routes; while (e = *ee) { + if (e->u.krt.best) + old_best = e; + if (!e->u.krt.seen) { *ee = e->next; rte_free(e); continue; } + if (!best || best->u.krt.metric > e->u.krt.metric) { best = e; pbest = ee; } + e->u.krt.seen = 0; + e->u.krt.best = 0; ee = &e->next; } if (!n->routes) { DBG("%I/%d: deleting\n", n->n.prefix, n->n.pxlen); if (old_best) - { - krt_learn_announce_delete(p, n); - n->n.flags &= ~KRF_INSTALLED; - } + krt_learn_announce_delete(p, n); + FIB_ITERATE_PUT(&fit, f); fib_delete(fib, f); goto again; } + + best->u.krt.best = 1; *pbest = best->next; best->next = n->routes; n->routes = best; - if (best != old_best || !(n->n.flags & KRF_INSTALLED) || p->reload) + + if ((best != old_best) || p->reload) { DBG("%I/%d: announcing (metric=%d)\n", n->n.prefix, n->n.pxlen, best->u.krt.metric); krt_learn_announce_update(p, best); - n->n.flags |= KRF_INSTALLED; } else DBG("%I/%d: uptodate (metric=%d)\n", n->n.prefix, n->n.pxlen, best->u.krt.metric); @@ -515,31 +527,31 @@ krt_learn_async(struct krt_proto *p, rte *e, int new) best = n->routes; bestp = &n->routes; for(gg=&n->routes; g=*gg; gg=&g->next) + { if (best->u.krt.metric > g->u.krt.metric) { best = g; bestp = gg; } + + g->u.krt.best = 0; + } + if (best) { + best->u.krt.best = 1; *bestp = best->next; best->next = n->routes; n->routes = best; } + if (best != old_best) { DBG("krt_learn_async: distributing change\n"); if (best) - { - krt_learn_announce_update(p, best); - n->n.flags |= KRF_INSTALLED; - } + krt_learn_announce_update(p, best); else - { - n->routes = NULL; - krt_learn_announce_delete(p, n); - n->n.flags &= ~KRF_INSTALLED; - } + krt_learn_announce_delete(p, n); } } @@ -564,7 +576,7 @@ krt_dump(struct proto *P) static void krt_dump_attrs(rte *e) { - debug(" [m=%d,p=%d,t=%d]", e->u.krt.metric, e->u.krt.proto, e->u.krt.type); + debug(" [m=%d,p=%d]", e->u.krt.metric, e->u.krt.proto); } #endif -- cgit v1.2.3 From 9e7b3ebdf9556d7464911dd39e862b1c003319b3 Mon Sep 17 00:00:00 2001 From: "Ondrej Zajicek (work)" Date: Wed, 6 Apr 2016 11:49:34 +0200 Subject: IO: Replace RX priority heuristic with explicit mark In BIRD, RX has lower priority than TX with the exception of RX from control socket. The patch replaces heuristic based on socket type with explicit mark and uses it for both control socket and BGP session waiting to be established. This should avoid an issue when during heavy load, outgoing connection could connect (TX event), send open, but then failed to receive OPEN / establish in time, not sending notifications between and therefore got hold timer expired error from the neighbor immediately after it finally established the connection. 
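A compilable sketch of the two-pass dispatch this enables; the struct and hook names here are illustrative, only the fast_rx flag mirrors the field added to struct birdsock below:

#include <poll.h>
#include <stddef.h>

struct demo_sock {
  int fd;
  unsigned fast_rx;                       /* 1 = service RX in the first pass */
  void (*rx_hook)(struct demo_sock *);
};

void
demo_dispatch(struct demo_sock *socks, struct pollfd *pfd, size_t n)
{
  /* First pass: sockets explicitly marked fast_rx (control socket, BGP
     connections still waiting to establish) so they cannot starve under
     heavy RX load. */
  for (size_t i = 0; i < n; i++)
    if (socks[i].fast_rx && (pfd[i].revents & (POLLIN | POLLHUP | POLLERR)))
      socks[i].rx_hook(&socks[i]);

  /* Second pass: ordinary protocol sockets. */
  for (size_t i = 0; i < n; i++)
    if (!socks[i].fast_rx && (pfd[i].revents & (POLLIN | POLLHUP | POLLERR)))
      socks[i].rx_hook(&socks[i]);
}

In the actual patch the same split is kept inside the single io_loop() walk over sock_list: the first walk services fast_rx sockets, the later MAX_RX_STEPS-limited walk handles the rest.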
--- lib/socket.h | 1 + proto/bgp/bgp.c | 3 +++ sysdep/unix/io.c | 4 ++-- sysdep/unix/main.c | 2 ++ 4 files changed, 8 insertions(+), 2 deletions(-) (limited to 'sysdep') diff --git a/lib/socket.h b/lib/socket.h index fbea92aa..0327e9e5 100644 --- a/lib/socket.h +++ b/lib/socket.h @@ -28,6 +28,7 @@ typedef struct birdsock { struct iface *iface; /* Interface; specify this for broad/multicast sockets */ byte *rbuf, *rpos; /* NULL=allocate automatically */ + uint fast_rx; /* RX has higher priority in event loop */ uint rbsize; int (*rx_hook)(struct birdsock *, int size); /* NULL=receiving turned off, returns 1 to clear rx buffer */ diff --git a/proto/bgp/bgp.c b/proto/bgp/bgp.c index 11f33b14..7328cb79 100644 --- a/proto/bgp/bgp.c +++ b/proto/bgp/bgp.c @@ -374,6 +374,8 @@ bgp_conn_enter_established_state(struct bgp_conn *conn) if (ipa_zero(p->source_addr)) p->source_addr = conn->sk->saddr; + conn->sk->fast_rx = 0; + p->conn = conn; p->last_error_class = 0; p->last_error_code = 0; @@ -696,6 +698,7 @@ bgp_setup_sk(struct bgp_conn *conn, sock *s) { s->data = conn; s->err_hook = bgp_sock_err; + s->fast_rx = 1; conn->sk = s; } diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c index 112ebe91..078fe73c 100644 --- a/sysdep/unix/io.c +++ b/sysdep/unix/io.c @@ -2152,7 +2152,7 @@ io_loop(void) int steps; steps = MAX_STEPS; - if ((s->type >= SK_MAGIC) && (pfd[s->index].revents & (POLLIN | POLLHUP | POLLERR)) && s->rx_hook) + if (s->fast_rx && (pfd[s->index].revents & (POLLIN | POLLHUP | POLLERR)) && s->rx_hook) do { steps--; @@ -2197,7 +2197,7 @@ io_loop(void) goto next2; } - if ((s->type < SK_MAGIC) && (pfd[s->index].revents & (POLLIN | POLLHUP | POLLERR)) && s->rx_hook) + if (!s->fast_rx && (pfd[s->index].revents & (POLLIN | POLLHUP | POLLERR)) && s->rx_hook) { count++; io_log_event(s->rx_hook, s->data); diff --git a/sysdep/unix/main.c b/sysdep/unix/main.c index 24d34f60..5d5586a0 100644 --- a/sysdep/unix/main.c +++ b/sysdep/unix/main.c @@ -450,6 +450,7 @@ cli_connect(sock *s, int size UNUSED) s->err_hook = cli_err; s->data = c = cli_new(s); s->pool = c->pool; /* We need to have all the socket buffers allocated in the cli pool */ + s->fast_rx = 1; c->rx_pos = c->rx_buf; c->rx_aux = NULL; rmove(s, c->pool); @@ -466,6 +467,7 @@ cli_init_unix(uid_t use_uid, gid_t use_gid) s->type = SK_UNIX_PASSIVE; s->rx_hook = cli_connect; s->rbsize = 1024; + s->fast_rx = 1; /* Return value intentionally ignored */ unlink(path_control_socket); -- cgit v1.2.3 From bd22d7f41d37dec8f7b8b845f6a18c775e66ddfc Mon Sep 17 00:00:00 2001 From: "Ondrej Zajicek (work)" Date: Wed, 6 Apr 2016 11:57:28 +0200 Subject: IO: Avoid multiple event cycles in one loop cycle. Event cycle may took too much time and trigger next timer events, so avoid cycling between timer and event cycles inside the loop cycle. 
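The change below turns the timer-shot path from a continue into a local goto, so timers that are already due are drained without re-entering ev_run_list(). A self-contained illustration of that control flow, with stub functions standing in for the event and timer machinery (none of the names below are BIRD functions):

#include <stdio.h>

/* Purely illustrative stand-ins for ev_run_list() and the timer queue. */
static int pending_events = 1;
static int due_timers = 2;

static int run_pending_events(void) { int e = pending_events; pending_events = 0; return e; }
static int timer_due(void) { return due_timers > 0; }
static void fire_due_timer(void) { due_timers--; printf("timer fired\n"); }

int main(void)
{
  for (int pass = 0; pass < 2; pass++)
  {
    int events = run_pending_events();    /* one event cycle per loop pass */

  timers:
    if (timer_due())
    {
      fire_due_timer();
      goto timers;                        /* drain timers, do not re-run events */
    }

    printf("pass=%d events=%d -> would poll() here\n", pass, events);
  }
  return 0;
}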
--- proto/bgp/bgp.c | 4 ++++ sysdep/unix/io.c | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'sysdep') diff --git a/proto/bgp/bgp.c b/proto/bgp/bgp.c index 7328cb79..94c8e5c2 100644 --- a/proto/bgp/bgp.c +++ b/proto/bgp/bgp.c @@ -668,6 +668,10 @@ bgp_keepalive_timeout(timer *t) DBG("BGP: Keepalive timer\n"); bgp_schedule_packet(conn, PKT_KEEPALIVE); + + /* Kick TX a bit faster */ + if (ev_active(conn->tx_ev)) + ev_run(conn->tx_ev); } static void diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c index 078fe73c..5955dbfe 100644 --- a/sysdep/unix/io.c +++ b/sysdep/unix/io.c @@ -2055,12 +2055,13 @@ io_loop(void) for(;;) { events = ev_run_list(&global_event_list); + timers: update_times(); tout = tm_first_shot(); if (tout <= now) { tm_shot(); - continue; + goto timers; } poll_tout = (events ? 0 : MIN(tout - now, 3)) * 1000; /* Time in milliseconds */ -- cgit v1.2.3
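For reference, the poll timeout computed a few hunks above keeps the old select() semantics: zero when events are still pending, otherwise the time to the next timer shot, capped at three seconds and converted to milliseconds. A runnable check of that formula (only MIN and the constants come from the patch; the helper name is made up):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirrors: poll_tout = (events ? 0 : MIN(tout - now, 3)) * 1000; */
static long
compute_poll_tout(int events, long tout, long now)
{
  return (events ? 0 : MIN(tout - now, 3)) * 1000;
}

int main(void)
{
  printf("%ld\n", compute_poll_tout(1, 100, 90));  /* 0    - events pending, do not block */
  printf("%ld\n", compute_poll_tout(0,  92, 90));  /* 2000 - next timer in 2 s */
  printf("%ld\n", compute_poll_tout(0, 100, 90));  /* 3000 - capped at 3 s */
  return 0;
}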