lib: fix monitor-based polling support

This commit is contained in:
Philippe Gerum 2019-06-18 12:26:14 +02:00
parent e60078a3eb
commit 41aa0ddcce
5 changed files with 236 additions and 8 deletions

View File

@ -5,6 +5,7 @@
*/
#include <stdarg.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <time.h>
@ -151,6 +152,11 @@ static int try_wait(struct evl_monitor_state *state)
return val;
}
/*
 * Tell whether at least one thread is currently polling this event
 * through the file interface (state->u.event.pollrefs is the live
 * poll reference count maintained by the core).
 */
static inline bool is_polled(struct evl_monitor_state *state)
{
	return atomic_read(&state->u.event.pollrefs) != 0;
}
int evl_timedwait_flags(struct evl_flags *flg,
const struct timespec *timeout, int *r_bits)
{
@ -173,7 +179,7 @@ int evl_timedwait_flags(struct evl_flags *flg,
*/
state = flg->active.state;
mode = evl_get_current_mode();
if (!(mode & (T_INBAND|T_WEAK|T_DEBUG))) {
if (!(mode & (T_INBAND|T_WEAK|T_DEBUG)) && !is_polled(state)) {
ret = try_wait(state);
if (ret) {
*r_bits = ret;
@ -235,9 +241,18 @@ int evl_post_flags(struct evl_flags *flg, int bits)
if (!bits)
return -EINVAL;
/*
* Unlike with gated event, we have no raceless way to detect
* that somebody is waiting on the flag group from userland,
* so we do a kernel entry each time a zero->non-zero
* transition is observed for the value. Fortunately, having
* some thread(s) already waiting for a flag to be posted is
* the most likely situation, so such entry will be required
* in most cases anyway.
*/
state = flg->active.state;
val = atomic_read(&state->u.event.value);
if (!val) {
if (!val || is_polled(state)) {
slow_path:
if (evl_get_current())
ret = oob_ioctl(flg->active.efd, EVL_MONIOC_SIGNAL, &mask);
@ -250,9 +265,14 @@ int evl_post_flags(struct evl_flags *flg, int bits)
prev = val;
next = prev | bits;
val = atomic_cmpxchg(&state->u.event.value, prev, next);
/* Check if somebody sneaked in the wait queue. */
if (!val)
goto slow_path;
if (is_polled(state)) {
/* If swap happened, just trigger a wakeup. */
if (val == prev)
mask = 0;
goto slow_path;
}
} while (val != prev);
return 0;

View File

@ -5,6 +5,7 @@
*/
#include <stdarg.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <time.h>
@ -219,10 +220,16 @@ int evl_tryget_sem(struct evl_sem *sem)
return try_get(sem->active.state);
}
/*
 * Tell whether at least one thread is currently polling this
 * semaphore through the file interface (non-zero pollrefs means we
 * must go through the kernel so pollers get woken up).
 */
static inline bool is_polled(struct evl_monitor_state *state)
{
	return atomic_read(&state->u.event.pollrefs) != 0;
}
int evl_put_sem(struct evl_sem *sem)
{
struct evl_monitor_state *state;
int val, prev, next, ret;
__s32 sigval = 1;
ret = check_sanity(sem);
if (ret)
@ -230,13 +237,13 @@ int evl_put_sem(struct evl_sem *sem)
state = sem->active.state;
val = atomic_read(&state->u.event.value);
if (val < 0) {
if (val < 0 || is_polled(state)) {
slow_path:
if (evl_get_current())
ret = oob_ioctl(sem->active.efd, EVL_MONIOC_SIGNAL, NULL);
ret = oob_ioctl(sem->active.efd, EVL_MONIOC_SIGNAL, &sigval);
else
/* In-band threads may post pended sema4s. */
ret = ioctl(sem->active.efd, EVL_MONIOC_SIGNAL, NULL);
ret = ioctl(sem->active.efd, EVL_MONIOC_SIGNAL, &sigval);
return ret ? -errno : 0;
}
@ -244,9 +251,19 @@ int evl_put_sem(struct evl_sem *sem)
prev = val;
next = prev + 1;
val = atomic_cmpxchg(&state->u.event.value, prev, next);
/* Check if somebody sneaked in the wait queue. */
/*
* If somebody sneaked in the wait queue or started
* polling us in the meantime, we have to perform a
* kernel entry.
*/
if (val < 0)
goto slow_path;
if (is_polled(state)) {
/* If swap happened, just trigger a wakeup. */
if (val == prev)
sigval = 0;
goto slow_path;
}
} while (val != prev);
return 0;

View File

@ -32,7 +32,7 @@ static void *flags_receiver(void *arg)
__Tcall_assert(tfd, evl_attach_self("monitor-flags-receiver:%d", getpid()));
__Tcall_assert(ret, evl_get_sem(&p->start));
evl_read_clock(EVL_CLOCK_MONOTONIC, &now);
timespec_add_ns(&timeout, &now, 200000000); /* 200ms */
timespec_add_ns(&timeout, &now, 400000000); /* 400ms */
/* Sender is quiet: expect timeout. */
if (!__Fcall(ret, evl_timedwait_flags(&p->flags, &timeout, &bits)) ||

101
tests/poll-flags.c Normal file
View File

@ -0,0 +1,101 @@
/*
* SPDX-License-Identifier: MIT
*/
#include <sys/types.h>
#include <time.h>
#include <stdbool.h>
#include <unistd.h>
#include <pthread.h>
#include <stdlib.h>
#include <evl/thread.h>
#include <evl/flags.h>
#include <evl/clock.h>
#include <evl/poll.h>
#include "helpers.h"
#define NR_RECEIVERS 1
#define LOW_PRIO 1
#define HIGH_PRIO 2
static int pollfd_in, pollfd_out, ffd;
static struct evl_flags flags;
struct test_context {
int serial;
};
static void *flags_poller(void *arg)
{
struct evl_poll_event pollset;
struct test_context *p = arg;
struct timespec now, timeout;
int ret, tfd, bits;
__Tcall_assert(tfd, evl_attach_self("monitor-flags-poller:%d.%d",
getpid(), p->serial));
do {
__Tcall_assert(ret, evl_poll(pollfd_in, &pollset, 1));
__Texpr_assert(ret == 1);
__Texpr_assert(pollset.events == POLLIN);
__Texpr_assert(pollset.fd == ffd);
evl_read_clock(EVL_CLOCK_MONOTONIC, &now);
timespec_add_ns(&timeout, &now, 1000000000);
__Tcall_assert(ret, evl_timedwait_flags(&flags, &timeout, &bits));
} while (bits != 0x80000000);
return NULL;
}
/*
 * Sender side: create the flag group and two poll sets (POLLIN for
 * the receivers, POLLOUT for us), start the receiver thread(s), then
 * walk a single bit through every position of the flag word, waiting
 * for the group to drain between posts.
 */
int main(int argc, char *argv[])
{
	struct test_context c[NR_RECEIVERS];
	pthread_t pollers[NR_RECEIVERS];
	struct evl_poll_event pollset;
	struct sched_param param;
	void *status = NULL;
	unsigned int bits;
	int tfd, ret, n;
	char *name;

	param.sched_priority = HIGH_PRIO;
	__Texpr_assert(pthread_setschedparam(pthread_self(),
				SCHED_FIFO, &param) == 0);

	/* EVL inherits the inband scheduling params upon attachment. */
	__Tcall_assert(tfd, evl_attach_self("poll-flags:%d", getpid()));

	name = get_unique_name(EVL_MONITOR_DEV, 0);
	__Tcall_assert(ffd, evl_new_flags(&flags, EVL_CLOCK_MONOTONIC, 0, name));
	__Tcall_assert(pollfd_in, evl_new_poll());
	__Tcall_assert(ret, evl_add_pollfd(pollfd_in, ffd, POLLIN));
	__Tcall_assert(pollfd_out, evl_new_poll());
	__Tcall_assert(ret, evl_add_pollfd(pollfd_out, ffd, POLLOUT));

	for (n = 0; n < NR_RECEIVERS; n++) {
		c[n].serial = n;
		new_thread(pollers + n, SCHED_FIFO, LOW_PRIO, flags_poller, c + n);
	}

	/*
	 * Use an unsigned accumulator for the walking bit: shifting a
	 * 1 into the sign bit of a signed int (0x40000000 << 1) is
	 * undefined behavior in C. The posted bit patterns are
	 * unchanged.
	 */
	for (bits = 1; bits != 0; bits <<= 1) {
		/* Wait for the flag group to be clear. */
		__Tcall_assert(ret, evl_poll(pollfd_out, &pollset, 1));
		__Texpr_assert(ret == 1);
		__Texpr_assert(pollset.events == POLLOUT);
		__Texpr_assert(pollset.fd == ffd);
		/* Then post the next pattern. */
		__Tcall_assert(ret, evl_post_flags(&flags, (int)bits));
	}

	for (n = 0; n < NR_RECEIVERS; n++) {
		__Texpr_assert(pthread_join(pollers[n], &status) == 0);
		__Texpr_assert(status == NULL);
	}

	__Tcall_assert(ret, evl_close_flags(&flags));

	return 0;
}

90
tests/poll-sem.c Normal file
View File

@ -0,0 +1,90 @@
/*
* SPDX-License-Identifier: MIT
*/
#include <sys/types.h>
#include <time.h>
#include <stdbool.h>
#include <unistd.h>
#include <pthread.h>
#include <stdlib.h>
#include <evl/thread.h>
#include <evl/sem.h>
#include <evl/clock.h>
#include <evl/poll.h>
#include "helpers.h"
#define NR_RECEIVERS 1
#define LOW_PRIO 1
#define HIGH_PRIO 2
#define PUT_COUNT 1024
static int pollfd_in, sfd;
static struct evl_sem sem;
struct test_context {
int serial;
};
static void *sem_poller(void *arg)
{
struct evl_poll_event pollset;
struct test_context *p = arg;
int ret, tfd, n;
__Tcall_assert(tfd, evl_attach_self("monitor-sem-poller:%d.%d",
getpid(), p->serial));
for (n = 0; n < PUT_COUNT; n++) {
__Tcall_assert(ret, evl_poll(pollfd_in, &pollset, 1));
__Texpr_assert(ret == 1);
__Texpr_assert(pollset.events == POLLIN);
__Texpr_assert(pollset.fd == sfd);
__Tcall_assert(ret, evl_tryget_sem(&sem));
}
return NULL;
}
/*
 * Sender side: create the semaphore and a POLLIN poll set, start the
 * poller thread(s), post PUT_COUNT times, then reap the pollers.
 */
int main(int argc, char *argv[])
{
	struct test_context c[NR_RECEIVERS];
	pthread_t pollers[NR_RECEIVERS];
	struct sched_param param;
	void *thread_status = NULL;
	int tfd, ret, i;
	char *name;

	param.sched_priority = HIGH_PRIO;
	__Texpr_assert(pthread_setschedparam(pthread_self(),
				SCHED_FIFO, &param) == 0);

	/* EVL inherits the inband scheduling params upon attachment. */
	__Tcall_assert(tfd, evl_attach_self("poll-sem:%d", getpid()));

	name = get_unique_name(EVL_MONITOR_DEV, 0);
	__Tcall_assert(sfd, evl_new_sem(&sem, EVL_CLOCK_MONOTONIC, 0, name));
	__Tcall_assert(pollfd_in, evl_new_poll());
	__Tcall_assert(ret, evl_add_pollfd(pollfd_in, sfd, POLLIN));

	for (i = 0; i < NR_RECEIVERS; i++) {
		c[i].serial = i;
		new_thread(pollers + i, SCHED_FIFO, LOW_PRIO, sem_poller, c + i);
	}

	/* Pollers run at lower priority; each put unblocks one poll. */
	for (i = 0; i < PUT_COUNT; i++)
		__Tcall_assert(ret, evl_put_sem(&sem));

	for (i = 0; i < NR_RECEIVERS; i++) {
		__Texpr_assert(pthread_join(pollers[i], &thread_status) == 0);
		__Texpr_assert(thread_status == NULL);
	}

	__Tcall_assert(ret, evl_close_sem(&sem));

	return 0;
}