quark

quark web server
git clone git://git.suckless.org/quark

queue.c (3653B)


/* See LICENSE file for copyright and license details. */
#include <stddef.h>

#ifdef __linux__
	#include <sys/epoll.h>
#else
	#include <sys/types.h>
	#include <sys/event.h>
	#include <sys/time.h>
#endif

#include "queue.h"
#include "util.h"

int
queue_create(void)
{
	int qfd;

	#ifdef __linux__
		if ((qfd = epoll_create1(0)) < 0) {
			warn("epoll_create1:");
		}
	#else
		if ((qfd = kqueue()) < 0) {
			warn("kqueue:");
		}
	#endif

	return qfd;
}

int
queue_add_fd(int qfd, int fd, enum queue_event_type t, int shared,
             const void *data)
{
	#ifdef __linux__
		struct epoll_event e;

		/* set event flag */
		if (shared) {
			/*
			 * if the fd is shared, "exclusive" is the only
			 * way to avoid spurious wakeups and "blocking"
			 * accept()'s.
			 */
			e.events = EPOLLEXCLUSIVE;
		} else {
			/*
			 * if we have the fd for ourselves (i.e. only
			 * within the thread), we want to be
			 * edge-triggered, as our logic makes sure
			 * that the buffers are drained when we return
			 * to epoll_wait()
			 */
			e.events = EPOLLET;
		}

		switch (t) {
		case QUEUE_EVENT_IN:
			e.events |= EPOLLIN;
			break;
		case QUEUE_EVENT_OUT:
			e.events |= EPOLLOUT;
			break;
		}

		/* set data pointer */
		e.data.ptr = (void *)data;

		/* register fd in the interest list */
		if (epoll_ctl(qfd, EPOLL_CTL_ADD, fd, &e) < 0) {
			warn("epoll_ctl:");
			return -1;
		}
	#else
		struct kevent e;
		int filter, flags;

		/*
		 * prepare event flags; EV_CLEAR (edge-triggered
		 * behaviour) is only wanted for fd's we have for
		 * ourselves
		 */
		flags = EV_ADD | ((shared) ? 0 : EV_CLEAR);

		/* prepare event filter */
		switch (t) {
		case QUEUE_EVENT_IN:
			filter = EVFILT_READ;
			break;
		case QUEUE_EVENT_OUT:
			filter = EVFILT_WRITE;
			break;
		}

		EV_SET(&e, fd, filter, flags, 0, 0, (void *)data);

		/* register fd in the interest list */
		if (kevent(qfd, &e, 1, NULL, 0, NULL) < 0) {
			warn("kevent:");
			return -1;
		}
	#endif

	return 0;
}

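As a rough illustration of the two registration modes above (a minimal sketch, not part of queue.c: the listening socket insock and the connection struct c are hypothetical names, and registering the listener with a NULL data pointer is only one possible convention):

	/* listening socket shared by all workers: level-triggered,
	 * EPOLLEXCLUSIVE on Linux */
	if (queue_add_fd(qfd, insock, QUEUE_EVENT_IN, 1, NULL) < 0) {
		/* handle error */
	}

	/* accepted connection owned by this worker only: edge-triggered */
	if (queue_add_fd(qfd, c->fd, QUEUE_EVENT_IN, 0, c) < 0) {
		/* handle error */
	}
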
int
queue_mod_fd(int qfd, int fd, enum queue_event_type t, const void *data)
{
	#ifdef __linux__
		struct epoll_event e;

		/* set event flag (only for non-shared fd's) */
		e.events = EPOLLET;

		switch (t) {
		case QUEUE_EVENT_IN:
			e.events |= EPOLLIN;
			break;
		case QUEUE_EVENT_OUT:
			e.events |= EPOLLOUT;
			break;
		}

		/* set data pointer */
		e.data.ptr = (void *)data;

		/* modify fd in the interest list */
		if (epoll_ctl(qfd, EPOLL_CTL_MOD, fd, &e) < 0) {
			warn("epoll_ctl:");
			return -1;
		}
	#else
		struct kevent e;
		int filter;

		/* prepare event filter */
		switch (t) {
		case QUEUE_EVENT_IN:
			filter = EVFILT_READ;
			break;
		case QUEUE_EVENT_OUT:
			filter = EVFILT_WRITE;
			break;
		}

		/* EV_CLEAR = edge-triggered (only used for non-shared fd's) */
		EV_SET(&e, fd, filter, EV_ADD | EV_CLEAR, 0, 0, (void *)data);

		if (kevent(qfd, &e, 1, NULL, 0, NULL) < 0) {
			warn("kevent:");
			return -1;
		}
	#endif

	return 0;
}

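For instance, once a request has been read and a response is pending, a caller might switch the connection's registration from read- to write-events (again a sketch, with c as a hypothetical connection struct):

	if (queue_mod_fd(qfd, c->fd, QUEUE_EVENT_OUT, c) < 0) {
		/* handle error */
	}
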
int
queue_rem_fd(int qfd, int fd)
{
	#ifdef __linux__
		struct epoll_event e;

		/*
		 * remove fd from the interest list; e is ignored but
		 * must be non-NULL for portability to old kernels
		 */
		if (epoll_ctl(qfd, EPOLL_CTL_DEL, fd, &e) < 0) {
			warn("epoll_ctl:");
			return -1;
		}
	#else
		struct kevent e;

		EV_SET(&e, fd, 0, EV_DELETE, 0, 0, 0);

		if (kevent(qfd, &e, 1, NULL, 0, NULL) < 0) {
			warn("kevent:");
			return -1;
		}
	#endif

	return 0;
}

ssize_t
queue_wait(int qfd, queue_event *e, size_t elen)
{
	ssize_t nready;

	/* block indefinitely until at least one fd is ready */
	#ifdef __linux__
		if ((nready = epoll_wait(qfd, e, elen, -1)) < 0) {
			warn("epoll_wait:");
			return -1;
		}
	#else
		if ((nready = kevent(qfd, NULL, 0, e, elen, NULL)) < 0) {
			warn("kevent:");
			return -1;
		}
	#endif

	return nready;
}

void *
queue_event_get_data(const queue_event *e)
{
	#ifdef __linux__
		return e->data.ptr;
	#else
		return e->udata;
	#endif
}

int
queue_event_is_error(const queue_event *e)
{
	#ifdef __linux__
		return (e->events & ~(EPOLLIN | EPOLLOUT)) ? 1 : 0;
	#else
		return (e->flags & EV_EOF) ? 1 : 0;
	#endif
}
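
Taken together, a worker's event loop built on this interface might look roughly like the sketch below. It is only an illustration under assumptions: insock, NEVENTS, serve() and the NULL-data convention for the listening socket are hypothetical; queue_event and enum queue_event_type come from queue.h.

#include <sys/types.h>

#include "queue.h"

#define NEVENTS 64

static void
serve(int insock)
{
	queue_event e[NEVENTS];
	ssize_t i, n;
	int qfd;

	/* create the queue and register the shared listening socket */
	if ((qfd = queue_create()) < 0 ||
	    queue_add_fd(qfd, insock, QUEUE_EVENT_IN, 1, NULL) < 0) {
		return;
	}

	for (;;) {
		/* block until at least one fd is ready */
		if ((n = queue_wait(qfd, e, NEVENTS)) < 0) {
			break;
		}

		for (i = 0; i < n; i++) {
			if (queue_event_get_data(&e[i]) == NULL) {
				/* listening socket: accept new connections */
			} else if (queue_event_is_error(&e[i])) {
				/* peer closed or error: drop the connection */
			} else {
				/* connection fd: continue serving it */
			}
		}
	}
}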