TDME2 1.9.121
KernelEventMechanism.cpp
Go to the documentation of this file.
1// kqueue
2#if defined(__FreeBSD__) || defined(__OpenBSD__)
3 #include <sys/types.h>
4#endif
5#include <sys/event.h>
6#include <sys/time.h>
7
8#include <errno.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <string.h>
12#include <unistd.h>
13
14#include <tdme/tdme.h>
20
24
25#if defined(__NetBSD__)
26 #define KEVENT_UDATA_DATATYPE __intptr_t
27#else
28 #define KEVENT_UDATA_DATATYPE void*
29#endif
30
31KernelEventMechanism::KernelEventMechanism() : initialized(false),_psd(NULL) {
32 // allocate platform specific data
33 _psd = static_cast<void*>(new KernelEventMechanismPSD());
34}
35
	// free the platform specific data allocated in the constructor;
	// _psd is stored as void* so it must be cast back before delete
	delete static_cast<KernelEventMechanismPSD*>(_psd);
}
40
/**
 * Sets the non-blocking IO interest for given socket.
 * Enqueues one read and one write change entry into the current (double
 * buffered) kqueue change list; the entry is EV_ENABLEd if the interest bit
 * is set, EV_DISABLEd otherwise. The change list is submitted to the kernel
 * by the next doKernelEventMechanism() call.
 * @param socket network socket
 * @param lastInterest previous interest (unused by the kqueue backend, kept for interface compatibility)
 * @param interest new interest bit set (NIO_INTEREST_READ/NIO_INTEREST_WRITE)
 * @param cookie user cookie delivered back by decodeKernelEvent()
 * @throws NetworkKEMException if the change list could not be enlarged
 */
void KernelEventMechanism::setSocketInterest(const NetworkSocket& socket, const NIOInterest lastInterest, const NIOInterest interest, const void* cookie) {
	// platform specific data
	auto psd = static_cast<KernelEventMechanismPSD*>(_psd);

	psd->kqMutex.lock();

	// we always append 2 entries (read + write change), check for change list overrun
	if (psd->kqChangeListCurrent + 2 >= psd->kqChangeListMax) {
		// try to double both change list buffers
		auto kqChangeList0Resized = (struct kevent*)realloc(psd->kqChangeList[0], sizeof(struct kevent) * (psd->kqChangeListMax << 1));
		auto kqChangeList1Resized = (struct kevent*)realloc(psd->kqChangeList[1], sizeof(struct kevent) * (psd->kqChangeListMax << 1));

		// adopt any buffer realloc did move, even on partial failure,
		// since the old pointer is invalid after a successful realloc
		if (kqChangeList0Resized != NULL) psd->kqChangeList[0] = kqChangeList0Resized;
		if (kqChangeList1Resized != NULL) psd->kqChangeList[1] = kqChangeList1Resized;

		if (kqChangeList0Resized != NULL && kqChangeList1Resized != NULL) {
			// both buffers grew, so the doubled capacity may be used
			psd->kqChangeListMax<<=1;
		} else {
			// at least one buffer could not be enlarged, give up
			psd->kqMutex.unlock();
			throw NetworkKEMException("kqueue change list too small");
		}
	}

	// appends a single change list entry for given filter, enabled or disabled
	auto enqueueChange = [psd, &socket, cookie](short filter, bool enable) {
		struct kevent* ke = &psd->kqChangeList[psd->kqChangeListBuffer][psd->kqChangeListCurrent++];
		ke->ident = socket.descriptor;
		ke->filter = filter;
		ke->flags = EV_ADD | (enable == true?EV_ENABLE:EV_DISABLE);
		ke->fflags = 0;
		ke->data = 0;
		ke->udata = (KEVENT_UDATA_DATATYPE)cookie;
	};

	// handle read interest
	enqueueChange(EVFILT_READ, (interest & NIO_INTEREST_READ) == NIO_INTEREST_READ);
	// handle write interest
	enqueueChange(EVFILT_WRITE, (interest & NIO_INTEREST_WRITE) == NIO_INTEREST_WRITE);

	psd->kqMutex.unlock();
}
114
/**
 * Initializes the kernel event mechanism for up to maxCCU concurrent connections.
 * Allocates the double buffered kqueue change lists, the kqueue event list and
 * creates the kqueue descriptor.
 * @param maxCCU maximum concurrent connections
 * @throws NetworkKEMException on allocation or kqueue() failure
 */
void KernelEventMechanism::initKernelEventMechanism(const unsigned int maxCCU) {
	// platform specific data
	auto psd = static_cast<KernelEventMechanismPSD*>(_psd);

	// kqueue change list, maxCCU * (read + write change);
	// this can still be too small since the interest can be changed several
	// times within a single request, setSocketInterest() grows it on demand
	psd->kqChangeListMax = maxCCU * 2;
	psd->kqChangeListCurrent = 0;

	// check each allocation BEFORE it gets dereferenced by the next one
	psd->kqChangeList = (struct kevent**)malloc(sizeof(void*) * 2);
	if (psd->kqChangeList == NULL) {
		throw NetworkKEMException("Could not allocate kqueue change list array");
	}
	psd->kqChangeList[0] = (struct kevent*)malloc(sizeof(struct kevent) * psd->kqChangeListMax);
	if (psd->kqChangeList[0] == NULL) {
		free(psd->kqChangeList);
		throw NetworkKEMException("Could not allocate kqueue change list 0");
	}
	psd->kqChangeList[1] = (struct kevent*)malloc(sizeof(struct kevent) * psd->kqChangeListMax);
	if (psd->kqChangeList[1] == NULL) {
		free(psd->kqChangeList[0]);
		free(psd->kqChangeList);
		throw NetworkKEMException("Could not allocate kqueue change list 1");
	}

	// kqueue event list, maxCCU * (read + write change)
	psd->kqEventListMax = maxCCU * 2;
	psd->kqEventList = (struct kevent*)malloc(sizeof(struct kevent) * psd->kqEventListMax);
	if (psd->kqEventList == NULL) {
		free(psd->kqChangeList[0]);
		free(psd->kqChangeList[1]);
		free(psd->kqChangeList);
		throw NetworkKEMException("Could not allocate kqueue event list");
	}

	// start kqueue and get the descriptor
	psd->kq = kqueue();
	if (psd->kq == -1) {
		// release everything allocated above so init may be retried
		free(psd->kqChangeList[0]);
		free(psd->kqChangeList[1]);
		free(psd->kqChangeList);
		free(psd->kqEventList);
		std::string msg = "Could not create kqueue: ";
		msg+= strerror(errno);
		throw NetworkKEMException(msg);
	}

	//
	initialized = true;
}
161
/**
 * Shuts down the kernel event mechanism.
 * Releases the change lists, the event list and closes the kqueue descriptor.
 * Safe to call multiple times and before initKernelEventMechanism().
 */
void KernelEventMechanism::shutdownKernelEventMechanism() {
	// skip if not initialized
	if (initialized == false) return;

	// platform specific data
	auto psd = static_cast<KernelEventMechanismPSD*>(_psd);

	// release kqueue resources
	free(psd->kqChangeList[0]);
	free(psd->kqChangeList[1]);
	free(psd->kqChangeList);
	free(psd->kqEventList);
	close(psd->kq);

	// mark as shut down, otherwise a second call would double free
	initialized = false;
}
176
/**
 * Runs the kernel event mechanism once.
 * Swaps the double buffered change list under the mutex, submits the filled
 * buffer to the kernel via kevent() and polls for events with a 1ms timeout.
 * @return number of events received, decode them with decodeKernelEvent()
 * @throws NetworkKEMException if kevent() fails for any reason other than EINTR
 */
int KernelEventMechanism::doKernelEventMechanism() {
	// platform specific data
	auto psd = static_cast<KernelEventMechanismPSD*>(_psd);

	// have a timeout of 1ms
	// as we only can delegate interest changes to the kernel by
	// running kevent
	const struct timespec timeout = {0, 1L * 1000L * 1000L};

	//
	while (true) {
		// do kqueue change list double buffer logic, so that
		// setSocketInterest() can keep appending while kevent() runs
		psd->kqMutex.lock();

		// current kevent parameter from current change kqueue list
		auto kqChangeListFilledBuffer = psd->kqChangeListBuffer;
		auto kqChangeListFilledCurrent = psd->kqChangeListCurrent;

		// cycle change list buffer
		psd->kqChangeListBuffer = (psd->kqChangeListBuffer + 1) % 2;

		// reset change list
		psd->kqChangeListCurrent = 0;

		// done
		psd->kqMutex.unlock();

		// submit collected changes and poll for events
		int events = kevent(
			psd->kq,
			psd->kqChangeList[kqChangeListFilledBuffer],
			kqChangeListFilledCurrent,
			psd->kqEventList,
			psd->kqEventListMax,
			&timeout
		);

		// check for error
		if (events == -1) {
			if (errno == EINTR) {
				// kevent was interrupted by a signal, so ignore this and restart
			} else {
				std::string msg = "kevent failed: ";
				msg+= strerror(errno);
				throw NetworkKEMException(msg);
			}
		} else {
			//
			return events;
		}
	}
}
229
/**
 * Decodes a kernel event from the event list filled by doKernelEventMechanism().
 * @param index event index, 0 <= index < return value of doKernelEventMechanism()
 * @param interest decoded IO interest (out)
 * @param cookie user cookie passed at setSocketInterest() (out)
 */
void KernelEventMechanism::decodeKernelEvent(const unsigned int index, NIOInterest &interest, void*& cookie) {
	// platform specific data
	auto psd = static_cast<KernelEventMechanismPSD*>(_psd);

	// look up the kernel event at given index and report its cookie
	const struct kevent* event = &psd->kqEventList[index];
	cookie = (void*)event->udata;

	// map the kqueue filter back onto the NIO interest
	if (event->filter == EVFILT_READ) {
		interest = NIO_INTEREST_READ;
	} else
	if (event->filter == EVFILT_WRITE) {
		interest = NIO_INTEREST_WRITE;
	} else {
		interest = NIO_INTEREST_NONE;
	}
}
#define KEVENT_UDATA_DATATYPE
Interface to kernel event mechanisms.
void shutdownKernelEventMechanism()
shuts down the kernel event mechanism
void setSocketInterest(const NetworkSocket &socket, const NIOInterest lastInterest, const NIOInterest interest, const void *cookie)
sets a non blocked socket io interest
int doKernelEventMechanism()
do the kernel event mechanism
void initKernelEventMechanism(const unsigned int maxCCU)
initializes the kernel event mechanism
void decodeKernelEvent(const unsigned int index, NIOInterest &interest, void *&cookie)
decodes a kernel event
Kernel event mechanism exception class.
Base class of network sockets.
Definition: NetworkSocket.h:17
BSD kernel event mechanism platform specific data.
void unlock()
Unlocks this mutex.
Definition: Mutex.cpp:48
void lock()
Locks the mutex, additionally mutex locks will block until other locks have been unlocked.
Definition: Mutex.cpp:39
const NIOInterest NIO_INTEREST_NONE
Definition: NIOInterest.h:11
const NIOInterest NIO_INTEREST_READ
Definition: NIOInterest.h:12
uint8_t NIOInterest
type definition for network IO interest
Definition: NIOInterest.h:10
const NIOInterest NIO_INTEREST_WRITE
Definition: NIOInterest.h:13