version 1.0.0

Bert Belder 2017-11-30 23:31:14 +01:00
parent c4756004e0
commit f9c89c1b42
49 changed files with 2638 additions and 3984 deletions


@@ -1,37 +0,0 @@
---
Language: Cpp
BasedOnStyle: Google
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Attach
ColumnLimit: 79
Cpp11BracedListStyle: true
DerivePointerAlignment: false
IndentWrappedFunctionNames: true
MaxEmptyLinesToKeep: 1
PointerAlignment: Left
SpaceAfterCStyleCast: true
SpacesInContainerLiterals: false
# Alas, not supported:
# ForceEmptyLineAtEOF: true
---
Language: JavaScript
BasedOnStyle: Google
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
BinPackArguments: false
BinPackParameters: false
BreakBeforeBraces: Attach
ColumnLimit: 79
IndentWrappedFunctionNames: true
MaxEmptyLinesToKeep: 1
SpacesInContainerLiterals: false

.gitignore

@@ -1,43 +0,0 @@
*_BASE_*
*_BACKUP_*
*_REMOTE_*
*_LOCAL_*
*.opensdf
*.orig
*.sdf
*.sln
*.suo
*.TMP
*.user
*.VC.db
*.VC.opendb
*.vcxproj
*.vcxproj.filters
*.vcxproj.user
*.vspx
/*.build/
/*.dir/
/*.xcodeproj/
/.vs/
/.vscode/
/bin/
/build/
/CMakeFiles/
/CMakeScripts/
/Debug/
/dist/
/ipch/
/lib/
/MinSizeRel/
/out/
/Release/
/RelWithDebInfo/
/x64/
/Win32/
/CMakeCache.txt
/cmake_install.cmake
/Makefile


@@ -1,99 +0,0 @@
cmake_minimum_required(VERSION 2.8.0)
project(wepoll)
if(WIN32)
link_libraries(ws2_32)
endif()
if(MSVC)
add_compile_options(/Wall /wd4201 /wd4242 /wd4710 /wd4711 /wd4820)
else()
add_compile_options(-Wall)
endif()
file(GLOB SOURCES_SRC src/*.c src/*.h)
file(GLOB SOURCES_SRC_C src/*.c)
file(GLOB SOURCES_SRC_REGULAR src/regular/*.c src/regular/*.h)
file(GLOB SOURCES_SRC_COMBINED src/combined/*.c src/combined/*.h)
file(GLOB SOURCES_INCLUDE include/*.h)
file(GLOB SOURCES_TEST test/*.c)
file(GLOB_RECURSE SOURCES_TEST_SHARED test/shared/*.c test/shared/*.h)
source_group("" FILES ${SOURCES_INCLUDE})
source_group(src FILES ${SOURCES_SRC})
source_group(src FILES ${SOURCES_SRC_REGULAR})
source_group(src FILES ${SOURCES_SRC_COMBINED})
source_group("" FILES ${SOURCES_TEST})
source_group(test/shared FILES ${SOURCES_TEST_SHARED})
foreach(TEST_SOURCE ${SOURCES_TEST})
get_filename_component(TEST_NAME ${TEST_SOURCE} NAME_WE)
add_executable(${TEST_NAME} ${TEST_SOURCE} ${SOURCES_SRC} ${SOURCES_SRC_REGULAR} ${SOURCES_TEST_SHARED})
target_include_directories(${TEST_NAME} PUBLIC include src src/regular test/shared)
list(APPEND TEST_TARGETS ${TEST_NAME})
list(APPEND TEST_OUTPUTS $<TARGET_FILE:${TEST_NAME}>)
endforeach(TEST_SOURCE ${SOURCES_TEST})
set(TEST_ALL_STAMP "test-all.stamp")
set(TEST_RUNNER "tools/run-tests.js")
add_custom_command(
OUTPUT ${TEST_ALL_STAMP}
COMMAND node ${TEST_RUNNER} ${TEST_OUTPUTS}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDS ${TEST_RUNNER} ${TEST_TARGETS}
)
add_custom_target(test-all DEPENDS ${TEST_ALL_STAMP})
foreach(HEADER_SOURCE ${SOURCES_INCLUDE})
get_filename_component(HEADER_NAME ${HEADER_SOURCE} NAME_WE)
string(TOUPPER ${HEADER_NAME} HEADER_NAME_UC)
set(COMBINED_NAME "${HEADER_NAME}-combined")
set(DIST_SRC_C "dist/${HEADER_NAME}.c")
set(DIST_SRC_H "dist/${HEADER_NAME}.h")
set(SOURCES_README "doc/README.md")
set(DIST_README "dist/README.md")
add_custom_command(
OUTPUT ${DIST_SRC_C}
COMMAND node tools/combine.js -Iinclude -Isrc -Isrc/combined --strip-guards ${HEADER_SOURCE} ${SOURCES_SRC_C} > ${DIST_SRC_C}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
IMPLICIT_DEPENDS c ${SOURCES_INCLUDE} ${SOURCES_SRC} ${SOURCES_SRC_COMBINED}
)
add_custom_command(
OUTPUT ${DIST_SRC_H}
COMMAND node tools/combine.js ${HEADER_SOURCE} > ${DIST_SRC_H}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
IMPLICIT_DEPENDS c ${SOURCES_INCLUDE}
)
add_custom_command(
OUTPUT ${DIST_README}
COMMAND ${CMAKE_COMMAND} -E copy ${SOURCES_README} ${DIST_README}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDS ${SOURCES_README}
)
add_custom_target(${COMBINED_NAME} DEPENDS ${DIST_SRC_C} ${DIST_SRC_H} ${DIST_README})
set(COMBINED_DLL_NAME "${HEADER_NAME}-combined.dll")
set(COMBINED_DLL_OUTPUT "${HEADER_NAME}-combined")
add_library(${COMBINED_DLL_NAME} SHARED ${DIST_SRC_C})
if(MSVC)
target_compile_definitions(${COMBINED_DLL_NAME} PUBLIC "-D${HEADER_NAME_UC}_EXPORT=__declspec(dllexport)" )
else()
target_compile_definitions(${COMBINED_DLL_NAME} PUBLIC "-D${HEADER_NAME_UC}_EXPORT=__attribute__((visibility(\"default\")))")
endif()
set_target_properties(${COMBINED_DLL_NAME} PROPERTIES OUTPUT_NAME ${COMBINED_DLL_OUTPUT})
set(DLL_NAME "${HEADER_NAME}.dll")
set(DLL_OUTPUT "${HEADER_NAME}")
add_library(${DLL_NAME} SHARED ${HEADER_SOURCE} ${SOURCES_SRC} ${SOURCES_SRC_REGULAR})
target_include_directories(${DLL_NAME} PUBLIC include src/regular)
if(MSVC)
target_compile_options(${DLL_NAME} PUBLIC "-FI${HEADER_SOURCE}")
target_compile_definitions(${DLL_NAME} PUBLIC "-D${HEADER_NAME_UC}_EXPORT=__declspec(dllexport)" )
else()
target_compile_options(${DLL_NAME} PUBLIC -include ${HEADER_SOURCE} -fvisibility=hidden)
target_compile_definitions(${DLL_NAME} PUBLIC "-D${HEADER_NAME_UC}_EXPORT=__attribute__((visibility(\"default\")))")
endif()
set_target_properties(${DLL_NAME} PROPERTIES OUTPUT_NAME ${DLL_OUTPUT})
endforeach(HEADER_SOURCE ${SOURCES_INCLUDE})

LICENSE

@@ -1,27 +0,0 @@
Copyright 2012-2017, Bert Belder. All rights reserved.
The red-black tree implementation:
Copyright 2002 Niels Provos <provos@citi.umich.edu> All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,4 +0,0 @@
@echo off
rd /s/q CMakeFiles
del CMakeCache.txt *.cmake ALL_BUILD*.* ZERO_CHECK*.*
cmake.exe -G "Visual Studio 14 2015 Win64" %*

src/afd.c

@@ -1,170 +0,0 @@
#include "afd.h"
#include "error.h"
#include "nt.h"
#include "util.h"
#include "win.h"
#define FILE_DEVICE_NETWORK 0x00000012
#define METHOD_BUFFERED 0
#define AFD_POLL 9
#define _AFD_CONTROL_CODE(operation, method) \
((FILE_DEVICE_NETWORK) << 12 | (operation << 2) | method)
#define IOCTL_AFD_POLL _AFD_CONTROL_CODE(AFD_POLL, METHOD_BUFFERED)
#ifndef SIO_BASE_HANDLE
#define SIO_BASE_HANDLE 0x48000022
#endif
int afd_poll(SOCKET driver_socket,
AFD_POLL_INFO* poll_info,
OVERLAPPED* overlapped) {
IO_STATUS_BLOCK iosb;
IO_STATUS_BLOCK* iosb_ptr;
HANDLE event = NULL;
void* apc_context;
NTSTATUS status;
if (overlapped != NULL) {
/* Overlapped operation. */
iosb_ptr = (IO_STATUS_BLOCK*) &overlapped->Internal;
event = overlapped->hEvent;
/* Do not report iocp completion if hEvent is tagged. */
if ((uintptr_t) event & 1) {
event = (HANDLE)((uintptr_t) event & ~(uintptr_t) 1);
apc_context = NULL;
} else {
apc_context = overlapped;
}
} else {
/* Blocking operation. */
iosb_ptr = &iosb;
event = CreateEventW(NULL, FALSE, FALSE, NULL);
if (event == NULL)
return_error(-1);
apc_context = NULL;
}
iosb_ptr->Status = STATUS_PENDING;
status = NtDeviceIoControlFile((HANDLE) driver_socket,
event,
NULL,
apc_context,
iosb_ptr,
IOCTL_AFD_POLL,
poll_info,
sizeof *poll_info,
poll_info,
sizeof *poll_info);
if (overlapped == NULL) {
/* If this is a blocking operation, wait for the event to become
* signaled, and then grab the real status from the io status block.
*/
if (status == STATUS_PENDING) {
DWORD r = WaitForSingleObject(event, INFINITE);
if (r == WAIT_FAILED) {
DWORD error = GetLastError();
CloseHandle(event);
return_error(-1, error);
}
status = iosb_ptr->Status;
}
CloseHandle(event);
}
if (status == STATUS_SUCCESS)
return 0;
else if (status == STATUS_PENDING)
return_error(-1, ERROR_IO_PENDING);
else
return_error(-1, RtlNtStatusToDosError(status));
}
static SOCKET _afd_get_base_socket(SOCKET socket) {
SOCKET base_socket;
DWORD bytes;
if (WSAIoctl(socket,
SIO_BASE_HANDLE,
NULL,
0,
&base_socket,
sizeof base_socket,
&bytes,
NULL,
NULL) == SOCKET_ERROR)
return_error(INVALID_SOCKET);
return base_socket;
}
static ssize_t _afd_get_protocol_info(SOCKET socket,
WSAPROTOCOL_INFOW* protocol_info) {
ssize_t id;
int opt_len;
opt_len = sizeof *protocol_info;
if (getsockopt(socket,
SOL_SOCKET,
SO_PROTOCOL_INFOW,
(char*) protocol_info,
&opt_len) != 0)
return_error(-1);
id = -1;
for (size_t i = 0; i < array_count(AFD_PROVIDER_GUID_LIST); i++) {
if (memcmp(&protocol_info->ProviderId,
&AFD_PROVIDER_GUID_LIST[i],
sizeof protocol_info->ProviderId) == 0) {
id = i;
break;
}
}
/* Check if the protocol uses an msafd socket. */
if (id < 0)
return_error(-1, ERROR_NOT_SUPPORTED);
return id;
}
WEPOLL_INTERNAL ssize_t afd_get_protocol(SOCKET socket,
SOCKET* afd_socket_out,
WSAPROTOCOL_INFOW* protocol_info) {
ssize_t id;
SOCKET afd_socket;
/* Try to get protocol information, assuming that the given socket is an AFD
* socket. This should almost always be the case, and if it is, that saves us
* a call to WSAIoctl(). */
afd_socket = socket;
id = _afd_get_protocol_info(afd_socket, protocol_info);
if (id < 0) {
/* If getting protocol information failed, it might be due to the socket
* not being an AFD socket. If so, attempt to fetch the underlying base
* socket, then try again to obtain protocol information. If that also
* fails, return the *original* error. */
DWORD original_error = GetLastError();
if (original_error != ERROR_NOT_SUPPORTED)
return_error(-1);
afd_socket = _afd_get_base_socket(socket);
if (afd_socket == INVALID_SOCKET || afd_socket == socket)
return_error(-1, original_error);
id = _afd_get_protocol_info(afd_socket, protocol_info);
if (id < 0)
return_error(-1, original_error);
}
*afd_socket_out = afd_socket;
return id;
}


@@ -1,81 +0,0 @@
#ifndef WEPOLL_AFD_H_
#define WEPOLL_AFD_H_
#include "internal.h"
#include "ntstatus.h"
#include "util.h"
#include "win.h"
/* clang-format off */
#define AFD_NO_FAST_IO 0x00000001
#define AFD_OVERLAPPED 0x00000002
#define AFD_IMMEDIATE 0x00000004
#define AFD_POLL_RECEIVE_BIT 0
#define AFD_POLL_RECEIVE (1 << AFD_POLL_RECEIVE_BIT)
#define AFD_POLL_RECEIVE_EXPEDITED_BIT 1
#define AFD_POLL_RECEIVE_EXPEDITED (1 << AFD_POLL_RECEIVE_EXPEDITED_BIT)
#define AFD_POLL_SEND_BIT 2
#define AFD_POLL_SEND (1 << AFD_POLL_SEND_BIT)
#define AFD_POLL_DISCONNECT_BIT 3
#define AFD_POLL_DISCONNECT (1 << AFD_POLL_DISCONNECT_BIT)
#define AFD_POLL_ABORT_BIT 4
#define AFD_POLL_ABORT (1 << AFD_POLL_ABORT_BIT)
#define AFD_POLL_LOCAL_CLOSE_BIT 5
#define AFD_POLL_LOCAL_CLOSE (1 << AFD_POLL_LOCAL_CLOSE_BIT)
#define AFD_POLL_CONNECT_BIT 6
#define AFD_POLL_CONNECT (1 << AFD_POLL_CONNECT_BIT)
#define AFD_POLL_ACCEPT_BIT 7
#define AFD_POLL_ACCEPT (1 << AFD_POLL_ACCEPT_BIT)
#define AFD_POLL_CONNECT_FAIL_BIT 8
#define AFD_POLL_CONNECT_FAIL (1 << AFD_POLL_CONNECT_FAIL_BIT)
#define AFD_POLL_QOS_BIT 9
#define AFD_POLL_QOS (1 << AFD_POLL_QOS_BIT)
#define AFD_POLL_GROUP_QOS_BIT 10
#define AFD_POLL_GROUP_QOS (1 << AFD_POLL_GROUP_QOS_BIT)
#define AFD_NUM_POLL_EVENTS 11
#define AFD_POLL_ALL ((1 << AFD_NUM_POLL_EVENTS) - 1)
/* clang-format on */
typedef struct _AFD_POLL_HANDLE_INFO {
HANDLE Handle;
ULONG Events;
NTSTATUS Status;
} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO;
typedef struct _AFD_POLL_INFO {
LARGE_INTEGER Timeout;
ULONG NumberOfHandles;
ULONG Exclusive;
AFD_POLL_HANDLE_INFO Handles[1];
} AFD_POLL_INFO, *PAFD_POLL_INFO;
WEPOLL_INTERNAL int afd_poll(SOCKET driver_socket,
AFD_POLL_INFO* poll_info,
OVERLAPPED* overlapped);
WEPOLL_INTERNAL ssize_t afd_get_protocol(SOCKET socket,
SOCKET* afd_socket_out,
WSAPROTOCOL_INFOW* protocol_info);
/* clang-format off */
static const GUID AFD_PROVIDER_GUID_LIST[] = {
/* MSAFD Tcpip [TCP+UDP+RAW / IP] */
{0xe70f1aa0, 0xab8b, 0x11cf,
{0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
/* MSAFD Tcpip [TCP+UDP+RAW / IPv6] */
{0xf9eab0c0, 0x26d4, 0x11d0,
{0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
/* MSAFD RfComm [Bluetooth] */
{0x9fc48064, 0x7298, 0x43e4,
{0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}},
/* MSAFD Irda [IrDA] */
{0x3972523d, 0x2af1, 0x11d1,
{0xb6, 0x55, 0x00, 0x80, 0x5f, 0x36, 0x42, 0xcc}}};
/* clang-format on */
#endif /* WEPOLL_AFD_H_ */
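
A minimal usage sketch, not part of this commit: it drives a single blocking poll through afd_poll() using the types and flags declared above. The helper name wait_for_readable is hypothetical, and the sketch assumes the polled socket's own AFD handle may double as the driver socket; the code in this tree instead issues the IOCTL through a shared poll-group socket (see poll-group.c further down).

#include <stdint.h>

#include "afd.h"

/* Block until `sock` becomes readable or is disconnected. */
static int wait_for_readable(SOCKET sock) {
  SOCKET afd_socket;
  WSAPROTOCOL_INFOW protocol_info;
  AFD_POLL_INFO poll_info;

  /* Resolve the base (AFD) socket; errno is set on failure. */
  if (afd_get_protocol(sock, &afd_socket, &protocol_info) < 0)
    return -1;

  poll_info.Timeout.QuadPart = INT64_MAX; /* No driver-side timeout. */
  poll_info.NumberOfHandles = 1;
  poll_info.Exclusive = 0;
  poll_info.Handles[0].Handle = (HANDLE) afd_socket;
  poll_info.Handles[0].Status = 0;
  poll_info.Handles[0].Events = AFD_POLL_RECEIVE | AFD_POLL_DISCONNECT;

  /* A NULL OVERLAPPED makes afd_poll() wait inline for completion. */
  if (afd_poll(afd_socket, &poll_info, NULL) < 0)
    return -1;

  return (int) poll_info.Handles[0].Events; /* Events that were signaled. */
}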

src/api.c

@@ -1,121 +0,0 @@
#include <stdint.h>
#include <stdlib.h>
#include "api.h"
#include "error.h"
#include "init.h"
#include "port.h"
#include "reflock-tree.h"
#include "util.h"
#include "win.h"
static reflock_tree_t _epoll_handle_tree;
static inline ep_port_t* _handle_tree_node_to_port(
reflock_tree_node_t* tree_node) {
return container_of(tree_node, ep_port_t, handle_tree_node);
}
int api_global_init(void) {
reflock_tree_init(&_epoll_handle_tree);
return 0;
}
static HANDLE _epoll_create(void) {
ep_port_t* port_info;
HANDLE ephnd;
if (init() < 0)
return NULL;
port_info = ep_port_new(&ephnd);
if (port_info == NULL)
return NULL;
if (reflock_tree_add(&_epoll_handle_tree,
&port_info->handle_tree_node,
(uintptr_t) ephnd) < 0) {
ep_port_delete(port_info);
return_error(INVALID_HANDLE_VALUE, ERROR_ALREADY_EXISTS);
}
return ephnd;
}
HANDLE epoll_create(int size) {
if (size <= 0)
return_error(INVALID_HANDLE_VALUE, ERROR_INVALID_PARAMETER);
return _epoll_create();
}
HANDLE epoll_create1(int flags) {
if (flags != 0)
return_error(INVALID_HANDLE_VALUE, ERROR_INVALID_PARAMETER);
return _epoll_create();
}
int epoll_close(HANDLE ephnd) {
reflock_tree_node_t* tree_node;
ep_port_t* port_info;
if (init() < 0)
return -1;
tree_node = reflock_tree_del_and_ref(&_epoll_handle_tree, (uintptr_t) ephnd);
if (tree_node == NULL)
return_error(-1, ERROR_INVALID_HANDLE);
port_info = _handle_tree_node_to_port(tree_node);
ep_port_close(port_info);
reflock_tree_node_unref_and_destroy(tree_node);
return ep_port_delete(port_info);
}
int epoll_ctl(HANDLE ephnd, int op, SOCKET sock, struct epoll_event* ev) {
reflock_tree_node_t* tree_node;
ep_port_t* port_info;
int result;
if (init() < 0)
return -1;
tree_node =
reflock_tree_find_and_ref(&_epoll_handle_tree, (uintptr_t) ephnd);
if (tree_node == NULL)
return_error(-1, ERROR_INVALID_HANDLE);
port_info = _handle_tree_node_to_port(tree_node);
result = ep_port_ctl(port_info, op, sock, ev);
reflock_tree_node_unref(tree_node);
return result;
}
int epoll_wait(HANDLE ephnd,
struct epoll_event* events,
int maxevents,
int timeout) {
reflock_tree_node_t* tree_node;
ep_port_t* port_info;
int result;
if (init() < 0)
return -1;
tree_node =
reflock_tree_find_and_ref(&_epoll_handle_tree, (uintptr_t) ephnd);
if (tree_node == NULL)
return_error(-1, ERROR_INVALID_HANDLE);
port_info = _handle_tree_node_to_port(tree_node);
result = ep_port_wait(port_info, events, maxevents, timeout);
reflock_tree_node_unref(tree_node);
return result;
}
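
A hypothetical caller-side sketch, not part of this commit, showing how the functions above mirror the Linux epoll API. EPOLLIN and struct epoll_event are assumed to come from the public wepoll.h header, which is not included in this excerpt; the helper name watch_once is ours.

#include <winsock2.h>

#include "wepoll.h"

/* Watch one socket for readability, wait up to one second, then clean up. */
static int watch_once(SOCKET sock) {
  struct epoll_event ev, out[8];
  int count;

  HANDLE ephnd = epoll_create1(0);
  /* This version reports failure as NULL or INVALID_HANDLE_VALUE. */
  if (ephnd == NULL || ephnd == INVALID_HANDLE_VALUE)
    return -1;

  ev.events = EPOLLIN;
  ev.data.ptr = NULL; /* Any epoll_data member works; it is echoed back. */
  if (epoll_ctl(ephnd, EPOLL_CTL_ADD, sock, &ev) < 0) {
    epoll_close(ephnd);
    return -1;
  }

  count = epoll_wait(ephnd, out, 8, 1000); /* >0 events, 0 timeout, <0 error. */
  epoll_close(ephnd);
  return count;
}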


@@ -1,8 +0,0 @@
#ifndef WEPOLL_API_H_
#define WEPOLL_API_H_
#include "internal.h"
WEPOLL_INTERNAL int api_global_init(void);
#endif /* WEPOLL_API_H_ */


@@ -1,7 +0,0 @@
#ifndef WEPOLL_INTERNAL_H_
#define WEPOLL_INTERNAL_H_
#define WEPOLL_INTERNAL static
#define WEPOLL_INTERNAL_EXTERN static
#endif /* WEPOLL_INTERNAL_H_ */


@@ -1,121 +0,0 @@
#include <errno.h>
#include "error.h"
#include "nt.h"
#include "ntstatus.h"
#include "win.h"
#define _ERROR_ERRNO_MAP(X) \
X(ERROR_ACCESS_DENIED, EACCES) \
X(ERROR_ALREADY_EXISTS, EEXIST) \
X(ERROR_BAD_COMMAND, EACCES) \
X(ERROR_BAD_EXE_FORMAT, ENOEXEC) \
X(ERROR_BAD_LENGTH, EACCES) \
X(ERROR_BAD_NETPATH, ENOENT) \
X(ERROR_BAD_NET_NAME, ENOENT) \
X(ERROR_BAD_NET_RESP, ENETDOWN) \
X(ERROR_BAD_PATHNAME, ENOENT) \
X(ERROR_BROKEN_PIPE, EPIPE) \
X(ERROR_CANNOT_MAKE, EACCES) \
X(ERROR_COMMITMENT_LIMIT, ENOMEM) \
X(ERROR_CONNECTION_ABORTED, ECONNABORTED) \
X(ERROR_CONNECTION_ACTIVE, EISCONN) \
X(ERROR_CONNECTION_REFUSED, ECONNREFUSED) \
X(ERROR_CRC, EACCES) \
X(ERROR_DIR_NOT_EMPTY, ENOTEMPTY) \
X(ERROR_DISK_FULL, ENOSPC) \
X(ERROR_DUP_NAME, EADDRINUSE) \
X(ERROR_FILENAME_EXCED_RANGE, ENOENT) \
X(ERROR_FILE_NOT_FOUND, ENOENT) \
X(ERROR_GEN_FAILURE, EACCES) \
X(ERROR_GRACEFUL_DISCONNECT, EPIPE) \
X(ERROR_HOST_DOWN, EHOSTUNREACH) \
X(ERROR_HOST_UNREACHABLE, EHOSTUNREACH) \
X(ERROR_INSUFFICIENT_BUFFER, EFAULT) \
X(ERROR_INVALID_ADDRESS, EADDRNOTAVAIL) \
X(ERROR_INVALID_FUNCTION, EINVAL) \
X(ERROR_INVALID_HANDLE, EBADF) \
X(ERROR_INVALID_NETNAME, EADDRNOTAVAIL) \
X(ERROR_INVALID_PARAMETER, EINVAL) \
X(ERROR_INVALID_USER_BUFFER, EMSGSIZE) \
X(ERROR_IO_PENDING, EINPROGRESS) \
X(ERROR_LOCK_VIOLATION, EACCES) \
X(ERROR_MORE_DATA, EMSGSIZE) \
X(ERROR_NETNAME_DELETED, ECONNABORTED) \
X(ERROR_NETWORK_ACCESS_DENIED, EACCES) \
X(ERROR_NETWORK_BUSY, ENETDOWN) \
X(ERROR_NETWORK_UNREACHABLE, ENETUNREACH) \
X(ERROR_NOACCESS, EFAULT) \
X(ERROR_NOT_ENOUGH_MEMORY, ENOMEM) \
X(ERROR_NOT_ENOUGH_QUOTA, ENOMEM) \
X(ERROR_NOT_FOUND, ENOENT) \
X(ERROR_NOT_LOCKED, EACCES) \
X(ERROR_NOT_READY, EACCES) \
X(ERROR_NOT_SAME_DEVICE, EXDEV) \
X(ERROR_NOT_SUPPORTED, EOPNOTSUPP) \
X(ERROR_NO_MORE_FILES, ENOENT) \
X(ERROR_NO_SYSTEM_RESOURCES, ENOMEM) \
X(ERROR_OPERATION_ABORTED, EINTR) \
X(ERROR_OUT_OF_PAPER, EACCES) \
X(ERROR_PAGEFILE_QUOTA, ENOMEM) \
X(ERROR_PATH_NOT_FOUND, ENOENT) \
X(ERROR_PIPE_NOT_CONNECTED, EPIPE) \
X(ERROR_PORT_UNREACHABLE, ECONNRESET) \
X(ERROR_PROTOCOL_UNREACHABLE, ENETUNREACH) \
X(ERROR_REM_NOT_LIST, ECONNREFUSED) \
X(ERROR_REQUEST_ABORTED, EINTR) \
X(ERROR_REQ_NOT_ACCEP, EWOULDBLOCK) \
X(ERROR_SECTOR_NOT_FOUND, EACCES) \
X(ERROR_SEM_TIMEOUT, ETIMEDOUT) \
X(ERROR_SHARING_VIOLATION, EACCES) \
X(ERROR_TOO_MANY_NAMES, ENOMEM) \
X(ERROR_TOO_MANY_OPEN_FILES, EMFILE) \
X(ERROR_UNEXP_NET_ERR, ECONNABORTED) \
X(ERROR_WAIT_NO_CHILDREN, ECHILD) \
X(ERROR_WORKING_SET_QUOTA, ENOMEM) \
X(ERROR_WRITE_PROTECT, EACCES) \
X(ERROR_WRONG_DISK, EACCES) \
X(WSAEACCES, EACCES) \
X(WSAEADDRINUSE, EADDRINUSE) \
X(WSAEADDRNOTAVAIL, EADDRNOTAVAIL) \
X(WSAEAFNOSUPPORT, EAFNOSUPPORT) \
X(WSAECONNABORTED, ECONNABORTED) \
X(WSAECONNREFUSED, ECONNREFUSED) \
X(WSAECONNRESET, ECONNRESET) \
X(WSAEDISCON, EPIPE) \
X(WSAEFAULT, EFAULT) \
X(WSAEHOSTDOWN, EHOSTUNREACH) \
X(WSAEHOSTUNREACH, EHOSTUNREACH) \
X(WSAEINTR, EINTR) \
X(WSAEINVAL, EINVAL) \
X(WSAEISCONN, EISCONN) \
X(WSAEMSGSIZE, EMSGSIZE) \
X(WSAENETDOWN, ENETDOWN) \
X(WSAENETRESET, EHOSTUNREACH) \
X(WSAENETUNREACH, ENETUNREACH) \
X(WSAENOBUFS, ENOMEM) \
X(WSAENOTCONN, ENOTCONN) \
X(WSAENOTSOCK, EBADF) \
X(WSAEOPNOTSUPP, EOPNOTSUPP) \
X(WSAESHUTDOWN, EPIPE) \
X(WSAETIMEDOUT, ETIMEDOUT) \
X(WSAEWOULDBLOCK, EWOULDBLOCK)
errno_t err_map_win_error_to_errno(DWORD error) {
switch (error) {
#define X(error_sym, errno_sym) \
case error_sym: \
return errno_sym;
_ERROR_ERRNO_MAP(X)
#undef X
}
return EINVAL;
}
void err_set_win_error(DWORD error) {
if (error == 0)
error = GetLastError();
else
SetLastError(error);
errno = err_map_win_error_to_errno(error);
}


@@ -1,20 +0,0 @@
#ifndef WEPOLL_ERROR_H_
#define WEPOLL_ERROR_H_
#include <errno.h>
#include "internal.h"
#include "ntstatus.h"
#define _return_error_helper(error, value) \
do { \
err_set_win_error(error); \
return (value); \
} while (0)
#define return_error(value, ...) _return_error_helper(__VA_ARGS__ + 0, value)
WEPOLL_INTERNAL errno_t err_map_win_error_to_errno(DWORD error);
WEPOLL_INTERNAL void err_set_win_error(DWORD error);
#endif /* WEPOLL_ERROR_H_ */
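
An illustrative sketch, not part of the original sources, of how return_error() is meant to be used. The helper names close_checked and require_positive are hypothetical; the behavior in the comments follows directly from the macro above and from err_set_win_error() in error.c.

#include "error.h"
#include "win.h"

/* With one argument, return_error() maps GetLastError(); with two, it sets
 * and maps the explicitly supplied Win32 error code. Either way errno is
 * updated and the enclosing function returns the given value. */
static int close_checked(HANDLE handle) {
  if (!CloseHandle(handle))
    return_error(-1); /* errno = err_map_win_error_to_errno(GetLastError()) */
  return 0;
}

static int require_positive(int size) {
  if (size <= 0)
    return_error(-1, ERROR_INVALID_PARAMETER); /* errno = EINVAL */
  return 0;
}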


@@ -1,46 +0,0 @@
#include <stdbool.h>
#include "api.h"
#include "error.h"
#include "init.h"
#include "nt.h"
#include "reflock.h"
#include "util.h"
#include "win.h"
static bool _initialized = false;
static INIT_ONCE _once = INIT_ONCE_STATIC_INIT;
static int _winsock_global_init(void) {
int r;
WSADATA wsa_data;
r = WSAStartup(MAKEWORD(2, 2), &wsa_data);
if (r != 0)
return_error(-1);
return 0;
}
static BOOL CALLBACK _init_once_callback(INIT_ONCE* once,
void* parameter,
void** context) {
unused(once);
unused(parameter);
unused(context);
if (_winsock_global_init() < 0 || nt_global_init() < 0 ||
reflock_global_init() < 0 || api_global_init() < 0)
return FALSE;
_initialized = true;
return TRUE;
}
int init(void) {
if (!_initialized &&
!InitOnceExecuteOnce(&_once, _init_once_callback, NULL, NULL))
return -1; /* LastError and errno aren't touched by InitOnceExecuteOnce(). */
return 0;
}


@@ -1,8 +0,0 @@
#ifndef WEPOLL_INIT_H_
#define WEPOLL_INIT_H_
#include "internal.h"
WEPOLL_INTERNAL int init(void);
#endif /* WEPOLL_INIT_H_ */


@@ -1,26 +0,0 @@
#include <stdlib.h>
#include "nt.h"
#include "win.h"
#define X(return_type, attributes, name, parameters) \
WEPOLL_INTERNAL return_type(attributes* name) parameters = NULL;
NTDLL_IMPORT_LIST(X)
#undef X
int nt_global_init(void) {
HMODULE ntdll;
ntdll = GetModuleHandleW(L"ntdll.dll");
if (ntdll == NULL)
return -1;
#define X(return_type, attributes, name, parameters) \
name = (return_type(attributes*) parameters) GetProcAddress(ntdll, #name); \
if (name == NULL) \
return -1;
NTDLL_IMPORT_LIST(X)
#undef X
return 0;
}


@@ -1,77 +0,0 @@
#ifndef WEPOLL_NT_H_
#define WEPOLL_NT_H_
#include "internal.h"
#include "ntstatus.h"
#include "win.h"
WEPOLL_INTERNAL int nt_global_init(void);
typedef struct _IO_STATUS_BLOCK {
union {
NTSTATUS Status;
PVOID Pointer;
};
ULONG_PTR Information;
} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK;
typedef VOID(NTAPI* PIO_APC_ROUTINE)(PVOID ApcContext,
PIO_STATUS_BLOCK IoStatusBlock,
ULONG Reserved);
typedef struct _LSA_UNICODE_STRING {
USHORT Length;
USHORT MaximumLength;
PWSTR Buffer;
} LSA_UNICODE_STRING, *PLSA_UNICODE_STRING, UNICODE_STRING, *PUNICODE_STRING;
typedef struct _OBJECT_ATTRIBUTES {
ULONG Length;
HANDLE RootDirectory;
PUNICODE_STRING ObjectName;
ULONG Attributes;
PVOID SecurityDescriptor;
PVOID SecurityQualityOfService;
} OBJECT_ATTRIBUTES, *POBJECT_ATTRIBUTES;
#define NTDLL_IMPORT_LIST(X) \
X(NTSTATUS, \
NTAPI, \
NtDeviceIoControlFile, \
(HANDLE FileHandle, \
HANDLE Event, \
PIO_APC_ROUTINE ApcRoutine, \
PVOID ApcContext, \
PIO_STATUS_BLOCK IoStatusBlock, \
ULONG IoControlCode, \
PVOID InputBuffer, \
ULONG InputBufferLength, \
PVOID OutputBuffer, \
ULONG OutputBufferLength)) \
\
X(ULONG, WINAPI, RtlNtStatusToDosError, (NTSTATUS Status)) \
\
X(NTSTATUS, \
NTAPI, \
NtCreateKeyedEvent, \
(PHANDLE handle, \
ACCESS_MASK access, \
POBJECT_ATTRIBUTES attr, \
ULONG flags)) \
\
X(NTSTATUS, \
NTAPI, \
NtWaitForKeyedEvent, \
(HANDLE handle, PVOID key, BOOLEAN alertable, PLARGE_INTEGER mstimeout)) \
\
X(NTSTATUS, \
NTAPI, \
NtReleaseKeyedEvent, \
(HANDLE handle, PVOID key, BOOLEAN alertable, PLARGE_INTEGER mstimeout))
#define X(return_type, attributes, name, parameters) \
WEPOLL_INTERNAL_EXTERN return_type(attributes* name) parameters;
NTDLL_IMPORT_LIST(X)
#undef X
#endif /* WEPOLL_NT_H_ */
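
For reference, not part of the original: hand-expanding the NTDLL_IMPORT_LIST X-macros for a single entry shows what nt.h and nt.c generate. The wrapper function nt_resolve_one is hypothetical; nt_global_init() performs the same lookup for every listed import.

#include "nt.h"

/* What the X() in nt.c produces for this entry, with the macro arguments
 * substituted by hand: a NULL-initialized function pointer ... */
static ULONG(WINAPI* RtlNtStatusToDosError)(NTSTATUS Status) = NULL;

/* ... which is then resolved from ntdll.dll by name at runtime. */
static int nt_resolve_one(HMODULE ntdll) {
  RtlNtStatusToDosError = (ULONG(WINAPI*)(NTSTATUS Status))
      GetProcAddress(ntdll, "RtlNtStatusToDosError");
  if (RtlNtStatusToDosError == NULL)
    return -1;
  return 0;
}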


@@ -1,75 +0,0 @@
#ifndef WEPOLL_NTSTATUS_H_
#define WEPOLL_NTSTATUS_H_
#include "win.h"
#ifndef _NTDEF_
typedef LONG NTSTATUS;
typedef NTSTATUS* PNTSTATUS;
#endif
#ifndef NT_SUCCESS
#define NT_SUCCESS(status) (((NTSTATUS)(status)) >= 0)
#endif
#ifndef STATUS_SUCCESS
#define STATUS_SUCCESS ((NTSTATUS) 0x00000000L)
#endif
#ifndef STATUS_WAIT_0
#define STATUS_WAIT_0 ((NTSTATUS) 0x00000000L)
#endif
#ifndef STATUS_WAIT_1
#define STATUS_WAIT_1 ((NTSTATUS) 0x00000001L)
#endif
#ifndef STATUS_WAIT_2
#define STATUS_WAIT_2 ((NTSTATUS) 0x00000002L)
#endif
#ifndef STATUS_WAIT_3
#define STATUS_WAIT_3 ((NTSTATUS) 0x00000003L)
#endif
#ifndef STATUS_WAIT_63
#define STATUS_WAIT_63 ((NTSTATUS) 0x0000003FL)
#endif
#ifndef STATUS_ABANDONED
#define STATUS_ABANDONED ((NTSTATUS) 0x00000080L)
#endif
#ifndef STATUS_ABANDONED_WAIT_0
#define STATUS_ABANDONED_WAIT_0 ((NTSTATUS) 0x00000080L)
#endif
#ifndef STATUS_ABANDONED_WAIT_63
#define STATUS_ABANDONED_WAIT_63 ((NTSTATUS) 0x000000BFL)
#endif
#ifndef STATUS_USER_APC
#define STATUS_USER_APC ((NTSTATUS) 0x000000C0L)
#endif
#ifndef STATUS_KERNEL_APC
#define STATUS_KERNEL_APC ((NTSTATUS) 0x00000100L)
#endif
#ifndef STATUS_ALERTED
#define STATUS_ALERTED ((NTSTATUS) 0x00000101L)
#endif
#ifndef STATUS_TIMEOUT
#define STATUS_TIMEOUT ((NTSTATUS) 0x00000102L)
#endif
#ifndef STATUS_PENDING
#define STATUS_PENDING ((NTSTATUS) 0x00000103L)
#endif
#ifndef STATUS_CANCELLED
#define STATUS_CANCELLED ((NTSTATUS) 0xC0000120L)
#endif
#endif /* WEPOLL_NTSTATUS_H_ */


@@ -1,139 +0,0 @@
#include <assert.h>
#include <malloc.h>
#include "error.h"
#include "poll-group.h"
#include "port.h"
#include "util.h"
#include "win.h"
static const size_t _POLL_GROUP_MAX_SIZE = 32;
typedef struct poll_group_allocator {
ep_port_t* port_info;
queue_t poll_group_queue;
WSAPROTOCOL_INFOW protocol_info;
} poll_group_allocator_t;
typedef struct poll_group {
poll_group_allocator_t* allocator;
queue_node_t queue_node;
SOCKET socket;
size_t group_size;
} poll_group_t;
static int _poll_group_create_socket(poll_group_t* poll_group,
WSAPROTOCOL_INFOW* protocol_info,
HANDLE iocp) {
SOCKET socket;
socket = WSASocketW(protocol_info->iAddressFamily,
protocol_info->iSocketType,
protocol_info->iProtocol,
protocol_info,
0,
WSA_FLAG_OVERLAPPED);
if (socket == INVALID_SOCKET)
return_error(-1);
if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0))
goto error;
if (CreateIoCompletionPort((HANDLE) socket, iocp, 0, 0) == NULL)
goto error;
poll_group->socket = socket;
return 0;
error:;
DWORD error = GetLastError();
closesocket(socket);
return_error(-1, error);
}
static poll_group_t* _poll_group_new(poll_group_allocator_t* pga) {
poll_group_t* poll_group = malloc(sizeof *poll_group);
if (poll_group == NULL)
return_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
memset(poll_group, 0, sizeof *poll_group);
queue_node_init(&poll_group->queue_node);
poll_group->allocator = pga;
if (_poll_group_create_socket(
poll_group, &pga->protocol_info, pga->port_info->iocp) < 0) {
free(poll_group);
return NULL;
}
queue_append(&pga->poll_group_queue, &poll_group->queue_node);
return poll_group;
}
static void _poll_group_delete(poll_group_t* poll_group) {
assert(poll_group->group_size == 0);
closesocket(poll_group->socket);
queue_remove(&poll_group->queue_node);
free(poll_group);
}
SOCKET poll_group_get_socket(poll_group_t* poll_group) {
return poll_group->socket;
}
poll_group_allocator_t* poll_group_allocator_new(
ep_port_t* port_info, const WSAPROTOCOL_INFOW* protocol_info) {
poll_group_allocator_t* pga = malloc(sizeof *pga);
if (pga == NULL)
return_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
queue_init(&pga->poll_group_queue);
pga->port_info = port_info;
pga->protocol_info = *protocol_info;
return pga;
}
void poll_group_allocator_delete(poll_group_allocator_t* pga) {
queue_t* poll_group_queue = &pga->poll_group_queue;
while (!queue_empty(poll_group_queue)) {
queue_node_t* queue_node = queue_first(poll_group_queue);
poll_group_t* poll_group =
container_of(queue_node, poll_group_t, queue_node);
_poll_group_delete(poll_group);
}
free(pga);
}
poll_group_t* poll_group_acquire(poll_group_allocator_t* pga) {
queue_t* queue = &pga->poll_group_queue;
poll_group_t* poll_group =
!queue_empty(queue)
? container_of(queue_last(queue), poll_group_t, queue_node)
: NULL;
if (poll_group == NULL || poll_group->group_size >= _POLL_GROUP_MAX_SIZE)
poll_group = _poll_group_new(pga);
if (poll_group == NULL)
return NULL;
if (++poll_group->group_size == _POLL_GROUP_MAX_SIZE)
queue_move_first(&pga->poll_group_queue, &poll_group->queue_node);
return poll_group;
}
void poll_group_release(poll_group_t* poll_group) {
poll_group_allocator_t* pga = poll_group->allocator;
poll_group->group_size--;
assert(poll_group->group_size < _POLL_GROUP_MAX_SIZE);
queue_move_last(&pga->poll_group_queue, &poll_group->queue_node);
/* TODO: free the poll_group_t* item at some point. */
}


@@ -1,22 +0,0 @@
#ifndef WEPOLL_POLL_GROUP_H_
#define WEPOLL_POLL_GROUP_H_
#include "error.h"
#include "internal.h"
#include "queue.h"
#include "win.h"
typedef struct ep_port ep_port_t;
typedef struct poll_group_allocator poll_group_allocator_t;
typedef struct poll_group poll_group_t;
WEPOLL_INTERNAL poll_group_allocator_t* poll_group_allocator_new(
ep_port_t* port_info, const WSAPROTOCOL_INFOW* protocol_info);
WEPOLL_INTERNAL void poll_group_allocator_delete(poll_group_allocator_t* pga);
WEPOLL_INTERNAL poll_group_t* poll_group_acquire(poll_group_allocator_t* pga);
WEPOLL_INTERNAL void poll_group_release(poll_group_t* ds);
WEPOLL_INTERNAL SOCKET poll_group_get_socket(poll_group_t* poll_group);
#endif /* WEPOLL_POLL_GROUP_H_ */
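
A hypothetical sketch, not part of this commit, of how a poll group is meant to be consumed: acquire a group, use its socket as the driver socket for afd_poll(), and release the reference afterwards. The helper name blocking_poll_through_group is ours; ep_port_acquire_poll_group() and ep_port_release_poll_group() are declared further down in port.h, and the real ep_sock layer (src/sock.c, not shown in this excerpt) keeps the group reference for the socket's lifetime rather than per call.

#include "port.h"

/* Issue one blocking AFD poll through a shared poll-group socket. */
static int blocking_poll_through_group(ep_port_t* port_info,
                                       SOCKET base_socket,
                                       size_t protocol_id,
                                       const WSAPROTOCOL_INFOW* protocol_info,
                                       AFD_POLL_INFO* poll_info) {
  int result;
  poll_group_t* poll_group =
      ep_port_acquire_poll_group(port_info, protocol_id, protocol_info);
  if (poll_group == NULL)
    return -1;

  poll_info->Handles[0].Handle = (HANDLE) base_socket;
  result = afd_poll(poll_group_get_socket(poll_group), poll_info, NULL);

  ep_port_release_poll_group(poll_group);
  return result;
}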


@@ -1,393 +0,0 @@
#include <assert.h>
#include <malloc.h>
#include <stdlib.h>
#include "error.h"
#include "poll-group.h"
#include "port.h"
#include "queue.h"
#include "sock.h"
#include "tree.h"
#include "util.h"
#include "wepoll.h"
#include "win.h"
#define _PORT_MAX_ON_STACK_COMPLETIONS 256
static ep_port_t* _ep_port_alloc(void) {
ep_port_t* port_info = malloc(sizeof *port_info);
if (port_info == NULL)
return_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
return port_info;
}
static void _ep_port_free(ep_port_t* port) {
assert(port != NULL);
free(port);
}
ep_port_t* ep_port_new(HANDLE* iocp_out) {
ep_port_t* port_info;
HANDLE iocp;
port_info = _ep_port_alloc();
if (port_info == NULL)
goto err1;
iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
if (iocp == INVALID_HANDLE_VALUE)
goto err2;
memset(port_info, 0, sizeof *port_info);
port_info->iocp = iocp;
queue_init(&port_info->update_queue);
tree_init(&port_info->sock_tree);
reflock_tree_node_init(&port_info->handle_tree_node);
InitializeCriticalSection(&port_info->lock);
*iocp_out = iocp;
return port_info;
err2:
_ep_port_free(port_info);
err1:
return NULL;
}
static int _ep_port_close_iocp(ep_port_t* port_info) {
HANDLE iocp = port_info->iocp;
port_info->iocp = NULL;
if (!CloseHandle(iocp))
return_error(-1);
return 0;
}
int ep_port_close(ep_port_t* port_info) {
int result;
EnterCriticalSection(&port_info->lock);
result = _ep_port_close_iocp(port_info);
LeaveCriticalSection(&port_info->lock);
return result;
}
int ep_port_delete(ep_port_t* port_info) {
tree_node_t* tree_node;
EnterCriticalSection(&port_info->lock);
if (port_info->iocp != NULL)
_ep_port_close_iocp(port_info);
while ((tree_node = tree_root(&port_info->sock_tree)) != NULL) {
ep_sock_t* sock_info = container_of(tree_node, ep_sock_t, tree_node);
ep_sock_force_delete(port_info, sock_info);
}
for (size_t i = 0; i < array_count(port_info->poll_group_allocators); i++) {
poll_group_allocator_t* pga = port_info->poll_group_allocators[i];
if (pga != NULL)
poll_group_allocator_delete(pga);
}
LeaveCriticalSection(&port_info->lock);
DeleteCriticalSection(&port_info->lock);
_ep_port_free(port_info);
return 0;
}
static int _ep_port_update_events(ep_port_t* port_info) {
queue_t* update_queue = &port_info->update_queue;
/* Walk the queue, submitting new poll requests for every socket that needs
* it. */
while (!queue_empty(update_queue)) {
queue_node_t* queue_node = queue_first(update_queue);
ep_sock_t* sock_info = container_of(queue_node, ep_sock_t, queue_node);
if (ep_sock_update(port_info, sock_info) < 0)
return -1;
/* ep_sock_update() removes the socket from the update list if
* successful. */
}
return 0;
}
static void _ep_port_update_events_if_polling(ep_port_t* port_info) {
if (port_info->active_poll_count > 0)
_ep_port_update_events(port_info);
}
static int _ep_port_feed_events(ep_port_t* port_info,
struct epoll_event* epoll_events,
OVERLAPPED_ENTRY* iocp_events,
int iocp_event_count) {
int epoll_event_count = 0;
for (int i = 0; i < iocp_event_count; i++) {
OVERLAPPED* overlapped = iocp_events[i].lpOverlapped;
struct epoll_event* ev = &epoll_events[epoll_event_count];
epoll_event_count += ep_sock_feed_event(port_info, overlapped, ev);
}
return epoll_event_count;
}
static int _ep_port_poll(ep_port_t* port_info,
struct epoll_event* epoll_events,
OVERLAPPED_ENTRY* iocp_events,
int maxevents,
DWORD timeout) {
ULONG completion_count;
if (_ep_port_update_events(port_info) < 0)
return -1;
port_info->active_poll_count++;
LeaveCriticalSection(&port_info->lock);
BOOL r = GetQueuedCompletionStatusEx(port_info->iocp,
iocp_events,
maxevents,
&completion_count,
timeout,
FALSE);
EnterCriticalSection(&port_info->lock);
port_info->active_poll_count--;
if (!r)
return_error(-1);
return _ep_port_feed_events(
port_info, epoll_events, iocp_events, completion_count);
}
int ep_port_wait(ep_port_t* port_info,
struct epoll_event* events,
int maxevents,
int timeout) {
OVERLAPPED_ENTRY stack_iocp_events[_PORT_MAX_ON_STACK_COMPLETIONS];
OVERLAPPED_ENTRY* iocp_events;
ULONGLONG due = 0;
DWORD gqcs_timeout;
int result;
/* Check whether `maxevents` is in range. */
if (maxevents <= 0)
return_error(-1, ERROR_INVALID_PARAMETER);
/* Decide whether the IOCP completion list can live on the stack, or allocate
* memory for it on the heap. */
if ((size_t) maxevents <= array_count(stack_iocp_events)) {
iocp_events = stack_iocp_events;
} else if ((iocp_events = malloc(maxevents * sizeof *iocp_events)) == NULL) {
iocp_events = stack_iocp_events;
maxevents = array_count(stack_iocp_events);
}
/* Compute the timeout for GetQueuedCompletionStatusEx(), and the wait end
* time, if the user specified a timeout other than zero or infinite.
*/
if (timeout > 0) {
due = GetTickCount64() + timeout;
gqcs_timeout = (DWORD) timeout;
} else if (timeout == 0) {
gqcs_timeout = 0;
} else {
gqcs_timeout = INFINITE;
}
EnterCriticalSection(&port_info->lock);
/* Dequeue completion packets until either at least one interesting event
* has been discovered, or the timeout is reached.
*/
do {
ULONGLONG now;
result =
_ep_port_poll(port_info, events, iocp_events, maxevents, gqcs_timeout);
if (result < 0 || result > 0)
break; /* Result, error, or time-out. */
if (timeout < 0)
continue; /* ep_port_wait() never times out. */
/* Check for time-out. */
now = GetTickCount64();
if (now >= due)
break;
/* Recompute timeout. */
gqcs_timeout = (DWORD)(due - now);
} while (gqcs_timeout > 0);
_ep_port_update_events_if_polling(port_info);
LeaveCriticalSection(&port_info->lock);
if (iocp_events != stack_iocp_events)
free(iocp_events);
if (result >= 0)
return result;
else if (GetLastError() == WAIT_TIMEOUT)
return 0;
else
return -1;
}
static int _ep_port_ctl_add(ep_port_t* port_info,
SOCKET sock,
struct epoll_event* ev) {
ep_sock_t* sock_info = ep_sock_new(port_info, sock);
if (sock_info == NULL)
return -1;
if (ep_sock_set_event(port_info, sock_info, ev) < 0) {
ep_sock_delete(port_info, sock_info);
return -1;
}
_ep_port_update_events_if_polling(port_info);
return 0;
}
static int _ep_port_ctl_mod(ep_port_t* port_info,
SOCKET sock,
struct epoll_event* ev) {
ep_sock_t* sock_info = ep_port_find_socket(port_info, sock);
if (sock_info == NULL)
return -1;
if (ep_sock_set_event(port_info, sock_info, ev) < 0)
return -1;
_ep_port_update_events_if_polling(port_info);
return 0;
}
static int _ep_port_ctl_del(ep_port_t* port_info, SOCKET sock) {
ep_sock_t* sock_info = ep_port_find_socket(port_info, sock);
if (sock_info == NULL)
return -1;
ep_sock_delete(port_info, sock_info);
return 0;
}
static int _ep_port_ctl_op(ep_port_t* port_info,
int op,
SOCKET sock,
struct epoll_event* ev) {
switch (op) {
case EPOLL_CTL_ADD:
return _ep_port_ctl_add(port_info, sock, ev);
case EPOLL_CTL_MOD:
return _ep_port_ctl_mod(port_info, sock, ev);
case EPOLL_CTL_DEL:
return _ep_port_ctl_del(port_info, sock);
default:
return_error(-1, ERROR_INVALID_PARAMETER);
}
}
int ep_port_ctl(ep_port_t* port_info,
int op,
SOCKET sock,
struct epoll_event* ev) {
int result;
EnterCriticalSection(&port_info->lock);
result = _ep_port_ctl_op(port_info, op, sock, ev);
LeaveCriticalSection(&port_info->lock);
return result;
}
int ep_port_add_socket(ep_port_t* port_info,
ep_sock_t* sock_info,
SOCKET socket) {
if (tree_add(&port_info->sock_tree, &sock_info->tree_node, socket) < 0)
return_error(-1, ERROR_ALREADY_EXISTS);
return 0;
}
int ep_port_del_socket(ep_port_t* port_info, ep_sock_t* sock_info) {
if (tree_del(&port_info->sock_tree, &sock_info->tree_node) < 0)
return_error(-1, ERROR_NOT_FOUND);
return 0;
}
ep_sock_t* ep_port_find_socket(ep_port_t* port_info, SOCKET socket) {
ep_sock_t* sock_info = safe_container_of(
tree_find(&port_info->sock_tree, socket), ep_sock_t, tree_node);
if (sock_info == NULL)
return_error(NULL, ERROR_NOT_FOUND);
return sock_info;
}
static poll_group_allocator_t* _ep_port_get_poll_group_allocator(
ep_port_t* port_info,
size_t protocol_id,
const WSAPROTOCOL_INFOW* protocol_info) {
poll_group_allocator_t** pga;
assert(protocol_id < array_count(port_info->poll_group_allocators));
pga = &port_info->poll_group_allocators[protocol_id];
if (*pga == NULL)
*pga = poll_group_allocator_new(port_info, protocol_info);
return *pga;
}
poll_group_t* ep_port_acquire_poll_group(
ep_port_t* port_info,
size_t protocol_id,
const WSAPROTOCOL_INFOW* protocol_info) {
poll_group_allocator_t* pga =
_ep_port_get_poll_group_allocator(port_info, protocol_id, protocol_info);
return poll_group_acquire(pga);
}
void ep_port_release_poll_group(poll_group_t* poll_group) {
poll_group_release(poll_group);
}
void ep_port_request_socket_update(ep_port_t* port_info,
ep_sock_t* sock_info) {
if (ep_port_is_socket_update_pending(port_info, sock_info))
return;
queue_append(&port_info->update_queue, &sock_info->queue_node);
assert(ep_port_is_socket_update_pending(port_info, sock_info));
}
void ep_port_clear_socket_update(ep_port_t* port_info, ep_sock_t* sock_info) {
if (!ep_port_is_socket_update_pending(port_info, sock_info))
return;
queue_remove(&sock_info->queue_node);
}
bool ep_port_is_socket_update_pending(ep_port_t* port_info,
ep_sock_t* sock_info) {
unused(port_info);
return queue_enqueued(&sock_info->queue_node);
}


@@ -1,64 +0,0 @@
#ifndef WEPOLL_PORT_H_
#define WEPOLL_PORT_H_
#include "afd.h"
#include "internal.h"
#include "poll-group.h"
#include "queue.h"
#include "rb.h"
#include "reflock-tree.h"
#include "sock.h"
#include "tree.h"
#include "util.h"
#include "win.h"
typedef struct ep_port ep_port_t;
typedef struct ep_sock ep_sock_t;
typedef struct ep_port {
HANDLE iocp;
poll_group_allocator_t*
poll_group_allocators[array_count(AFD_PROVIDER_GUID_LIST)];
tree_t sock_tree;
queue_t update_queue;
reflock_tree_node_t handle_tree_node;
CRITICAL_SECTION lock;
size_t active_poll_count;
} ep_port_t;
WEPOLL_INTERNAL ep_port_t* ep_port_new(HANDLE* iocp_out);
WEPOLL_INTERNAL int ep_port_close(ep_port_t* port_info);
WEPOLL_INTERNAL int ep_port_delete(ep_port_t* port_info);
WEPOLL_INTERNAL int ep_port_wait(ep_port_t* port_info,
struct epoll_event* events,
int maxevents,
int timeout);
WEPOLL_INTERNAL int ep_port_ctl(ep_port_t* port_info,
int op,
SOCKET sock,
struct epoll_event* ev);
WEPOLL_INTERNAL poll_group_t* ep_port_acquire_poll_group(
ep_port_t* port_info,
size_t protocol_id,
const WSAPROTOCOL_INFOW* protocol_info);
WEPOLL_INTERNAL void ep_port_release_poll_group(poll_group_t* poll_group);
WEPOLL_INTERNAL int ep_port_add_socket(ep_port_t* port_info,
ep_sock_t* sock_info,
SOCKET socket);
WEPOLL_INTERNAL int ep_port_del_socket(ep_port_t* port_info,
ep_sock_t* sock_info);
WEPOLL_INTERNAL ep_sock_t* ep_port_find_socket(ep_port_t* port_info,
SOCKET socket);
WEPOLL_INTERNAL void ep_port_request_socket_update(ep_port_t* port_info,
ep_sock_t* sock_info);
WEPOLL_INTERNAL void ep_port_clear_socket_update(ep_port_t* port_info,
ep_sock_t* sock_info);
WEPOLL_INTERNAL bool ep_port_is_socket_update_pending(ep_port_t* port_info,
ep_sock_t* sock_info);
#endif /* WEPOLL_PORT_H_ */


@@ -1,63 +0,0 @@
#include <stdbool.h>
#include <stdlib.h>
#include "queue.h"
void queue_init(queue_t* queue) {
queue_node_init(&queue->head);
}
void queue_node_init(queue_node_t* node) {
node->prev = node;
node->next = node;
}
static inline void _queue_detach(queue_node_t* node) {
node->prev->next = node->next;
node->next->prev = node->prev;
}
queue_node_t* queue_first(const queue_t* queue) {
return !queue_empty(queue) ? queue->head.next : NULL;
}
queue_node_t* queue_last(const queue_t* queue) {
return !queue_empty(queue) ? queue->head.prev : NULL;
}
void queue_prepend(queue_t* queue, queue_node_t* node) {
node->next = queue->head.next;
node->prev = &queue->head;
node->next->prev = node;
queue->head.next = node;
}
void queue_append(queue_t* queue, queue_node_t* node) {
node->next = &queue->head;
node->prev = queue->head.prev;
node->prev->next = node;
queue->head.prev = node;
}
void queue_move_first(queue_t* queue, queue_node_t* node) {
_queue_detach(node);
queue_prepend(queue, node);
}
void queue_move_last(queue_t* queue, queue_node_t* node) {
_queue_detach(node);
queue_append(queue, node);
}
void queue_remove(queue_node_t* node) {
_queue_detach(node);
queue_node_init(node);
}
bool queue_empty(const queue_t* queue) {
return !queue_enqueued(&queue->head);
}
bool queue_enqueued(const queue_node_t* node) {
return node->prev != node;
}


@@ -1,34 +0,0 @@
#ifndef WEPOLL_QUEUE_H_
#define WEPOLL_QUEUE_H_
#include <stdbool.h>
#include "internal.h"
typedef struct queue_node queue_node_t;
typedef struct queue_node {
queue_node_t* prev;
queue_node_t* next;
} queue_node_t;
typedef struct queue {
queue_node_t head;
} queue_t;
WEPOLL_INTERNAL void queue_init(queue_t* queue);
WEPOLL_INTERNAL void queue_node_init(queue_node_t* node);
WEPOLL_INTERNAL queue_node_t* queue_first(const queue_t* queue);
WEPOLL_INTERNAL queue_node_t* queue_last(const queue_t* queue);
WEPOLL_INTERNAL void queue_prepend(queue_t* queue, queue_node_t* node);
WEPOLL_INTERNAL void queue_append(queue_t* queue, queue_node_t* node);
WEPOLL_INTERNAL void queue_move_first(queue_t* queue, queue_node_t* node);
WEPOLL_INTERNAL void queue_move_last(queue_t* queue, queue_node_t* node);
WEPOLL_INTERNAL void queue_remove(queue_node_t* node);
WEPOLL_INTERNAL bool queue_empty(const queue_t* queue);
WEPOLL_INTERNAL bool queue_enqueued(const queue_node_t* node);
#endif /* WEPOLL_QUEUE_H_ */
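
A hypothetical usage sketch, not part of this commit: the queue is intrusive, so the node lives inside the owning struct and is mapped back with container_of(), which comes from util.h (not shown in this excerpt). The item_t type and sum_and_drain helper are ours.

#include "queue.h"
#include "util.h"

typedef struct item {
  int value;
  queue_node_t queue_node; /* Embedded (intrusive) queue link. */
} item_t;

static int sum_and_drain(queue_t* queue) {
  int sum = 0;
  while (!queue_empty(queue)) {
    queue_node_t* node = queue_first(queue);
    item_t* item = container_of(node, item_t, queue_node);
    queue_remove(node); /* Detaches the node and re-initializes it. */
    sum += item->value;
  }
  return sum;
}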

src/rb.h

@@ -1,501 +0,0 @@
/*-
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* A red-black tree is a binary search tree with the node color as an
* extra attribute. It fulfills a set of conditions:
* - every search path from the root to a leaf consists of the
* same number of black nodes,
* - each red node (except for the root) has a black parent,
* - each leaf node is black.
*
* Every operation on a red-black tree is bounded as O(lg n).
* The maximum height of a red-black tree is 2lg (n+1).
*/
#ifndef WEPOLL_RB_H_
#define WEPOLL_RB_H_
#ifdef __clang__
#define RB_UNUSED __attribute__((__unused__))
#else
#define RB_UNUSED /* empty */
#endif
/* clang-format off */
/* Macros that define a red-black tree */
#define RB_HEAD(name, type) \
struct name { \
struct type *rbh_root; /* root of the tree */ \
}
#define RB_INITIALIZER(root) \
{ NULL }
#define RB_INIT(root) do { \
(root)->rbh_root = NULL; \
} while (0)
#define RB_BLACK 0
#define RB_RED 1
#define RB_ENTRY(type) \
struct { \
struct type *rbe_left; /* left node */ \
struct type *rbe_right; /* right node */ \
struct type *rbe_parent; /* parent node */ \
int rbe_color; /* node color */ \
}
#define RB_LEFT(elm, field) (elm)->field.rbe_left
#define RB_RIGHT(elm, field) (elm)->field.rbe_right
#define RB_PARENT(elm, field) (elm)->field.rbe_parent
#define RB_COLOR(elm, field) (elm)->field.rbe_color
#define RB_ROOT(head) (head)->rbh_root
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
#define RB_SET(elm, parent, field) do { \
RB_PARENT(elm, field) = parent; \
RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
RB_COLOR(elm, field) = RB_RED; \
} while (0)
#define RB_SET_BLACKRED(black, red, field) do { \
RB_COLOR(black, field) = RB_BLACK; \
RB_COLOR(red, field) = RB_RED; \
} while (0)
#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
(tmp) = RB_RIGHT(elm, field); \
if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
} \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_LEFT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
} while (0)
#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
(tmp) = RB_LEFT(elm, field); \
if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
} \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_RIGHT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
} while (0)
/* Generates prototypes and inline functions */
#define RB_PROTOTYPE(name, type, field, cmp) \
RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
RB_PROTOTYPE_INTERNAL(name, type, field, cmp, static RB_UNUSED)
#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
attr struct type *name##_RB_INSERT(struct name *, struct type *); \
attr struct type *name##_RB_FIND(struct name *, struct type *); \
attr struct type *name##_RB_NFIND(struct name *, struct type *); \
attr struct type *name##_RB_NEXT(struct type *); \
attr struct type *name##_RB_PREV(struct type *); \
attr struct type *name##_RB_MINMAX(struct name *, int); \
\
/* Main rb operation.
* Moves node close to the key of elm to top
*/
#define RB_GENERATE(name, type, field, cmp) \
RB_GENERATE_INTERNAL(name, type, field, cmp,)
#define RB_GENERATE_STATIC(name, type, field, cmp) \
RB_GENERATE_INTERNAL(name, type, field, cmp, static RB_UNUSED)
#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
attr void \
name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
{ \
struct type *parent, *gparent, *tmp; \
while ((parent = RB_PARENT(elm, field)) != NULL && \
RB_COLOR(parent, field) == RB_RED) { \
gparent = RB_PARENT(parent, field); \
if (parent == RB_LEFT(gparent, field)) { \
tmp = RB_RIGHT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field); \
elm = gparent; \
continue; \
} \
if (RB_RIGHT(parent, field) == elm) { \
RB_ROTATE_LEFT(head, parent, tmp, field); \
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_RIGHT(head, gparent, tmp, field); \
} else { \
tmp = RB_LEFT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field); \
elm = gparent; \
continue; \
} \
if (RB_LEFT(parent, field) == elm) { \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_LEFT(head, gparent, tmp, field); \
} \
} \
RB_COLOR(head->rbh_root, field) = RB_BLACK; \
} \
\
attr void \
name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, \
struct type *elm) \
{ \
struct type *tmp; \
while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
elm != RB_ROOT(head)) { \
if (RB_LEFT(parent, field) == elm) { \
tmp = RB_RIGHT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_LEFT(head, parent, tmp, field); \
tmp = RB_RIGHT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \
struct type *oleft; \
if ((oleft = RB_LEFT(tmp, field)) \
!= NULL) \
RB_COLOR(oleft, field) = RB_BLACK; \
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_RIGHT(head, tmp, oleft, field); \
tmp = RB_RIGHT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_RIGHT(tmp, field)) \
RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \
RB_ROTATE_LEFT(head, parent, tmp, field); \
elm = RB_ROOT(head); \
break; \
} \
} else { \
tmp = RB_LEFT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
tmp = RB_LEFT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \
struct type *oright; \
if ((oright = RB_RIGHT(tmp, field)) \
!= NULL) \
RB_COLOR(oright, field) = RB_BLACK; \
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_LEFT(head, tmp, oright, field); \
tmp = RB_LEFT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_LEFT(tmp, field)) \
RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
elm = RB_ROOT(head); \
break; \
} \
} \
} \
if (elm) \
RB_COLOR(elm, field) = RB_BLACK; \
} \
\
attr struct type * \
name##_RB_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *child, *parent, *old = elm; \
int color; \
if (RB_LEFT(elm, field) == NULL) \
child = RB_RIGHT(elm, field); \
else if (RB_RIGHT(elm, field) == NULL) \
child = RB_LEFT(elm, field); \
else { \
struct type *left; \
elm = RB_RIGHT(elm, field); \
while ((left = RB_LEFT(elm, field)) != NULL) \
elm = left; \
child = RB_RIGHT(elm, field); \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
} else \
RB_ROOT(head) = child; \
if (RB_PARENT(elm, field) == old) \
parent = elm; \
(elm)->field = (old)->field; \
if (RB_PARENT(old, field)) { \
if (RB_LEFT(RB_PARENT(old, field), field) == old) \
RB_LEFT(RB_PARENT(old, field), field) = elm; \
else \
RB_RIGHT(RB_PARENT(old, field), field) = elm; \
} else \
RB_ROOT(head) = elm; \
RB_PARENT(RB_LEFT(old, field), field) = elm; \
if (RB_RIGHT(old, field)) \
RB_PARENT(RB_RIGHT(old, field), field) = elm; \
if (parent) { \
left = parent; \
} \
goto color; \
} \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
} else \
RB_ROOT(head) = child; \
color: \
if (color == RB_BLACK) \
name##_RB_REMOVE_COLOR(head, parent, child); \
return (old); \
} \
\
/* Inserts a node into the RB tree */ \
attr struct type * \
name##_RB_INSERT(struct name *head, struct type *elm) \
{ \
struct type *tmp; \
struct type *parent = NULL; \
int comp = 0; \
tmp = RB_ROOT(head); \
while (tmp) { \
parent = tmp; \
comp = (cmp)(elm, parent); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
RB_SET(elm, parent, field); \
if (parent != NULL) { \
if (comp < 0) \
RB_LEFT(parent, field) = elm; \
else \
RB_RIGHT(parent, field) = elm; \
} else \
RB_ROOT(head) = elm; \
name##_RB_INSERT_COLOR(head, elm); \
return (NULL); \
} \
\
/* Finds the node with the same key as elm */ \
attr struct type * \
name##_RB_FIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (NULL); \
} \
\
/* Finds the first node greater than or equal to the search key */ \
attr struct type * \
name##_RB_NFIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *res = NULL; \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) { \
res = tmp; \
tmp = RB_LEFT(tmp, field); \
} \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (res); \
} \
\
attr struct type * \
name##_RB_NEXT(struct type *elm) \
{ \
if (RB_RIGHT(elm, field)) { \
elm = RB_RIGHT(elm, field); \
while (RB_LEFT(elm, field)) \
elm = RB_LEFT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
attr struct type * \
name##_RB_PREV(struct type *elm) \
{ \
if (RB_LEFT(elm, field)) { \
elm = RB_LEFT(elm, field); \
while (RB_RIGHT(elm, field)) \
elm = RB_RIGHT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
attr struct type * \
name##_RB_MINMAX(struct name *head, int val) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *parent = NULL; \
while (tmp) { \
parent = tmp; \
if (val < 0) \
tmp = RB_LEFT(tmp, field); \
else \
tmp = RB_RIGHT(tmp, field); \
} \
return (parent); \
}
#define RB_NEGINF -1
#define RB_INF 1
#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
#define RB_PREV(name, x, y) name##_RB_PREV(y)
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
#define RB_FOREACH(x, name, head) \
for ((x) = RB_MIN(name, head); \
(x) != NULL; \
(x) = name##_RB_NEXT(x))
#define RB_FOREACH_FROM(x, name, y) \
for ((x) = (y); \
((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_SAFE(x, name, head, y) \
for ((x) = RB_MIN(name, head); \
((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_REVERSE(x, name, head) \
for ((x) = RB_MAX(name, head); \
(x) != NULL; \
(x) = name##_RB_PREV(x))
#define RB_FOREACH_REVERSE_FROM(x, name, y) \
for ((x) = (y); \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
for ((x) = RB_MAX(name, head); \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))
/* clang-format on */
#endif /* WEPOLL_RB_H_ */

View File

@ -1,74 +0,0 @@
#include <stdint.h>
#include "reflock-tree.h"
#include "reflock.h"
#include "tree.h"
#include "util.h"
#include "win.h"
void reflock_tree_init(reflock_tree_t* rlt) {
tree_init(&rlt->tree);
InitializeSRWLock(&rlt->lock);
}
void reflock_tree_node_init(reflock_tree_node_t* node) {
tree_node_init(&node->tree_node);
reflock_init(&node->reflock);
}
int reflock_tree_add(reflock_tree_t* rlt,
reflock_tree_node_t* node,
uintptr_t key) {
int r;
AcquireSRWLockExclusive(&rlt->lock);
r = tree_add(&rlt->tree, &node->tree_node, key);
ReleaseSRWLockExclusive(&rlt->lock);
return r;
}
reflock_tree_node_t* reflock_tree_del_and_ref(reflock_tree_t* rlt,
uintptr_t key) {
tree_node_t* tree_node;
reflock_tree_node_t* rlt_node;
AcquireSRWLockExclusive(&rlt->lock);
tree_node = tree_find(&rlt->tree, (uintptr_t) key);
rlt_node = safe_container_of(tree_node, reflock_tree_node_t, tree_node);
if (rlt_node != NULL) {
tree_del(&rlt->tree, tree_node);
reflock_ref(&rlt_node->reflock);
}
ReleaseSRWLockExclusive(&rlt->lock);
return rlt_node;
}
reflock_tree_node_t* reflock_tree_find_and_ref(reflock_tree_t* rlt,
uintptr_t key) {
tree_node_t* tree_node;
reflock_tree_node_t* rlt_node;
AcquireSRWLockShared(&rlt->lock);
tree_node = tree_find(&rlt->tree, (uintptr_t) key);
rlt_node = safe_container_of(tree_node, reflock_tree_node_t, tree_node);
if (rlt_node != NULL)
reflock_ref(&rlt_node->reflock);
ReleaseSRWLockShared(&rlt->lock);
return rlt_node;
}
void reflock_tree_node_unref(reflock_tree_node_t* node) {
reflock_unref(&node->reflock);
}
void reflock_tree_node_unref_and_destroy(reflock_tree_node_t* node) {
reflock_unref_and_destroy(&node->reflock);
}

View File

@ -1,37 +0,0 @@
#ifndef WEPOLL_REFLOCK_TREE_H_
#define WEPOLL_REFLOCK_TREE_H_
#include <stdint.h>
#include "internal.h"
#include "reflock.h"
#include "tree.h"
#include "win.h"
typedef struct reflock_tree {
tree_t tree;
SRWLOCK lock;
} reflock_tree_t;
typedef struct reflock_tree_node {
tree_node_t tree_node;
reflock_t reflock;
} reflock_tree_node_t;
WEPOLL_INTERNAL void reflock_tree_init(reflock_tree_t* rlt);
WEPOLL_INTERNAL void reflock_tree_node_init(reflock_tree_node_t* node);
WEPOLL_INTERNAL int reflock_tree_add(reflock_tree_t* rlt,
reflock_tree_node_t* node,
uintptr_t key);
WEPOLL_INTERNAL reflock_tree_node_t* reflock_tree_del_and_ref(
reflock_tree_t* rlt, uintptr_t key);
WEPOLL_INTERNAL reflock_tree_node_t* reflock_tree_find_and_ref(
reflock_tree_t* rlt, uintptr_t key);
WEPOLL_INTERNAL void reflock_tree_node_unref(reflock_tree_node_t* node);
WEPOLL_INTERNAL void reflock_tree_node_unref_and_destroy(
reflock_tree_node_t* node);
#endif /* WEPOLL_REFLOCK_TREE_H_ */

View File

@ -1,97 +0,0 @@
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include "error.h"
#include "nt.h"
#include "reflock.h"
#include "util.h"
#include "win.h"
/* clang-format off */
static const uint32_t _REF = 0x00000001;
static const uint32_t _REF_MASK = 0x0fffffff;
static const uint32_t _DESTROY = 0x10000000;
static const uint32_t _DESTROY_MASK = 0xf0000000;
static const uint32_t _POISON = 0x300DEAD0;
/* clang-format on */
static HANDLE _keyed_event = NULL;
int reflock_global_init(void) {
NTSTATUS status =
NtCreateKeyedEvent(&_keyed_event, ~(ACCESS_MASK) 0, NULL, 0);
if (status != STATUS_SUCCESS)
return_error(-1, RtlNtStatusToDosError(status));
return 0;
}
void reflock_init(reflock_t* reflock) {
reflock->state = 0;
}
static void _signal_event(const void* address) {
NTSTATUS status =
NtReleaseKeyedEvent(_keyed_event, (PVOID) address, FALSE, NULL);
if (status != STATUS_SUCCESS)
abort();
}
static void _await_event(const void* address) {
NTSTATUS status =
NtWaitForKeyedEvent(_keyed_event, (PVOID) address, FALSE, NULL);
if (status != STATUS_SUCCESS)
abort();
}
static inline uint32_t _sync_add_and_fetch(volatile uint32_t* target,
uint32_t value) {
static_assert(sizeof(*target) == sizeof(long), "");
return InterlockedAdd((volatile long*) target, value);
}
static inline uint32_t _sync_sub_and_fetch(volatile uint32_t* target,
uint32_t value) {
uint32_t add_value = -(int32_t) value;
return _sync_add_and_fetch(target, add_value);
}
static inline uint32_t _sync_fetch_and_set(volatile uint32_t* target,
uint32_t value) {
static_assert(sizeof(*target) == sizeof(long), "");
return InterlockedExchange((volatile long*) target, value);
}
void reflock_ref(reflock_t* reflock) {
uint32_t state = _sync_add_and_fetch(&reflock->state, _REF);
unused(state);
assert((state & _DESTROY_MASK) == 0); /* Overflow or destroyed. */
}
void reflock_unref(reflock_t* reflock) {
uint32_t state = _sync_sub_and_fetch(&reflock->state, _REF);
uint32_t ref_count = state & _REF_MASK;
uint32_t destroy = state & _DESTROY_MASK;
unused(ref_count);
unused(destroy);
if (state == _DESTROY)
_signal_event(reflock);
else
assert(destroy == 0 || ref_count > 0);
}
void reflock_unref_and_destroy(reflock_t* reflock) {
uint32_t state = _sync_add_and_fetch(&reflock->state, _DESTROY - _REF);
uint32_t ref_count = state & _REF_MASK;
assert((state & _DESTROY_MASK) ==
_DESTROY); /* Underflow or already destroyed. */
if (ref_count != 0)
_await_event(reflock);
state = _sync_fetch_and_set(&reflock->state, _POISON);
assert(state == _DESTROY);
}

View File

@ -1,37 +0,0 @@
#ifndef WEPOLL_REFLOCK_H_
#define WEPOLL_REFLOCK_H_
/* The reflock is a special kind of lock that normally prevents a chunk of
* memory from being freed, but does allow the chunk of memory to eventually be
* released in a coordinated fashion.
*
 * Under normal operation, threads increase and decrease the reference count;
 * these operations are wait-free.
*
* Exactly once during the reflock's lifecycle, a thread holding a reference to
* the lock may "destroy" the lock; this operation blocks until all other
* threads holding a reference to the lock have dereferenced it. After
* "destroy" returns, the calling thread may assume that no other threads have
* a reference to the lock.
*
 * Attempting to lock or destroy a lock after reflock_unref_and_destroy() has
* been called is invalid and results in undefined behavior. Therefore the user
* should use another lock to guarantee that this can't happen.
*/
#include <stdint.h>
#include "internal.h"
typedef struct reflock {
uint32_t state;
} reflock_t;
WEPOLL_INTERNAL int reflock_global_init(void);
WEPOLL_INTERNAL void reflock_init(reflock_t* reflock);
WEPOLL_INTERNAL void reflock_ref(reflock_t* reflock);
WEPOLL_INTERNAL void reflock_unref(reflock_t* reflock);
WEPOLL_INTERNAL void reflock_unref_and_destroy(reflock_t* reflock);
#endif /* WEPOLL_REFLOCK_H_ */
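
A minimal usage sketch of the lifecycle described in the comment above, assuming reflock_global_init() has already been called at startup and that the external synchronization the comment asks for is handled elsewhere; my_obj_t, create_object(), use_object() and retire_object() are hypothetical names used only for illustration, not part of wepoll.
#include <stdlib.h>
#include "reflock.h"
typedef struct my_obj {
  reflock_t reflock;
  int payload;
} my_obj_t;
static my_obj_t* create_object(void) {
  my_obj_t* obj = malloc(sizeof *obj);
  if (obj == NULL)
    return NULL;
  reflock_init(&obj->reflock);
  obj->payload = 0;
  return obj;
}
/* Any thread may hold a reference while it accesses the object;
 * reflock_ref() and reflock_unref() are wait-free. */
static void use_object(my_obj_t* obj) {
  reflock_ref(&obj->reflock);
  /* ... read or write obj->payload here ... */
  reflock_unref(&obj->reflock);
}
/* Exactly once in the object's lifetime, a single thread retires it.
 * reflock_unref_and_destroy() consumes one reference and blocks until every
 * other reference has been dropped, so freeing the memory is safe afterwards. */
static void retire_object(my_obj_t* obj) {
  reflock_ref(&obj->reflock); /* The reference consumed by the destroy call. */
  reflock_unref_and_destroy(&obj->reflock);
  free(obj);
}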

View File

@ -1,9 +0,0 @@
#ifndef WEPOLL_INTERNAL_H_
#define WEPOLL_INTERNAL_H_
#ifndef WEPOLL_INTERNAL
#define WEPOLL_INTERNAL
#define WEPOLL_INTERNAL_EXTERN extern
#endif
#endif /* WEPOLL_INTERNAL_H_ */

View File

@ -1,356 +0,0 @@
#include <assert.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "afd.h"
#include "error.h"
#include "poll-group.h"
#include "port.h"
#include "sock.h"
#include "wepoll.h"
#define _EP_EVENT_MASK 0xffff
typedef struct _poll_req {
OVERLAPPED overlapped;
AFD_POLL_INFO poll_info;
} _poll_req_t;
typedef enum _poll_status {
_POLL_IDLE = 0,
_POLL_PENDING,
_POLL_CANCELLED
} _poll_status_t;
typedef struct _ep_sock_private {
ep_sock_t pub;
_poll_req_t poll_req;
poll_group_t* poll_group;
SOCKET afd_socket;
epoll_data_t user_data;
uint32_t user_events;
uint32_t pending_events;
_poll_status_t poll_status;
unsigned deleted : 1;
} _ep_sock_private_t;
static DWORD _epoll_events_to_afd_events(uint32_t epoll_events) {
DWORD afd_events;
/* These events should always be monitored. */
assert(epoll_events & EPOLLERR);
assert(epoll_events & EPOLLHUP);
afd_events = AFD_POLL_ABORT | AFD_POLL_CONNECT_FAIL | AFD_POLL_LOCAL_CLOSE;
if (epoll_events & (EPOLLIN | EPOLLRDNORM))
afd_events |= AFD_POLL_RECEIVE | AFD_POLL_ACCEPT;
if (epoll_events & (EPOLLPRI | EPOLLRDBAND))
afd_events |= AFD_POLL_RECEIVE_EXPEDITED;
if (epoll_events & (EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND))
afd_events |= AFD_POLL_SEND | AFD_POLL_CONNECT;
if (epoll_events & (EPOLLIN | EPOLLRDNORM | EPOLLRDHUP))
afd_events |= AFD_POLL_DISCONNECT;
return afd_events;
}
static uint32_t _afd_events_to_epoll_events(DWORD afd_events) {
uint32_t epoll_events = 0;
if (afd_events & (AFD_POLL_RECEIVE | AFD_POLL_ACCEPT))
epoll_events |= EPOLLIN | EPOLLRDNORM;
if (afd_events & AFD_POLL_RECEIVE_EXPEDITED)
epoll_events |= EPOLLPRI | EPOLLRDBAND;
if (afd_events & (AFD_POLL_SEND | AFD_POLL_CONNECT))
epoll_events |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
if (afd_events & AFD_POLL_DISCONNECT)
epoll_events |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
if (afd_events & AFD_POLL_ABORT)
epoll_events |= EPOLLHUP;
if (afd_events & AFD_POLL_CONNECT_FAIL)
epoll_events |= EPOLLERR;
return epoll_events;
}
static int _poll_req_submit(_poll_req_t* poll_req,
uint32_t epoll_events,
SOCKET socket,
SOCKET driver_socket) {
int r;
memset(&poll_req->overlapped, 0, sizeof poll_req->overlapped);
poll_req->poll_info.Exclusive = FALSE;
poll_req->poll_info.NumberOfHandles = 1;
poll_req->poll_info.Timeout.QuadPart = INT64_MAX;
poll_req->poll_info.Handles[0].Handle = (HANDLE) socket;
poll_req->poll_info.Handles[0].Status = 0;
poll_req->poll_info.Handles[0].Events =
_epoll_events_to_afd_events(epoll_events);
r = afd_poll(driver_socket, &poll_req->poll_info, &poll_req->overlapped);
if (r != 0 && GetLastError() != ERROR_IO_PENDING)
return_error(-1);
return 0;
}
static int _poll_req_cancel(_poll_req_t* poll_req, SOCKET driver_socket) {
OVERLAPPED* overlapped = &poll_req->overlapped;
if (!CancelIoEx((HANDLE) driver_socket, overlapped)) {
DWORD error = GetLastError();
if (error == ERROR_NOT_FOUND)
return 0; /* Already completed or canceled. */
else
return_error(-1);
}
return 0;
}
static void _poll_req_complete(const _poll_req_t* poll_req,
uint32_t* epoll_events_out,
bool* socket_closed_out) {
const OVERLAPPED* overlapped = &poll_req->overlapped;
uint32_t epoll_events = 0;
bool socket_closed = false;
if ((NTSTATUS) overlapped->Internal == STATUS_CANCELLED) {
/* The poll request was cancelled by CancelIoEx. */
} else if (!NT_SUCCESS(overlapped->Internal)) {
/* The overlapped request itself failed in an unexpected way. */
epoll_events = EPOLLERR;
} else if (poll_req->poll_info.NumberOfHandles < 1) {
/* This overlapped request succeeded but didn't report any events. */
} else {
/* Events related to our socket were reported. */
DWORD afd_events = poll_req->poll_info.Handles[0].Events;
if (afd_events & AFD_POLL_LOCAL_CLOSE)
socket_closed = true; /* Socket was closed locally; it will be silently dropped. */
else
epoll_events = _afd_events_to_epoll_events(afd_events);
}
*epoll_events_out = epoll_events;
*socket_closed_out = socket_closed;
}
static inline _ep_sock_private_t* _ep_sock_private(ep_sock_t* sock_info) {
return container_of(sock_info, _ep_sock_private_t, pub);
}
static inline _ep_sock_private_t* _ep_sock_alloc(void) {
_ep_sock_private_t* sock_private = malloc(sizeof *sock_private);
if (sock_private == NULL)
return_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
return sock_private;
}
static inline void _ep_sock_free(_ep_sock_private_t* sock_private) {
assert(sock_private->poll_status == _POLL_IDLE);
free(sock_private);
}
ep_sock_t* ep_sock_new(ep_port_t* port_info, SOCKET socket) {
SOCKET afd_socket;
ssize_t protocol_id;
WSAPROTOCOL_INFOW protocol_info;
poll_group_t* poll_group;
_ep_sock_private_t* sock_private;
if (socket == 0 || socket == INVALID_SOCKET)
return_error(NULL, ERROR_INVALID_HANDLE);
protocol_id = afd_get_protocol(socket, &afd_socket, &protocol_info);
if (protocol_id < 0)
return NULL;
poll_group =
ep_port_acquire_poll_group(port_info, protocol_id, &protocol_info);
if (poll_group == NULL)
return NULL;
sock_private = _ep_sock_alloc();
if (sock_private == NULL)
goto err1;
memset(sock_private, 0, sizeof *sock_private);
sock_private->afd_socket = afd_socket;
sock_private->poll_group = poll_group;
tree_node_init(&sock_private->pub.tree_node);
queue_node_init(&sock_private->pub.queue_node);
if (ep_port_add_socket(port_info, &sock_private->pub, socket) < 0)
goto err2;
return &sock_private->pub;
err2:
_ep_sock_free(sock_private);
err1:
ep_port_release_poll_group(poll_group);
return NULL;
}
void ep_sock_delete(ep_port_t* port_info, ep_sock_t* sock_info) {
_ep_sock_private_t* sock_private = _ep_sock_private(sock_info);
assert(!sock_private->deleted);
sock_private->deleted = true;
if (sock_private->poll_status == _POLL_PENDING) {
_poll_req_cancel(&sock_private->poll_req,
poll_group_get_socket(sock_private->poll_group));
sock_private->poll_status = _POLL_CANCELLED;
sock_private->pending_events = 0;
}
ep_port_del_socket(port_info, sock_info);
ep_port_clear_socket_update(port_info, sock_info);
ep_port_release_poll_group(sock_private->poll_group);
sock_private->poll_group = NULL;
/* If the poll request still needs to complete, the ep_sock object can't
* be free()d yet. `ep_sock_feed_event` will take care of this later. */
if (sock_private->poll_status == _POLL_IDLE)
_ep_sock_free(sock_private);
}
void ep_sock_force_delete(ep_port_t* port_info, ep_sock_t* sock_info) {
_ep_sock_private_t* sock_private = _ep_sock_private(sock_info);
sock_private->poll_status = _POLL_IDLE;
ep_sock_delete(port_info, sock_info);
}
int ep_sock_set_event(ep_port_t* port_info,
ep_sock_t* sock_info,
const struct epoll_event* ev) {
_ep_sock_private_t* sock_private = _ep_sock_private(sock_info);
/* EPOLLERR and EPOLLHUP are always reported, even when not solicited. */
uint32_t events = ev->events | EPOLLERR | EPOLLHUP;
sock_private->user_events = events;
sock_private->user_data = ev->data;
if ((events & _EP_EVENT_MASK & ~(sock_private->pending_events)) != 0)
ep_port_request_socket_update(port_info, sock_info);
return 0;
}
int ep_sock_update(ep_port_t* port_info, ep_sock_t* sock_info) {
_ep_sock_private_t* sock_private = _ep_sock_private(sock_info);
SOCKET driver_socket = poll_group_get_socket(sock_private->poll_group);
bool broken = false;
assert(ep_port_is_socket_update_pending(port_info, sock_info));
if (sock_private->poll_status == _POLL_PENDING &&
(sock_private->user_events & _EP_EVENT_MASK &
~sock_private->pending_events) == 0) {
/* All the events the user is interested in are already being monitored
* by the pending poll request. It might spuriously complete because of an
* event that we're no longer interested in; if that happens we just
* submit another poll request with the right event mask. */
} else if (sock_private->poll_status == _POLL_PENDING) {
/* A poll request is already pending, but it's not monitoring for all the
* events that the user is interested in. Cancel the pending poll request;
* when it completes it will be submitted again with the correct event
* mask. */
if (_poll_req_cancel(&sock_private->poll_req, driver_socket) < 0)
return -1;
sock_private->poll_status = _POLL_CANCELLED;
sock_private->pending_events = 0;
} else if (sock_private->poll_status == _POLL_CANCELLED) {
/* The poll request has already been cancelled, we're still waiting for it
* to return. For now, there's nothing that needs to be done. */
} else if (sock_private->poll_status == _POLL_IDLE) {
if (_poll_req_submit(&sock_private->poll_req,
sock_private->user_events,
sock_private->afd_socket,
driver_socket) < 0) {
if (GetLastError() == ERROR_INVALID_HANDLE)
/* The socket is broken. It will be dropped from the epoll set. */
broken = true;
else
/* Another error occurred, which is propagated to the caller. */
return -1;
} else {
/* The poll request was successfully submitted. */
sock_private->poll_status = _POLL_PENDING;
sock_private->pending_events = sock_private->user_events;
}
} else {
/* Unreachable. */
assert(false);
}
ep_port_clear_socket_update(port_info, sock_info);
/* If we saw an ERROR_INVALID_HANDLE error, drop the socket. */
if (broken)
ep_sock_delete(port_info, sock_info);
return 0;
}
int ep_sock_feed_event(ep_port_t* port_info,
OVERLAPPED* overlapped,
struct epoll_event* ev) {
_ep_sock_private_t* sock_private =
container_of(overlapped, _ep_sock_private_t, poll_req.overlapped);
ep_sock_t* sock_info = &sock_private->pub;
uint32_t epoll_events;
bool drop_socket;
int ev_count = 0;
sock_private->poll_status = _POLL_IDLE;
sock_private->pending_events = 0;
if (sock_private->deleted) {
/* Ignore completion for overlapped poll operation if the socket has been
* deleted; instead, free the socket. */
_ep_sock_free(sock_private);
return 0;
}
_poll_req_complete(&sock_private->poll_req, &epoll_events, &drop_socket);
/* Filter events that the user didn't ask for. */
epoll_events &= sock_private->user_events;
/* Clear the event mask if EPOLLONESHOT is set and there are any events
* to report. */
if (epoll_events != 0 && (sock_private->user_events & EPOLLONESHOT))
sock_private->user_events = 0;
/* Fill the ev structure if there are any events to report. */
if (epoll_events != 0) {
ev->data = sock_private->user_data;
ev->events = epoll_events;
ev_count = 1;
}
if (drop_socket)
/* Drop the socket from the epoll set. */
ep_sock_delete(port_info, sock_info);
else if (sock_private->user_events != 0)
/* Put the socket back onto the attention list so a new poll request will
* be submitted. */
ep_port_request_socket_update(port_info, sock_info);
return ev_count;
}

View File

@ -1,37 +0,0 @@
#ifndef WEPOLL_SOCK_H_
#define WEPOLL_SOCK_H_
#include <stdint.h>
#include "internal.h"
#include "queue.h"
#include "rb.h"
#include "tree.h"
#include "util.h"
#include "wepoll.h"
#include "win.h"
typedef struct ep_port ep_port_t;
typedef struct poll_req poll_req_t;
typedef struct ep_sock {
tree_node_t tree_node;
queue_node_t queue_node;
} ep_sock_t;
WEPOLL_INTERNAL ep_sock_t* ep_sock_new(ep_port_t* port_info, SOCKET socket);
WEPOLL_INTERNAL void ep_sock_delete(ep_port_t* port_info,
ep_sock_t* sock_info);
WEPOLL_INTERNAL void ep_sock_force_delete(ep_port_t* port_info,
ep_sock_t* sock_info);
WEPOLL_INTERNAL int ep_sock_set_event(ep_port_t* port_info,
ep_sock_t* sock_info,
const struct epoll_event* ev);
WEPOLL_INTERNAL int ep_sock_update(ep_port_t* port_info, ep_sock_t* sock_info);
WEPOLL_INTERNAL int ep_sock_feed_event(ep_port_t* port_info,
OVERLAPPED* overlapped,
struct epoll_event* ev);
#endif /* WEPOLL_SOCK_H_ */

View File

@ -1,62 +0,0 @@
#include <assert.h>
#include <string.h>
#include "error.h"
#include "rb.h"
#include "tree.h"
static inline int _tree_compare(tree_node_t* a, tree_node_t* b) {
if (a->key < b->key)
return -1;
else if (a->key > b->key)
return 1;
else
return 0;
}
RB_GENERATE_STATIC(tree, tree_node, node, _tree_compare);
void tree_init(tree_t* tree) {
RB_INIT(tree);
}
void tree_node_init(tree_node_t* node) {
memset(node, 0, sizeof *node);
}
int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key) {
tree_node_t* existing_node;
node->key = key;
existing_node = RB_INSERT(tree, tree, node);
if (existing_node != NULL)
return -1;
return 0;
}
int tree_del(tree_t* tree, tree_node_t* node) {
tree_node_t* removed_node;
removed_node = RB_REMOVE(tree, tree, node);
if (removed_node == NULL)
return -1;
else
assert(removed_node == node);
return 0;
}
tree_node_t* tree_find(tree_t* tree, uintptr_t key) {
tree_node_t lookup;
memset(&lookup, 0, sizeof lookup);
lookup.key = key;
return RB_FIND(tree, tree, &lookup);
}
tree_node_t* tree_root(tree_t* tree) {
return RB_ROOT(tree);
}

View File

@ -1,28 +0,0 @@
#ifndef WEPOLL_TREE_H_
#define WEPOLL_TREE_H_
#include "internal.h"
#include "rb.h"
/* NB: the tree functions do not set errno or LastError when they fail. Each of
* the API functions has at most one failure mode. It is up to the caller to
* set an appropriate error code when necessary.
*/
typedef RB_HEAD(tree, tree_node) tree_t;
typedef struct tree_node {
RB_ENTRY(tree_node) node;
uintptr_t key;
} tree_node_t;
WEPOLL_INTERNAL void tree_init(tree_t* tree);
WEPOLL_INTERNAL void tree_node_init(tree_node_t* node);
WEPOLL_INTERNAL int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key);
WEPOLL_INTERNAL int tree_del(tree_t* tree, tree_node_t* node);
WEPOLL_INTERNAL tree_node_t* tree_find(tree_t* tree, uintptr_t key);
WEPOLL_INTERNAL tree_node_t* tree_root(tree_t* tree);
#endif /* WEPOLL_TREE_H_ */
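
A short sketch of how these functions are typically combined with container_of() from util.h to embed a tree_node_t in a larger struct; my_item_t, my_item_insert() and my_item_lookup() are hypothetical names for illustration only.
#include <stdint.h>
#include "tree.h"
#include "util.h" /* container_of() */
typedef struct my_item {
  tree_node_t tree_node;
  int value;
} my_item_t;
/* Returns 0 on success, -1 if the key is already present. As noted above,
 * it is up to the caller to set errno / SetLastError() on failure. */
static int my_item_insert(tree_t* tree, my_item_t* item, uintptr_t key) {
  tree_node_init(&item->tree_node);
  return tree_add(tree, &item->tree_node, key);
}
static my_item_t* my_item_lookup(tree_t* tree, uintptr_t key) {
  tree_node_t* node = tree_find(tree, key);
  if (node == NULL)
    return NULL; /* Not found; no error code has been set. */
  return container_of(node, my_item_t, tree_node);
}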

View File

@ -1,11 +0,0 @@
#include <stdint.h>
#include <stdlib.h>
#include "util.h"
void* util_safe_container_of_helper(void* ptr, size_t offset) {
if (ptr == NULL)
return NULL;
else
return (char*) ptr - offset;
}

View File

@ -1,31 +0,0 @@
#ifndef WEPOLL_UTIL_H_
#define WEPOLL_UTIL_H_
#include <stddef.h>
#include "internal.h"
#ifndef _SSIZE_T_DEFINED
#define _SSIZE_T_DEFINED
typedef intptr_t ssize_t;
#endif
#define array_count(a) (sizeof(a) / (sizeof((a)[0])))
#define container_of(ptr, type, member) \
((type*) ((char*) (ptr) -offsetof(type, member)))
#define safe_container_of(ptr, type, member) \
((type*) util_safe_container_of_helper((ptr), offsetof(type, member)))
#define unused(v) ((void) (v))
#ifdef __clang__
/* Polyfill static_assert() because clang doesn't support it. */
#define static_assert(condition, message) typedef __attribute__( \
(unused)) int __static_assert_##__LINE__[(condition) ? 1 : -1];
#endif
WEPOLL_INTERNAL void* util_safe_container_of_helper(void* ptr, size_t offset);
#endif /* WEPOLL_UTIL_H_ */

View File

@ -1,12 +0,0 @@
#ifndef WEPOLL_WIN_H_
#define WEPOLL_WIN_H_
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <WS2tcpip.h>
#include <WinSock2.h>
#include <Windows.h>
#endif

View File

@ -1,9 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include "test-util.h"
void check_fail(const char* message) {
puts(message);
abort();
}

View File

@ -1,29 +0,0 @@
#ifndef TEST_UTIL_H_
#define TEST_UTIL_H_
#ifdef _MSC_VER
#define no_return __declspec(noreturn)
#else /* GCC/clang */
#define no_return __attribute__((noreturn))
#endif
#ifdef _MSC_VER
#define no_inline __declspec(noinline)
#else /* GCC/clang */
#define no_inline __attribute__((noinline))
#endif
void no_inline no_return check_fail(const char* message);
#define __check_to_string_helper(v) #v
#define __check_to_string(v) __check_to_string_helper(v)
#define check(expression) \
(void) ((!!(expression)) || \
(check_fail("Check failed:\n" \
" test: " #expression "\n" \
" file: " __FILE__ "\n" \
" line: " __check_to_string(__LINE__) "\n"), \
0))
#endif /* TEST_UTIL_H_ */

View File

@ -1,94 +0,0 @@
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "init.h"
#include "test-util.h"
#include "util.h"
#include "wepoll.h"
#include "win.h"
#include <WS2tcpip.h>
#define NUM_SOCKETS 10000
#define RUN_TIME 5000
#define PRINT_INTERVAL 500
static SOCKET create_and_add_socket(HANDLE epfd) {
SOCKET sock;
unsigned long one;
int r;
struct epoll_event ev;
sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
check(sock > 0);
one = 1;
r = ioctlsocket(sock, FIONBIO, &one);
check(r == 0);
ev.events = 0;
ev.data.u64 = 42;
r = epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);
check(r == 0);
return sock;
}
int main(void) {
uint64_t total_events = 0;
uint64_t start_time, last_print_time, now, total_time;
SOCKET sockets[NUM_SOCKETS];
int r;
HANDLE epfd;
r = init();
check(r == 0);
epfd = epoll_create1(0);
check(epfd != NULL);
for (size_t i = 0; i < NUM_SOCKETS; i++)
sockets[i] = create_and_add_socket(epfd);
start_time = GetTickCount64();
last_print_time = 0;
do {
struct epoll_event ev_out[64];
uint64_t count;
for (size_t i = 0; i < NUM_SOCKETS; i++) {
SOCKET sock = sockets[i];
struct epoll_event ev_in;
ev_in.events = rand() & 0xff | EPOLLONESHOT;
ev_in.data.u64 = 42;
r = epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev_in);
check(r == 0);
}
count = 0;
do {
r = epoll_wait(epfd, ev_out, array_count(ev_out), count > 0 ? 0 : -1);
check(r >= 0);
count += r;
} while (r > 0);
total_events += count;
now = GetTickCount64();
total_time = now - start_time;
if (now - last_print_time > PRINT_INTERVAL) {
last_print_time = now;
printf("%f events (%f events/sec)\n",
(double) total_events,
(double) total_events / total_time * 1000);
}
} while (total_time < RUN_TIME);
return 0;
}

View File

@ -1,163 +0,0 @@
#include <process.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "init.h"
#include "test-util.h"
#include "util.h"
#include "wepoll.h"
#include "win.h"
#include <WS2tcpip.h>
#define PORT_COUNT 10
#define THREADS_PER_PORT 10
#define LISTEN_PORT 12345
typedef struct test_context {
SOCKET sock;
HANDLE port;
uint64_t data;
HANDLE thread;
} test_context_t;
static SOCKET create_socket(unsigned short port) {
SOCKET sock;
struct sockaddr_in address;
unsigned long one;
int r;
sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
check(sock > 0);
address.sin_family = AF_INET;
address.sin_port = htons(port);
address.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
r = bind(sock, (struct sockaddr*) &address, sizeof address);
check(r == 0);
one = 1;
r = ioctlsocket(sock, FIONBIO, &one);
check(r == 0);
return sock;
}
static void send_message(SOCKET sock, unsigned short port) {
char hello[] = "hello";
struct sockaddr_in address;
WSABUF buf;
DWORD bytes_sent;
int r;
address.sin_family = AF_INET;
address.sin_port = htons(port);
address.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
buf.buf = hello;
buf.len = sizeof(hello);
r = WSASendTo(sock,
&buf,
1,
&bytes_sent,
0,
(struct sockaddr*) &address,
sizeof address,
NULL,
NULL);
check(r >= 0);
}
static unsigned int __stdcall poll_thread(void* arg) {
test_context_t* context = arg;
struct epoll_event ev_out;
int r;
memset(&ev_out, 0, sizeof ev_out);
r = epoll_wait(context->port, &ev_out, 1, -1);
check(r == 1);
check(ev_out.events == EPOLLIN);
check(ev_out.data.u64 == context->data);
printf("Got event (port %p, thread %p)\n", context->port, context->thread);
return 0;
}
int main(void) {
HANDLE ports[PORT_COUNT];
test_context_t contexts[PORT_COUNT][THREADS_PER_PORT];
WSADATA wsa_data;
int r;
/* Initialize winsock. */
r = WSAStartup(MAKEWORD(2, 2), &wsa_data);
check(r == 0);
SOCKET send_sock = create_socket(0);
SOCKET recv_sock = create_socket(LISTEN_PORT);
/* Create PORT_COUNT epoll ports, which will be polled by THREADS_PER_PORT
* threads. */
for (size_t i = 0; i < array_count(contexts); i++) {
HANDLE port;
struct epoll_event ev;
/* Create epoll port. */
port = epoll_create1(0);
check(port != INVALID_HANDLE_VALUE);
ports[i] = port;
/* Register recv_sock with the epoll port. */
ev.events = EPOLLIN;
ev.data.u64 = rand();
r = epoll_ctl(port, EPOLL_CTL_ADD, recv_sock, &ev);
check(r == 0);
/* Start THREADS_PER_PORT threads which will all poll the port. */
for (size_t j = 0; j < array_count(contexts[i]); j++) {
test_context_t* context = &contexts[i][j];
HANDLE thread;
/* Prepare context information for the polling thread. */
context->port = port;
context->sock = recv_sock;
context->data = ev.data.u64;
/* Start thread. */
thread = (HANDLE) _beginthreadex(
NULL, 0, poll_thread, (void*) context, 0, NULL);
check(thread != INVALID_HANDLE_VALUE);
context->thread = thread;
}
}
/* Sleep for a while to give all threads a chance to finish initializing. */
Sleep(500);
/* Send a message to the receiving socket. */
send_message(send_sock, LISTEN_PORT);
/* Wait for all threads to exit and clean up after them. */
for (size_t i = 0; i < array_count(contexts); i++) {
for (size_t j = 0; j < array_count(contexts[i]); j++) {
HANDLE thread = contexts[i][j].thread;
DWORD wr = WaitForSingleObject(thread, INFINITE);
check(wr == WAIT_OBJECT_0);
CloseHandle(thread);
}
}
/* Close all epoll ports. */
for (size_t i = 0; i < array_count(ports); i++) {
HANDLE port = ports[i];
r = epoll_close(port);
check(r == 0);
}
return 0;
}

View File

@ -1,243 +0,0 @@
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include "test-util.h"
#include "wepoll.h"
#include "win.h"
#include <WS2tcpip.h>
int tcp_socketpair(SOCKET socks[2]) {
SOCKET listen_sock;
struct sockaddr_in addr;
socklen_t addrlen = sizeof addr;
listen_sock = socket(AF_INET, SOCK_STREAM, 0);
if (listen_sock == INVALID_SOCKET)
return SOCKET_ERROR;
memset(&addr, 0, sizeof addr);
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_port = 0;
if (bind(listen_sock, (struct sockaddr*) &addr, sizeof addr) == SOCKET_ERROR)
goto err1;
if (getsockname(listen_sock, (struct sockaddr*) &addr, &addrlen) ==
SOCKET_ERROR)
goto err1;
if (listen(listen_sock, 1) == SOCKET_ERROR)
goto err1;
socks[0] = socket(AF_INET, SOCK_STREAM, 0);
if (socks[0] == INVALID_SOCKET)
goto err1;
if (connect(socks[0], (struct sockaddr*) &addr, sizeof addr) == SOCKET_ERROR)
goto err2;
socks[1] = accept(listen_sock, NULL, NULL);
if (socks[1] == INVALID_SOCKET)
goto err2;
closesocket(listen_sock);
return 0;
err2:
closesocket(socks[0]);
err1:
closesocket(listen_sock);
return -1;
}
int sock_set_nonblock(SOCKET sock, bool enable) {
u_long arg = enable;
return ioctlsocket(sock, FIONBIO, &arg);
}
int main(void) {
static const char HELLO[] = "hello, world!";
HANDLE epoll_port;
SOCKET send_sock, recv_sock;
{
/* Create an epoll instance. */
epoll_port = epoll_create(1);
check(epoll_port > 0);
}
{
/* Create a TCP socket pair. */
SOCKET socks[2];
int r = tcp_socketpair(socks);
check(r == 0);
send_sock = socks[0];
recv_sock = socks[1];
}
{
/* Enable non-blocking mode on the receiving end. */
int r = sock_set_nonblock(recv_sock, true);
check(r == 0);
}
{
/* Send some data in order to trigger an event on the receiving socket. */
int r = send(send_sock, HELLO, sizeof HELLO, 0);
check(r == sizeof HELLO);
}
{
/* Add the receiving socket to the epoll set. */
struct epoll_event ev;
int r;
ev.events = (uint32_t) EPOLLIN | EPOLLONESHOT;
ev.data.sock = recv_sock;
r = epoll_ctl(epoll_port, EPOLL_CTL_ADD, recv_sock, &ev);
check(r >= 0);
}
{
/* Receive the EPOLLIN event for recv_sock. */
struct epoll_event ev;
int r;
memset(&ev, 0, sizeof ev);
r = epoll_wait(epoll_port, &ev, 1, -1);
check(r == 1);
check(ev.events == EPOLLIN);
check(ev.data.sock == recv_sock);
}
{
/* Read the data from the socket. */
char buffer[16];
int r = recv(recv_sock, buffer, sizeof buffer, 0);
check(r > 0);
}
{
/* Since the last epoll_ctl() call specified the EPOLLONESHOT flag,
* no events should be reported here -- neither EPOLLIN nor EPOLLHUP. */
static const int timeout = 250; /* Quarter second. */
struct epoll_event ev;
int r;
memset(&ev, 0, sizeof ev);
r = epoll_wait(epoll_port, &ev, 1, timeout);
check(r == 0); /* Time-out. */
}
{
/* Attempt to EPOLL_CTL_ADD the socket to the port again. This should fail
 * with EEXIST: EPOLLONESHOT only disables events after one has been reported;
 * it does not drop the socket from the epoll set. */
struct epoll_event ev;
int r;
ev.events = (uint32_t) EPOLLIN | EPOLLONESHOT;
ev.data.sock = recv_sock;
r = epoll_ctl(epoll_port, EPOLL_CTL_ADD, recv_sock, &ev);
check(r == -1);
check(errno == EEXIST);
check(GetLastError() == ERROR_ALREADY_EXISTS);
}
{
/* Modify the read socket event mask to EPOLLRDHUP. */
struct epoll_event ev;
int r;
ev.events = (uint32_t) EPOLLRDHUP | EPOLLONESHOT;
ev.data.sock = recv_sock;
r = epoll_ctl(epoll_port, EPOLL_CTL_MOD, recv_sock, &ev);
check(r == 0);
}
{
/* Send some data that will never be read by the receiving end; otherwise
 * Windows won't detect that the connection is closed. */
int r = send(send_sock, HELLO, sizeof HELLO, 0);
check(r == sizeof HELLO);
}
{
/* Send FIN packet. */
int r = shutdown(send_sock, SD_SEND);
check(r == 0);
}
{
/* Receive the EPOLLRDHUP event for recv_sock. */
struct epoll_event ev;
int r;
memset(&ev, 0, sizeof ev);
r = epoll_wait(epoll_port, &ev, 1, -1);
check(r == 1);
check(ev.events == EPOLLRDHUP);
check(ev.data.sock == recv_sock);
}
{
/* Close the receiving socket, so the sending socket should detect a
 * connection hang-up. */
int r = closesocket(recv_sock);
check(r == 0);
}
{
/* Add the *write* socket to the epoll set. The event mask is empty,
* but since EPOLLHUP and EPOLLERR are always reportable, the next call to
* epoll_wait() should be able to detect that the connection is now closed.
*/
struct epoll_event ev;
int r;
ev.events = 0;
ev.data.sock = send_sock;
r = epoll_ctl(epoll_port, EPOLL_CTL_ADD, send_sock, &ev);
check(r == 0);
}
{
/* Receive the EPOLLHUP event for write end of the connection. */
struct epoll_event ev;
int r;
memset(&ev, 0, sizeof ev);
r = epoll_wait(epoll_port, &ev, 1, -1);
check(r == 1);
check(ev.events == EPOLLHUP);
check(ev.data.sock == send_sock);
}
{
/* Close the send socket. */
int r = closesocket(send_sock);
check(r == 0);
}
{
/* Close the epoll port. */
int r = epoll_close(epoll_port);
check(r == 0);
}
return 0;
}

View File

@ -1,105 +0,0 @@
#include <process.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "init.h"
#include "reflock.h"
#include "test-util.h"
#include "util.h"
#include "win.h"
#define THREAD_COUNT 20
#define TEST_ITERATIONS 50
#define TEST_LENGTH 100
typedef struct test_context {
reflock_t reflock;
SRWLOCK srwlock;
volatile bool stop;
} test_context_t;
static void init_context(test_context_t* context) {
reflock_init(&context->reflock);
InitializeSRWLock(&context->srwlock);
context->stop = false;
}
static void yield(void) {
int count = rand() % 3;
while (count--)
Sleep(0);
}
static unsigned int __stdcall test_thread(void* arg) {
test_context_t* context = arg;
uint64_t lock_count = 0;
AcquireSRWLockShared(&context->srwlock);
while (!context->stop) {
reflock_ref(&context->reflock);
ReleaseSRWLockShared(&context->srwlock);
lock_count++;
yield();
reflock_unref(&context->reflock);
yield();
AcquireSRWLockShared(&context->srwlock);
}
ReleaseSRWLockShared(&context->srwlock);
check(lock_count > 100); /* Hopefully much more. */
return 0;
}
static void destroy_reflock(test_context_t* context) {
AcquireSRWLockExclusive(&context->srwlock);
context->stop = true;
reflock_ref(&context->reflock);
ReleaseSRWLockExclusive(&context->srwlock);
reflock_unref_and_destroy(&context->reflock);
}
static void run_test_iteration(void) {
test_context_t context;
HANDLE threads[THREAD_COUNT];
init_context(&context);
for (size_t i = 0; i < array_count(threads); i++) {
HANDLE thread =
(HANDLE) _beginthreadex(NULL, 0, test_thread, &context, 0, NULL);
check(thread != INVALID_HANDLE_VALUE);
threads[i] = thread;
}
Sleep(TEST_LENGTH);
destroy_reflock(&context);
for (size_t i = 0; i < array_count(threads); i++) {
HANDLE thread = threads[i];
DWORD wr = WaitForSingleObject(thread, INFINITE);
check(wr == WAIT_OBJECT_0);
CloseHandle(thread);
}
}
int main(void) {
if (init() < 0)
return 0;
for (int i = 0; i < TEST_ITERATIONS; i++) {
printf("Iteration %d of %d\n", i + 1, TEST_ITERATIONS);
run_test_iteration();
}
return 0;
}

View File

@ -1,163 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include "error.h"
#include "test-util.h"
#include "wepoll.h"
#include "win.h"
#define LISTEN_PORT 12345
#define NUM_PINGERS 10000
#define RUN_TIME 5000
static const char PING[] = "PING";
int main(void) {
HANDLE epoll_hnd;
int r;
u_long one = 1;
struct sockaddr_in address;
DWORD ticks_start, ticks_last;
long long pings = 0, pings_sent = 0;
int i;
SOCKET srv;
struct epoll_event ev;
epoll_hnd = epoll_create1(0);
check(epoll_hnd && epoll_hnd != INVALID_HANDLE_VALUE);
srv = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
r = ioctlsocket(srv, FIONBIO, &one);
check(r == 0);
address.sin_family = AF_INET;
address.sin_port = htons(LISTEN_PORT);
address.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
r = bind(srv, (struct sockaddr*) &address, sizeof address);
check(r == 0);
ev.events = EPOLLIN | EPOLLERR;
ev.data.sock = srv;
r = epoll_ctl(epoll_hnd, EPOLL_CTL_ADD, srv, &ev);
check(r == 0);
for (i = 0; i < NUM_PINGERS; i++) {
SOCKET sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
r = ioctlsocket(sock, FIONBIO, &one);
check(r == 0);
r = setsockopt(
sock, SOL_SOCKET, SO_REUSEADDR, (const char*) &one, sizeof one);
check(r == 0);
r = connect(sock, (struct sockaddr*) &address, sizeof address);
/* Unlike Unix, Windows sets the error to EWOULDBLOCK when the connection
 * is being established in the background.
 */
check(r == 0 || WSAGetLastError() == WSAEWOULDBLOCK);
ev.events = (uint32_t) EPOLLOUT | EPOLLERR | EPOLLONESHOT;
ev.data.sock = sock;
r = epoll_ctl(epoll_hnd, EPOLL_CTL_ADD, sock, &ev);
check(r == 0);
}
ticks_start = GetTickCount();
ticks_last = ticks_start;
for (;;) {
int count;
struct epoll_event events[16];
DWORD ticks;
ticks = GetTickCount();
if (ticks >= ticks_last + 1000) {
printf("%lld pings (%0.0f per sec), %lld sent (%0.0f per sec)\n",
pings,
(double) pings / (ticks - ticks_start) * 1000.0,
pings_sent,
(double) pings_sent / (ticks - ticks_start) * 1000.0);
ticks_last = ticks;
if (ticks - ticks_start > RUN_TIME)
break;
}
count = epoll_wait(epoll_hnd, events, 15, 1000);
check(count >= 0);
for (i = 0; i < count; i++) {
int revents = events[i].events;
if (revents & EPOLLERR) {
SOCKET sock = events[i].data.sock;
int err = -1;
int err_len = sizeof err;
r = getsockopt(sock, SOL_SOCKET, SO_ERROR, (char*) &err, &err_len);
check(r == 0);
fprintf(stderr, "Socket error: %d\n", err);
r = epoll_ctl(epoll_hnd, EPOLL_CTL_DEL, sock, NULL);
check(r == 0);
continue;
}
if (revents & EPOLLIN) {
SOCKET sock = events[i].data.sock;
char buf[1024];
WSABUF wsa_buf;
DWORD flags, bytes;
wsa_buf.buf = buf;
wsa_buf.len = sizeof buf;
flags = 0;
for (;;) {
r = WSARecv(sock, &wsa_buf, 1, &bytes, &flags, NULL, NULL);
if (r == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
break;
check(r >= 0);
check(bytes == sizeof PING);
pings++;
}
continue;
}
if (revents & EPOLLOUT) {
SOCKET sock = events[i].data.sock;
WSABUF wsa_buf;
DWORD bytes;
wsa_buf.buf = (char*) PING;
wsa_buf.len = sizeof PING;
r = WSASend(sock, &wsa_buf, 1, &bytes, 0, NULL, NULL);
check(r >= 0);
check(bytes == sizeof PING);
pings_sent++;
uint32_t rev = rand() & 0xff | EPOLLOUT | EPOLLONESHOT;
struct epoll_event e;
e.data.sock = sock;
e.events = rev;
if (epoll_ctl(epoll_hnd, EPOLL_CTL_MOD, sock, &e) < 0)
abort();
continue;
}
check(0);
}
}
r = epoll_close(epoll_hnd);
check(r == 0);
closesocket(srv);
return 0;
}

View File

@ -1,153 +0,0 @@
// This is a mess. I know.
var path = require('path');
var fs = require('fs');
// Parse command line options.
var files = [];
var includeDirs = [];
var stripGuardsEnabled = false;
process.argv
.slice(2)
.forEach((arg) => {
let match;
if (match = /^-I(.*)$/.exec(arg))
includeDirs.push(match[1]);
else if (arg === '--strip-guards')
stripGuardsEnabled = true;
else
files.push(arg);
});
var included = {};
function readFileWithPath(fileName, dirs) {
if (/[/\\]/.test(fileName))
return fs.readFileSync(fileName, 'utf8');
for (let i = 0; i < dirs.length; i++) {
var filePath = path.resolve(dirs[i], fileName);
try {
return fs.readFileSync(filePath, 'utf8');
} catch (e) {
// Ignore.
}
}
var err = new Error('file not found: ' + fileName);
err.code = 'ENOENT';
throw err;
}
function strip_guards(filename, source) {
var lead_comments_re = /^(\s*\/\*((?!\*\/)[\s\S])*\*\/)*\s*/;
var trail_comments_re = /(\s*\/\*((?!\*\/)[\s\S])*\*\/)*\s*$/;
var lead_guards_re = /^#ifndef\s+(\w+)\s+#define\s+(\w+)\s+/;
var trail_guards_re = /#endif$/;
// Strip leading and trailing comments and whitespace.
source = source.replace(lead_comments_re, '');
source = source.replace(trail_comments_re, '');
// Find include guards.
var lead_guards = lead_guards_re.exec(source);
var trail_guards = trail_guards_re.exec(source);
// Remove include guards, if found.
if (lead_guards && trail_guards && lead_guards[1] == lead_guards[2]) {
console.error('Stripping include guards: ' + filename);
source = source.replace(lead_guards_re, '');
source = source.replace(trail_guards_re, '');
}
// Add back a trailing newline.
source += '\n';
return source;
}
function lines(filename, strip) {
var source = readFileWithPath(filename, ['.'].concat(includeDirs));
if (strip) source = strip_guards(filename, source);
return source.split(/\r?\n/g);
}
function comment(s) {
return ''; //'/* ' + s + '*/'
}
function include(line, filename) {
var key = path.basename(filename).toLowerCase();
if (included[key])
return comment(line);
console.error('Including: ' + key);
included[key] = true;
return lines(filename, true);
}
function add(filename) {
var key = path.basename(filename).toLowerCase();
console.error('Adding: ' + key);
included[key] = true;
return lines(filename, stripGuardsEnabled);
}
var sys_included = {};
function include_sys(line, filename) {
var key = path.basename(filename).toLowerCase();
if (sys_included[key])
return comment(line);
sys_included[key] = true;
}
var source = [];
source = source.concat('/*')
.concat(fs.readFileSync('LICENSE', 'utf8')
.replace(/^\s+|\s+$/g, '')
.split(/\r?\n/g)
.map(function(s) {
return ' * ' + s;
})
.map(function(s) {
return s.replace(/\s+$/, '');
}))
.concat(' */')
.concat('');
for (var i = 0; i < files.length; i++) {
var filename = files[i];
source = source.concat(add(filename));
}
var patterns = [
{ re: /^\s*#include\s*"([^"]*)".*$/, fn: include },
{ re: /^\s*#include\s*<([^>]*)>.*$/, fn: include_sys }
];
restart: for (var lno = 0; lno < source.length;) {
for (var i in patterns) {
var line = source[lno];
var pattern = patterns[i];
var match = pattern.re.exec(line);
if (match) {
var repl = pattern.fn.apply(null, match);
if (repl != null && repl !== line) {
source.splice.apply(source, [lno, 1].concat(repl));
continue restart;
}
}
}
lno++;
}
source = source
.map((line) => line.replace(/\s+$/, ''))
.join('\n')
.replace(/\n{3,}/g, '\n\n')
.replace(/\n*$/, '\n');
process.stdout.write(source);

View File

@ -1,41 +0,0 @@
const spawn = require('child_process').spawn;
const basename = require('path').basename;
const test_exes = process.argv.slice(2);
run_tests(test_exes);
function run_tests(test_exes, num = 0, fail_count = 0) {
if (test_exes.length <= num)
return done(test_exes, fail_count);
const test_exe = test_exes[num];
const test_name = basename(test_exe, '.exe');
console.log('(%d/%d) %s...', (num + 1), test_exes.length, test_name);
const child = spawn(test_exe, [], { encoding: 'utf8' });
let out = '';
child.stdout.on('data', (data) => out += data);
child.stderr.on('data', (data) => out += data);
child.on('exit', (code) => {
if (code === 0) {
console.log(' PASS');
} else {
console.log(' FAIL\n' + out);
fail_count++;
}
run_tests(test_exes, num + 1, fail_count);
});
}
function done(test_exes, fail_count) {
let pass_count = test_exes.length - fail_count;
console.log(' DONE - %d PASSED, %d FAILED', pass_count, fail_count);
process.exit(fail_count == 0 ? 0 : 1);
}

View File

@ -1,7 +0,0 @@
{
"indentSize": 2,
"tabSize": 2,
"indentStyle": 2,
"newLineCharacter": "\n",
"convertTabsToSpaces": true
}

2608
wepoll.c Normal file

File diff suppressed because it is too large

View File

@ -1,3 +1,33 @@
/*
* Copyright 2012-2017, Bert Belder. All rights reserved.
*
* The red-black tree implementation:
* Copyright 2002 Niels Provos <provos@citi.umich.edu> All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef WEPOLL_H_
#define WEPOLL_H_