src,test: use consistent naming style for private names

Bert Belder 2018-06-06 11:15:31 -07:00
parent 9fa0461e33
commit f813598587
GPG Key ID: 7A77887B2E2ED461
14 changed files with 167 additions and 163 deletions
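
For orientation, the sketch below (not part of the commit) illustrates the renaming pattern applied across src/ and test/: file-private names drop their leading underscore, likely to steer clear of identifiers the C standard reserves for the implementation, and instead take the owning module's name followed by a double underscore. The constants _AFD_ANY_PROTOCOL / AFD__ANY_PROTOCOL are taken from the diff; the helper functions and main() are hypothetical scaffolding added only so the sketch compiles.

/* Minimal, self-contained sketch of the naming convention; this is not
 * code from the wepoll sources. */
#include <stdio.h>

/* Old style: leading underscore marks a file-scope (private) name. */
static const int _AFD_ANY_PROTOCOL = -1;
static int _afd_describe(void) { return _AFD_ANY_PROTOCOL; }  /* hypothetical */

/* New style: <MODULE>__ prefix for constants, <module>__ for functions. */
static const int AFD__ANY_PROTOCOL = -1;
static int afd__describe(void) { return AFD__ANY_PROTOCOL; }  /* hypothetical */

int main(void) {
  /* Both helpers exist only to make the sketch runnable. */
  printf("%d %d\n", _afd_describe(), afd__describe());
  return 0;
}
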

View File

@ -11,7 +11,7 @@
#define IOCTL_AFD_POLL 0x00012024
/* clang-format off */
static const GUID _AFD_PROVIDER_GUID_LIST[] = {
static const GUID AFD__PROVIDER_GUID_LIST[] = {
/* MSAFD Tcpip [TCP+UDP+RAW / IP] */
{0xe70f1aa0, 0xab8b, 0x11cf,
{0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
@ -26,14 +26,14 @@ static const GUID _AFD_PROVIDER_GUID_LIST[] = {
{0xb6, 0x55, 0x00, 0x80, 0x5f, 0x36, 0x42, 0xcc}}};
/* clang-format on */
static const int _AFD_ANY_PROTOCOL = -1;
static const int AFD__ANY_PROTOCOL = -1;
/* This protocol info record is used by afd_create_driver_socket() to create
* sockets that can be used as the first argument to afd_poll(). It is
* populated on startup by afd_global_init(). */
static WSAPROTOCOL_INFOW _afd_driver_socket_template;
static WSAPROTOCOL_INFOW afd__driver_socket_protocol_info;
static const WSAPROTOCOL_INFOW* _afd_find_protocol_info(
static const WSAPROTOCOL_INFOW* afd__find_protocol_info(
const WSAPROTOCOL_INFOW* infos, size_t infos_count, int protocol_id) {
size_t i, j;
@ -41,13 +41,13 @@ static const WSAPROTOCOL_INFOW* _afd_find_protocol_info(
const WSAPROTOCOL_INFOW* info = &infos[i];
/* Apply protocol id filter. */
if (protocol_id != _AFD_ANY_PROTOCOL && protocol_id != info->iProtocol)
if (protocol_id != AFD__ANY_PROTOCOL && protocol_id != info->iProtocol)
continue;
/* Filter out non-MSAFD protocols. */
for (j = 0; j < array_count(_AFD_PROVIDER_GUID_LIST); j++) {
for (j = 0; j < array_count(AFD__PROVIDER_GUID_LIST); j++) {
if (memcmp(&info->ProviderId,
&_AFD_PROVIDER_GUID_LIST[j],
&AFD__PROVIDER_GUID_LIST[j],
sizeof info->ProviderId) == 0)
return info;
}
@ -69,15 +69,15 @@ int afd_global_init(void) {
* socket. Preferentially we pick a UDP socket, otherwise try TCP or any
* other type. */
for (;;) {
afd_info = _afd_find_protocol_info(infos, infos_count, IPPROTO_UDP);
afd_info = afd__find_protocol_info(infos, infos_count, IPPROTO_UDP);
if (afd_info != NULL)
break;
afd_info = _afd_find_protocol_info(infos, infos_count, IPPROTO_TCP);
afd_info = afd__find_protocol_info(infos, infos_count, IPPROTO_TCP);
if (afd_info != NULL)
break;
afd_info = _afd_find_protocol_info(infos, infos_count, _AFD_ANY_PROTOCOL);
afd_info = afd__find_protocol_info(infos, infos_count, AFD__ANY_PROTOCOL);
if (afd_info != NULL)
break;
@ -86,7 +86,7 @@ int afd_global_init(void) {
}
/* Copy found protocol information from the catalog to a static buffer. */
_afd_driver_socket_template = *afd_info;
afd__driver_socket_protocol_info = *afd_info;
free(infos);
return 0;
@ -95,10 +95,10 @@ int afd_global_init(void) {
int afd_create_driver_socket(HANDLE iocp, SOCKET* driver_socket_out) {
SOCKET socket;
socket = WSASocketW(_afd_driver_socket_template.iAddressFamily,
_afd_driver_socket_template.iSocketType,
_afd_driver_socket_template.iProtocol,
&_afd_driver_socket_template,
socket = WSASocketW(afd__driver_socket_protocol_info.iAddressFamily,
afd__driver_socket_protocol_info.iSocketType,
afd__driver_socket_protocol_info.iProtocol,
&afd__driver_socket_protocol_info,
0,
WSA_FLAG_OVERLAPPED);
if (socket == INVALID_SOCKET)

View File

@ -9,19 +9,19 @@
#include "util.h"
#include "win.h"
static ts_tree_t _epoll_handle_tree;
static ts_tree_t epoll__handle_tree;
static inline port_state_t* _handle_tree_node_to_port(
static inline port_state_t* epoll__handle_tree_node_to_port(
ts_tree_node_t* tree_node) {
return container_of(tree_node, port_state_t, handle_tree_node);
}
int api_global_init(void) {
ts_tree_init(&_epoll_handle_tree);
int epoll_global_init(void) {
ts_tree_init(&epoll__handle_tree);
return 0;
}
static HANDLE _epoll_create(void) {
static HANDLE epoll__create(void) {
port_state_t* port_state;
HANDLE ephnd;
@ -32,7 +32,7 @@ static HANDLE _epoll_create(void) {
if (port_state == NULL)
return NULL;
if (ts_tree_add(&_epoll_handle_tree,
if (ts_tree_add(&epoll__handle_tree,
&port_state->handle_tree_node,
(uintptr_t) ephnd) < 0) {
/* This should never happen. */
@ -47,14 +47,14 @@ HANDLE epoll_create(int size) {
if (size <= 0)
return_set_error(NULL, ERROR_INVALID_PARAMETER);
return _epoll_create();
return epoll__create();
}
HANDLE epoll_create1(int flags) {
if (flags != 0)
return_set_error(NULL, ERROR_INVALID_PARAMETER);
return _epoll_create();
return epoll__create();
}
int epoll_close(HANDLE ephnd) {
@ -64,13 +64,13 @@ int epoll_close(HANDLE ephnd) {
if (init() < 0)
return -1;
tree_node = ts_tree_del_and_ref(&_epoll_handle_tree, (uintptr_t) ephnd);
tree_node = ts_tree_del_and_ref(&epoll__handle_tree, (uintptr_t) ephnd);
if (tree_node == NULL) {
err_set_win_error(ERROR_INVALID_PARAMETER);
goto err;
}
port_state = _handle_tree_node_to_port(tree_node);
port_state = epoll__handle_tree_node_to_port(tree_node);
port_close(port_state);
ts_tree_node_unref_and_destroy(tree_node);
@ -90,13 +90,13 @@ int epoll_ctl(HANDLE ephnd, int op, SOCKET sock, struct epoll_event* ev) {
if (init() < 0)
return -1;
tree_node = ts_tree_find_and_ref(&_epoll_handle_tree, (uintptr_t) ephnd);
tree_node = ts_tree_find_and_ref(&epoll__handle_tree, (uintptr_t) ephnd);
if (tree_node == NULL) {
err_set_win_error(ERROR_INVALID_PARAMETER);
goto err;
}
port_state = _handle_tree_node_to_port(tree_node);
port_state = epoll__handle_tree_node_to_port(tree_node);
r = port_ctl(port_state, op, sock, ev);
ts_tree_node_unref(tree_node);
@ -128,13 +128,13 @@ int epoll_wait(HANDLE ephnd,
if (init() < 0)
return -1;
tree_node = ts_tree_find_and_ref(&_epoll_handle_tree, (uintptr_t) ephnd);
tree_node = ts_tree_find_and_ref(&epoll__handle_tree, (uintptr_t) ephnd);
if (tree_node == NULL) {
err_set_win_error(ERROR_INVALID_PARAMETER);
goto err;
}
port_state = _handle_tree_node_to_port(tree_node);
port_state = epoll__handle_tree_node_to_port(tree_node);
num_events = port_wait(port_state, events, maxevents, timeout);
ts_tree_node_unref(tree_node);

View File

@ -3,6 +3,6 @@
#include "internal.h"
WEPOLL_INTERNAL int api_global_init(void);
WEPOLL_INTERNAL int epoll_global_init(void);
#endif /* WEPOLL_API_H_ */

View File

@ -106,7 +106,7 @@
X(WSASYSNOTREADY, ENETDOWN) \
X(WSAVERNOTSUPPORTED, ENOSYS)
static errno_t _err_map_win_error_to_errno(DWORD error) {
static errno_t err__map_win_error_to_errno(DWORD error) {
switch (error) {
#define X(error_sym, errno_sym) \
case error_sym: \
@ -118,12 +118,12 @@ static errno_t _err_map_win_error_to_errno(DWORD error) {
}
void err_map_win_error(void) {
errno = _err_map_win_error_to_errno(GetLastError());
errno = err__map_win_error_to_errno(GetLastError());
}
void err_set_win_error(DWORD error) {
SetLastError(error);
errno = _err_map_win_error_to_errno(error);
errno = err__map_win_error_to_errno(error);
}
int err_check_handle(HANDLE handle) {

View File

@ -8,10 +8,10 @@
#include "util.h"
#include "ws.h"
static bool _initialized = false;
static INIT_ONCE _once = INIT_ONCE_STATIC_INIT;
static bool init__done = false;
static INIT_ONCE init__once = INIT_ONCE_STATIC_INIT;
static BOOL CALLBACK _init_once_callback(INIT_ONCE* once,
static BOOL CALLBACK init__once_callback(INIT_ONCE* once,
void* parameter,
void** context) {
unused_var(once);
@ -20,16 +20,16 @@ static BOOL CALLBACK _init_once_callback(INIT_ONCE* once,
/* N.b. that initialization order matters here. */
if (ws_global_init() < 0 || nt_global_init() < 0 || afd_global_init() < 0 ||
reflock_global_init() < 0 || api_global_init() < 0)
reflock_global_init() < 0 || epoll_global_init() < 0)
return FALSE;
_initialized = true;
init__done = true;
return TRUE;
}
int init(void) {
if (!_initialized &&
!InitOnceExecuteOnce(&_once, _init_once_callback, NULL, NULL))
if (!init__done &&
!InitOnceExecuteOnce(&init__once, init__once_callback, NULL, NULL))
return -1; /* LastError and errno aren't touched InitOnceExecuteOnce. */
return 0;

View File

@ -8,7 +8,7 @@
#include "util.h"
#include "win.h"
static const size_t _POLL_GROUP_MAX_GROUP_SIZE = 32;
static const size_t POLL_GROUP__MAX_GROUP_SIZE = 32;
typedef struct poll_group {
port_state_t* port_state;
@ -17,7 +17,7 @@ typedef struct poll_group {
size_t group_size;
} poll_group_t;
static poll_group_t* _poll_group_new(port_state_t* port_state) {
static poll_group_t* poll_group__new(port_state_t* port_state) {
poll_group_t* poll_group = malloc(sizeof *poll_group);
if (poll_group == NULL)
return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
@ -60,12 +60,12 @@ poll_group_t* poll_group_acquire(port_state_t* port_state) {
: NULL;
if (poll_group == NULL ||
poll_group->group_size >= _POLL_GROUP_MAX_GROUP_SIZE)
poll_group = _poll_group_new(port_state);
poll_group->group_size >= POLL_GROUP__MAX_GROUP_SIZE)
poll_group = poll_group__new(port_state);
if (poll_group == NULL)
return NULL;
if (++poll_group->group_size == _POLL_GROUP_MAX_GROUP_SIZE)
if (++poll_group->group_size == POLL_GROUP__MAX_GROUP_SIZE)
queue_move_first(&port_state->poll_group_queue, &poll_group->queue_node);
return poll_group;
@ -75,7 +75,7 @@ void poll_group_release(poll_group_t* poll_group) {
port_state_t* port_state = poll_group->port_state;
poll_group->group_size--;
assert(poll_group->group_size < _POLL_GROUP_MAX_GROUP_SIZE);
assert(poll_group->group_size < POLL_GROUP__MAX_GROUP_SIZE);
queue_move_last(&port_state->poll_group_queue, &poll_group->queue_node);

View File

@ -16,7 +16,7 @@
#define PORT__MAX_ON_STACK_COMPLETIONS 256
static port_state_t* _port_alloc(void) {
static port_state_t* port__alloc(void) {
port_state_t* port_state = malloc(sizeof *port_state);
if (port_state == NULL)
return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
@ -24,12 +24,12 @@ static port_state_t* _port_alloc(void) {
return port_state;
}
static void _port_free(port_state_t* port) {
static void port__free(port_state_t* port) {
assert(port != NULL);
free(port);
}
static HANDLE _port_create_iocp(void) {
static HANDLE port__create_iocp(void) {
HANDLE iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
if (iocp == NULL)
return_map_error(NULL);
@ -41,11 +41,11 @@ port_state_t* port_new(HANDLE* iocp_out) {
port_state_t* port_state;
HANDLE iocp;
port_state = _port_alloc();
port_state = port__alloc();
if (port_state == NULL)
goto err1;
iocp = _port_create_iocp();
iocp = port__create_iocp();
if (iocp == NULL)
goto err2;
@ -63,12 +63,12 @@ port_state_t* port_new(HANDLE* iocp_out) {
return port_state;
err2:
_port_free(port_state);
port__free(port_state);
err1:
return NULL;
}
static int _port_close_iocp(port_state_t* port_state) {
static int port__close_iocp(port_state_t* port_state) {
HANDLE iocp = port_state->iocp;
port_state->iocp = NULL;
@ -82,7 +82,7 @@ int port_close(port_state_t* port_state) {
int result;
EnterCriticalSection(&port_state->lock);
result = _port_close_iocp(port_state);
result = port__close_iocp(port_state);
LeaveCriticalSection(&port_state->lock);
return result;
@ -114,12 +114,12 @@ int port_delete(port_state_t* port_state) {
DeleteCriticalSection(&port_state->lock);
_port_free(port_state);
port__free(port_state);
return 0;
}
static int _port_update_events(port_state_t* port_state) {
static int port__update_events(port_state_t* port_state) {
queue_t* sock_update_queue = &port_state->sock_update_queue;
/* Walk the queue, submitting new poll requests for every socket that needs
@ -137,12 +137,12 @@ static int _port_update_events(port_state_t* port_state) {
return 0;
}
static void _port_update_events_if_polling(port_state_t* port_state) {
static void port__update_events_if_polling(port_state_t* port_state) {
if (port_state->active_poll_count > 0)
_port_update_events(port_state);
port__update_events(port_state);
}
static int _port_feed_events(port_state_t* port_state,
static int port__feed_events(port_state_t* port_state,
struct epoll_event* epoll_events,
OVERLAPPED_ENTRY* iocp_events,
DWORD iocp_event_count) {
@ -159,14 +159,14 @@ static int _port_feed_events(port_state_t* port_state,
return epoll_event_count;
}
static int _port_poll(port_state_t* port_state,
static int port__poll(port_state_t* port_state,
struct epoll_event* epoll_events,
OVERLAPPED_ENTRY* iocp_events,
DWORD maxevents,
DWORD timeout) {
DWORD completion_count;
if (_port_update_events(port_state) < 0)
if (port__update_events(port_state) < 0)
return -1;
port_state->active_poll_count++;
@ -187,7 +187,7 @@ static int _port_poll(port_state_t* port_state,
if (!r)
return_map_error(-1);
return _port_feed_events(
return port__feed_events(
port_state, epoll_events, iocp_events, completion_count);
}
@ -233,7 +233,7 @@ int port_wait(port_state_t* port_state,
for (;;) {
uint64_t now;
result = _port_poll(
result = port__poll(
port_state, events, iocp_events, (DWORD) maxevents, gqcs_timeout);
if (result < 0 || result > 0)
break; /* Result, error, or time-out. */
@ -254,7 +254,7 @@ int port_wait(port_state_t* port_state,
gqcs_timeout = (DWORD)(due - now);
}
_port_update_events_if_polling(port_state);
port__update_events_if_polling(port_state);
LeaveCriticalSection(&port_state->lock);
@ -269,7 +269,7 @@ int port_wait(port_state_t* port_state,
return -1;
}
static int _port_ctl_add(port_state_t* port_state,
static int port__ctl_add(port_state_t* port_state,
SOCKET sock,
struct epoll_event* ev) {
sock_state_t* sock_state = sock_new(port_state, sock);
@ -281,12 +281,12 @@ static int _port_ctl_add(port_state_t* port_state,
return -1;
}
_port_update_events_if_polling(port_state);
port__update_events_if_polling(port_state);
return 0;
}
static int _port_ctl_mod(port_state_t* port_state,
static int port__ctl_mod(port_state_t* port_state,
SOCKET sock,
struct epoll_event* ev) {
sock_state_t* sock_state = port_find_socket(port_state, sock);
@ -296,12 +296,12 @@ static int _port_ctl_mod(port_state_t* port_state,
if (sock_set_event(port_state, sock_state, ev) < 0)
return -1;
_port_update_events_if_polling(port_state);
port__update_events_if_polling(port_state);
return 0;
}
static int _port_ctl_del(port_state_t* port_state, SOCKET sock) {
static int port__ctl_del(port_state_t* port_state, SOCKET sock) {
sock_state_t* sock_state = port_find_socket(port_state, sock);
if (sock_state == NULL)
return -1;
@ -311,17 +311,17 @@ static int _port_ctl_del(port_state_t* port_state, SOCKET sock) {
return 0;
}
static int _port_ctl_op(port_state_t* port_state,
static int port__ctl_op(port_state_t* port_state,
int op,
SOCKET sock,
struct epoll_event* ev) {
switch (op) {
case EPOLL_CTL_ADD:
return _port_ctl_add(port_state, sock, ev);
return port__ctl_add(port_state, sock, ev);
case EPOLL_CTL_MOD:
return _port_ctl_mod(port_state, sock, ev);
return port__ctl_mod(port_state, sock, ev);
case EPOLL_CTL_DEL:
return _port_ctl_del(port_state, sock);
return port__ctl_del(port_state, sock);
default:
return_set_error(-1, ERROR_INVALID_PARAMETER);
}
@ -334,7 +334,7 @@ int port_ctl(port_state_t* port_state,
int result;
EnterCriticalSection(&port_state->lock);
result = _port_ctl_op(port_state, op, sock, ev);
result = port__ctl_op(port_state, op, sock, ev);
LeaveCriticalSection(&port_state->lock);
return result;

View File

@ -13,7 +13,7 @@ void queue_node_init(queue_node_t* node) {
node->next = node;
}
static inline void _queue_detach(queue_node_t* node) {
static inline void queue__detach_node(queue_node_t* node) {
node->prev->next = node->next;
node->next->prev = node->prev;
}
@ -41,17 +41,17 @@ void queue_append(queue_t* queue, queue_node_t* node) {
}
void queue_move_first(queue_t* queue, queue_node_t* node) {
_queue_detach(node);
queue__detach_node(node);
queue_prepend(queue, node);
}
void queue_move_last(queue_t* queue, queue_node_t* node) {
_queue_detach(node);
queue__detach_node(node);
queue_append(queue, node);
}
void queue_remove(queue_node_t* node) {
_queue_detach(node);
queue__detach_node(node);
queue_node_init(node);
}

View File

@ -9,18 +9,18 @@
#include "win.h"
/* clang-format off */
static const long _REF = (long) 0x00000001;
static const long _REF_MASK = (long) 0x0fffffff;
static const long _DESTROY = (long) 0x10000000;
static const long _DESTROY_MASK = (long) 0xf0000000;
static const long _POISON = (long) 0x300DEAD0;
static const long REFLOCK__REF = (long) 0x00000001;
static const long REFLOCK__REF_MASK = (long) 0x0fffffff;
static const long REFLOCK__DESTROY = (long) 0x10000000;
static const long REFLOCK__DESTROY_MASK = (long) 0xf0000000;
static const long REFLOCK__POISON = (long) 0x300DEAD0;
/* clang-format on */
static HANDLE _keyed_event = NULL;
static HANDLE reflock__keyed_event = NULL;
int reflock_global_init(void) {
NTSTATUS status =
NtCreateKeyedEvent(&_keyed_event, ~(ACCESS_MASK) 0, NULL, 0);
NtCreateKeyedEvent(&reflock__keyed_event, ~(ACCESS_MASK) 0, NULL, 0);
if (status != STATUS_SUCCESS)
return_set_error(-1, RtlNtStatusToDosError(status));
return 0;
@ -30,48 +30,51 @@ void reflock_init(reflock_t* reflock) {
reflock->state = 0;
}
static void _signal_event(void* address) {
NTSTATUS status = NtReleaseKeyedEvent(_keyed_event, address, FALSE, NULL);
static void reflock__signal_event(void* address) {
NTSTATUS status =
NtReleaseKeyedEvent(reflock__keyed_event, address, FALSE, NULL);
if (status != STATUS_SUCCESS)
abort();
}
static void _await_event(void* address) {
NTSTATUS status = NtWaitForKeyedEvent(_keyed_event, address, FALSE, NULL);
static void reflock__await_event(void* address) {
NTSTATUS status =
NtWaitForKeyedEvent(reflock__keyed_event, address, FALSE, NULL);
if (status != STATUS_SUCCESS)
abort();
}
void reflock_ref(reflock_t* reflock) {
long state = InterlockedAdd(&reflock->state, _REF);
long state = InterlockedAdd(&reflock->state, REFLOCK__REF);
unused_var(state);
assert((state & _DESTROY_MASK) == 0); /* Overflow or destroyed. */
assert((state & REFLOCK__DESTROY_MASK) == 0); /* Overflow or destroyed. */
}
void reflock_unref(reflock_t* reflock) {
long state = InterlockedAdd(&reflock->state, -_REF);
long ref_count = state & _REF_MASK;
long destroy = state & _DESTROY_MASK;
long state = InterlockedAdd(&reflock->state, -REFLOCK__REF);
long ref_count = state & REFLOCK__REF_MASK;
long destroy = state & REFLOCK__DESTROY_MASK;
unused_var(ref_count);
unused_var(destroy);
if (state == _DESTROY)
_signal_event(reflock);
if (state == REFLOCK__DESTROY)
reflock__signal_event(reflock);
else
assert(destroy == 0 || ref_count > 0);
}
void reflock_unref_and_destroy(reflock_t* reflock) {
long state = InterlockedAdd(&reflock->state, _DESTROY - _REF);
long ref_count = state & _REF_MASK;
long state =
InterlockedAdd(&reflock->state, REFLOCK__DESTROY - REFLOCK__REF);
long ref_count = state & REFLOCK__REF_MASK;
assert((state & _DESTROY_MASK) ==
_DESTROY); /* Underflow or already destroyed. */
assert((state & REFLOCK__DESTROY_MASK) ==
REFLOCK__DESTROY); /* Underflow or already destroyed. */
if (ref_count != 0)
_await_event(reflock);
reflock__await_event(reflock);
state = InterlockedExchange(&reflock->state, _POISON);
assert(state == _DESTROY);
state = InterlockedExchange(&reflock->state, REFLOCK__POISON);
assert(state == REFLOCK__DESTROY);
}

View File

@ -11,15 +11,15 @@
#include "sock.h"
#include "ws.h"
static const uint32_t _SOCK_KNOWN_EPOLL_EVENTS =
static const uint32_t SOCK__KNOWN_EPOLL_EVENTS =
EPOLLIN | EPOLLPRI | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM |
EPOLLRDBAND | EPOLLWRNORM | EPOLLWRBAND | EPOLLMSG | EPOLLRDHUP;
typedef enum _poll_status {
_POLL_IDLE = 0,
_POLL_PENDING,
_POLL_CANCELLED
} _poll_status_t;
typedef enum sock__poll_status {
SOCK__POLL_IDLE = 0,
SOCK__POLL_PENDING,
SOCK__POLL_CANCELLED
} sock__poll_status_t;
typedef struct sock_state {
OVERLAPPED overlapped;
@ -31,25 +31,25 @@ typedef struct sock_state {
epoll_data_t user_data;
uint32_t user_events;
uint32_t pending_events;
_poll_status_t poll_status;
sock__poll_status_t poll_status;
bool delete_pending;
} sock_state_t;
static inline sock_state_t* _sock_alloc(void) {
static inline sock_state_t* sock__alloc(void) {
sock_state_t* sock_state = malloc(sizeof *sock_state);
if (sock_state == NULL)
return_set_error(NULL, ERROR_NOT_ENOUGH_MEMORY);
return sock_state;
}
static inline void _sock_free(sock_state_t* sock_state) {
static inline void sock__free(sock_state_t* sock_state) {
free(sock_state);
}
static int _sock_cancel_poll(sock_state_t* sock_state) {
static int sock__cancel_poll(sock_state_t* sock_state) {
HANDLE driver_handle =
(HANDLE)(uintptr_t) poll_group_get_socket(sock_state->poll_group);
assert(sock_state->poll_status == _POLL_PENDING);
assert(sock_state->poll_status == SOCK__POLL_PENDING);
/* CancelIoEx() may fail with ERROR_NOT_FOUND if the overlapped operation has
* already completed. This is not a problem and we proceed normally. */
@ -57,7 +57,7 @@ static int _sock_cancel_poll(sock_state_t* sock_state) {
GetLastError() != ERROR_NOT_FOUND)
return_map_error(-1);
sock_state->poll_status = _POLL_CANCELLED;
sock_state->poll_status = SOCK__POLL_CANCELLED;
sock_state->pending_events = 0;
return 0;
}
@ -78,7 +78,7 @@ sock_state_t* sock_new(port_state_t* port_state, SOCKET socket) {
if (poll_group == NULL)
return NULL;
sock_state = _sock_alloc();
sock_state = sock__alloc();
if (sock_state == NULL)
goto err1;
@ -96,19 +96,19 @@ sock_state_t* sock_new(port_state_t* port_state, SOCKET socket) {
return sock_state;
err2:
_sock_free(sock_state);
sock__free(sock_state);
err1:
poll_group_release(poll_group);
return NULL;
}
static int _sock_delete(port_state_t* port_state,
static int sock__delete(port_state_t* port_state,
sock_state_t* sock_state,
bool force) {
if (!sock_state->delete_pending) {
if (sock_state->poll_status == _POLL_PENDING)
_sock_cancel_poll(sock_state);
if (sock_state->poll_status == SOCK__POLL_PENDING)
sock__cancel_poll(sock_state);
port_cancel_socket_update(port_state, sock_state);
port_unregister_socket_handle(port_state, sock_state);
@ -119,11 +119,11 @@ static int _sock_delete(port_state_t* port_state,
/* If the poll request still needs to complete, the sock_state object can't
* be free()d yet. `sock_feed_event()` or `port_close()` will take care
* of this later. */
if (force || sock_state->poll_status == _POLL_IDLE) {
if (force || sock_state->poll_status == SOCK__POLL_IDLE) {
/* Free the sock_state now. */
port_remove_deleted_socket(port_state, sock_state);
poll_group_release(sock_state->poll_group);
_sock_free(sock_state);
sock__free(sock_state);
} else {
/* Free the socket later. */
port_add_deleted_socket(port_state, sock_state);
@ -133,11 +133,11 @@ static int _sock_delete(port_state_t* port_state,
}
void sock_delete(port_state_t* port_state, sock_state_t* sock_state) {
_sock_delete(port_state, sock_state, false);
sock__delete(port_state, sock_state, false);
}
void sock_force_delete(port_state_t* port_state, sock_state_t* sock_state) {
_sock_delete(port_state, sock_state, true);
sock__delete(port_state, sock_state, true);
}
int sock_set_event(port_state_t* port_state,
@ -151,13 +151,13 @@ int sock_set_event(port_state_t* port_state,
sock_state->user_events = events;
sock_state->user_data = ev->data;
if ((events & _SOCK_KNOWN_EPOLL_EVENTS & ~sock_state->pending_events) != 0)
if ((events & SOCK__KNOWN_EPOLL_EVENTS & ~sock_state->pending_events) != 0)
port_request_socket_update(port_state, sock_state);
return 0;
}
static inline DWORD _epoll_events_to_afd_events(uint32_t epoll_events) {
static inline DWORD sock__epoll_events_to_afd_events(uint32_t epoll_events) {
/* Always monitor for AFD_POLL_LOCAL_CLOSE, which is triggered when the
* socket is closed with closesocket() or CloseHandle(). */
DWORD afd_events = AFD_POLL_LOCAL_CLOSE;
@ -178,7 +178,7 @@ static inline DWORD _epoll_events_to_afd_events(uint32_t epoll_events) {
return afd_events;
}
static inline uint32_t _afd_events_to_epoll_events(DWORD afd_events) {
static inline uint32_t sock__afd_events_to_epoll_events(DWORD afd_events) {
uint32_t epoll_events = 0;
if (afd_events & (AFD_POLL_RECEIVE | AFD_POLL_ACCEPT))
@ -200,27 +200,27 @@ static inline uint32_t _afd_events_to_epoll_events(DWORD afd_events) {
int sock_update(port_state_t* port_state, sock_state_t* sock_state) {
assert(!sock_state->delete_pending);
if ((sock_state->poll_status == _POLL_PENDING) &&
(sock_state->user_events & _SOCK_KNOWN_EPOLL_EVENTS &
if ((sock_state->poll_status == SOCK__POLL_PENDING) &&
(sock_state->user_events & SOCK__KNOWN_EPOLL_EVENTS &
~sock_state->pending_events) == 0) {
/* All the events the user is interested in are already being monitored by
* the pending poll operation. It might spuriously complete because of an
* event that we're no longer interested in; when that happens we'll submit
* a new poll operation with the updated event mask. */
} else if (sock_state->poll_status == _POLL_PENDING) {
} else if (sock_state->poll_status == SOCK__POLL_PENDING) {
/* A poll operation is already pending, but it's not monitoring for all the
* events that the user is interested in. Therefore, cancel the pending
* poll operation; when we receive it's completion package, a new poll
* operation will be submitted with the correct event mask. */
if (_sock_cancel_poll(sock_state) < 0)
if (sock__cancel_poll(sock_state) < 0)
return -1;
} else if (sock_state->poll_status == _POLL_CANCELLED) {
} else if (sock_state->poll_status == SOCK__POLL_CANCELLED) {
/* The poll operation has already been cancelled, we're still waiting for
* it to return. For now, there's nothing that needs to be done. */
} else if (sock_state->poll_status == _POLL_IDLE) {
} else if (sock_state->poll_status == SOCK__POLL_IDLE) {
/* No poll operation is pending; start one. */
sock_state->poll_info.Exclusive = FALSE;
sock_state->poll_info.NumberOfHandles = 1;
@ -228,7 +228,7 @@ int sock_update(port_state_t* port_state, sock_state_t* sock_state) {
sock_state->poll_info.Handles[0].Handle = (HANDLE) sock_state->base_socket;
sock_state->poll_info.Handles[0].Status = 0;
sock_state->poll_info.Handles[0].Events =
_epoll_events_to_afd_events(sock_state->user_events);
sock__epoll_events_to_afd_events(sock_state->user_events);
memset(&sock_state->overlapped, 0, sizeof sock_state->overlapped);
@ -241,7 +241,7 @@ int sock_update(port_state_t* port_state, sock_state_t* sock_state) {
break;
case ERROR_INVALID_HANDLE:
/* Socket closed; it'll be dropped from the epoll set. */
return _sock_delete(port_state, sock_state, false);
return sock__delete(port_state, sock_state, false);
default:
/* Other errors are propagated to the caller. */
return_map_error(-1);
@ -249,7 +249,7 @@ int sock_update(port_state_t* port_state, sock_state_t* sock_state) {
}
/* The poll request was successfully submitted. */
sock_state->poll_status = _POLL_PENDING;
sock_state->poll_status = SOCK__POLL_PENDING;
sock_state->pending_events = sock_state->user_events;
} else {
@ -269,12 +269,12 @@ int sock_feed_event(port_state_t* port_state,
AFD_POLL_INFO* poll_info = &sock_state->poll_info;
uint32_t epoll_events = 0;
sock_state->poll_status = _POLL_IDLE;
sock_state->poll_status = SOCK__POLL_IDLE;
sock_state->pending_events = 0;
if (sock_state->delete_pending) {
/* Socket has been deleted earlier and can now be freed. */
return _sock_delete(port_state, sock_state, false);
return sock__delete(port_state, sock_state, false);
} else if ((NTSTATUS) overlapped->Internal == STATUS_CANCELLED) {
/* The poll request was cancelled by CancelIoEx. */
@ -288,11 +288,12 @@ int sock_feed_event(port_state_t* port_state,
} else if (poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
/* The poll operation reported that the socket was closed. */
return _sock_delete(port_state, sock_state, false);
return sock__delete(port_state, sock_state, false);
} else {
/* Events related to our socket were reported. */
epoll_events = _afd_events_to_epoll_events(poll_info->Handles[0].Events);
epoll_events =
sock__afd_events_to_epoll_events(poll_info->Handles[0].Events);
}
/* Requeue the socket so a new poll request will be submitted. */

View File

@ -26,7 +26,7 @@ int ts_tree_add(ts_tree_t* ts_tree, ts_tree_node_t* node, uintptr_t key) {
return r;
}
static inline ts_tree_node_t* _ts_tree_find_node(ts_tree_t* ts_tree,
static inline ts_tree_node_t* ts_tree__find_node(ts_tree_t* ts_tree,
uintptr_t key) {
tree_node_t* tree_node = tree_find(&ts_tree->tree, key);
if (tree_node == NULL)
@ -40,7 +40,7 @@ ts_tree_node_t* ts_tree_del_and_ref(ts_tree_t* ts_tree, uintptr_t key) {
AcquireSRWLockExclusive(&ts_tree->lock);
ts_tree_node = _ts_tree_find_node(ts_tree, key);
ts_tree_node = ts_tree__find_node(ts_tree, key);
if (ts_tree_node != NULL) {
tree_del(&ts_tree->tree, &ts_tree_node->tree_node);
reflock_ref(&ts_tree_node->reflock);
@ -56,7 +56,7 @@ ts_tree_node_t* ts_tree_find_and_ref(ts_tree_t* ts_tree, uintptr_t key) {
AcquireSRWLockShared(&ts_tree->lock);
ts_tree_node = _ts_tree_find_node(ts_tree, key);
ts_tree_node = ts_tree__find_node(ts_tree, key);
if (ts_tree_node != NULL)
reflock_ref(&ts_tree_node->reflock);

View File

@ -34,11 +34,11 @@ void tree_node_init(tree_node_t* node) {
p->trans->parent = p; \
q->cis = p;
static inline void _tree_rotate_left(tree_t* tree, tree_node_t* node) {
static inline void tree__rotate_left(tree_t* tree, tree_node_t* node) {
TREE__ROTATE(left, right)
}
static inline void _tree_rotate_right(tree_t* tree, tree_node_t* node) {
static inline void tree__rotate_right(tree_t* tree, tree_node_t* node) {
TREE__ROTATE(right, left)
}
@ -60,13 +60,13 @@ static inline void _tree_rotate_right(tree_t* tree, tree_node_t* node) {
node = grandparent; \
} else { \
if (node == parent->trans) { \
_tree_rotate_##cis(tree, parent); \
tree__rotate_##cis(tree, parent); \
node = parent; \
parent = node->parent; \
} \
parent->red = false; \
grandparent->red = true; \
_tree_rotate_##trans(tree, grandparent); \
tree__rotate_##trans(tree, grandparent); \
}
int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key) {
@ -110,7 +110,7 @@ int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key) {
if (sibling->red) { \
sibling->red = false; \
parent->red = true; \
_tree_rotate_##cis(tree, parent); \
tree__rotate_##cis(tree, parent); \
sibling = parent->trans; \
} \
if ((sibling->left && sibling->left->red) || \
@ -118,12 +118,12 @@ int tree_add(tree_t* tree, tree_node_t* node, uintptr_t key) {
if (!sibling->trans || !sibling->trans->red) { \
sibling->cis->red = false; \
sibling->red = true; \
_tree_rotate_##trans(tree, sibling); \
tree__rotate_##trans(tree, sibling); \
sibling = parent->trans; \
} \
sibling->red = parent->red; \
parent->red = sibling->trans->red = false; \
_tree_rotate_##cis(tree, parent); \
tree__rotate_##cis(tree, parent); \
node = tree->root; \
break; \
} \

View File

@ -15,9 +15,9 @@
static void __cdecl fn(void)
#endif
static void __cdecl leak_check_finalize(void);
static void __cdecl leak_check__finalize(void);
constructor(leak_check_init) {
constructor(leak_check__init) {
/* Enable leak checking. */
_CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF);
@ -30,10 +30,10 @@ constructor(leak_check_init) {
_CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
/* Register the finalization function to run when the program exits. */
atexit(leak_check_finalize);
atexit(leak_check__finalize);
}
void leak_check_finalize(void) {
void leak_check__finalize(void) {
/* Check if there were memory leaks. */
int leaks_found = _CrtDumpMemoryLeaks();
check(!leaks_found);

View File

@ -23,8 +23,8 @@
void no_inline no_return check_fail(const char* message);
#define _check_to_string_helper(v) #v
#define _check_to_string(v) _check_to_string_helper(v)
#define test_util__to_string_helper(v) #v
#define test_util__to_string(v) test_util__to_string_helper(v)
#define check(expression) \
(void) ((!!(expression)) || \
@ -32,7 +32,7 @@ void no_inline no_return check_fail(const char* message);
"Check failed:\n" \
" test: " #expression "\n" \
" file: " __FILE__ "\n" \
" line: " _check_to_string(__LINE__) "\n"), \
" line: " test_util__to_string(__LINE__) "\n"), \
0))
/* Polyfill `static_assert` for some versions of clang and gcc. */