author | Brad King <brad.king@kitware.com> | 2016-08-31 13:01:07 (GMT)
---|---|---
committer | Brad King <brad.king@kitware.com> | 2016-08-31 13:01:07 (GMT)
commit | d96416fe482e17bc9f5a741d71d4a51a9b44f65e (patch) |
tree | a708137b959a2a17d03ea8fdd08ed12cf7a63853 | /Utilities/cmlibuv/src/unix
parent | 3800fc299fe7703c4a3976f9ba9028e1ebeb3d3e (diff) |
parent | 3a713eaaf7d289b130acf0007b197553a6528112 (diff) |
Merge branch 'upstream-libuv' into import-libuv
* upstream-libuv:
libuv 2016-08-30 (897738b1)
Diffstat (limited to 'Utilities/cmlibuv/src/unix')
39 files changed, 17514 insertions, 0 deletions
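The sources below implement the Unix back end of libuv's public event-loop API. For orientation, here is a minimal sketch of that API in use, written against libuv 1.x headers; it is illustrative only and not part of this commit.

```c
/* A minimal sketch of the libuv 1.x event-loop API that the imported
 * sources below implement; illustrative only, not part of this commit. */
#include <stdio.h>
#include <uv.h>

static void on_timer(uv_timer_t* handle) {
  printf("timer fired\n");
  uv_close((uv_handle_t*) handle, NULL);  /* let the loop drain and exit */
}

int main(void) {
  uv_loop_t loop;
  uv_timer_t timer;

  uv_loop_init(&loop);            /* calls uv__platform_loop_init() per OS */
  uv_timer_init(&loop, &timer);
  uv_timer_start(&timer, on_timer, 100 /* ms */, 0);

  uv_run(&loop, UV_RUN_DEFAULT);  /* blocks in uv__io_poll() between events */
  uv_loop_close(&loop);
  return 0;
}
```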
diff --git a/Utilities/cmlibuv/src/unix/aix.c b/Utilities/cmlibuv/src/unix/aix.c new file mode 100644 index 0000000..652cd98 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/aix.c @@ -0,0 +1,1154 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> +#include <errno.h> + +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <net/if.h> +#include <netinet/in.h> +#include <arpa/inet.h> + +#include <sys/time.h> +#include <unistd.h> +#include <fcntl.h> +#include <utmp.h> +#include <libgen.h> + +#include <sys/protosw.h> +#include <libperfstat.h> +#include <procinfo.h> +#include <sys/proc.h> +#include <sys/procfs.h> + +#include <sys/poll.h> + +#include <sys/pollset.h> +#include <ctype.h> +#ifdef HAVE_SYS_AHAFS_EVPRODS_H +#include <sys/ahafs_evProds.h> +#endif + +#include <sys/mntctl.h> +#include <sys/vmount.h> +#include <limits.h> +#include <strings.h> +#include <sys/vnode.h> + +#define RDWR_BUF_SIZE 4096 +#define EQ(a,b) (strcmp(a,b) == 0) + +int uv__platform_loop_init(uv_loop_t* loop) { + loop->fs_fd = -1; + + /* Passing maxfd of -1 should mean the limit is determined + * by the user's ulimit or the global limit as per the doc */ + loop->backend_fd = pollset_create(-1); + + if (loop->backend_fd == -1) + return -1; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + if (loop->fs_fd != -1) { + uv__close(loop->fs_fd); + loop->fs_fd = -1; + } + + if (loop->backend_fd != -1) { + pollset_destroy(loop->backend_fd); + loop->backend_fd = -1; + } +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct poll_ctl pc; + + pc.events = POLLIN; + pc.cmd = PS_MOD; /* Equivalent to PS_ADD if the fd is not in the pollset. 
*/ + pc.fd = fd; + + if (pollset_ctl(loop->backend_fd, &pc, 1)) + return -errno; + + pc.cmd = PS_DELETE; + if (pollset_ctl(loop->backend_fd, &pc, 1)) + abort(); + + return 0; +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + struct pollfd events[1024]; + struct pollfd pqry; + struct pollfd* pe; + struct poll_ctl pc; + QUEUE* q; + uv__io_t* w; + uint64_t base; + uint64_t diff; + int have_signals; + int nevents; + int count; + int nfds; + int i; + int rc; + int add_failed; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + assert(w->fd >= 0); + assert(w->fd < (int) loop->nwatchers); + + pc.events = w->pevents; + pc.fd = w->fd; + + add_failed = 0; + if (w->events == 0) { + pc.cmd = PS_ADD; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + if (errno != EINVAL) { + assert(0 && "Failed to add file descriptor (pc.fd) to pollset"); + abort(); + } + /* Check if the fd is already in the pollset */ + pqry.fd = pc.fd; + rc = pollset_query(loop->backend_fd, &pqry); + switch (rc) { + case -1: + assert(0 && "Failed to query pollset for file descriptor"); + abort(); + case 0: + assert(0 && "Pollset does not contain file descriptor"); + abort(); + } + /* If we got here then the pollset already contained the file descriptor even though + * we didn't think it should. This probably shouldn't happen, but we can continue. */ + add_failed = 1; + } + } + if (w->events != 0 || add_failed) { + /* Modify, potentially removing events -- need to delete then add. + * Could maybe mod if we knew for sure no events are removed, but + * content of w->events is handled above as not reliable (falls back) + * so may require a pollset_query() which would have to be pretty cheap + * compared to a PS_DELETE to be worth optimizing. Alternatively, could + * lazily remove events, squelching them in the mean time. */ + pc.cmd = PS_DELETE; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + assert(0 && "Failed to delete file descriptor (pc.fd) from pollset"); + abort(); + } + pc.cmd = PS_ADD; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + assert(0 && "Failed to add file descriptor (pc.fd) to pollset"); + abort(); + } + } + + w->events = w->pevents; + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + + for (;;) { + nfds = pollset_poll(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout); + + /* Update loop->time unconditionally. It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (nfds == 0) { + assert(timeout != -1); + return; + } + + if (nfds == -1) { + if (errno != EINTR) { + abort(); + } + + if (timeout == -1) + continue; + + if (timeout == 0) + return; + + /* Interrupted by a signal. Update timeout and poll again. 
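As the comments in `uv__io_poll()` above note, AIX offers no modify operation that is safe when the previously registered event mask is uncertain, so the loop re-registers a descriptor by deleting and re-adding it. A hedged sketch of that sequence (AIX-only; the helper name `pollset_update` is hypothetical and error handling is trimmed):

```c
/* Sketch: re-registering a descriptor in an AIX pollset, mirroring the
 * PS_DELETE + PS_ADD sequence in uv__io_poll() above. AIX-only. */
#include <sys/poll.h>
#include <sys/pollset.h>

static int pollset_update(pollset_t ps, int fd, int new_events) {
  struct poll_ctl pc;

  pc.fd = fd;
  pc.events = 0;
  pc.cmd = PS_DELETE;            /* drop whatever mask was registered... */
  if (pollset_ctl(ps, &pc, 1))
    return -1;

  pc.events = new_events;
  pc.cmd = PS_ADD;               /* ...then re-add with the new mask */
  return pollset_ctl(ps, &pc, 1);
}
```

PS_MOD would avoid the two calls, but only when the caller knows no events are being removed, which is exactly what the code above cannot assume.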
*/ + goto update_timeout; + } + + have_signals = 0; + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + + for (i = 0; i < nfds; i++) { + pe = events + i; + pc.cmd = PS_DELETE; + pc.fd = pe->fd; + + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (pc.fd == -1) + continue; + + assert(pc.fd >= 0); + assert((unsigned) pc.fd < loop->nwatchers); + + w = loop->watchers[pc.fd]; + + if (w == NULL) { + /* File descriptor that we've stopped watching, disarm it. + * + * Ignore all errors because we may be racing with another thread + * when the file descriptor is closed. + */ + pollset_ctl(loop->backend_fd, &pc, 1); + continue; + } + + /* Run signal watchers last. This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) + have_signals = 1; + else + w->cb(loop, w, pe->revents); + + nevents++; + } + + if (have_signals != 0) + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (have_signals != 0) + return; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. */ + timeout = 0; + continue; + } + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + diff = loop->time - base; + if (diff >= (uint64_t) timeout) + return; + + timeout -= diff; + } +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + uint64_t G = 1000000000; + timebasestruct_t t; + read_wall_time(&t, TIMEBASE_SZ); + time_base_to_time(&t, TIMEBASE_SZ); + return (uint64_t) t.tb_high * G + t.tb_low; +} + + +/* + * We could use a static buffer for the path manipulations that we need outside + * of the function, but this function could be called by multiple consumers and + * we don't want to potentially create a race condition in the use of snprintf. + * There is no direct way of getting the exe path in AIX - either through /procfs + * or through some libc APIs. The below approach is to parse the argv[0]'s pattern + * and use it in conjunction with PATH environment variable to craft one. + */ +int uv_exepath(char* buffer, size_t* size) { + int res; + char args[PATH_MAX]; + char abspath[PATH_MAX]; + size_t abspath_size; + struct procsinfo pi; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + pi.pi_pid = getpid(); + res = getargs(&pi, sizeof(pi), args, sizeof(args)); + if (res < 0) + return -EINVAL; + + /* + * Possibilities for args: + * i) an absolute path such as: /home/user/myprojects/nodejs/node + * ii) a relative path such as: ./node or ../myprojects/nodejs/node + * iii) a bare filename such as "node", after exporting PATH variable + * to its location. + */ + + /* Case i) and ii) absolute or relative paths */ + if (strchr(args, '/') != NULL) { + if (realpath(args, abspath) != abspath) + return -errno; + + abspath_size = strlen(abspath); + + *size -= 1; + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + return 0; + } else { + /* Case iii). 
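`uv__hrtime()` above converts the POWER timebase registers to nanoseconds via `read_wall_time()` and `time_base_to_time()`. Callers see it through the public `uv_hrtime()`; a small usage sketch (the function name `time_something` is hypothetical):

```c
/* Sketch: consuming uv_hrtime(), which the AIX code above backs with
 * read_wall_time()/time_base_to_time(). Illustrative only. */
#include <stdio.h>
#include <uv.h>

void time_something(void) {
  uint64_t start = uv_hrtime();            /* monotonic nanoseconds */
  /* ... the work being measured ... */
  uint64_t elapsed = uv_hrtime() - start;
  printf("took %llu ns\n", (unsigned long long) elapsed);
}
```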
Search PATH environment variable */ + char trypath[PATH_MAX]; + char *clonedpath = NULL; + char *token = NULL; + char *path = getenv("PATH"); + + if (path == NULL) + return -EINVAL; + + clonedpath = uv__strdup(path); + if (clonedpath == NULL) + return -ENOMEM; + + token = strtok(clonedpath, ":"); + while (token != NULL) { + snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, args); + if (realpath(trypath, abspath) == abspath) { + /* Check the match is executable */ + if (access(abspath, X_OK) == 0) { + abspath_size = strlen(abspath); + + *size -= 1; + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + uv__free(clonedpath); + return 0; + } + } + token = strtok(NULL, ":"); + } + uv__free(clonedpath); + + /* Out of tokens (path entries), and no match found */ + return -EINVAL; + } +} + + +uint64_t uv_get_free_memory(void) { + perfstat_memory_total_t mem_total; + int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1); + if (result == -1) { + return 0; + } + return mem_total.real_free * 4096; +} + + +uint64_t uv_get_total_memory(void) { + perfstat_memory_total_t mem_total; + int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1); + if (result == -1) { + return 0; + } + return mem_total.real_total * 4096; +} + + +void uv_loadavg(double avg[3]) { + perfstat_cpu_total_t ps_total; + int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1); + if (result == -1) { + avg[0] = 0.; avg[1] = 0.; avg[2] = 0.; + return; + } + avg[0] = ps_total.loadavg[0] / (double)(1 << SBITS); + avg[1] = ps_total.loadavg[1] / (double)(1 << SBITS); + avg[2] = ps_total.loadavg[2] / (double)(1 << SBITS); +} + + +#ifdef HAVE_SYS_AHAFS_EVPRODS_H +static char *uv__rawname(char *cp) { + static char rawbuf[FILENAME_MAX+1]; + char *dp = rindex(cp, '/'); + + if (dp == 0) + return 0; + + *dp = 0; + strcpy(rawbuf, cp); + *dp = '/'; + strcat(rawbuf, "/r"); + strcat(rawbuf, dp+1); + return rawbuf; +} + + +/* + * Determine whether given pathname is a directory + * Returns 0 if the path is a directory, -1 if not + * + * Note: Opportunity here for more detailed error information but + * that requires changing callers of this function as well + */ +static int uv__path_is_a_directory(char* filename) { + struct stat statbuf; + + if (stat(filename, &statbuf) < 0) + return -1; /* failed: not a directory, assume it is a file */ + + if (statbuf.st_type == VDIR) + return 0; + + return -1; +} + + +/* + * Check whether AHAFS is mounted. 
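Because AIX exposes no direct way to read the executable path, `uv_exepath()` above reconstructs it from `getargs()` output, resolving bare program names against `$PATH`. A usage sketch of the public call (the buffer size here is arbitrary):

```c
/* Sketch: calling the public uv_exepath(), whose AIX implementation
 * above falls back to scanning $PATH when argv[0] has no slash. */
#include <stdio.h>
#include <uv.h>

void print_exe_path(void) {
  char path[4096];
  size_t size = sizeof(path);

  if (uv_exepath(path, &size) == 0)
    printf("executable: %.*s\n", (int) size, path);
}
```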
+ * Returns 0 if AHAFS is mounted, or an error code < 0 on failure + */ +static int uv__is_ahafs_mounted(void){ + int rv, i = 2; + struct vmount *p; + int size_multiplier = 10; + size_t siz = sizeof(struct vmount)*size_multiplier; + struct vmount *vmt; + const char *dev = "/aha"; + char *obj, *stub; + + p = uv__malloc(siz); + if (p == NULL) + return -errno; + + /* Retrieve all mounted filesystems */ + rv = mntctl(MCTL_QUERY, siz, (char*)p); + if (rv < 0) + return -errno; + if (rv == 0) { + /* buffer was not large enough, reallocate to correct size */ + siz = *(int*)p; + uv__free(p); + p = uv__malloc(siz); + if (p == NULL) + return -errno; + rv = mntctl(MCTL_QUERY, siz, (char*)p); + if (rv < 0) + return -errno; + } + + /* Look for dev in filesystems mount info */ + for(vmt = p, i = 0; i < rv; i++) { + obj = vmt2dataptr(vmt, VMT_OBJECT); /* device */ + stub = vmt2dataptr(vmt, VMT_STUB); /* mount point */ + + if (EQ(obj, dev) || EQ(uv__rawname(obj), dev) || EQ(stub, dev)) { + uv__free(p); /* Found a match */ + return 0; + } + vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length); + } + + /* /aha is required for monitoring filesystem changes */ + return -1; +} + +/* + * Recursive call to mkdir() to create intermediate folders, if any + * Returns code from mkdir call + */ +static int uv__makedir_p(const char *dir) { + char tmp[256]; + char *p = NULL; + size_t len; + int err; + + snprintf(tmp, sizeof(tmp),"%s",dir); + len = strlen(tmp); + if (tmp[len - 1] == '/') + tmp[len - 1] = 0; + for (p = tmp + 1; *p; p++) { + if (*p == '/') { + *p = 0; + err = mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); + if (err != 0 && errno != EEXIST) + return err; + *p = '/'; + } + } + return mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); +} + +/* + * Creates necessary subdirectories in the AIX Event Infrastructure + * file system for monitoring the object specified. + * Returns code from mkdir call + */ +static int uv__make_subdirs_p(const char *filename) { + char cmd[2048]; + char *p; + int rc = 0; + + /* Strip off the monitor file name */ + p = strrchr(filename, '/'); + + if (p == NULL) + return 0; + + if (uv__path_is_a_directory((char*)filename) == 0) { + sprintf(cmd, "/aha/fs/modDir.monFactory"); + } else { + sprintf(cmd, "/aha/fs/modFile.monFactory"); + } + + strncat(cmd, filename, (p - filename)); + rc = uv__makedir_p(cmd); + + if (rc == -1 && errno != EEXIST){ + return -errno; + } + + return rc; +} + + +/* + * Checks if /aha is mounted, then proceeds to set up the monitoring + * objects for the specified file. 
+ * Returns 0 on success, or an error code < 0 on failure + */ +static int uv__setup_ahafs(const char* filename, int *fd) { + int rc = 0; + char mon_file_write_string[RDWR_BUF_SIZE]; + char mon_file[PATH_MAX]; + int file_is_directory = 0; /* -1 == NO, 0 == YES */ + + /* Create monitor file name for object */ + file_is_directory = uv__path_is_a_directory((char*)filename); + + if (file_is_directory == 0) + sprintf(mon_file, "/aha/fs/modDir.monFactory"); + else + sprintf(mon_file, "/aha/fs/modFile.monFactory"); + + if ((strlen(mon_file) + strlen(filename) + 5) > PATH_MAX) + return -ENAMETOOLONG; + + /* Make the necessary subdirectories for the monitor file */ + rc = uv__make_subdirs_p(filename); + if (rc == -1 && errno != EEXIST) + return rc; + + strcat(mon_file, filename); + strcat(mon_file, ".mon"); + + *fd = 0; errno = 0; + + /* Open the monitor file, creating it if necessary */ + *fd = open(mon_file, O_CREAT|O_RDWR); + if (*fd < 0) + return -errno; + + /* Write out the monitoring specifications. + * In this case, we are monitoring for a state change event type + * CHANGED=YES + * We will be waiting in select call, rather than a read: + * WAIT_TYPE=WAIT_IN_SELECT + * We only want minimal information for files: + * INFO_LVL=1 + * For directories, we want more information to track what file + * caused the change + * INFO_LVL=2 + */ + + if (file_is_directory == 0) + sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=2"); + else + sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1"); + + rc = write(*fd, mon_file_write_string, strlen(mon_file_write_string)+1); + if (rc < 0) + return -errno; + + return 0; +} + +/* + * Skips a specified number of lines in the buffer passed in. + * Walks the buffer pointed to by p and attempts to skip n lines. + * Returns the total number of lines skipped + */ +static int uv__skip_lines(char **p, int n) { + int lines = 0; + + while(n > 0) { + *p = strchr(*p, '\n'); + if (!p) + return lines; + + (*p)++; + n--; + lines++; + } + return lines; +} + + +/* + * Parse the event occurrence data to figure out what event just occurred + * and take proper action. + * + * The buf is a pointer to the buffer containing the event occurrence data + * Returns 0 on success, -1 if unrecoverable error in parsing + * + */ +static int uv__parse_data(char *buf, int *events, uv_fs_event_t* handle) { + int evp_rc, i; + char *p; + char filename[PATH_MAX]; /* To be used when handling directories */ + + p = buf; + *events = 0; + + /* Clean the filename buffer*/ + for(i = 0; i < PATH_MAX; i++) { + filename[i] = 0; + } + i = 0; + + /* Check for BUF_WRAP */ + if (strncmp(buf, "BUF_WRAP", strlen("BUF_WRAP")) == 0) { + assert(0 && "Buffer wrap detected, Some event occurrences lost!"); + return 0; + } + + /* Since we are using the default buffer size (4K), and have specified + * INFO_LVL=1, we won't see any EVENT_OVERFLOW conditions. 
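`uv__setup_ahafs()` above drives the AIX Event Infrastructure by creating a `.mon` file under `/aha` and writing a monitoring specification into it. A hedged sketch of that raw protocol; `watch_file` is a hypothetical helper, `/aha` is assumed to be mounted, and the paths and spec string come from the hunk above (a mode is passed to `open()` here for portability):

```c
/* Sketch of the raw AHAFS protocol that uv__setup_ahafs() above wraps.
 * AIX-only; assumes /aha is already mounted. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int watch_file(const char* path) {  /* e.g. "/tmp/foo.txt" */
  const char* spec = "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1";
  char mon[1024];
  int fd;

  snprintf(mon, sizeof(mon), "/aha/fs/modFile.monFactory%s.mon", path);
  fd = open(mon, O_CREAT | O_RDWR, 0644);  /* creates the monitor object */
  if (fd < 0)
    return -1;
  if (write(fd, spec, strlen(spec) + 1) < 0) {
    close(fd);
    return -1;
  }
  return fd;  /* select()/poll() on fd, then pread() the event data */
}
```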
Applications + * should check for this keyword if they are using an INFO_LVL of 2 or + * higher, and have a buffer size of <= 4K + */ + + /* Skip to RC_FROM_EVPROD */ + if (uv__skip_lines(&p, 9) != 9) + return -1; + + if (sscanf(p, "RC_FROM_EVPROD=%d\nEND_EVENT_DATA", &evp_rc) == 1) { + if (uv__path_is_a_directory(handle->path) == 0) { /* Directory */ + if (evp_rc == AHAFS_MODDIR_UNMOUNT || evp_rc == AHAFS_MODDIR_REMOVE_SELF) { + /* The directory is no longer available for monitoring */ + *events = UV_RENAME; + handle->dir_filename = NULL; + } else { + /* A file was added/removed inside the directory */ + *events = UV_CHANGE; + + /* Get the EVPROD_INFO */ + if (uv__skip_lines(&p, 1) != 1) + return -1; + + /* Scan out the name of the file that triggered the event*/ + if (sscanf(p, "BEGIN_EVPROD_INFO\n%sEND_EVPROD_INFO", filename) == 1) { + handle->dir_filename = uv__strdup((const char*)&filename); + } else + return -1; + } + } else { /* Regular File */ + if (evp_rc == AHAFS_MODFILE_RENAME) + *events = UV_RENAME; + else + *events = UV_CHANGE; + } + } + else + return -1; + + return 0; +} + + +/* This is the internal callback */ +static void uv__ahafs_event(uv_loop_t* loop, uv__io_t* event_watch, unsigned int fflags) { + char result_data[RDWR_BUF_SIZE]; + int bytes, rc = 0; + uv_fs_event_t* handle; + int events = 0; + char fname[PATH_MAX]; + char *p; + + handle = container_of(event_watch, uv_fs_event_t, event_watcher); + + /* At this point, we assume that polling has been done on the + * file descriptor, so we can just read the AHAFS event occurrence + * data and parse its results without having to block anything + */ + bytes = pread(event_watch->fd, result_data, RDWR_BUF_SIZE, 0); + + assert((bytes >= 0) && "uv__ahafs_event - Error reading monitor file"); + + /* Parse the data */ + if(bytes > 0) + rc = uv__parse_data(result_data, &events, handle); + + /* Unrecoverable error */ + if (rc == -1) + return; + + /* For directory changes, the name of the files that triggered the change + * are never absolute pathnames + */ + if (uv__path_is_a_directory(handle->path) == 0) { + p = handle->dir_filename; + } else { + p = strrchr(handle->path, '/'); + if (p == NULL) + p = handle->path; + else + p++; + } + strncpy(fname, p, sizeof(fname) - 1); + /* Just in case */ + fname[sizeof(fname) - 1] = '\0'; + + handle->cb(handle, fname, events, 0); +} +#endif + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { +#ifdef HAVE_SYS_AHAFS_EVPRODS_H + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +#else + return -ENOSYS; +#endif +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* filename, + unsigned int flags) { +#ifdef HAVE_SYS_AHAFS_EVPRODS_H + int fd, rc, str_offset = 0; + char cwd[PATH_MAX]; + char absolute_path[PATH_MAX]; + char readlink_cwd[PATH_MAX]; + + + /* Figure out whether filename is absolute or not */ + if (filename[0] == '/') { + /* We have absolute pathname */ + snprintf(absolute_path, sizeof(absolute_path), "%s", filename); + } else { + /* We have a relative pathname, compose the absolute pathname */ + snprintf(cwd, sizeof(cwd), "/proc/%lu/cwd", (unsigned long) getpid()); + rc = readlink(cwd, readlink_cwd, sizeof(readlink_cwd) - 1); + if (rc < 0) + return rc; + /* readlink does not null terminate our string */ + readlink_cwd[rc] = '\0'; + + if (filename[0] == '.' 
&& filename[1] == '/') + str_offset = 2; + + snprintf(absolute_path, sizeof(absolute_path), "%s%s", readlink_cwd, + filename + str_offset); + } + + if (uv__is_ahafs_mounted() < 0) /* /aha checks failed */ + return UV_ENOSYS; + + /* Setup ahafs */ + rc = uv__setup_ahafs((const char *)absolute_path, &fd); + if (rc != 0) + return rc; + + /* Setup/Initialize all the libuv routines */ + uv__handle_start(handle); + uv__io_init(&handle->event_watcher, uv__ahafs_event, fd); + handle->path = uv__strdup(filename); + handle->cb = cb; + + uv__io_start(handle->loop, &handle->event_watcher, POLLIN); + + return 0; +#else + return -ENOSYS; +#endif +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { +#ifdef HAVE_SYS_AHAFS_EVPRODS_H + if (!uv__is_active(handle)) + return 0; + + uv__io_close(handle->loop, &handle->event_watcher); + uv__handle_stop(handle); + + if (uv__path_is_a_directory(handle->path) == 0) { + uv__free(handle->dir_filename); + handle->dir_filename = NULL; + } + + uv__free(handle->path); + handle->path = NULL; + uv__close(handle->event_watcher.fd); + handle->event_watcher.fd = -1; + + return 0; +#else + return -ENOSYS; +#endif +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { +#ifdef HAVE_SYS_AHAFS_EVPRODS_H + uv_fs_event_stop(handle); +#else + UNREACHABLE(); +#endif +} + + +char** uv_setup_args(int argc, char** argv) { + return argv; +} + + +int uv_set_process_title(const char* title) { + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + if (buffer == NULL || size == 0) + return -EINVAL; + + buffer[0] = '\0'; + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + char pp[64]; + psinfo_t psinfo; + int err; + int fd; + + snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid()); + + fd = open(pp, O_RDONLY); + if (fd == -1) + return -errno; + + /* FIXME(bnoordhuis) Handle EINTR. 
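All of this AHAFS plumbing sits behind the portable `uv_fs_event_t` API shown above. A usage sketch (the `watch` helper is hypothetical; on builds without `HAVE_SYS_AHAFS_EVPRODS_H` the calls report `ENOSYS`):

```c
/* Sketch: the portable uv_fs_event_t API that the AHAFS code above
 * implements. */
#include <stdio.h>
#include <uv.h>

static void on_change(uv_fs_event_t* handle,
                      const char* filename,
                      int events,
                      int status) {
  if (status == 0)
    printf("%s: %s\n",
           (events & UV_RENAME) ? "rename" : "change",
           filename ? filename : "(unknown)");
}

int watch(uv_loop_t* loop, uv_fs_event_t* handle, const char* path) {
  int err = uv_fs_event_init(loop, handle);
  if (err == 0)
    err = uv_fs_event_start(handle, on_change, path, 0);
  return err;
}
```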
*/ + err = -EINVAL; + if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) { + *rss = (size_t)psinfo.pr_rssize * 1024; + err = 0; + } + uv__close(fd); + + return err; +} + + +int uv_uptime(double* uptime) { + struct utmp *utmp_buf; + size_t entries = 0; + time_t boot_time; + + utmpname(UTMP_FILE); + + setutent(); + + while ((utmp_buf = getutent()) != NULL) { + if (utmp_buf->ut_user[0] && utmp_buf->ut_type == USER_PROCESS) + ++entries; + if (utmp_buf->ut_type == BOOT_TIME) + boot_time = utmp_buf->ut_time; + } + + endutent(); + + if (boot_time == 0) + return -ENOSYS; + + *uptime = time(NULL) - boot_time; + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + uv_cpu_info_t* cpu_info; + perfstat_cpu_total_t ps_total; + perfstat_cpu_t* ps_cpus; + perfstat_id_t cpu_id; + int result, ncpus, idx = 0; + + result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1); + if (result == -1) { + return -ENOSYS; + } + + ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0); + if (result == -1) { + return -ENOSYS; + } + + ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t)); + if (!ps_cpus) { + return -ENOMEM; + } + + strcpy(cpu_id.name, FIRST_CPU); + result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus); + if (result == -1) { + uv__free(ps_cpus); + return -ENOSYS; + } + + *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t)); + if (!*cpu_infos) { + uv__free(ps_cpus); + return -ENOMEM; + } + + *count = ncpus; + + cpu_info = *cpu_infos; + while (idx < ncpus) { + cpu_info->speed = (int)(ps_total.processorHZ / 1000000); + cpu_info->model = uv__strdup(ps_total.description); + cpu_info->cpu_times.user = ps_cpus[idx].user; + cpu_info->cpu_times.sys = ps_cpus[idx].sys; + cpu_info->cpu_times.idle = ps_cpus[idx].idle; + cpu_info->cpu_times.irq = ps_cpus[idx].wait; + cpu_info->cpu_times.nice = 0; + cpu_info++; + idx++; + } + + uv__free(ps_cpus); + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; ++i) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, + int* count) { + uv_interface_address_t* address; + int sockfd, size = 1; + struct ifconf ifc; + struct ifreq *ifr, *p, flg; + + *count = 0; + + if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) { + return -errno; + } + + if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) { + uv__close(sockfd); + return -errno; + } + + ifc.ifc_req = (struct ifreq*)uv__malloc(size); + ifc.ifc_len = size; + if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) { + uv__close(sockfd); + return -errno; + } + +#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p)) + + /* Count all up and running ipv4/ipv6 addresses */ + ifr = ifc.ifc_req; + while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) { + p = ifr; + ifr = (struct ifreq*) + ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr)); + + if (!(p->ifr_addr.sa_family == AF_INET6 || + p->ifr_addr.sa_family == AF_INET)) + continue; + + memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name)); + if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) { + uv__close(sockfd); + return -errno; + } + + if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING)) + continue; + + (*count)++; + } + + /* Alloc the return interface structs */ + *addresses = (uv_interface_address_t*) + uv__malloc(*count * sizeof(uv_interface_address_t)); + if (!(*addresses)) { + uv__close(sockfd); + return -ENOMEM; + } + address = 
*addresses; + + ifr = ifc.ifc_req; + while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) { + p = ifr; + ifr = (struct ifreq*) + ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr)); + + if (!(p->ifr_addr.sa_family == AF_INET6 || + p->ifr_addr.sa_family == AF_INET)) + continue; + + memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name)); + if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) { + uv__close(sockfd); + return -ENOSYS; + } + + if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING)) + continue; + + /* All conditions above must match count loop */ + + address->name = uv__strdup(p->ifr_name); + + if (p->ifr_addr.sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr); + } + + /* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */ + + address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0; + + address++; + } + +#undef ADDR_SIZE + + uv__close(sockfd); + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; ++i) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct pollfd* events; + uintptr_t i; + uintptr_t nfds; + struct poll_ctl pc; + + assert(loop->watchers != NULL); + + events = (struct pollfd*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + + if (events != NULL) + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].fd == fd) + events[i].fd = -1; + + /* Remove the file descriptor from the poll set */ + pc.events = 0; + pc.cmd = PS_DELETE; + pc.fd = fd; + if(loop->backend_fd >= 0) + pollset_ctl(loop->backend_fd, &pc, 1); +} diff --git a/Utilities/cmlibuv/src/unix/android-ifaddrs.c b/Utilities/cmlibuv/src/unix/android-ifaddrs.c new file mode 100644 index 0000000..30f681b --- /dev/null +++ b/Utilities/cmlibuv/src/unix/android-ifaddrs.c @@ -0,0 +1,703 @@ +/* +Copyright (c) 2013, Kenneth MacKay +Copyright (c) 2014, Emergya (Cloud4all, FP7/2007-2013 grant agreement #289016) +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
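`uv_interface_addresses()` above enumerates interfaces with `SIOCGIFCONF` and friends, making two identical passes: one to count matches and one to fill the result array. A usage sketch of the public API (`list_interfaces` is hypothetical):

```c
/* Sketch: the public API whose AIX implementation (SIOCGIFCONF and
 * friends) appears above. */
#include <stdio.h>
#include <netinet/in.h>
#include <uv.h>

void list_interfaces(void) {
  uv_interface_address_t* addrs;
  int count, i;
  char ip[64];

  if (uv_interface_addresses(&addrs, &count) != 0)
    return;

  for (i = 0; i < count; i++) {
    if (addrs[i].address.address4.sin_family == AF_INET)
      uv_ip4_name(&addrs[i].address.address4, ip, sizeof(ip));
    else
      uv_ip6_name(&addrs[i].address.address6, ip, sizeof(ip));
    printf("%s%s %s\n", addrs[i].name,
           addrs[i].is_internal ? " (internal)" : "", ip);
  }

  uv_free_interface_addresses(addrs, count);
}
```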
+*/ + +#include "android-ifaddrs.h" +#include "uv-common.h" + +#include <string.h> +#include <stdlib.h> +#include <errno.h> +#include <unistd.h> +#include <sys/socket.h> +#include <net/if_arp.h> +#include <netinet/in.h> +#include <linux/netlink.h> +#include <linux/rtnetlink.h> + +typedef struct NetlinkList +{ + struct NetlinkList *m_next; + struct nlmsghdr *m_data; + unsigned int m_size; +} NetlinkList; + +static int netlink_socket(void) +{ + struct sockaddr_nl l_addr; + + int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if(l_socket < 0) + { + return -1; + } + + memset(&l_addr, 0, sizeof(l_addr)); + l_addr.nl_family = AF_NETLINK; + if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0) + { + close(l_socket); + return -1; + } + + return l_socket; +} + +static int netlink_send(int p_socket, int p_request) +{ + char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))]; + + struct nlmsghdr *l_hdr; + struct rtgenmsg *l_msg; + struct sockaddr_nl l_addr; + + memset(l_buffer, 0, sizeof(l_buffer)); + + l_hdr = (struct nlmsghdr *)l_buffer; + l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr); + + l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg)); + l_hdr->nlmsg_type = p_request; + l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST; + l_hdr->nlmsg_pid = 0; + l_hdr->nlmsg_seq = p_socket; + l_msg->rtgen_family = AF_UNSPEC; + + memset(&l_addr, 0, sizeof(l_addr)); + l_addr.nl_family = AF_NETLINK; + return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr))); +} + +static int netlink_recv(int p_socket, void *p_buffer, size_t p_len) +{ + struct sockaddr_nl l_addr; + struct msghdr l_msg; + + struct iovec l_iov; + l_iov.iov_base = p_buffer; + l_iov.iov_len = p_len; + + for(;;) + { + int l_result; + l_msg.msg_name = (void *)&l_addr; + l_msg.msg_namelen = sizeof(l_addr); + l_msg.msg_iov = &l_iov; + l_msg.msg_iovlen = 1; + l_msg.msg_control = NULL; + l_msg.msg_controllen = 0; + l_msg.msg_flags = 0; + l_result = recvmsg(p_socket, &l_msg, 0); + + if(l_result < 0) + { + if(errno == EINTR) + { + continue; + } + return -2; + } + + /* Buffer was too small */ + if(l_msg.msg_flags & MSG_TRUNC) + { + return -1; + } + return l_result; + } +} + +static struct nlmsghdr *getNetlinkResponse(int p_socket, int *p_size, int *p_done) +{ + size_t l_size = 4096; + void *l_buffer = NULL; + + for(;;) + { + int l_read; + + uv__free(l_buffer); + l_buffer = uv__malloc(l_size); + if (l_buffer == NULL) + { + return NULL; + } + + l_read = netlink_recv(p_socket, l_buffer, l_size); + *p_size = l_read; + if(l_read == -2) + { + uv__free(l_buffer); + return NULL; + } + if(l_read >= 0) + { + pid_t l_pid = getpid(); + struct nlmsghdr *l_hdr; + for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read)) + { + if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket) + { + continue; + } + + if(l_hdr->nlmsg_type == NLMSG_DONE) + { + *p_done = 1; + break; + } + + if(l_hdr->nlmsg_type == NLMSG_ERROR) + { + uv__free(l_buffer); + return NULL; + } + } + return l_buffer; + } + + l_size *= 2; + } +} + +static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size) +{ + NetlinkList *l_item = uv__malloc(sizeof(NetlinkList)); + if (l_item == NULL) + { + return NULL; + } + + l_item->m_next = NULL; + l_item->m_data = p_data; + l_item->m_size = p_size; + return l_item; +} + +static void freeResultList(NetlinkList *p_list) +{ + NetlinkList *l_cur; + 
while(p_list) + { + l_cur = p_list; + p_list = p_list->m_next; + uv__free(l_cur->m_data); + uv__free(l_cur); + } +} + +static NetlinkList *getResultList(int p_socket, int p_request) +{ + int l_size; + int l_done; + NetlinkList *l_list; + NetlinkList *l_end; + + if(netlink_send(p_socket, p_request) < 0) + { + return NULL; + } + + l_list = NULL; + l_end = NULL; + + l_done = 0; + while(!l_done) + { + NetlinkList *l_item; + + struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, &l_size, &l_done); + /* Error */ + if(!l_hdr) + { + freeResultList(l_list); + return NULL; + } + + l_item = newListItem(l_hdr, l_size); + if (!l_item) + { + freeResultList(l_list); + return NULL; + } + if(!l_list) + { + l_list = l_item; + } + else + { + l_end->m_next = l_item; + } + l_end = l_item; + } + return l_list; +} + +static size_t maxSize(size_t a, size_t b) +{ + return (a > b ? a : b); +} + +static size_t calcAddrLen(sa_family_t p_family, int p_dataSize) +{ + switch(p_family) + { + case AF_INET: + return sizeof(struct sockaddr_in); + case AF_INET6: + return sizeof(struct sockaddr_in6); + case AF_PACKET: + return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize); + default: + return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize); + } +} + +static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size) +{ + switch(p_family) + { + case AF_INET: + memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size); + break; + case AF_INET6: + memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size); + break; + case AF_PACKET: + memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size); + ((struct sockaddr_ll*)p_dest)->sll_halen = p_size; + break; + default: + memcpy(p_dest->sa_data, p_data, p_size); + break; + } + p_dest->sa_family = p_family; +} + +static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry) +{ + if(!*p_resultList) + { + *p_resultList = p_entry; + } + else + { + struct ifaddrs *l_cur = *p_resultList; + while(l_cur->ifa_next) + { + l_cur = l_cur->ifa_next; + } + l_cur->ifa_next = p_entry; + } +} + +static int interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList) +{ + struct ifaddrs *l_entry; + + char *l_index; + char *l_name; + char *l_addr; + char *l_data; + + struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr); + + size_t l_nameSize = 0; + size_t l_addrSize = 0; + size_t l_dataSize = 0; + + size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg)); + struct rtattr *l_rta; + for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize)) + { + size_t l_rtaDataSize = RTA_PAYLOAD(l_rta); + switch(l_rta->rta_type) + { + case IFLA_ADDRESS: + case IFLA_BROADCAST: + l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize)); + break; + case IFLA_IFNAME: + l_nameSize += NLMSG_ALIGN(l_rtaSize + 1); + break; + case IFLA_STATS: + l_dataSize += NLMSG_ALIGN(l_rtaSize); + break; + default: + break; + } + } + + l_entry = uv__malloc(sizeof(struct ifaddrs) + sizeof(int) + l_nameSize + l_addrSize + l_dataSize); + if (l_entry == NULL) + { + return -1; + } + memset(l_entry, 0, sizeof(struct ifaddrs)); + l_entry->ifa_name = ""; + + l_index = ((char *)l_entry) + sizeof(struct ifaddrs); + l_name = l_index + sizeof(int); + l_addr = l_name + l_nameSize; + l_data = l_addr + l_addrSize; + + /* Save the interface index so we can look it up when handling the + * addresses. 
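The shim requests interface and address dumps over a `NETLINK_ROUTE` socket, using the socket fd as the sequence number so replies can be matched in `getNetlinkResponse()`. A hedged sketch of that dump request (Linux-only; `send_dump_request` is hypothetical and takes the sequence number as a parameter for clarity):

```c
/* Sketch: the netlink dump request sent by netlink_send() earlier in
 * this file. Linux-only. */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int send_dump_request(int sock, int type, unsigned int seq) {
  struct {
    struct nlmsghdr hdr;
    struct rtgenmsg gen;
  } req;
  struct sockaddr_nl kernel;

  memset(&req, 0, sizeof(req));
  req.hdr.nlmsg_len = NLMSG_LENGTH(sizeof(req.gen));
  req.hdr.nlmsg_type = type;           /* RTM_GETLINK or RTM_GETADDR */
  req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_ROOT | NLM_F_MATCH;
  req.hdr.nlmsg_seq = seq;             /* the shim uses the socket fd */
  req.gen.rtgen_family = AF_UNSPEC;    /* dump every address family */

  memset(&kernel, 0, sizeof(kernel));
  kernel.nl_family = AF_NETLINK;
  return sendto(sock, &req, req.hdr.nlmsg_len, 0,
                (struct sockaddr*) &kernel, sizeof(kernel));
}
```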
+ */ + memcpy(l_index, &l_info->ifi_index, sizeof(int)); + + l_entry->ifa_flags = l_info->ifi_flags; + + l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg)); + for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize)) + { + void *l_rtaData = RTA_DATA(l_rta); + size_t l_rtaDataSize = RTA_PAYLOAD(l_rta); + switch(l_rta->rta_type) + { + case IFLA_ADDRESS: + case IFLA_BROADCAST: + { + size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize); + makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize); + ((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index; + ((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type; + if(l_rta->rta_type == IFLA_ADDRESS) + { + l_entry->ifa_addr = (struct sockaddr *)l_addr; + } + else + { + l_entry->ifa_broadaddr = (struct sockaddr *)l_addr; + } + l_addr += NLMSG_ALIGN(l_addrLen); + break; + } + case IFLA_IFNAME: + strncpy(l_name, l_rtaData, l_rtaDataSize); + l_name[l_rtaDataSize] = '\0'; + l_entry->ifa_name = l_name; + break; + case IFLA_STATS: + memcpy(l_data, l_rtaData, l_rtaDataSize); + l_entry->ifa_data = l_data; + break; + default: + break; + } + } + + addToEnd(p_resultList, l_entry); + return 0; +} + +static struct ifaddrs *findInterface(int p_index, struct ifaddrs **p_links, int p_numLinks) +{ + int l_num = 0; + struct ifaddrs *l_cur = *p_links; + while(l_cur && l_num < p_numLinks) + { + char *l_indexPtr = ((char *)l_cur) + sizeof(struct ifaddrs); + int l_index; + memcpy(&l_index, l_indexPtr, sizeof(int)); + if(l_index == p_index) + { + return l_cur; + } + + l_cur = l_cur->ifa_next; + ++l_num; + } + return NULL; +} + +static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList, int p_numLinks) +{ + struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr); + struct ifaddrs *l_interface = findInterface(l_info->ifa_index, p_resultList, p_numLinks); + + size_t l_nameSize = 0; + size_t l_addrSize = 0; + + int l_addedNetmask = 0; + + size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg)); + struct rtattr *l_rta; + struct ifaddrs *l_entry; + + char *l_name; + char *l_addr; + + for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize)) + { + size_t l_rtaDataSize = RTA_PAYLOAD(l_rta); + if(l_info->ifa_family == AF_PACKET) + { + continue; + } + + switch(l_rta->rta_type) + { + case IFA_ADDRESS: + case IFA_LOCAL: + if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask) + { + /* Make room for netmask */ + l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize)); + l_addedNetmask = 1; + } + case IFA_BROADCAST: + l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize)); + break; + case IFA_LABEL: + l_nameSize += NLMSG_ALIGN(l_rtaSize + 1); + break; + default: + break; + } + } + + l_entry = uv__malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize); + if (l_entry == NULL) + { + return -1; + } + memset(l_entry, 0, sizeof(struct ifaddrs)); + l_entry->ifa_name = (l_interface ? 
l_interface->ifa_name : ""); + + l_name = ((char *)l_entry) + sizeof(struct ifaddrs); + l_addr = l_name + l_nameSize; + + l_entry->ifa_flags = l_info->ifa_flags; + if(l_interface) + { + l_entry->ifa_flags |= l_interface->ifa_flags; + } + + l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg)); + for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize)) + { + void *l_rtaData = RTA_DATA(l_rta); + size_t l_rtaDataSize = RTA_PAYLOAD(l_rta); + switch(l_rta->rta_type) + { + case IFA_ADDRESS: + case IFA_BROADCAST: + case IFA_LOCAL: + { + size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize); + makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize); + if(l_info->ifa_family == AF_INET6) + { + if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData)) + { + ((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index; + } + } + + /* Apparently in a point-to-point network IFA_ADDRESS contains + * the dest address and IFA_LOCAL contains the local address + */ + if(l_rta->rta_type == IFA_ADDRESS) + { + if(l_entry->ifa_addr) + { + l_entry->ifa_dstaddr = (struct sockaddr *)l_addr; + } + else + { + l_entry->ifa_addr = (struct sockaddr *)l_addr; + } + } + else if(l_rta->rta_type == IFA_LOCAL) + { + if(l_entry->ifa_addr) + { + l_entry->ifa_dstaddr = l_entry->ifa_addr; + } + l_entry->ifa_addr = (struct sockaddr *)l_addr; + } + else + { + l_entry->ifa_broadaddr = (struct sockaddr *)l_addr; + } + l_addr += NLMSG_ALIGN(l_addrLen); + break; + } + case IFA_LABEL: + strncpy(l_name, l_rtaData, l_rtaDataSize); + l_name[l_rtaDataSize] = '\0'; + l_entry->ifa_name = l_name; + break; + default: + break; + } + } + + if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6)) + { + unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128); + unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? 
l_maxPrefix : l_info->ifa_prefixlen); + char l_mask[16] = {0}; + unsigned i; + for(i=0; i<(l_prefix/8); ++i) + { + l_mask[i] = 0xff; + } + if(l_prefix % 8) + { + l_mask[i] = 0xff << (8 - (l_prefix % 8)); + } + + makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8); + l_entry->ifa_netmask = (struct sockaddr *)l_addr; + } + + addToEnd(p_resultList, l_entry); + return 0; +} + +static int interpretLinks(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList) +{ + + int l_numLinks = 0; + pid_t l_pid = getpid(); + for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next) + { + unsigned int l_nlsize = p_netlinkList->m_size; + struct nlmsghdr *l_hdr; + for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize)) + { + if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket) + { + continue; + } + + if(l_hdr->nlmsg_type == NLMSG_DONE) + { + break; + } + + if(l_hdr->nlmsg_type == RTM_NEWLINK) + { + if(interpretLink(l_hdr, p_resultList) == -1) + { + return -1; + } + ++l_numLinks; + } + } + } + return l_numLinks; +} + +static int interpretAddrs(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks) +{ + pid_t l_pid = getpid(); + for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next) + { + unsigned int l_nlsize = p_netlinkList->m_size; + struct nlmsghdr *l_hdr; + for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize)) + { + if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket) + { + continue; + } + + if(l_hdr->nlmsg_type == NLMSG_DONE) + { + break; + } + + if(l_hdr->nlmsg_type == RTM_NEWADDR) + { + if (interpretAddr(l_hdr, p_resultList, p_numLinks) == -1) + { + return -1; + } + } + } + } + return 0; +} + +int getifaddrs(struct ifaddrs **ifap) +{ + int l_socket; + int l_result; + int l_numLinks; + NetlinkList *l_linkResults; + NetlinkList *l_addrResults; + + if(!ifap) + { + return -1; + } + *ifap = NULL; + + l_socket = netlink_socket(); + if(l_socket < 0) + { + return -1; + } + + l_linkResults = getResultList(l_socket, RTM_GETLINK); + if(!l_linkResults) + { + close(l_socket); + return -1; + } + + l_addrResults = getResultList(l_socket, RTM_GETADDR); + if(!l_addrResults) + { + close(l_socket); + freeResultList(l_linkResults); + return -1; + } + + l_result = 0; + l_numLinks = interpretLinks(l_socket, l_linkResults, ifap); + if(l_numLinks == -1 || interpretAddrs(l_socket, l_addrResults, ifap, l_numLinks) == -1) + { + l_result = -1; + } + + freeResultList(l_linkResults); + freeResultList(l_addrResults); + close(l_socket); + return l_result; +} + +void freeifaddrs(struct ifaddrs *ifa) +{ + struct ifaddrs *l_cur; + while(ifa) + { + l_cur = ifa; + ifa = ifa->ifa_next; + uv__free(l_cur); + } +} diff --git a/Utilities/cmlibuv/src/unix/async.c b/Utilities/cmlibuv/src/unix/async.c new file mode 100644 index 0000000..393cdeb --- /dev/null +++ b/Utilities/cmlibuv/src/unix/async.c @@ -0,0 +1,290 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
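The result is a drop-in replacement for the BSD `getifaddrs(3)` interface that Android's Bionic lacked at the time. A usage sketch (`dump_interfaces` is hypothetical; on platforms with a native `getifaddrs()` the include would be `<ifaddrs.h>` instead):

```c
/* Sketch: using the BSD-style interface the shim above provides. */
#include <stdio.h>
#include "android-ifaddrs.h"

void dump_interfaces(void) {
  struct ifaddrs *list, *cur;

  if (getifaddrs(&list) != 0)
    return;

  for (cur = list; cur != NULL; cur = cur->ifa_next)
    if (cur->ifa_addr != NULL)
      printf("%s family=%d flags=0x%x\n",
             cur->ifa_name, cur->ifa_addr->sa_family, cur->ifa_flags);

  freeifaddrs(list);
}
```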
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* This file contains both the uv__async internal infrastructure and the + * user-facing uv_async_t functions. + */ + +#include "uv.h" +#include "internal.h" +#include "atomic-ops.h" + +#include <errno.h> +#include <stdio.h> /* snprintf() */ +#include <assert.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +static void uv__async_event(uv_loop_t* loop, + struct uv__async* w, + unsigned int nevents); +static int uv__async_eventfd(void); + + +int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { + int err; + + err = uv__async_start(loop, &loop->async_watcher, uv__async_event); + if (err) + return err; + + uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC); + handle->async_cb = async_cb; + handle->pending = 0; + + QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue); + uv__handle_start(handle); + + return 0; +} + + +int uv_async_send(uv_async_t* handle) { + /* Do a cheap read first. */ + if (ACCESS_ONCE(int, handle->pending) != 0) + return 0; + + if (cmpxchgi(&handle->pending, 0, 1) == 0) + uv__async_send(&handle->loop->async_watcher); + + return 0; +} + + +void uv__async_close(uv_async_t* handle) { + QUEUE_REMOVE(&handle->queue); + uv__handle_stop(handle); +} + + +static void uv__async_event(uv_loop_t* loop, + struct uv__async* w, + unsigned int nevents) { + QUEUE queue; + QUEUE* q; + uv_async_t* h; + + QUEUE_MOVE(&loop->async_handles, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + h = QUEUE_DATA(q, uv_async_t, queue); + + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&loop->async_handles, q); + + if (cmpxchgi(&h->pending, 1, 0) == 0) + continue; + + if (h->async_cb == NULL) + continue; + h->async_cb(h); + } +} + + +static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + struct uv__async* wa; + char buf[1024]; + unsigned n; + ssize_t r; + + n = 0; + for (;;) { + r = read(w->fd, buf, sizeof(buf)); + + if (r > 0) + n += r; + + if (r == sizeof(buf)) + continue; + + if (r != -1) + break; + + if (errno == EAGAIN || errno == EWOULDBLOCK) + break; + + if (errno == EINTR) + continue; + + abort(); + } + + wa = container_of(w, struct uv__async, io_watcher); + +#if defined(__linux__) + if (wa->wfd == -1) { + uint64_t val; + assert(n == sizeof(val)); + memcpy(&val, buf, sizeof(val)); /* Avoid alignment issues. 
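`uv_async_send()` above is the one libuv call documented as safe from any thread; the `cmpxchgi()` on `pending` coalesces bursts of sends into a single callback. A usage sketch of waking the loop from a worker thread (illustrative only):

```c
/* Sketch: waking the loop from a worker thread via uv_async_t. */
#include <stdio.h>
#include <uv.h>

static void on_wakeup(uv_async_t* handle) {
  printf("woken by another thread\n");
  uv_close((uv_handle_t*) handle, NULL);
}

static void worker(void* arg) {
  uv_async_send((uv_async_t*) arg);  /* safe from any thread */
}

int main(void) {
  uv_thread_t thread;
  uv_async_t async;

  uv_async_init(uv_default_loop(), &async, on_wakeup);
  uv_thread_create(&thread, worker, &async);

  uv_run(uv_default_loop(), UV_RUN_DEFAULT);
  uv_thread_join(&thread);
  return 0;
}
```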
*/ + wa->cb(loop, wa, val); + return; + } +#endif + + wa->cb(loop, wa, n); +} + + +void uv__async_send(struct uv__async* wa) { + const void* buf; + ssize_t len; + int fd; + int r; + + buf = ""; + len = 1; + fd = wa->wfd; + +#if defined(__linux__) + if (fd == -1) { + static const uint64_t val = 1; + buf = &val; + len = sizeof(val); + fd = wa->io_watcher.fd; /* eventfd */ + } +#endif + + do + r = write(fd, buf, len); + while (r == -1 && errno == EINTR); + + if (r == len) + return; + + if (r == -1) + if (errno == EAGAIN || errno == EWOULDBLOCK) + return; + + abort(); +} + + +void uv__async_init(struct uv__async* wa) { + wa->io_watcher.fd = -1; + wa->wfd = -1; +} + + +int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb) { + int pipefd[2]; + int err; + + if (wa->io_watcher.fd != -1) + return 0; + + err = uv__async_eventfd(); + if (err >= 0) { + pipefd[0] = err; + pipefd[1] = -1; + } + else if (err == -ENOSYS) { + err = uv__make_pipe(pipefd, UV__F_NONBLOCK); +#if defined(__linux__) + /* Save a file descriptor by opening one of the pipe descriptors as + * read/write through the procfs. That file descriptor can then + * function as both ends of the pipe. + */ + if (err == 0) { + char buf[32]; + int fd; + + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", pipefd[0]); + fd = uv__open_cloexec(buf, O_RDWR); + if (fd >= 0) { + uv__close(pipefd[0]); + uv__close(pipefd[1]); + pipefd[0] = fd; + pipefd[1] = fd; + } + } +#endif + } + + if (err < 0) + return err; + + uv__io_init(&wa->io_watcher, uv__async_io, pipefd[0]); + uv__io_start(loop, &wa->io_watcher, POLLIN); + wa->wfd = pipefd[1]; + wa->cb = cb; + + return 0; +} + + +void uv__async_stop(uv_loop_t* loop, struct uv__async* wa) { + if (wa->io_watcher.fd == -1) + return; + + if (wa->wfd != -1) { + if (wa->wfd != wa->io_watcher.fd) + uv__close(wa->wfd); + wa->wfd = -1; + } + + uv__io_stop(loop, &wa->io_watcher, POLLIN); + uv__close(wa->io_watcher.fd); + wa->io_watcher.fd = -1; +} + + +static int uv__async_eventfd() { +#if defined(__linux__) + static int no_eventfd2; + static int no_eventfd; + int fd; + + if (no_eventfd2) + goto skip_eventfd2; + + fd = uv__eventfd2(0, UV__EFD_CLOEXEC | UV__EFD_NONBLOCK); + if (fd != -1) + return fd; + + if (errno != ENOSYS) + return -errno; + + no_eventfd2 = 1; + +skip_eventfd2: + + if (no_eventfd) + goto skip_eventfd; + + fd = uv__eventfd(0); + if (fd != -1) { + uv__cloexec(fd, 1); + uv__nonblock(fd, 1); + return fd; + } + + if (errno != ENOSYS) + return -errno; + + no_eventfd = 1; + +skip_eventfd: + +#endif + + return -ENOSYS; +} diff --git a/Utilities/cmlibuv/src/unix/atomic-ops.h b/Utilities/cmlibuv/src/unix/atomic-ops.h new file mode 100644 index 0000000..815e355 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/atomic-ops.h @@ -0,0 +1,88 @@ +/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef UV_ATOMIC_OPS_H_ +#define UV_ATOMIC_OPS_H_ + +#include "internal.h" /* UV_UNUSED */ + +#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) +#include <atomic.h> +#define __sync_val_compare_and_swap(p, o, n) atomic_cas_ptr(p, o, n) +#endif + +UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)); +UV_UNUSED(static long cmpxchgl(long* ptr, long oldval, long newval)); +UV_UNUSED(static void cpu_relax(void)); + +/* Prefer hand-rolled assembly over the gcc builtins because the latter also + * issue full memory barriers. + */ +UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) { +#if defined(__i386__) || defined(__x86_64__) + int out; + __asm__ __volatile__ ("lock; cmpxchg %2, %1;" + : "=a" (out), "+m" (*(volatile int*) ptr) + : "r" (newval), "0" (oldval) + : "memory"); + return out; +#elif defined(_AIX) && defined(__xlC__) + const int out = (*(volatile int*) ptr); + __compare_and_swap(ptr, &oldval, newval); + return out; +#elif defined(__MVS__) + return __plo_CS(ptr, (unsigned int*) ptr, + oldval, (unsigned int*) &newval); +#else + return __sync_val_compare_and_swap(ptr, oldval, newval); +#endif +} + +UV_UNUSED(static long cmpxchgl(long* ptr, long oldval, long newval)) { +#if defined(__i386__) || defined(__x86_64__) + long out; + __asm__ __volatile__ ("lock; cmpxchg %2, %1;" + : "=a" (out), "+m" (*(volatile long*) ptr) + : "r" (newval), "0" (oldval) + : "memory"); + return out; +#elif defined(_AIX) && defined(__xlC__) + const long out = (*(volatile int*) ptr); +# if defined(__64BIT__) + __compare_and_swaplp(ptr, &oldval, newval); +# else + __compare_and_swap(ptr, &oldval, newval); +# endif /* if defined(__64BIT__) */ + return out; +#elif defined (__MVS__) +# ifdef _LP64 + return __plo_CSGR(ptr, (unsigned long long*) ptr, + oldval, (unsigned long long*) &newval); +# else + return __plo_CS(ptr, (unsigned int*) ptr, + oldval, (unsigned int*) &newval); +# endif +#else + return __sync_val_compare_and_swap(ptr, oldval, newval); +#endif +} + +UV_UNUSED(static void cpu_relax(void)) { +#if defined(__i386__) || defined(__x86_64__) + __asm__ __volatile__ ("rep; nop"); /* a.k.a. PAUSE */ +#endif +} + +#endif /* UV_ATOMIC_OPS_H_ */ diff --git a/Utilities/cmlibuv/src/unix/core.c b/Utilities/cmlibuv/src/unix/core.c new file mode 100644 index 0000000..d88fc1d --- /dev/null +++ b/Utilities/cmlibuv/src/unix/core.c @@ -0,0 +1,1238 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
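atomic-ops.h above hand-rolls `cmpxchgi()`/`cmpxchgl()` (a plain `lock; cmpxchg` on x86, avoiding the full barrier the GCC builtins imply) plus a `cpu_relax()` pause hint. As an illustration of what such primitives enable, here is a hedged spin-lock sketch built on the GCC builtin the header falls back to; libuv itself uses `cmpxchgi()` only for the async coalescing above, not for locking:

```c
/* Sketch: a spin lock built from compare-and-swap plus a pause hint,
 * the same primitives atomic-ops.h provides. Illustration only. */
static void spin_lock(int* lock) {
  while (__sync_val_compare_and_swap(lock, 0, 1) != 0) {
    /* cpu_relax() -- "rep; nop" on x86 -- would go here */
  }
}

static void spin_unlock(int* lock) {
  __sync_synchronize();  /* full barrier before releasing */
  *lock = 0;
}
```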
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <stddef.h> /* NULL */ +#include <stdio.h> /* printf */ +#include <stdlib.h> +#include <string.h> /* strerror */ +#include <errno.h> +#include <assert.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <sys/un.h> +#include <netinet/in.h> +#include <arpa/inet.h> +#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */ +#include <sys/uio.h> /* writev */ +#include <sys/resource.h> /* getrusage */ +#include <pwd.h> + +#ifdef __sun +# include <sys/filio.h> +# include <sys/types.h> +# include <sys/wait.h> +#endif + +#ifdef __APPLE__ +# include <mach-o/dyld.h> /* _NSGetExecutablePath */ +# include <sys/filio.h> +# if defined(O_CLOEXEC) +# define UV__O_CLOEXEC O_CLOEXEC +# endif +#endif + +#if defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) +# include <sys/sysctl.h> +# include <sys/filio.h> +# include <sys/wait.h> +# define UV__O_CLOEXEC O_CLOEXEC +# if defined(__FreeBSD__) && __FreeBSD__ >= 10 +# define uv__accept4 accept4 +# define UV__SOCK_NONBLOCK SOCK_NONBLOCK +# define UV__SOCK_CLOEXEC SOCK_CLOEXEC +# endif +# if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC) +# define F_DUP2FD_CLOEXEC _F_DUP2FD_CLOEXEC +# endif +#endif + +#if defined(__ANDROID_API__) && __ANDROID_API__ < 21 +# include <dlfcn.h> /* for dlsym */ +#endif + +#if defined(__MVS__) +#include <sys/ioctl.h> +#endif + +static int uv__run_pending(uv_loop_t* loop); + +/* Verify that uv_buf_t is ABI-compatible with struct iovec. 
*/ +STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec)); +STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) == + sizeof(((struct iovec*) 0)->iov_base)); +STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) == + sizeof(((struct iovec*) 0)->iov_len)); +STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base)); +STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len)); + + +uint64_t uv_hrtime(void) { + return uv__hrtime(UV_CLOCK_PRECISE); +} + + +void uv_close(uv_handle_t* handle, uv_close_cb close_cb) { + assert(!(handle->flags & (UV_CLOSING | UV_CLOSED))); + + handle->flags |= UV_CLOSING; + handle->close_cb = close_cb; + + switch (handle->type) { + case UV_NAMED_PIPE: + uv__pipe_close((uv_pipe_t*)handle); + break; + + case UV_TTY: + uv__stream_close((uv_stream_t*)handle); + break; + + case UV_TCP: + uv__tcp_close((uv_tcp_t*)handle); + break; + + case UV_UDP: + uv__udp_close((uv_udp_t*)handle); + break; + + case UV_PREPARE: + uv__prepare_close((uv_prepare_t*)handle); + break; + + case UV_CHECK: + uv__check_close((uv_check_t*)handle); + break; + + case UV_IDLE: + uv__idle_close((uv_idle_t*)handle); + break; + + case UV_ASYNC: + uv__async_close((uv_async_t*)handle); + break; + + case UV_TIMER: + uv__timer_close((uv_timer_t*)handle); + break; + + case UV_PROCESS: + uv__process_close((uv_process_t*)handle); + break; + + case UV_FS_EVENT: + uv__fs_event_close((uv_fs_event_t*)handle); + break; + + case UV_POLL: + uv__poll_close((uv_poll_t*)handle); + break; + + case UV_FS_POLL: + uv__fs_poll_close((uv_fs_poll_t*)handle); + break; + + case UV_SIGNAL: + uv__signal_close((uv_signal_t*) handle); + /* Signal handles may not be closed immediately. The signal code will */ + /* itself close uv__make_close_pending whenever appropriate. */ + return; + + default: + assert(0); + } + + uv__make_close_pending(handle); +} + +int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) { + int r; + int fd; + socklen_t len; + + if (handle == NULL || value == NULL) + return -EINVAL; + + if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE) + fd = uv__stream_fd((uv_stream_t*) handle); + else if (handle->type == UV_UDP) + fd = ((uv_udp_t *) handle)->io_watcher.fd; + else + return -ENOTSUP; + + len = sizeof(*value); + + if (*value == 0) + r = getsockopt(fd, SOL_SOCKET, optname, value, &len); + else + r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len); + + if (r < 0) + return -errno; + + return 0; +} + +void uv__make_close_pending(uv_handle_t* handle) { + assert(handle->flags & UV_CLOSING); + assert(!(handle->flags & UV_CLOSED)); + handle->next_closing = handle->loop->closing_handles; + handle->loop->closing_handles = handle; +} + +int uv__getiovmax(void) { +#if defined(IOV_MAX) + return IOV_MAX; +#elif defined(_SC_IOV_MAX) + static int iovmax = -1; + if (iovmax == -1) { + iovmax = sysconf(_SC_IOV_MAX); + /* On some embedded devices (arm-linux-uclibc based ip camera), + * sysconf(_SC_IOV_MAX) can not get the correct value. The return + * value is -1 and the errno is EINPROGRESS. Degrade the value to 1. + */ + if (iovmax == -1) iovmax = 1; + } + return iovmax; +#else + return 1024; +#endif +} + + +static void uv__finish_close(uv_handle_t* handle) { + /* Note: while the handle is in the UV_CLOSING state now, it's still possible + * for it to be active in the sense that uv__is_active() returns true. + * A good example is when the user calls uv_shutdown(), immediately followed + * by uv_close(). 
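+ * (As an editorial illustration, with the req, handle and callbacks
+ * being the caller's own:
+ *
+ *     uv_shutdown(&shutdown_req, (uv_stream_t*) &tcp_handle, shutdown_cb);
+ *     uv_close((uv_handle_t*) &tcp_handle, close_cb);
+ *
+ * both calls are legal back to back.)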
The handle is considered active at this point because the + * completion of the shutdown req is still pending. + */ + assert(handle->flags & UV_CLOSING); + assert(!(handle->flags & UV_CLOSED)); + handle->flags |= UV_CLOSED; + + switch (handle->type) { + case UV_PREPARE: + case UV_CHECK: + case UV_IDLE: + case UV_ASYNC: + case UV_TIMER: + case UV_PROCESS: + case UV_FS_EVENT: + case UV_FS_POLL: + case UV_POLL: + case UV_SIGNAL: + break; + + case UV_NAMED_PIPE: + case UV_TCP: + case UV_TTY: + uv__stream_destroy((uv_stream_t*)handle); + break; + + case UV_UDP: + uv__udp_finish_close((uv_udp_t*)handle); + break; + + default: + assert(0); + break; + } + + uv__handle_unref(handle); + QUEUE_REMOVE(&handle->handle_queue); + + if (handle->close_cb) { + handle->close_cb(handle); + } +} + + +static void uv__run_closing_handles(uv_loop_t* loop) { + uv_handle_t* p; + uv_handle_t* q; + + p = loop->closing_handles; + loop->closing_handles = NULL; + + while (p) { + q = p->next_closing; + uv__finish_close(p); + p = q; + } +} + + +int uv_is_closing(const uv_handle_t* handle) { + return uv__is_closing(handle); +} + + +int uv_backend_fd(const uv_loop_t* loop) { + return loop->backend_fd; +} + + +int uv_backend_timeout(const uv_loop_t* loop) { + if (loop->stop_flag != 0) + return 0; + + if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop)) + return 0; + + if (!QUEUE_EMPTY(&loop->idle_handles)) + return 0; + + if (!QUEUE_EMPTY(&loop->pending_queue)) + return 0; + + if (loop->closing_handles) + return 0; + + return uv__next_timeout(loop); +} + + +static int uv__loop_alive(const uv_loop_t* loop) { + return uv__has_active_handles(loop) || + uv__has_active_reqs(loop) || + loop->closing_handles != NULL; +} + + +int uv_loop_alive(const uv_loop_t* loop) { + return uv__loop_alive(loop); +} + + +int uv_run(uv_loop_t* loop, uv_run_mode mode) { + int timeout; + int r; + int ran_pending; + + r = uv__loop_alive(loop); + if (!r) + uv__update_time(loop); + + while (r != 0 && loop->stop_flag == 0) { + uv__update_time(loop); + uv__run_timers(loop); + ran_pending = uv__run_pending(loop); + uv__run_idle(loop); + uv__run_prepare(loop); + + timeout = 0; + if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT) + timeout = uv_backend_timeout(loop); + + uv__io_poll(loop, timeout); + uv__run_check(loop); + uv__run_closing_handles(loop); + + if (mode == UV_RUN_ONCE) { + /* UV_RUN_ONCE implies forward progress: at least one callback must have + * been invoked when it returns. uv__io_poll() can return without doing + * I/O (meaning: no callbacks) when its timeout expires - which means we + * have pending timers that satisfy the forward progress constraint. + * + * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from + * the check. + */ + uv__update_time(loop); + uv__run_timers(loop); + } + + r = uv__loop_alive(loop); + if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT) + break; + } + + /* The if statement lets gcc compile it to a conditional store. Avoids + * dirtying a cache line. + */ + if (loop->stop_flag != 0) + loop->stop_flag = 0; + + return r; +} + + +void uv_update_time(uv_loop_t* loop) { + uv__update_time(loop); +} + + +int uv_is_active(const uv_handle_t* handle) { + return uv__is_active(handle); +} + + +/* Open a socket in non-blocking close-on-exec mode, atomically if possible. 
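+ *
+ * One socket(2) call with SOCK_NONBLOCK|SOCK_CLOEXEC or'd into the type
+ * where the kernel supports it; otherwise socket() plus fcntl()/ioctl()
+ * fixups. Callers get either a descriptor or a negated errno, e.g.
+ * (editorial sketch):
+ *
+ *   int fd = uv__socket(AF_INET, SOCK_STREAM, 0);
+ *   if (fd < 0)
+ *     return fd;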
*/ +int uv__socket(int domain, int type, int protocol) { + int sockfd; + int err; + +#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC) + sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol); + if (sockfd != -1) + return sockfd; + + if (errno != EINVAL) + return -errno; +#endif + + sockfd = socket(domain, type, protocol); + if (sockfd == -1) + return -errno; + + err = uv__nonblock(sockfd, 1); + if (err == 0) + err = uv__cloexec(sockfd, 1); + + if (err) { + uv__close(sockfd); + return err; + } + +#if defined(SO_NOSIGPIPE) + { + int on = 1; + setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)); + } +#endif + + return sockfd; +} + +/* get a file pointer to a file in read-only and close-on-exec mode */ +FILE* uv__open_file(const char* path) { + int fd; + FILE* fp; + + fd = uv__open_cloexec(path, O_RDONLY); + if (fd < 0) + return NULL; + + fp = fdopen(fd, "r"); + if (fp == NULL) + uv__close(fd); + + return fp; +} + + +int uv__accept(int sockfd) { + int peerfd; + int err; + + assert(sockfd >= 0); + + while (1) { +#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD__ >= 10) + static int no_accept4; + + if (no_accept4) + goto skip; + + peerfd = uv__accept4(sockfd, + NULL, + NULL, + UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC); + if (peerfd != -1) + return peerfd; + + if (errno == EINTR) + continue; + + if (errno != ENOSYS) + return -errno; + + no_accept4 = 1; +skip: +#endif + + peerfd = accept(sockfd, NULL, NULL); + if (peerfd == -1) { + if (errno == EINTR) + continue; + return -errno; + } + + err = uv__cloexec(peerfd, 1); + if (err == 0) + err = uv__nonblock(peerfd, 1); + + if (err) { + uv__close(peerfd); + return err; + } + + return peerfd; + } +} + + +int uv__close_nocheckstdio(int fd) { + int saved_errno; + int rc; + + assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */ + + saved_errno = errno; + rc = close(fd); + if (rc == -1) { + rc = -errno; + if (rc == -EINTR || rc == -EINPROGRESS) + rc = 0; /* The close is in progress, not an error. */ + errno = saved_errno; + } + + return rc; +} + + +int uv__close(int fd) { + assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */ + return uv__close_nocheckstdio(fd); +} + + +int uv__nonblock_ioctl(int fd, int set) { + int r; + + do + r = ioctl(fd, FIONBIO, &set); + while (r == -1 && errno == EINTR); + + if (r) + return -errno; + + return 0; +} + + +int uv__cloexec_ioctl(int fd, int set) { + int r; + + do + r = ioctl(fd, set ? FIOCLEX : FIONCLEX); + while (r == -1 && errno == EINTR); + + if (r) + return -errno; + + return 0; +} + + +int uv__nonblock_fcntl(int fd, int set) { + int flags; + int r; + + do + r = fcntl(fd, F_GETFL); + while (r == -1 && errno == EINTR); + + if (r == -1) + return -errno; + + /* Bail out now if already set/clear. */ + if (!!(r & O_NONBLOCK) == !!set) + return 0; + + if (set) + flags = r | O_NONBLOCK; + else + flags = r & ~O_NONBLOCK; + + do + r = fcntl(fd, F_SETFL, flags); + while (r == -1 && errno == EINTR); + + if (r) + return -errno; + + return 0; +} + + +int uv__cloexec_fcntl(int fd, int set) { + int flags; + int r; + + do + r = fcntl(fd, F_GETFD); + while (r == -1 && errno == EINTR); + + if (r == -1) + return -errno; + + /* Bail out now if already set/clear. 
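+ * As in uv__nonblock_fcntl() above: the double negation folds both sides
+ * to 0 or 1, so a multi-bit flag word compares correctly against the
+ * boolean argument, and an already-correct flag costs only the F_GETFD.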
*/ + if (!!(r & FD_CLOEXEC) == !!set) + return 0; + + if (set) + flags = r | FD_CLOEXEC; + else + flags = r & ~FD_CLOEXEC; + + do + r = fcntl(fd, F_SETFD, flags); + while (r == -1 && errno == EINTR); + + if (r) + return -errno; + + return 0; +} + + +/* This function is not execve-safe, there is a race window + * between the call to dup() and fcntl(FD_CLOEXEC). + */ +int uv__dup(int fd) { + int err; + + fd = dup(fd); + + if (fd == -1) + return -errno; + + err = uv__cloexec(fd, 1); + if (err) { + uv__close(fd); + return err; + } + + return fd; +} + + +ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) { + struct cmsghdr* cmsg; + ssize_t rc; + int* pfd; + int* end; +#if defined(__linux__) + static int no_msg_cmsg_cloexec; + if (no_msg_cmsg_cloexec == 0) { + rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */ + if (rc != -1) + return rc; + if (errno != EINVAL) + return -errno; + rc = recvmsg(fd, msg, flags); + if (rc == -1) + return -errno; + no_msg_cmsg_cloexec = 1; + } else { + rc = recvmsg(fd, msg, flags); + } +#else + rc = recvmsg(fd, msg, flags); +#endif + if (rc == -1) + return -errno; + if (msg->msg_controllen == 0) + return rc; + for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) + if (cmsg->cmsg_type == SCM_RIGHTS) + for (pfd = (int*) CMSG_DATA(cmsg), + end = (int*) ((char*) cmsg + cmsg->cmsg_len); + pfd < end; + pfd += 1) + uv__cloexec(*pfd, 1); + return rc; +} + + +int uv_cwd(char* buffer, size_t* size) { + if (buffer == NULL || size == NULL) + return -EINVAL; + + if (getcwd(buffer, *size) == NULL) + return -errno; + + *size = strlen(buffer); + if (*size > 1 && buffer[*size - 1] == '/') { + buffer[*size-1] = '\0'; + (*size)--; + } + + return 0; +} + + +int uv_chdir(const char* dir) { + if (chdir(dir)) + return -errno; + + return 0; +} + + +void uv_disable_stdio_inheritance(void) { + int fd; + + /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the + * first 16 file descriptors. After that, bail out after the first error. 
+ */ + for (fd = 0; ; fd++) + if (uv__cloexec(fd, 1) && fd > 15) + break; +} + + +int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) { + int fd_out; + + switch (handle->type) { + case UV_TCP: + case UV_NAMED_PIPE: + case UV_TTY: + fd_out = uv__stream_fd((uv_stream_t*) handle); + break; + + case UV_UDP: + fd_out = ((uv_udp_t *) handle)->io_watcher.fd; + break; + + case UV_POLL: + fd_out = ((uv_poll_t *) handle)->io_watcher.fd; + break; + + default: + return -EINVAL; + } + + if (uv__is_closing(handle) || fd_out == -1) + return -EBADF; + + *fd = fd_out; + return 0; +} + + +static int uv__run_pending(uv_loop_t* loop) { + QUEUE* q; + QUEUE pq; + uv__io_t* w; + + if (QUEUE_EMPTY(&loop->pending_queue)) + return 0; + + QUEUE_MOVE(&loop->pending_queue, &pq); + + while (!QUEUE_EMPTY(&pq)) { + q = QUEUE_HEAD(&pq); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + w = QUEUE_DATA(q, uv__io_t, pending_queue); + w->cb(loop, w, POLLOUT); + } + + return 1; +} + + +static unsigned int next_power_of_two(unsigned int val) { + val -= 1; + val |= val >> 1; + val |= val >> 2; + val |= val >> 4; + val |= val >> 8; + val |= val >> 16; + val += 1; + return val; +} + +static void maybe_resize(uv_loop_t* loop, unsigned int len) { + uv__io_t** watchers; + void* fake_watcher_list; + void* fake_watcher_count; + unsigned int nwatchers; + unsigned int i; + + if (len <= loop->nwatchers) + return; + + /* Preserve fake watcher list and count at the end of the watchers */ + if (loop->watchers != NULL) { + fake_watcher_list = loop->watchers[loop->nwatchers]; + fake_watcher_count = loop->watchers[loop->nwatchers + 1]; + } else { + fake_watcher_list = NULL; + fake_watcher_count = NULL; + } + + nwatchers = next_power_of_two(len + 2) - 2; + watchers = uv__realloc(loop->watchers, + (nwatchers + 2) * sizeof(loop->watchers[0])); + + if (watchers == NULL) + abort(); + for (i = loop->nwatchers; i < nwatchers; i++) + watchers[i] = NULL; + watchers[nwatchers] = fake_watcher_list; + watchers[nwatchers + 1] = fake_watcher_count; + + loop->watchers = watchers; + loop->nwatchers = nwatchers; +} + + +void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) { + assert(cb != NULL); + assert(fd >= -1); + QUEUE_INIT(&w->pending_queue); + QUEUE_INIT(&w->watcher_queue); + w->cb = cb; + w->fd = fd; + w->events = 0; + w->pevents = 0; + +#if defined(UV_HAVE_KQUEUE) + w->rcount = 0; + w->wcount = 0; +#endif /* defined(UV_HAVE_KQUEUE) */ +} + + +void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP))); + assert(0 != events); + assert(w->fd >= 0); + assert(w->fd < INT_MAX); + + w->pevents |= events; + maybe_resize(loop, w->fd + 1); + +#if !defined(__sun) + /* The event ports backend needs to rearm all file descriptors on each and + * every tick of the event loop but the other backends allow us to + * short-circuit here if the event mask is unchanged. 
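+ * (Event ports dissociate a descriptor each time an event is retrieved
+ * for it, hence the unconditional re-queue on SunOS.)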
+ */ + if (w->events == w->pevents) { + if (w->events == 0 && !QUEUE_EMPTY(&w->watcher_queue)) { + QUEUE_REMOVE(&w->watcher_queue); + QUEUE_INIT(&w->watcher_queue); + } + return; + } +#endif + + if (QUEUE_EMPTY(&w->watcher_queue)) + QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); + + if (loop->watchers[w->fd] == NULL) { + loop->watchers[w->fd] = w; + loop->nfds++; + } +} + + +void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP))); + assert(0 != events); + + if (w->fd == -1) + return; + + assert(w->fd >= 0); + + /* Happens when uv__io_stop() is called on a handle that was never started. */ + if ((unsigned) w->fd >= loop->nwatchers) + return; + + w->pevents &= ~events; + + if (w->pevents == 0) { + QUEUE_REMOVE(&w->watcher_queue); + QUEUE_INIT(&w->watcher_queue); + + if (loop->watchers[w->fd] != NULL) { + assert(loop->watchers[w->fd] == w); + assert(loop->nfds > 0); + loop->watchers[w->fd] = NULL; + loop->nfds--; + w->events = 0; + } + } + else if (QUEUE_EMPTY(&w->watcher_queue)) + QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); +} + + +void uv__io_close(uv_loop_t* loop, uv__io_t* w) { + uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP); + QUEUE_REMOVE(&w->pending_queue); + + /* Remove stale events for this file descriptor */ + uv__platform_invalidate_fd(loop, w->fd); +} + + +void uv__io_feed(uv_loop_t* loop, uv__io_t* w) { + if (QUEUE_EMPTY(&w->pending_queue)) + QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue); +} + + +int uv__io_active(const uv__io_t* w, unsigned int events) { + assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP))); + assert(0 != events); + return 0 != (w->pevents & events); +} + + +int uv_getrusage(uv_rusage_t* rusage) { + struct rusage usage; + + if (getrusage(RUSAGE_SELF, &usage)) + return -errno; + + rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec; + rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec; + + rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec; + rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec; + +#if !defined(__MVS__) + rusage->ru_maxrss = usage.ru_maxrss; + rusage->ru_ixrss = usage.ru_ixrss; + rusage->ru_idrss = usage.ru_idrss; + rusage->ru_isrss = usage.ru_isrss; + rusage->ru_minflt = usage.ru_minflt; + rusage->ru_majflt = usage.ru_majflt; + rusage->ru_nswap = usage.ru_nswap; + rusage->ru_inblock = usage.ru_inblock; + rusage->ru_oublock = usage.ru_oublock; + rusage->ru_msgsnd = usage.ru_msgsnd; + rusage->ru_msgrcv = usage.ru_msgrcv; + rusage->ru_nsignals = usage.ru_nsignals; + rusage->ru_nvcsw = usage.ru_nvcsw; + rusage->ru_nivcsw = usage.ru_nivcsw; +#endif + + return 0; +} + + +int uv__open_cloexec(const char* path, int flags) { + int err; + int fd; + +#if defined(UV__O_CLOEXEC) + static int no_cloexec; + + if (!no_cloexec) { + fd = open(path, flags | UV__O_CLOEXEC); + if (fd != -1) + return fd; + + if (errno != EINVAL) + return -errno; + + /* O_CLOEXEC not supported. */ + no_cloexec = 1; + } +#endif + + fd = open(path, flags); + if (fd == -1) + return -errno; + + err = uv__cloexec(fd, 1); + if (err) { + uv__close(fd); + return err; + } + + return fd; +} + + +int uv__dup2_cloexec(int oldfd, int newfd) { + int r; +#if defined(__FreeBSD__) && __FreeBSD__ >= 10 + r = dup3(oldfd, newfd, O_CLOEXEC); + if (r == -1) + return -errno; + return r; +#elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC) + r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd); + if (r != -1) + return r; + if (errno != EINVAL) + return -errno; + /* Fall through. 
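+ * (EINVAL from fcntl() means the F_DUP2FD_CLOEXEC command itself is not
+ * recognized, so control falls into the generic dup2() + uv__cloexec()
+ * path at the bottom of the function.)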
*/ +#elif defined(__linux__) + static int no_dup3; + if (!no_dup3) { + do + r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC); + while (r == -1 && errno == EBUSY); + if (r != -1) + return r; + if (errno != ENOSYS) + return -errno; + /* Fall through. */ + no_dup3 = 1; + } +#endif + { + int err; + do + r = dup2(oldfd, newfd); +#if defined(__linux__) + while (r == -1 && errno == EBUSY); +#else + while (0); /* Never retry. */ +#endif + + if (r == -1) + return -errno; + + err = uv__cloexec(newfd, 1); + if (err) { + uv__close(newfd); + return err; + } + + return r; + } +} + + +int uv_os_homedir(char* buffer, size_t* size) { + uv_passwd_t pwd; + char* buf; + size_t len; + int r; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + /* Check if the HOME environment variable is set first */ + buf = getenv("HOME"); + + if (buf != NULL) { + len = strlen(buf); + + if (len >= *size) { + *size = len + 1; + return -ENOBUFS; + } + + memcpy(buffer, buf, len + 1); + *size = len; + + return 0; + } + + /* HOME is not set, so call uv__getpwuid_r() */ + r = uv__getpwuid_r(&pwd); + + if (r != 0) { + return r; + } + + len = strlen(pwd.homedir); + + if (len >= *size) { + *size = len + 1; + uv_os_free_passwd(&pwd); + return -ENOBUFS; + } + + memcpy(buffer, pwd.homedir, len + 1); + *size = len; + uv_os_free_passwd(&pwd); + + return 0; +} + + +int uv_os_tmpdir(char* buffer, size_t* size) { + const char* buf; + size_t len; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + +#define CHECK_ENV_VAR(name) \ + do { \ + buf = getenv(name); \ + if (buf != NULL) \ + goto return_buffer; \ + } \ + while (0) + + /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */ + CHECK_ENV_VAR("TMPDIR"); + CHECK_ENV_VAR("TMP"); + CHECK_ENV_VAR("TEMP"); + CHECK_ENV_VAR("TEMPDIR"); + +#undef CHECK_ENV_VAR + + /* No temp environment variables defined */ + #if defined(__ANDROID__) + buf = "/data/local/tmp"; + #else + buf = "/tmp"; + #endif + +return_buffer: + len = strlen(buf); + + if (len >= *size) { + *size = len + 1; + return -ENOBUFS; + } + + /* The returned directory should not have a trailing slash. 
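+ * For example a TMPDIR of /var/tmp/ comes back as /var/tmp; a bare "/"
+ * is left alone, which is what the len > 1 guard below is for.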
*/ + if (len > 1 && buf[len - 1] == '/') { + len--; + } + + memcpy(buffer, buf, len + 1); + buffer[len] = '\0'; + *size = len; + + return 0; +} + + +int uv__getpwuid_r(uv_passwd_t* pwd) { + struct passwd pw; + struct passwd* result; + char* buf; + uid_t uid; + size_t bufsize; + size_t name_size; + size_t homedir_size; + size_t shell_size; + long initsize; + int r; +#if defined(__ANDROID_API__) && __ANDROID_API__ < 21 + int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**); + + getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r"); + if (getpwuid_r == NULL) + return -ENOSYS; +#endif + + if (pwd == NULL) + return -EINVAL; + + initsize = sysconf(_SC_GETPW_R_SIZE_MAX); + + if (initsize <= 0) + bufsize = 4096; + else + bufsize = (size_t) initsize; + + uid = geteuid(); + buf = NULL; + + for (;;) { + uv__free(buf); + buf = uv__malloc(bufsize); + + if (buf == NULL) + return -ENOMEM; + + r = getpwuid_r(uid, &pw, buf, bufsize, &result); + + if (r != ERANGE) + break; + + bufsize *= 2; + } + + if (r != 0) { + uv__free(buf); + return -r; + } + + if (result == NULL) { + uv__free(buf); + return -ENOENT; + } + + /* Allocate memory for the username, shell, and home directory */ + name_size = strlen(pw.pw_name) + 1; + homedir_size = strlen(pw.pw_dir) + 1; + shell_size = strlen(pw.pw_shell) + 1; + pwd->username = uv__malloc(name_size + homedir_size + shell_size); + + if (pwd->username == NULL) { + uv__free(buf); + return -ENOMEM; + } + + /* Copy the username */ + memcpy(pwd->username, pw.pw_name, name_size); + + /* Copy the home directory */ + pwd->homedir = pwd->username + name_size; + memcpy(pwd->homedir, pw.pw_dir, homedir_size); + + /* Copy the shell */ + pwd->shell = pwd->homedir + homedir_size; + memcpy(pwd->shell, pw.pw_shell, shell_size); + + /* Copy the uid and gid */ + pwd->uid = pw.pw_uid; + pwd->gid = pw.pw_gid; + + uv__free(buf); + + return 0; +} + + +void uv_os_free_passwd(uv_passwd_t* pwd) { + if (pwd == NULL) + return; + + /* + The memory for name, shell, and homedir are allocated in a single + uv__malloc() call. The base of the pointer is stored in pwd->username, so + that is the field that needs to be freed. + */ + uv__free(pwd->username); + pwd->username = NULL; + pwd->shell = NULL; + pwd->homedir = NULL; +} + + +int uv_os_get_passwd(uv_passwd_t* pwd) { + return uv__getpwuid_r(pwd); +} diff --git a/Utilities/cmlibuv/src/unix/darwin-proctitle.c b/Utilities/cmlibuv/src/unix/darwin-proctitle.c new file mode 100644 index 0000000..1142311 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/darwin-proctitle.c @@ -0,0 +1,206 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <dlfcn.h> +#include <errno.h> +#include <stdlib.h> +#include <string.h> + +#include <TargetConditionals.h> + +#if !TARGET_OS_IPHONE +# include <CoreFoundation/CoreFoundation.h> +# include <ApplicationServices/ApplicationServices.h> +#endif + + +static int uv__pthread_setname_np(const char* name) { + int (*dynamic_pthread_setname_np)(const char* name); + char namebuf[64]; /* MAXTHREADNAMESIZE */ + int err; + + /* pthread_setname_np() first appeared in OS X 10.6 and iOS 3.2. */ + *(void **)(&dynamic_pthread_setname_np) = + dlsym(RTLD_DEFAULT, "pthread_setname_np"); + + if (dynamic_pthread_setname_np == NULL) + return -ENOSYS; + + strncpy(namebuf, name, sizeof(namebuf) - 1); + namebuf[sizeof(namebuf) - 1] = '\0'; + + err = dynamic_pthread_setname_np(namebuf); + if (err) + return -err; + + return 0; +} + + +int uv__set_process_title(const char* title) { +#if TARGET_OS_IPHONE + return uv__pthread_setname_np(title); +#else + CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef, + const char*, + CFStringEncoding); + CFBundleRef (*pCFBundleGetBundleWithIdentifier)(CFStringRef); + void *(*pCFBundleGetDataPointerForName)(CFBundleRef, CFStringRef); + void *(*pCFBundleGetFunctionPointerForName)(CFBundleRef, CFStringRef); + CFTypeRef (*pLSGetCurrentApplicationASN)(void); + OSStatus (*pLSSetApplicationInformationItem)(int, + CFTypeRef, + CFStringRef, + CFStringRef, + CFDictionaryRef*); + void* application_services_handle; + void* core_foundation_handle; + CFBundleRef launch_services_bundle; + CFStringRef* display_name_key; + CFDictionaryRef (*pCFBundleGetInfoDictionary)(CFBundleRef); + CFBundleRef (*pCFBundleGetMainBundle)(void); + CFBundleRef hi_services_bundle; + OSStatus (*pSetApplicationIsDaemon)(int); + CFDictionaryRef (*pLSApplicationCheckIn)(int, CFDictionaryRef); + void (*pLSSetApplicationLaunchServicesServerConnectionStatus)(uint64_t, + void*); + CFTypeRef asn; + int err; + + err = -ENOENT; + application_services_handle = dlopen("/System/Library/Frameworks/" + "ApplicationServices.framework/" + "Versions/A/ApplicationServices", + RTLD_LAZY | RTLD_LOCAL); + core_foundation_handle = dlopen("/System/Library/Frameworks/" + "CoreFoundation.framework/" + "Versions/A/CoreFoundation", + RTLD_LAZY | RTLD_LOCAL); + + if (application_services_handle == NULL || core_foundation_handle == NULL) + goto out; + + *(void **)(&pCFStringCreateWithCString) = + dlsym(core_foundation_handle, "CFStringCreateWithCString"); + *(void **)(&pCFBundleGetBundleWithIdentifier) = + dlsym(core_foundation_handle, "CFBundleGetBundleWithIdentifier"); + *(void **)(&pCFBundleGetDataPointerForName) = + dlsym(core_foundation_handle, "CFBundleGetDataPointerForName"); + *(void **)(&pCFBundleGetFunctionPointerForName) = + dlsym(core_foundation_handle, "CFBundleGetFunctionPointerForName"); + + if (pCFStringCreateWithCString == NULL || + pCFBundleGetBundleWithIdentifier == NULL || + pCFBundleGetDataPointerForName == NULL || + pCFBundleGetFunctionPointerForName == NULL) { + goto out; + } + +#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8) + + launch_services_bundle = + pCFBundleGetBundleWithIdentifier(S("com.apple.LaunchServices")); + + if (launch_services_bundle == NULL) + goto out; + + *(void 
**)(&pLSGetCurrentApplicationASN) = + pCFBundleGetFunctionPointerForName(launch_services_bundle, + S("_LSGetCurrentApplicationASN")); + + if (pLSGetCurrentApplicationASN == NULL) + goto out; + + *(void **)(&pLSSetApplicationInformationItem) = + pCFBundleGetFunctionPointerForName(launch_services_bundle, + S("_LSSetApplicationInformationItem")); + + if (pLSSetApplicationInformationItem == NULL) + goto out; + + display_name_key = pCFBundleGetDataPointerForName(launch_services_bundle, + S("_kLSDisplayNameKey")); + + if (display_name_key == NULL || *display_name_key == NULL) + goto out; + + *(void **)(&pCFBundleGetInfoDictionary) = dlsym(core_foundation_handle, + "CFBundleGetInfoDictionary"); + *(void **)(&pCFBundleGetMainBundle) = dlsym(core_foundation_handle, + "CFBundleGetMainBundle"); + if (pCFBundleGetInfoDictionary == NULL || pCFBundleGetMainBundle == NULL) + goto out; + + /* Black 10.9 magic, to remove (Not responding) mark in Activity Monitor */ + hi_services_bundle = + pCFBundleGetBundleWithIdentifier(S("com.apple.HIServices")); + err = -ENOENT; + if (hi_services_bundle == NULL) + goto out; + + *(void **)(&pSetApplicationIsDaemon) = pCFBundleGetFunctionPointerForName( + hi_services_bundle, + S("SetApplicationIsDaemon")); + *(void **)(&pLSApplicationCheckIn) = pCFBundleGetFunctionPointerForName( + launch_services_bundle, + S("_LSApplicationCheckIn")); + *(void **)(&pLSSetApplicationLaunchServicesServerConnectionStatus) = + pCFBundleGetFunctionPointerForName( + launch_services_bundle, + S("_LSSetApplicationLaunchServicesServerConnectionStatus")); + if (pSetApplicationIsDaemon == NULL || + pLSApplicationCheckIn == NULL || + pLSSetApplicationLaunchServicesServerConnectionStatus == NULL) { + goto out; + } + + if (pSetApplicationIsDaemon(1) != noErr) + goto out; + + pLSSetApplicationLaunchServicesServerConnectionStatus(0, NULL); + + /* Check into process manager?! */ + pLSApplicationCheckIn(-2, + pCFBundleGetInfoDictionary(pCFBundleGetMainBundle())); + + asn = pLSGetCurrentApplicationASN(); + + err = -EINVAL; + if (pLSSetApplicationInformationItem(-2, /* Magic value. */ + asn, + *display_name_key, + S(title), + NULL) != noErr) { + goto out; + } + + uv__pthread_setname_np(title); /* Don't care if it fails. */ + err = 0; + +out: + if (core_foundation_handle != NULL) + dlclose(core_foundation_handle); + + if (application_services_handle != NULL) + dlclose(application_services_handle); + + return err; +#endif /* !TARGET_OS_IPHONE */ +} diff --git a/Utilities/cmlibuv/src/unix/darwin.c b/Utilities/cmlibuv/src/unix/darwin.c new file mode 100644 index 0000000..cf95da2 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/darwin.c @@ -0,0 +1,335 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <assert.h> +#include <stdint.h> +#include <errno.h> + +#include <ifaddrs.h> +#include <net/if.h> +#include <net/if_dl.h> + +#include <mach/mach.h> +#include <mach/mach_time.h> +#include <mach-o/dyld.h> /* _NSGetExecutablePath */ +#include <sys/resource.h> +#include <sys/sysctl.h> +#include <unistd.h> /* sysconf */ + + +int uv__platform_loop_init(uv_loop_t* loop) { + loop->cf_state = NULL; + + if (uv__kqueue_init(loop)) + return -errno; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + uv__fsevents_loop_delete(loop); +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + static mach_timebase_info_data_t info; + + if ((ACCESS_ONCE(uint32_t, info.numer) == 0 || + ACCESS_ONCE(uint32_t, info.denom) == 0) && + mach_timebase_info(&info) != KERN_SUCCESS) + abort(); + + return mach_absolute_time() * info.numer / info.denom; +} + + +int uv_exepath(char* buffer, size_t* size) { + /* realpath(exepath) may be > PATH_MAX so double it to be on the safe side. */ + char abspath[PATH_MAX * 2 + 1]; + char exepath[PATH_MAX + 1]; + uint32_t exepath_size; + size_t abspath_size; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + exepath_size = sizeof(exepath); + if (_NSGetExecutablePath(exepath, &exepath_size)) + return -EIO; + + if (realpath(exepath, abspath) != abspath) + return -errno; + + abspath_size = strlen(abspath); + if (abspath_size == 0) + return -EIO; + + *size -= 1; + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + return 0; +} + + +uint64_t uv_get_free_memory(void) { + vm_statistics_data_t info; + mach_msg_type_number_t count = sizeof(info) / sizeof(integer_t); + + if (host_statistics(mach_host_self(), HOST_VM_INFO, + (host_info_t)&info, &count) != KERN_SUCCESS) { + return -EINVAL; /* FIXME(bnoordhuis) Translate error. */ + } + + return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE); +} + + +uint64_t uv_get_total_memory(void) { + uint64_t info; + int which[] = {CTL_HW, HW_MEMSIZE}; + size_t size = sizeof(info); + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info; +} + + +void uv_loadavg(double avg[3]) { + struct loadavg info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_LOADAVG}; + + if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return; + + avg[0] = (double) info.ldavg[0] / info.fscale; + avg[1] = (double) info.ldavg[1] / info.fscale; + avg[2] = (double) info.ldavg[2] / info.fscale; +} + + +int uv_resident_set_memory(size_t* rss) { + mach_msg_type_number_t count; + task_basic_info_data_t info; + kern_return_t err; + + count = TASK_BASIC_INFO_COUNT; + err = task_info(mach_task_self(), + TASK_BASIC_INFO, + (task_info_t) &info, + &count); + (void) &err; + /* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than + * KERN_SUCCESS implies a libuv bug. 
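+ * (The (void) &err above keeps compilers from flagging err as set but
+ * unused in release builds, where NDEBUG compiles the assert away.)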
+ */ + assert(err == KERN_SUCCESS); + *rss = info.resident_size; + + return 0; +} + + +int uv_uptime(double* uptime) { + time_t now; + struct timeval info; + size_t size = sizeof(info); + static int which[] = {CTL_KERN, KERN_BOOTTIME}; + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + now = time(NULL); + *uptime = now - info.tv_sec; + + return 0; +} + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK), + multiplier = ((uint64_t)1000L / ticks); + char model[512]; + uint64_t cpuspeed; + size_t size; + unsigned int i; + natural_t numcpus; + mach_msg_type_number_t msg_type; + processor_cpu_load_info_data_t *info; + uv_cpu_info_t* cpu_info; + + size = sizeof(model); + if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) && + sysctlbyname("hw.model", &model, &size, NULL, 0)) { + return -errno; + } + + size = sizeof(cpuspeed); + if (sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0)) + return -errno; + + if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus, + (processor_info_array_t*)&info, + &msg_type) != KERN_SUCCESS) { + return -EINVAL; /* FIXME(bnoordhuis) Translate error. */ + } + + *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos)); + if (!(*cpu_infos)) { + vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type); + return -ENOMEM; + } + + *count = numcpus; + + for (i = 0; i < numcpus; i++) { + cpu_info = &(*cpu_infos)[i]; + + cpu_info->cpu_times.user = (uint64_t)(info[i].cpu_ticks[0]) * multiplier; + cpu_info->cpu_times.nice = (uint64_t)(info[i].cpu_ticks[3]) * multiplier; + cpu_info->cpu_times.sys = (uint64_t)(info[i].cpu_ticks[1]) * multiplier; + cpu_info->cpu_times.idle = (uint64_t)(info[i].cpu_ticks[2]) * multiplier; + cpu_info->cpu_times.irq = 0; + + cpu_info->model = uv__strdup(model); + cpu_info->speed = cpuspeed/1000000; + } + vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type); + + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_dl *sa_addr; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family == AF_LINK)) { + continue; + } + + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + /* + * On Mac OS X getifaddrs returns information related to Mac Addresses for + * various devices, such as firewire, etc. These are not relevant here. 
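+ * (They are not dropped entirely: the second pass below revisits the
+ * AF_LINK entries to copy each interface's phys_addr, matched by name.)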
+ */ + if (ent->ifa_addr->sa_family == AF_LINK) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != AF_LINK)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/Utilities/cmlibuv/src/unix/dl.c b/Utilities/cmlibuv/src/unix/dl.c new file mode 100644 index 0000000..fc1c052 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/dl.c @@ -0,0 +1,80 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <dlfcn.h> +#include <errno.h> +#include <string.h> +#include <locale.h> + +static int uv__dlerror(uv_lib_t* lib); + + +int uv_dlopen(const char* filename, uv_lib_t* lib) { + dlerror(); /* Reset error status. */ + lib->errmsg = NULL; + lib->handle = dlopen(filename, RTLD_LAZY); + return lib->handle ? 0 : uv__dlerror(lib); +} + + +void uv_dlclose(uv_lib_t* lib) { + uv__free(lib->errmsg); + lib->errmsg = NULL; + + if (lib->handle) { + /* Ignore errors. No good way to signal them without leaking memory. */ + dlclose(lib->handle); + lib->handle = NULL; + } +} + + +int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) { + dlerror(); /* Reset error status. 
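+ * A NULL result from dlsym() is not by itself an error, since a symbol
+ * may genuinely resolve to NULL; clearing the stale error first and
+ * checking dlerror() afterwards is the only reliable test.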
*/ + *ptr = dlsym(lib->handle, name); + return uv__dlerror(lib); +} + + +const char* uv_dlerror(const uv_lib_t* lib) { + return lib->errmsg ? lib->errmsg : "no error"; +} + + +static int uv__dlerror(uv_lib_t* lib) { + const char* errmsg; + + uv__free(lib->errmsg); + + errmsg = dlerror(); + + if (errmsg) { + lib->errmsg = uv__strdup(errmsg); + return -1; + } + else { + lib->errmsg = NULL; + return 0; + } +} diff --git a/Utilities/cmlibuv/src/unix/freebsd.c b/Utilities/cmlibuv/src/unix/freebsd.c new file mode 100644 index 0000000..cba44a3 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/freebsd.c @@ -0,0 +1,460 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "internal.h" + +#include <assert.h> +#include <string.h> +#include <errno.h> + +#include <ifaddrs.h> +#include <net/if.h> +#include <net/if_dl.h> + +#include <kvm.h> +#include <paths.h> +#include <sys/user.h> +#include <sys/types.h> +#include <sys/resource.h> +#include <sys/sysctl.h> +#include <vm/vm_param.h> /* VM_LOADAVG */ +#include <time.h> +#include <stdlib.h> +#include <unistd.h> /* sysconf */ +#include <fcntl.h> + +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) + +#ifndef CPUSTATES +# define CPUSTATES 5U +#endif +#ifndef CP_USER +# define CP_USER 0 +# define CP_NICE 1 +# define CP_SYS 2 +# define CP_IDLE 3 +# define CP_INTR 4 +#endif + +static char *process_title; + + +int uv__platform_loop_init(uv_loop_t* loop) { + return uv__kqueue_init(loop); +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec); +} + + +#ifdef __DragonFly__ +int uv_exepath(char* buffer, size_t* size) { + char abspath[PATH_MAX * 2 + 1]; + ssize_t abspath_size; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + abspath_size = readlink("/proc/curproc/file", abspath, sizeof(abspath)); + if (abspath_size < 0) + return -errno; + + assert(abspath_size > 0); + *size -= 1; + + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + return 0; +} +#else +int uv_exepath(char* buffer, size_t* size) { + char abspath[PATH_MAX * 2 + 1]; + int mib[4]; + size_t abspath_size; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PATHNAME; + mib[3] = -1; + + abspath_size = sizeof abspath; + if (sysctl(mib, 4, abspath, &abspath_size, NULL, 0)) + return -errno; + + assert(abspath_size > 0); + abspath_size -= 1; + *size -= 1; + + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + return 0; +} +#endif + +uint64_t uv_get_free_memory(void) { + int freecount; + size_t size = sizeof(freecount); + + if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0)) + return -errno; + + return (uint64_t) freecount * sysconf(_SC_PAGESIZE); + +} + + +uint64_t uv_get_total_memory(void) { + unsigned long info; + int which[] = {CTL_HW, HW_PHYSMEM}; + + size_t size = sizeof(info); + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info; +} + + +void uv_loadavg(double avg[3]) { + struct loadavg info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_LOADAVG}; + + if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return; + + avg[0] = (double) info.ldavg[0] / info.fscale; + avg[1] = (double) info.ldavg[1] / info.fscale; + avg[2] = (double) info.ldavg[2] / info.fscale; +} + + +char** uv_setup_args(int argc, char** argv) { + process_title = argc ? 
uv__strdup(argv[0]) : NULL; + return argv; +} + + +int uv_set_process_title(const char* title) { + int oid[4]; + + uv__free(process_title); + process_title = uv__strdup(title); + + oid[0] = CTL_KERN; + oid[1] = KERN_PROC; + oid[2] = KERN_PROC_ARGS; + oid[3] = getpid(); + + sysctl(oid, + ARRAY_SIZE(oid), + NULL, + NULL, + process_title, + strlen(process_title) + 1); + + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + size_t len; + + if (buffer == NULL || size == 0) + return -EINVAL; + + if (process_title) { + len = strlen(process_title) + 1; + + if (size < len) + return -ENOBUFS; + + memcpy(buffer, process_title, len); + } else { + len = 0; + } + + buffer[len] = '\0'; + + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + kvm_t *kd = NULL; + struct kinfo_proc *kinfo = NULL; + pid_t pid; + int nprocs; + size_t page_size = getpagesize(); + + pid = getpid(); + + kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open"); + if (kd == NULL) goto error; + + kinfo = kvm_getprocs(kd, KERN_PROC_PID, pid, &nprocs); + if (kinfo == NULL) goto error; + +#ifdef __DragonFly__ + *rss = kinfo->kp_vm_rssize * page_size; +#else + *rss = kinfo->ki_rssize * page_size; +#endif + + kvm_close(kd); + + return 0; + +error: + if (kd) kvm_close(kd); + return -EPERM; +} + + +int uv_uptime(double* uptime) { + int r; + struct timespec sp; + r = clock_gettime(CLOCK_MONOTONIC, &sp); + if (r) + return -errno; + + *uptime = sp.tv_sec; + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK), + multiplier = ((uint64_t)1000L / ticks), cpuspeed, maxcpus, + cur = 0; + uv_cpu_info_t* cpu_info; + const char* maxcpus_key; + const char* cptimes_key; + char model[512]; + long* cp_times; + int numcpus; + size_t size; + int i; + +#if defined(__DragonFly__) + /* This is not quite correct but DragonFlyBSD doesn't seem to have anything + * comparable to kern.smp.maxcpus or kern.cp_times (kern.cp_time is a total, + * not per CPU). At least this stops uv_cpu_info() from failing completely. + */ + maxcpus_key = "hw.ncpu"; + cptimes_key = "kern.cp_time"; +#else + maxcpus_key = "kern.smp.maxcpus"; + cptimes_key = "kern.cp_times"; +#endif + + size = sizeof(model); + if (sysctlbyname("hw.model", &model, &size, NULL, 0)) + return -errno; + + size = sizeof(numcpus); + if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0)) + return -errno; + + *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos)); + if (!(*cpu_infos)) + return -ENOMEM; + + *count = numcpus; + + size = sizeof(cpuspeed); + if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0)) { + uv__free(*cpu_infos); + return -errno; + } + + /* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of + * ncpu. 
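+ * Hence the cp_times buffer below is sized for maxcpus entries even
+ * though only the first numcpus * CPUSTATES counters are consumed.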
+ */ + size = sizeof(maxcpus); + if (sysctlbyname(maxcpus_key, &maxcpus, &size, NULL, 0)) { + uv__free(*cpu_infos); + return -errno; + } + + size = maxcpus * CPUSTATES * sizeof(long); + + cp_times = uv__malloc(size); + if (cp_times == NULL) { + uv__free(*cpu_infos); + return -ENOMEM; + } + + if (sysctlbyname(cptimes_key, cp_times, &size, NULL, 0)) { + uv__free(cp_times); + uv__free(*cpu_infos); + return -errno; + } + + for (i = 0; i < numcpus; i++) { + cpu_info = &(*cpu_infos)[i]; + + cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier; + cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier; + cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier; + cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier; + cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier; + + cpu_info->model = uv__strdup(model); + cpu_info->speed = cpuspeed; + + cur+=CPUSTATES; + } + + uv__free(cp_times); + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_dl *sa_addr; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family == AF_LINK)) { + continue; + } + + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + /* + * On FreeBSD getifaddrs returns information related to the raw underlying + * devices. We're not interested in this information yet. 
+ */ + if (ent->ifa_addr->sa_family == AF_LINK) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != AF_LINK)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/Utilities/cmlibuv/src/unix/fs.c b/Utilities/cmlibuv/src/unix/fs.c new file mode 100644 index 0000000..216ef97 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/fs.c @@ -0,0 +1,1355 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* Caveat emptor: this file deviates from the libuv convention of returning + * negated errno codes. Most uv_fs_*() functions map directly to the system + * call of the same name. For more complex wrappers, it's easier to just + * return -1 with errno set. The dispatcher in uv__fs_work() takes care of + * getting the errno to the right place (req->result or as the return value.) 
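+ *
+ * Concretely (editorial sketch): uv__fs_fdatasync() below simply returns
+ * what fdatasync() returns,
+ *
+ *   return fdatasync(req->file);
+ *
+ * i.e. -1 with errno set on failure, and the dispatcher later stores the
+ * negated errno in req->result (or returns it for synchronous calls).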
+ */ + +#include "uv.h" +#include "internal.h" + +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <limits.h> /* PATH_MAX */ + +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/time.h> +#include <sys/uio.h> +#include <pthread.h> +#include <unistd.h> +#include <fcntl.h> +#include <utime.h> +#include <poll.h> + +#if defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel_) || \ + defined(__OpenBSD__) || \ + defined(__NetBSD__) +# define HAVE_PREADV 1 +#else +# define HAVE_PREADV 0 +#endif + +#if defined(__linux__) || defined(__sun) +# include <sys/sendfile.h> +#endif + +#define INIT(subtype) \ + do { \ + req->type = UV_FS; \ + if (cb != NULL) \ + uv__req_init(loop, req, UV_FS); \ + req->fs_type = UV_FS_ ## subtype; \ + req->result = 0; \ + req->ptr = NULL; \ + req->loop = loop; \ + req->path = NULL; \ + req->new_path = NULL; \ + req->cb = cb; \ + } \ + while (0) + +#define PATH \ + do { \ + assert(path != NULL); \ + if (cb == NULL) { \ + req->path = path; \ + } else { \ + req->path = uv__strdup(path); \ + if (req->path == NULL) { \ + uv__req_unregister(loop, req); \ + return -ENOMEM; \ + } \ + } \ + } \ + while (0) + +#define PATH2 \ + do { \ + if (cb == NULL) { \ + req->path = path; \ + req->new_path = new_path; \ + } else { \ + size_t path_len; \ + size_t new_path_len; \ + path_len = strlen(path) + 1; \ + new_path_len = strlen(new_path) + 1; \ + req->path = uv__malloc(path_len + new_path_len); \ + if (req->path == NULL) { \ + uv__req_unregister(loop, req); \ + return -ENOMEM; \ + } \ + req->new_path = req->path + path_len; \ + memcpy((void*) req->path, path, path_len); \ + memcpy((void*) req->new_path, new_path, new_path_len); \ + } \ + } \ + while (0) + +#define POST \ + do { \ + if (cb != NULL) { \ + uv__work_submit(loop, &req->work_req, uv__fs_work, uv__fs_done); \ + return 0; \ + } \ + else { \ + uv__fs_work(&req->work_req); \ + return req->result; \ + } \ + } \ + while (0) + + +static ssize_t uv__fs_fdatasync(uv_fs_t* req) { +#if defined(__linux__) || defined(__sun) || defined(__NetBSD__) + return fdatasync(req->file); +#elif defined(__APPLE__) && defined(SYS_fdatasync) + return syscall(SYS_fdatasync, req->file); +#else + return fsync(req->file); +#endif +} + + +static ssize_t uv__fs_futime(uv_fs_t* req) { +#if defined(__linux__) + /* utimesat() has nanosecond resolution but we stick to microseconds + * for the sake of consistency with other platforms. + */ + static int no_utimesat; + struct timespec ts[2]; + struct timeval tv[2]; + char path[sizeof("/proc/self/fd/") + 3 * sizeof(int)]; + int r; + + if (no_utimesat) + goto skip; + + ts[0].tv_sec = req->atime; + ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000; + ts[1].tv_sec = req->mtime; + ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000; + + r = uv__utimesat(req->file, NULL, ts, 0); + if (r == 0) + return r; + + if (errno != ENOSYS) + return r; + + no_utimesat = 1; + +skip: + + tv[0].tv_sec = req->atime; + tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000; + tv[1].tv_sec = req->mtime; + tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000; + snprintf(path, sizeof(path), "/proc/self/fd/%d", (int) req->file); + + r = utimes(path, tv); + if (r == 0) + return r; + + switch (errno) { + case ENOENT: + if (fcntl(req->file, F_GETFL) == -1 && errno == EBADF) + break; + /* Fall through. 
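+ * (ENOENT from the /proc/self/fd path usually means procfs is absent,
+ * which the fall-through below reports as ENOSYS; the fcntl() probe
+ * first rules out a dead descriptor, which stays an EBADF error.)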
*/ + + case EACCES: + case ENOTDIR: + errno = ENOSYS; + break; + } + + return r; + +#elif defined(__APPLE__) \ + || defined(__DragonFly__) \ + || defined(__FreeBSD__) \ + || defined(__FreeBSD_kernel__) \ + || defined(__NetBSD__) \ + || defined(__OpenBSD__) \ + || defined(__sun) + struct timeval tv[2]; + tv[0].tv_sec = req->atime; + tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000; + tv[1].tv_sec = req->mtime; + tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000; +# if defined(__sun) + return futimesat(req->file, NULL, tv); +# else + return futimes(req->file, tv); +# endif +#elif defined(_AIX71) + struct timespec ts[2]; + ts[0].tv_sec = req->atime; + ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000; + ts[1].tv_sec = req->mtime; + ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000; + return futimens(req->file, ts); +#elif defined(__MVS__) + attrib_t atr; + memset(&atr, 0, sizeof(atr)); + atr.att_mtimechg = 1; + atr.att_atimechg = 1; + atr.att_mtime = req->mtime; + atr.att_atime = req->atime; + return __fchattr(req->file, &atr, sizeof(atr)); +#else + errno = ENOSYS; + return -1; +#endif +} + + +static ssize_t uv__fs_mkdtemp(uv_fs_t* req) { + return mkdtemp((char*) req->path) ? 0 : -1; +} + + +static ssize_t uv__fs_open(uv_fs_t* req) { + static int no_cloexec_support; + int r; + + /* Try O_CLOEXEC before entering locks */ + if (no_cloexec_support == 0) { +#ifdef O_CLOEXEC + r = open(req->path, req->flags | O_CLOEXEC, req->mode); + if (r >= 0) + return r; + if (errno != EINVAL) + return r; + no_cloexec_support = 1; +#endif /* O_CLOEXEC */ + } + + if (req->cb != NULL) + uv_rwlock_rdlock(&req->loop->cloexec_lock); + + r = open(req->path, req->flags, req->mode); + + /* In case of failure `uv__cloexec` will leave error in `errno`, + * so it is enough to just set `r` to `-1`. 
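+ * The descriptor is closed first so that it is not leaked to the caller.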
+ */ + if (r >= 0 && uv__cloexec(r, 1) != 0) { + r = uv__close(r); + if (r != 0) + abort(); + r = -1; + } + + if (req->cb != NULL) + uv_rwlock_rdunlock(&req->loop->cloexec_lock); + + return r; +} + + +static ssize_t uv__fs_read(uv_fs_t* req) { +#if defined(__linux__) + static int no_preadv; +#endif + ssize_t result; + +#if defined(_AIX) + struct stat buf; + if(fstat(req->file, &buf)) + return -1; + if(S_ISDIR(buf.st_mode)) { + errno = EISDIR; + return -1; + } +#endif /* defined(_AIX) */ + if (req->off < 0) { + if (req->nbufs == 1) + result = read(req->file, req->bufs[0].base, req->bufs[0].len); + else + result = readv(req->file, (struct iovec*) req->bufs, req->nbufs); + } else { + if (req->nbufs == 1) { + result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off); + goto done; + } + +#if HAVE_PREADV + result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); +#else +# if defined(__linux__) + if (no_preadv) retry: +# endif + { + off_t nread; + size_t index; + + nread = 0; + index = 0; + result = 1; + do { + if (req->bufs[index].len > 0) { + result = pread(req->file, + req->bufs[index].base, + req->bufs[index].len, + req->off + nread); + if (result > 0) + nread += result; + } + index++; + } while (index < req->nbufs && result > 0); + if (nread > 0) + result = nread; + } +# if defined(__linux__) + else { + result = uv__preadv(req->file, + (struct iovec*)req->bufs, + req->nbufs, + req->off); + if (result == -1 && errno == ENOSYS) { + no_preadv = 1; + goto retry; + } + } +# endif +#endif + } + +done: + return result; +} + + +#if defined(__OpenBSD__) || (defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)) +static int uv__fs_scandir_filter(uv__dirent_t* dent) { +#else +static int uv__fs_scandir_filter(const uv__dirent_t* dent) { +#endif + return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0; +} + + +static ssize_t uv__fs_scandir(uv_fs_t* req) { + uv__dirent_t **dents; + int saved_errno; + int n; + + dents = NULL; + n = scandir(req->path, &dents, uv__fs_scandir_filter, alphasort); + + /* NOTE: We will use nbufs as an index field */ + req->nbufs = 0; + + if (n == 0) + goto out; /* osx still needs to deallocate some memory */ + else if (n == -1) + return n; + + req->ptr = dents; + + return n; + +out: + saved_errno = errno; + if (dents != NULL) { + int i; + + /* Memory was allocated using the system allocator, so use free() here. 
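(The entries were allocated by scandir() itself, so uv__free() would not match the allocator.)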
*/ + for (i = 0; i < n; i++) + free(dents[i]); + free(dents); + } + errno = saved_errno; + + req->ptr = NULL; + + return n; +} + + +static ssize_t uv__fs_pathmax_size(const char* path) { + ssize_t pathmax; + + pathmax = pathconf(path, _PC_PATH_MAX); + + if (pathmax == -1) { +#if defined(PATH_MAX) + return PATH_MAX; +#else +#error "PATH_MAX undefined in the current platform" +#endif + } + + return pathmax; +} + +static ssize_t uv__fs_readlink(uv_fs_t* req) { + ssize_t len; + char* buf; + + len = uv__fs_pathmax_size(req->path); + buf = uv__malloc(len + 1); + + if (buf == NULL) { + errno = ENOMEM; + return -1; + } + + len = readlink(req->path, buf, len); + + if (len == -1) { + uv__free(buf); + return -1; + } + + buf[len] = '\0'; + req->ptr = buf; + + return 0; +} + +static ssize_t uv__fs_realpath(uv_fs_t* req) { + ssize_t len; + char* buf; + + len = uv__fs_pathmax_size(req->path); + buf = uv__malloc(len + 1); + + if (buf == NULL) { + errno = ENOMEM; + return -1; + } + + if (realpath(req->path, buf) == NULL) { + uv__free(buf); + return -1; + } + + req->ptr = buf; + + return 0; +} + +static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) { + struct pollfd pfd; + int use_pread; + off_t offset; + ssize_t nsent; + ssize_t nread; + ssize_t nwritten; + size_t buflen; + size_t len; + ssize_t n; + int in_fd; + int out_fd; + char buf[8192]; + + len = req->bufsml[0].len; + in_fd = req->flags; + out_fd = req->file; + offset = req->off; + use_pread = 1; + + /* Here are the rules regarding errors: + * + * 1. Read errors are reported only if nsent==0, otherwise we return nsent. + * The user needs to know that some data has already been sent, to stop + * them from sending it twice. + * + * 2. Write errors are always reported. Write errors are bad because they + * mean data loss: we've read data but now we can't write it out. + * + * We try to use pread() and fall back to regular read() if the source fd + * doesn't support positional reads, for example when it's a pipe fd. + * + * If we get EAGAIN when writing to the target fd, we poll() on it until + * it becomes writable again. + * + * FIXME: If we get a write error when use_pread==1, it should be safe to + * return the number of sent bytes instead of an error because pread() + * is, in theory, idempotent. However, special files in /dev or /proc + * may support pread() but not necessarily return the same data on + * successive reads. + * + * FIXME: There is no way now to signal that we managed to send *some* data + * before a write error. 
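+ * + * In short, the loop below copies the data through an 8 kB stack buffer: + * read (or pread) a chunk, write it out in full, poll with POLLOUT whenever + * the target fd returns EAGAIN/EWOULDBLOCK, and retry all ends on EINTR.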
+ */ + for (nsent = 0; (size_t) nsent < len; ) { + buflen = len - nsent; + + if (buflen > sizeof(buf)) + buflen = sizeof(buf); + + do + if (use_pread) + nread = pread(in_fd, buf, buflen, offset); + else + nread = read(in_fd, buf, buflen); + while (nread == -1 && errno == EINTR); + + if (nread == 0) + goto out; + + if (nread == -1) { + if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) { + use_pread = 0; + continue; + } + + if (nsent == 0) + nsent = -1; + + goto out; + } + + for (nwritten = 0; nwritten < nread; ) { + do + n = write(out_fd, buf + nwritten, nread - nwritten); + while (n == -1 && errno == EINTR); + + if (n != -1) { + nwritten += n; + continue; + } + + if (errno != EAGAIN && errno != EWOULDBLOCK) { + nsent = -1; + goto out; + } + + pfd.fd = out_fd; + pfd.events = POLLOUT; + pfd.revents = 0; + + do + n = poll(&pfd, 1, -1); + while (n == -1 && errno == EINTR); + + if (n == -1 || (pfd.revents & ~POLLOUT) != 0) { + errno = EIO; + nsent = -1; + goto out; + } + } + + offset += nread; + nsent += nread; + } + +out: + if (nsent != -1) + req->off = offset; + + return nsent; +} + + +static ssize_t uv__fs_sendfile(uv_fs_t* req) { + int in_fd; + int out_fd; + + in_fd = req->flags; + out_fd = req->file; + +#if defined(__linux__) || defined(__sun) + { + off_t off; + ssize_t r; + + off = req->off; + r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len); + + /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but + * it still writes out data. Fortunately, we can detect it by checking if + * the offset has been updated. + */ + if (r != -1 || off > req->off) { + r = off - req->off; + req->off = off; + return r; + } + + if (errno == EINVAL || + errno == EIO || + errno == ENOTSOCK || + errno == EXDEV) { + errno = 0; + return uv__fs_sendfile_emul(req); + } + + return -1; + } +#elif defined(__APPLE__) || \ + defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) + { + off_t len; + ssize_t r; + + /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in + * non-blocking mode and not all data could be written. If a non-zero + * number of bytes have been sent, we don't consider it an error. + */ + +#if defined(__FreeBSD__) || defined(__DragonFly__) + len = 0; + r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0); +#elif defined(__FreeBSD_kernel__) + len = 0; + r = bsd_sendfile(in_fd, + out_fd, + req->off, + req->bufsml[0].len, + NULL, + &len, + 0); +#else + /* The darwin sendfile takes len as an input for the length to send, + * so make sure to initialize it with the caller's value. */ + len = req->bufsml[0].len; + r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0); +#endif + + /* + * The man page for sendfile(2) on DragonFly states that `len` contains + * a meaningful value ONLY in case of EAGAIN and EINTR. + * Nothing is said about its value in case of other errors, so it is + * better not to rely on the potentially wrong assumption that it was + * not modified by the syscall. + */ + if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) { + req->off += len; + return (ssize_t) len; + } + + if (errno == EINVAL || + errno == EIO || + errno == ENOTSOCK || + errno == EXDEV) { + errno = 0; + return uv__fs_sendfile_emul(req); + } + + return -1; + } +#else + /* Squelch compiler warnings. 
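The (void) casts below keep in_fd and out_fd referenced on platforms where only the emulated path is compiled.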
*/ + (void) &in_fd; + (void) &out_fd; + + return uv__fs_sendfile_emul(req); +#endif +} + + +static ssize_t uv__fs_utime(uv_fs_t* req) { + struct utimbuf buf; + buf.actime = req->atime; + buf.modtime = req->mtime; + return utime(req->path, &buf); /* TODO use utimes() where available */ +} + + +static ssize_t uv__fs_write(uv_fs_t* req) { +#if defined(__linux__) + static int no_pwritev; +#endif + ssize_t r; + + /* Serialize writes on OS X, concurrent write() and pwrite() calls result in + * data loss. We can't use a per-file descriptor lock, the descriptor may be + * a dup(). + */ +#if defined(__APPLE__) + static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; + + if (pthread_mutex_lock(&lock)) + abort(); +#endif + + if (req->off < 0) { + if (req->nbufs == 1) + r = write(req->file, req->bufs[0].base, req->bufs[0].len); + else + r = writev(req->file, (struct iovec*) req->bufs, req->nbufs); + } else { + if (req->nbufs == 1) { + r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off); + goto done; + } +#if HAVE_PREADV + r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); +#else +# if defined(__linux__) + if (no_pwritev) retry: +# endif + { + off_t written; + size_t index; + + written = 0; + index = 0; + r = 0; + do { + if (req->bufs[index].len > 0) { + r = pwrite(req->file, + req->bufs[index].base, + req->bufs[index].len, + req->off + written); + if (r > 0) + written += r; + } + index++; + } while (index < req->nbufs && r >= 0); + if (written > 0) + r = written; + } +# if defined(__linux__) + else { + r = uv__pwritev(req->file, + (struct iovec*) req->bufs, + req->nbufs, + req->off); + if (r == -1 && errno == ENOSYS) { + no_pwritev = 1; + goto retry; + } + } +# endif +#endif + } + +done: +#if defined(__APPLE__) + if (pthread_mutex_unlock(&lock)) + abort(); +#endif + + return r; +} + +static void uv__to_stat(struct stat* src, uv_stat_t* dst) { + dst->st_dev = src->st_dev; + dst->st_mode = src->st_mode; + dst->st_nlink = src->st_nlink; + dst->st_uid = src->st_uid; + dst->st_gid = src->st_gid; + dst->st_rdev = src->st_rdev; + dst->st_ino = src->st_ino; + dst->st_size = src->st_size; + dst->st_blksize = src->st_blksize; + dst->st_blocks = src->st_blocks; + +#if defined(__APPLE__) + dst->st_atim.tv_sec = src->st_atimespec.tv_sec; + dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec; + dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec; + dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec; + dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec; + dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec; + dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec; + dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec; + dst->st_flags = src->st_flags; + dst->st_gen = src->st_gen; +#elif defined(__ANDROID__) + dst->st_atim.tv_sec = src->st_atime; + dst->st_atim.tv_nsec = src->st_atimensec; + dst->st_mtim.tv_sec = src->st_mtime; + dst->st_mtim.tv_nsec = src->st_mtimensec; + dst->st_ctim.tv_sec = src->st_ctime; + dst->st_ctim.tv_nsec = src->st_ctimensec; + dst->st_birthtim.tv_sec = src->st_ctime; + dst->st_birthtim.tv_nsec = src->st_ctimensec; + dst->st_flags = 0; + dst->st_gen = 0; +#elif !defined(_AIX) && ( \ + defined(_BSD_SOURCE) || \ + defined(_SVID_SOURCE) || \ + defined(_XOPEN_SOURCE) || \ + defined(_DEFAULT_SOURCE)) + dst->st_atim.tv_sec = src->st_atim.tv_sec; + dst->st_atim.tv_nsec = src->st_atim.tv_nsec; + dst->st_mtim.tv_sec = src->st_mtim.tv_sec; + dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec; + dst->st_ctim.tv_sec = src->st_ctim.tv_sec; + dst->st_ctim.tv_nsec = 
src->st_ctim.tv_nsec; +# if defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__OpenBSD__) || \ + defined(__NetBSD__) + dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec; + dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec; + dst->st_flags = src->st_flags; + dst->st_gen = src->st_gen; +# else + dst->st_birthtim.tv_sec = src->st_ctim.tv_sec; + dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec; + dst->st_flags = 0; + dst->st_gen = 0; +# endif +#else + dst->st_atim.tv_sec = src->st_atime; + dst->st_atim.tv_nsec = 0; + dst->st_mtim.tv_sec = src->st_mtime; + dst->st_mtim.tv_nsec = 0; + dst->st_ctim.tv_sec = src->st_ctime; + dst->st_ctim.tv_nsec = 0; + dst->st_birthtim.tv_sec = src->st_ctime; + dst->st_birthtim.tv_nsec = 0; + dst->st_flags = 0; + dst->st_gen = 0; +#endif +} + + +static int uv__fs_stat(const char *path, uv_stat_t *buf) { + struct stat pbuf; + int ret; + + ret = stat(path, &pbuf); + if (ret == 0) + uv__to_stat(&pbuf, buf); + + return ret; +} + + +static int uv__fs_lstat(const char *path, uv_stat_t *buf) { + struct stat pbuf; + int ret; + + ret = lstat(path, &pbuf); + if (ret == 0) + uv__to_stat(&pbuf, buf); + + return ret; +} + + +static int uv__fs_fstat(int fd, uv_stat_t *buf) { + struct stat pbuf; + int ret; + + ret = fstat(fd, &pbuf); + if (ret == 0) + uv__to_stat(&pbuf, buf); + + return ret; +} + + +typedef ssize_t (*uv__fs_buf_iter_processor)(uv_fs_t* req); +static ssize_t uv__fs_buf_iter(uv_fs_t* req, uv__fs_buf_iter_processor process) { + unsigned int iovmax; + unsigned int nbufs; + uv_buf_t* bufs; + ssize_t total; + ssize_t result; + + iovmax = uv__getiovmax(); + nbufs = req->nbufs; + bufs = req->bufs; + total = 0; + + while (nbufs > 0) { + req->nbufs = nbufs; + if (req->nbufs > iovmax) + req->nbufs = iovmax; + + result = process(req); + if (result <= 0) { + if (total == 0) + total = result; + break; + } + + if (req->off >= 0) + req->off += result; + + req->bufs += req->nbufs; + nbufs -= req->nbufs; + total += result; + } + + if (errno == EINTR && total == -1) + return total; + + if (bufs != req->bufsml) + uv__free(bufs); + + req->bufs = NULL; + req->nbufs = 0; + + return total; +} + + +static void uv__fs_work(struct uv__work* w) { + int retry_on_eintr; + uv_fs_t* req; + ssize_t r; + + req = container_of(w, uv_fs_t, work_req); + retry_on_eintr = !(req->fs_type == UV_FS_CLOSE); + + do { + errno = 0; + +#define X(type, action) \ + case UV_FS_ ## type: \ + r = action; \ + break; + + switch (req->fs_type) { + X(ACCESS, access(req->path, req->flags)); + X(CHMOD, chmod(req->path, req->mode)); + X(CHOWN, chown(req->path, req->uid, req->gid)); + X(CLOSE, close(req->file)); + X(FCHMOD, fchmod(req->file, req->mode)); + X(FCHOWN, fchown(req->file, req->uid, req->gid)); + X(FDATASYNC, uv__fs_fdatasync(req)); + X(FSTAT, uv__fs_fstat(req->file, &req->statbuf)); + X(FSYNC, fsync(req->file)); + X(FTRUNCATE, ftruncate(req->file, req->off)); + X(FUTIME, uv__fs_futime(req)); + X(LSTAT, uv__fs_lstat(req->path, &req->statbuf)); + X(LINK, link(req->path, req->new_path)); + X(MKDIR, mkdir(req->path, req->mode)); + X(MKDTEMP, uv__fs_mkdtemp(req)); + X(OPEN, uv__fs_open(req)); + X(READ, uv__fs_buf_iter(req, uv__fs_read)); + X(SCANDIR, uv__fs_scandir(req)); + X(READLINK, uv__fs_readlink(req)); + X(REALPATH, uv__fs_realpath(req)); + X(RENAME, rename(req->path, req->new_path)); + X(RMDIR, rmdir(req->path)); + X(SENDFILE, uv__fs_sendfile(req)); + X(STAT, uv__fs_stat(req->path, &req->statbuf)); + X(SYMLINK, symlink(req->path, req->new_path)); + X(UNLINK, unlink(req->path)); + X(UTIME, 
uv__fs_utime(req)); + X(WRITE, uv__fs_buf_iter(req, uv__fs_write)); + default: abort(); + } +#undef X + } while (r == -1 && errno == EINTR && retry_on_eintr); + + if (r == -1) + req->result = -errno; + else + req->result = r; + + if (r == 0 && (req->fs_type == UV_FS_STAT || + req->fs_type == UV_FS_FSTAT || + req->fs_type == UV_FS_LSTAT)) { + req->ptr = &req->statbuf; + } +} + + +static void uv__fs_done(struct uv__work* w, int status) { + uv_fs_t* req; + + req = container_of(w, uv_fs_t, work_req); + uv__req_unregister(req->loop, req); + + if (status == -ECANCELED) { + assert(req->result == 0); + req->result = -ECANCELED; + } + + req->cb(req); +} + + +int uv_fs_access(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb) { + INIT(ACCESS); + PATH; + req->flags = flags; + POST; +} + + +int uv_fs_chmod(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb) { + INIT(CHMOD); + PATH; + req->mode = mode; + POST; +} + + +int uv_fs_chown(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_uid_t uid, + uv_gid_t gid, + uv_fs_cb cb) { + INIT(CHOWN); + PATH; + req->uid = uid; + req->gid = gid; + POST; +} + + +int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { + INIT(CLOSE); + req->file = file; + POST; +} + + +int uv_fs_fchmod(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + int mode, + uv_fs_cb cb) { + INIT(FCHMOD); + req->file = file; + req->mode = mode; + POST; +} + + +int uv_fs_fchown(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_uid_t uid, + uv_gid_t gid, + uv_fs_cb cb) { + INIT(FCHOWN); + req->file = file; + req->uid = uid; + req->gid = gid; + POST; +} + + +int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { + INIT(FDATASYNC); + req->file = file; + POST; +} + + +int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { + INIT(FSTAT); + req->file = file; + POST; +} + + +int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { + INIT(FSYNC); + req->file = file; + POST; +} + + +int uv_fs_ftruncate(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + int64_t off, + uv_fs_cb cb) { + INIT(FTRUNCATE); + req->file = file; + req->off = off; + POST; +} + + +int uv_fs_futime(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + double atime, + double mtime, + uv_fs_cb cb) { + INIT(FUTIME); + req->file = file; + req->atime = atime; + req->mtime = mtime; + POST; +} + + +int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + INIT(LSTAT); + PATH; + POST; +} + + +int uv_fs_link(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + uv_fs_cb cb) { + INIT(LINK); + PATH2; + POST; +} + + +int uv_fs_mkdir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb) { + INIT(MKDIR); + PATH; + req->mode = mode; + POST; +} + + +int uv_fs_mkdtemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb) { + INIT(MKDTEMP); + req->path = uv__strdup(tpl); + if (req->path == NULL) { + if (cb != NULL) + uv__req_unregister(loop, req); + return -ENOMEM; + } + POST; +} + + +int uv_fs_open(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + int mode, + uv_fs_cb cb) { + INIT(OPEN); + PATH; + req->flags = flags; + req->mode = mode; + POST; +} + + +int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, + uv_file file, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t off, + uv_fs_cb cb) { + if (bufs == NULL || nbufs == 0) + return -EINVAL; + + INIT(READ); + req->file = file; + + req->nbufs = 
nbufs; + req->bufs = req->bufsml; + if (nbufs > ARRAY_SIZE(req->bufsml)) + req->bufs = uv__malloc(nbufs * sizeof(*bufs)); + + if (req->bufs == NULL) { + if (cb != NULL) + uv__req_unregister(loop, req); + return -ENOMEM; + } + + memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); + + req->off = off; + POST; +} + + +int uv_fs_scandir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb) { + INIT(SCANDIR); + PATH; + req->flags = flags; + POST; +} + + +int uv_fs_readlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb) { + INIT(READLINK); + PATH; + POST; +} + + +int uv_fs_realpath(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb) { + INIT(REALPATH); + PATH; + POST; +} + + +int uv_fs_rename(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + uv_fs_cb cb) { + INIT(RENAME); + PATH2; + POST; +} + + +int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + INIT(RMDIR); + PATH; + POST; +} + + +int uv_fs_sendfile(uv_loop_t* loop, + uv_fs_t* req, + uv_file out_fd, + uv_file in_fd, + int64_t off, + size_t len, + uv_fs_cb cb) { + INIT(SENDFILE); + req->flags = in_fd; /* hack */ + req->file = out_fd; + req->off = off; + req->bufsml[0].len = len; + POST; +} + + +int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + INIT(STAT); + PATH; + POST; +} + + +int uv_fs_symlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + int flags, + uv_fs_cb cb) { + INIT(SYMLINK); + PATH2; + req->flags = flags; + POST; +} + + +int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + INIT(UNLINK); + PATH; + POST; +} + + +int uv_fs_utime(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + double atime, + double mtime, + uv_fs_cb cb) { + INIT(UTIME); + PATH; + req->atime = atime; + req->mtime = mtime; + POST; +} + + +int uv_fs_write(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t off, + uv_fs_cb cb) { + if (bufs == NULL || nbufs == 0) + return -EINVAL; + + INIT(WRITE); + req->file = file; + + req->nbufs = nbufs; + req->bufs = req->bufsml; + if (nbufs > ARRAY_SIZE(req->bufsml)) + req->bufs = uv__malloc(nbufs * sizeof(*bufs)); + + if (req->bufs == NULL) { + if (cb != NULL) + uv__req_unregister(loop, req); + return -ENOMEM; + } + + memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); + + req->off = off; + POST; +} + + +void uv_fs_req_cleanup(uv_fs_t* req) { + /* Only necessary for asynchronous requests, i.e., requests with a callback. + * Synchronous ones don't copy their arguments and have req->path and + * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP is the + * exception to the rule: it always allocates memory. + */ + if (req->path != NULL && (req->cb != NULL || req->fs_type == UV_FS_MKDTEMP)) + uv__free((void*) req->path); /* Memory is shared with req->new_path. */ + + req->path = NULL; + req->new_path = NULL; + + if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL) + uv__fs_scandir_cleanup(req); + + if (req->ptr != &req->statbuf) + uv__free(req->ptr); + req->ptr = NULL; +} diff --git a/Utilities/cmlibuv/src/unix/fsevents.c b/Utilities/cmlibuv/src/unix/fsevents.c new file mode 100644 index 0000000..d331a13 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/fsevents.c @@ -0,0 +1,904 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#if TARGET_OS_IPHONE + +/* iOS (currently) doesn't provide the FSEvents-API (nor CoreServices) */ + +int uv__fsevents_init(uv_fs_event_t* handle) { + return 0; +} + + +int uv__fsevents_close(uv_fs_event_t* handle) { + return 0; +} + + +void uv__fsevents_loop_delete(uv_loop_t* loop) { +} + +#else /* TARGET_OS_IPHONE */ + +#include <dlfcn.h> +#include <assert.h> +#include <stdlib.h> +#include <pthread.h> + +#include <CoreFoundation/CFRunLoop.h> +#include <CoreServices/CoreServices.h> + +/* These are macros to avoid "initializer element is not constant" errors + * with old versions of gcc. + */ +#define kFSEventsModified (kFSEventStreamEventFlagItemFinderInfoMod | \ + kFSEventStreamEventFlagItemModified | \ + kFSEventStreamEventFlagItemInodeMetaMod | \ + kFSEventStreamEventFlagItemChangeOwner | \ + kFSEventStreamEventFlagItemXattrMod) + +#define kFSEventsRenamed (kFSEventStreamEventFlagItemCreated | \ + kFSEventStreamEventFlagItemRemoved | \ + kFSEventStreamEventFlagItemRenamed) + +#define kFSEventsSystem (kFSEventStreamEventFlagUserDropped | \ + kFSEventStreamEventFlagKernelDropped | \ + kFSEventStreamEventFlagEventIdsWrapped | \ + kFSEventStreamEventFlagHistoryDone | \ + kFSEventStreamEventFlagMount | \ + kFSEventStreamEventFlagUnmount | \ + kFSEventStreamEventFlagRootChanged) + +typedef struct uv__fsevents_event_s uv__fsevents_event_t; +typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t; +typedef struct uv__cf_loop_state_s uv__cf_loop_state_t; + +enum uv__cf_loop_signal_type_e { + kUVCFLoopSignalRegular, + kUVCFLoopSignalClosing +}; +typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t; + +struct uv__cf_loop_signal_s { + QUEUE member; + uv_fs_event_t* handle; + uv__cf_loop_signal_type_t type; +}; + +struct uv__fsevents_event_s { + QUEUE member; + int events; + char path[1]; +}; + +struct uv__cf_loop_state_s { + CFRunLoopRef loop; + CFRunLoopSourceRef signal_source; + int fsevent_need_reschedule; + FSEventStreamRef fsevent_stream; + uv_sem_t fsevent_sem; + uv_mutex_t fsevent_mutex; + void* fsevent_handles[2]; + unsigned int fsevent_handle_count; +}; + +/* Forward declarations */ +static void uv__cf_loop_cb(void* arg); +static void* uv__cf_loop_runner(void* arg); +static int uv__cf_loop_signal(uv_loop_t* loop, + uv_fs_event_t* handle, + uv__cf_loop_signal_type_t type); + +/* Lazy-loaded by uv__fsevents_global_init(). 
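Each p-prefixed pointer below is resolved with dlsym() against the CoreFoundation/CoreServices frameworks; see the V() macro in that function.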
*/ +static CFArrayRef (*pCFArrayCreate)(CFAllocatorRef, + const void**, + CFIndex, + const CFArrayCallBacks*); +static void (*pCFRelease)(CFTypeRef); +static void (*pCFRunLoopAddSource)(CFRunLoopRef, + CFRunLoopSourceRef, + CFStringRef); +static CFRunLoopRef (*pCFRunLoopGetCurrent)(void); +static void (*pCFRunLoopRemoveSource)(CFRunLoopRef, + CFRunLoopSourceRef, + CFStringRef); +static void (*pCFRunLoopRun)(void); +static CFRunLoopSourceRef (*pCFRunLoopSourceCreate)(CFAllocatorRef, + CFIndex, + CFRunLoopSourceContext*); +static void (*pCFRunLoopSourceSignal)(CFRunLoopSourceRef); +static void (*pCFRunLoopStop)(CFRunLoopRef); +static void (*pCFRunLoopWakeUp)(CFRunLoopRef); +static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)( + CFAllocatorRef, + const char*); +static CFStringEncoding (*pCFStringGetSystemEncoding)(void); +static CFStringRef (*pkCFRunLoopDefaultMode); +static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef, + FSEventStreamCallback, + FSEventStreamContext*, + CFArrayRef, + FSEventStreamEventId, + CFTimeInterval, + FSEventStreamCreateFlags); +static void (*pFSEventStreamFlushSync)(FSEventStreamRef); +static void (*pFSEventStreamInvalidate)(FSEventStreamRef); +static void (*pFSEventStreamRelease)(FSEventStreamRef); +static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef, + CFRunLoopRef, + CFStringRef); +static Boolean (*pFSEventStreamStart)(FSEventStreamRef); +static void (*pFSEventStreamStop)(FSEventStreamRef); + +#define UV__FSEVENTS_PROCESS(handle, block) \ + do { \ + QUEUE events; \ + QUEUE* q; \ + uv__fsevents_event_t* event; \ + int err; \ + uv_mutex_lock(&(handle)->cf_mutex); \ + /* Split-off all events and empty original queue */ \ + QUEUE_MOVE(&(handle)->cf_events, &events); \ + /* Get error (if any) and zero original one */ \ + err = (handle)->cf_error; \ + (handle)->cf_error = 0; \ + uv_mutex_unlock(&(handle)->cf_mutex); \ + /* Loop through events, deallocating each after processing */ \ + while (!QUEUE_EMPTY(&events)) { \ + q = QUEUE_HEAD(&events); \ + event = QUEUE_DATA(q, uv__fsevents_event_t, member); \ + QUEUE_REMOVE(q); \ + /* NOTE: Checking uv__is_active() is required here, because handle \ + * callback may close handle and invoking it after it will lead to \ + * incorrect behaviour */ \ + if (!uv__is_closing((handle)) && uv__is_active((handle))) \ + block \ + /* Free allocated data */ \ + uv__free(event); \ + } \ + if (err != 0 && !uv__is_closing((handle)) && uv__is_active((handle))) \ + (handle)->cb((handle), NULL, 0, err); \ + } while (0) + + +/* Runs in UV loop's thread, when there're events to report to handle */ +static void uv__fsevents_cb(uv_async_t* cb) { + uv_fs_event_t* handle; + + handle = cb->data; + + UV__FSEVENTS_PROCESS(handle, { + handle->cb(handle, event->path[0] ? 
event->path : NULL, event->events, 0); + }); +} + + +/* Runs in CF thread, pushes events onto handle's event list */ +static void uv__fsevents_push_event(uv_fs_event_t* handle, + QUEUE* events, + int err) { + assert(events != NULL || err != 0); + uv_mutex_lock(&handle->cf_mutex); + + /* Concatenate two queues */ + if (events != NULL) + QUEUE_ADD(&handle->cf_events, events); + + /* Propagate error */ + if (err != 0) + handle->cf_error = err; + uv_mutex_unlock(&handle->cf_mutex); + + uv_async_send(handle->cf_cb); +} + + +/* Runs in CF thread, when there're events in FSEventStream */ +static void uv__fsevents_event_cb(ConstFSEventStreamRef streamRef, + void* info, + size_t numEvents, + void* eventPaths, + const FSEventStreamEventFlags eventFlags[], + const FSEventStreamEventId eventIds[]) { + size_t i; + int len; + char** paths; + char* path; + char* pos; + uv_fs_event_t* handle; + QUEUE* q; + uv_loop_t* loop; + uv__cf_loop_state_t* state; + uv__fsevents_event_t* event; + QUEUE head; + + loop = info; + state = loop->cf_state; + assert(state != NULL); + paths = eventPaths; + + /* For each handle */ + uv_mutex_lock(&state->fsevent_mutex); + QUEUE_FOREACH(q, &state->fsevent_handles) { + handle = QUEUE_DATA(q, uv_fs_event_t, cf_member); + QUEUE_INIT(&head); + + /* Process and filter out events */ + for (i = 0; i < numEvents; i++) { + /* Ignore system events */ + if (eventFlags[i] & kFSEventsSystem) + continue; + + path = paths[i]; + len = strlen(path); + + /* Filter out paths that are outside handle's request */ + if (strncmp(path, handle->realpath, handle->realpath_len) != 0) + continue; + + if (handle->realpath_len > 1 || *handle->realpath != '/') { + path += handle->realpath_len; + len -= handle->realpath_len; + + /* Skip forward slash */ + if (*path != '\0') { + path++; + len--; + } + } + +#ifdef MAC_OS_X_VERSION_10_7 + /* Ignore events with path equal to directory itself */ + if (len == 0) + continue; +#endif /* MAC_OS_X_VERSION_10_7 */ + + /* Do not emit events from subdirectories unless UV_FS_EVENT_RECURSIVE is set */ + if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 && *path != 0) { + pos = strchr(path + 1, '/'); + if (pos != NULL) + continue; + } + +#ifndef MAC_OS_X_VERSION_10_7 + path = ""; + len = 0; +#endif /* MAC_OS_X_VERSION_10_7 */ + + event = uv__malloc(sizeof(*event) + len); + if (event == NULL) + break; + + memset(event, 0, sizeof(*event)); + memcpy(event->path, path, len + 1); + + if ((eventFlags[i] & kFSEventsModified) != 0 && + (eventFlags[i] & kFSEventsRenamed) == 0) + event->events = UV_CHANGE; + else + event->events = UV_RENAME; + + QUEUE_INSERT_TAIL(&head, &event->member); + } + + if (!QUEUE_EMPTY(&head)) + uv__fsevents_push_event(handle, &head, 0); + } + uv_mutex_unlock(&state->fsevent_mutex); +} + + +/* Runs in CF thread */ +static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) { + uv__cf_loop_state_t* state; + FSEventStreamContext ctx; + FSEventStreamRef ref; + CFAbsoluteTime latency; + FSEventStreamCreateFlags flags; + + /* Initialize context */ + ctx.version = 0; + ctx.info = loop; + ctx.retain = NULL; + ctx.release = NULL; + ctx.copyDescription = NULL; + + latency = 0.05; + + /* Explanation of selected flags: + * 1. NoDefer - without this flag, events that happen continuously (i.e. each + * event arrives less than `latency` seconds after the previous one) are + * deferred and passed to the callback only once they fill the whole OS + * buffer, or once this continuous stream stops (i.e. 
there is a gap between + * events bigger than `latency`). + * With this flag, the callback is invoked roughly `latency` seconds after + * each event. + * 2. FileEvents - fire the callback for file changes too (by default it + * fires only for directory changes). + */ + flags = kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents; + + /* + * NOTE: It might sound like a good idea to remember the last seen + * StreamEventId, but in reality one watched directory might have a last + * StreamEventId lower than another directory that is being watched now, + * which would cause the FSEventStream API to report changes to files from + * the past. + */ + ref = pFSEventStreamCreate(NULL, + &uv__fsevents_event_cb, + &ctx, + paths, + kFSEventStreamEventIdSinceNow, + latency, + flags); + assert(ref != NULL); + + state = loop->cf_state; + pFSEventStreamScheduleWithRunLoop(ref, + state->loop, + *pkCFRunLoopDefaultMode); + if (!pFSEventStreamStart(ref)) { + pFSEventStreamInvalidate(ref); + pFSEventStreamRelease(ref); + return -EMFILE; + } + + state->fsevent_stream = ref; + return 0; +} + + +/* Runs in CF thread */ +static void uv__fsevents_destroy_stream(uv_loop_t* loop) { + uv__cf_loop_state_t* state; + + state = loop->cf_state; + + if (state->fsevent_stream == NULL) + return; + + /* Flush all accumulated events */ + pFSEventStreamFlushSync(state->fsevent_stream); + + /* Stop emitting events */ + pFSEventStreamStop(state->fsevent_stream); + + /* Release stream */ + pFSEventStreamInvalidate(state->fsevent_stream); + pFSEventStreamRelease(state->fsevent_stream); + state->fsevent_stream = NULL; +} + + +/* Runs in CF thread, when there're new fsevent handles to add to stream */ +static void uv__fsevents_reschedule(uv_fs_event_t* handle, + uv__cf_loop_signal_type_t type) { + uv__cf_loop_state_t* state; + QUEUE* q; + uv_fs_event_t* curr; + CFArrayRef cf_paths; + CFStringRef* paths; + unsigned int i; + int err; + unsigned int path_count; + + state = handle->loop->cf_state; + paths = NULL; + cf_paths = NULL; + err = 0; + /* NOTE: `i` is used in deallocation loop below */ + i = 0; + + /* Optimization to prevent O(n^2) time spent when starting to watch + * many files simultaneously + */ + uv_mutex_lock(&state->fsevent_mutex); + if (state->fsevent_need_reschedule == 0) { + uv_mutex_unlock(&state->fsevent_mutex); + goto final; + } + state->fsevent_need_reschedule = 0; + uv_mutex_unlock(&state->fsevent_mutex); + + /* Destroy previous FSEventStream */ + uv__fsevents_destroy_stream(handle->loop); + + /* Any failure below will be a memory failure */ + err = -ENOMEM; + + /* Create list of all watched paths */ + uv_mutex_lock(&state->fsevent_mutex); + path_count = state->fsevent_handle_count; + if (path_count != 0) { + paths = uv__malloc(sizeof(*paths) * path_count); + if (paths == NULL) { + uv_mutex_unlock(&state->fsevent_mutex); + goto final; + } + + q = &state->fsevent_handles; + for (; i < path_count; i++) { + q = QUEUE_NEXT(q); + assert(q != &state->fsevent_handles); + curr = QUEUE_DATA(q, uv_fs_event_t, cf_member); + + assert(curr->realpath != NULL); + paths[i] = + pCFStringCreateWithFileSystemRepresentation(NULL, curr->realpath); + if (paths[i] == NULL) { + uv_mutex_unlock(&state->fsevent_mutex); + goto final; + } + } + } + uv_mutex_unlock(&state->fsevent_mutex); + err = 0; + + if (path_count != 0) { + /* Create new FSEventStream */ + cf_paths = pCFArrayCreate(NULL, (const void**) paths, path_count, NULL); + if (cf_paths == NULL) { + err = -ENOMEM; + goto final; + } + err = uv__fsevents_create_stream(handle->loop, cf_paths); + } + 
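+ /* The `final` label below releases the CFString list (unless the CFArray + * took ownership of it) and, on error, broadcasts the failure to every + * watcher handle. + */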
+final: + /* Deallocate all paths in case of failure */ + if (err != 0) { + if (cf_paths == NULL) { + while (i != 0) + pCFRelease(paths[--i]); + uv__free(paths); + } else { + /* CFArray takes ownership of both strings and original C-array */ + pCFRelease(cf_paths); + } + + /* Broadcast error to all handles */ + uv_mutex_lock(&state->fsevent_mutex); + QUEUE_FOREACH(q, &state->fsevent_handles) { + curr = QUEUE_DATA(q, uv_fs_event_t, cf_member); + uv__fsevents_push_event(curr, NULL, err); + } + uv_mutex_unlock(&state->fsevent_mutex); + } + + /* + * Main thread will block until the removal of handle from the list, + * we must tell it when we're ready. + * + * NOTE: This is coupled with `uv_sem_wait()` in `uv__fsevents_close` + */ + if (type == kUVCFLoopSignalClosing) + uv_sem_post(&state->fsevent_sem); +} + + +static int uv__fsevents_global_init(void) { + static pthread_mutex_t global_init_mutex = PTHREAD_MUTEX_INITIALIZER; + static void* core_foundation_handle; + static void* core_services_handle; + int err; + + err = 0; + pthread_mutex_lock(&global_init_mutex); + if (core_foundation_handle != NULL) + goto out; + + /* The libraries are never unloaded because we currently don't have a good + * mechanism for keeping a reference count. It's unlikely to be an issue + * but if it ever becomes one, we can turn the dynamic library handles into + * per-event loop properties and have the dynamic linker keep track for us. + */ + err = -ENOSYS; + core_foundation_handle = dlopen("/System/Library/Frameworks/" + "CoreFoundation.framework/" + "Versions/A/CoreFoundation", + RTLD_LAZY | RTLD_LOCAL); + if (core_foundation_handle == NULL) + goto out; + + core_services_handle = dlopen("/System/Library/Frameworks/" + "CoreServices.framework/" + "Versions/A/CoreServices", + RTLD_LAZY | RTLD_LOCAL); + if (core_services_handle == NULL) + goto out; + + err = -ENOENT; +#define V(handle, symbol) \ + do { \ + *(void **)(&p ## symbol) = dlsym((handle), #symbol); \ + if (p ## symbol == NULL) \ + goto out; \ + } \ + while (0) + V(core_foundation_handle, CFArrayCreate); + V(core_foundation_handle, CFRelease); + V(core_foundation_handle, CFRunLoopAddSource); + V(core_foundation_handle, CFRunLoopGetCurrent); + V(core_foundation_handle, CFRunLoopRemoveSource); + V(core_foundation_handle, CFRunLoopRun); + V(core_foundation_handle, CFRunLoopSourceCreate); + V(core_foundation_handle, CFRunLoopSourceSignal); + V(core_foundation_handle, CFRunLoopStop); + V(core_foundation_handle, CFRunLoopWakeUp); + V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation); + V(core_foundation_handle, CFStringGetSystemEncoding); + V(core_foundation_handle, kCFRunLoopDefaultMode); + V(core_services_handle, FSEventStreamCreate); + V(core_services_handle, FSEventStreamFlushSync); + V(core_services_handle, FSEventStreamInvalidate); + V(core_services_handle, FSEventStreamRelease); + V(core_services_handle, FSEventStreamScheduleWithRunLoop); + V(core_services_handle, FSEventStreamStart); + V(core_services_handle, FSEventStreamStop); +#undef V + err = 0; + +out: + if (err && core_services_handle != NULL) { + dlclose(core_services_handle); + core_services_handle = NULL; + } + + if (err && core_foundation_handle != NULL) { + dlclose(core_foundation_handle); + core_foundation_handle = NULL; + } + + pthread_mutex_unlock(&global_init_mutex); + return err; +} + + +/* Runs in UV loop */ +static int uv__fsevents_loop_init(uv_loop_t* loop) { + CFRunLoopSourceContext ctx; + uv__cf_loop_state_t* state; + pthread_attr_t attr_storage; + pthread_attr_t* 
attr; + int err; + + if (loop->cf_state != NULL) + return 0; + + err = uv__fsevents_global_init(); + if (err) + return err; + + state = uv__calloc(1, sizeof(*state)); + if (state == NULL) + return -ENOMEM; + + err = uv_mutex_init(&loop->cf_mutex); + if (err) + goto fail_mutex_init; + + err = uv_sem_init(&loop->cf_sem, 0); + if (err) + goto fail_sem_init; + + QUEUE_INIT(&loop->cf_signals); + + err = uv_sem_init(&state->fsevent_sem, 0); + if (err) + goto fail_fsevent_sem_init; + + err = uv_mutex_init(&state->fsevent_mutex); + if (err) + goto fail_fsevent_mutex_init; + + QUEUE_INIT(&state->fsevent_handles); + state->fsevent_need_reschedule = 0; + state->fsevent_handle_count = 0; + + memset(&ctx, 0, sizeof(ctx)); + ctx.info = loop; + ctx.perform = uv__cf_loop_cb; + state->signal_source = pCFRunLoopSourceCreate(NULL, 0, &ctx); + if (state->signal_source == NULL) { + err = -ENOMEM; + goto fail_signal_source_create; + } + + /* In the unlikely event that pthread_attr_init() fails, create the thread + * with the default stack size. We'll use a little more address space but + * that in itself is not a fatal error. + */ + attr = &attr_storage; + if (pthread_attr_init(attr)) + attr = NULL; + + if (attr != NULL) + if (pthread_attr_setstacksize(attr, 4 * PTHREAD_STACK_MIN)) + abort(); + + loop->cf_state = state; + + /* uv_thread_t is an alias for pthread_t. */ + err = -pthread_create(&loop->cf_thread, attr, uv__cf_loop_runner, loop); + + if (attr != NULL) + pthread_attr_destroy(attr); + + if (err) + goto fail_thread_create; + + /* Synchronize threads */ + uv_sem_wait(&loop->cf_sem); + return 0; + +fail_thread_create: + loop->cf_state = NULL; + +fail_signal_source_create: + uv_mutex_destroy(&state->fsevent_mutex); + +fail_fsevent_mutex_init: + uv_sem_destroy(&state->fsevent_sem); + +fail_fsevent_sem_init: + uv_sem_destroy(&loop->cf_sem); + +fail_sem_init: + uv_mutex_destroy(&loop->cf_mutex); + +fail_mutex_init: + uv__free(state); + return err; +} + + +/* Runs in UV loop */ +void uv__fsevents_loop_delete(uv_loop_t* loop) { + uv__cf_loop_signal_t* s; + uv__cf_loop_state_t* state; + QUEUE* q; + + if (loop->cf_state == NULL) + return; + + if (uv__cf_loop_signal(loop, NULL, kUVCFLoopSignalRegular) != 0) + abort(); + + uv_thread_join(&loop->cf_thread); + uv_sem_destroy(&loop->cf_sem); + uv_mutex_destroy(&loop->cf_mutex); + + /* Free any remaining data */ + while (!QUEUE_EMPTY(&loop->cf_signals)) { + q = QUEUE_HEAD(&loop->cf_signals); + s = QUEUE_DATA(q, uv__cf_loop_signal_t, member); + QUEUE_REMOVE(q); + uv__free(s); + } + + /* Destroy state */ + state = loop->cf_state; + uv_sem_destroy(&state->fsevent_sem); + uv_mutex_destroy(&state->fsevent_mutex); + pCFRelease(state->signal_source); + uv__free(state); + loop->cf_state = NULL; +} + + +/* Runs in CF thread. 
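(Spawned with pthread_create() by uv__fsevents_loop_init().)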
This is the CF loop's body */ +static void* uv__cf_loop_runner(void* arg) { + uv_loop_t* loop; + uv__cf_loop_state_t* state; + + loop = arg; + state = loop->cf_state; + state->loop = pCFRunLoopGetCurrent(); + + pCFRunLoopAddSource(state->loop, + state->signal_source, + *pkCFRunLoopDefaultMode); + + uv_sem_post(&loop->cf_sem); + + pCFRunLoopRun(); + pCFRunLoopRemoveSource(state->loop, + state->signal_source, + *pkCFRunLoopDefaultMode); + + return NULL; +} + + +/* Runs in CF thread, executed after `uv__cf_loop_signal()` */ +static void uv__cf_loop_cb(void* arg) { + uv_loop_t* loop; + uv__cf_loop_state_t* state; + QUEUE* item; + QUEUE split_head; + uv__cf_loop_signal_t* s; + + loop = arg; + state = loop->cf_state; + + uv_mutex_lock(&loop->cf_mutex); + QUEUE_MOVE(&loop->cf_signals, &split_head); + uv_mutex_unlock(&loop->cf_mutex); + + while (!QUEUE_EMPTY(&split_head)) { + item = QUEUE_HEAD(&split_head); + QUEUE_REMOVE(item); + + s = QUEUE_DATA(item, uv__cf_loop_signal_t, member); + + /* This was a termination signal */ + if (s->handle == NULL) + pCFRunLoopStop(state->loop); + else + uv__fsevents_reschedule(s->handle, s->type); + + uv__free(s); + } +} + + +/* Runs in UV loop to notify CF thread */ +int uv__cf_loop_signal(uv_loop_t* loop, + uv_fs_event_t* handle, + uv__cf_loop_signal_type_t type) { + uv__cf_loop_signal_t* item; + uv__cf_loop_state_t* state; + + item = uv__malloc(sizeof(*item)); + if (item == NULL) + return -ENOMEM; + + item->handle = handle; + item->type = type; + + uv_mutex_lock(&loop->cf_mutex); + QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member); + uv_mutex_unlock(&loop->cf_mutex); + + state = loop->cf_state; + assert(state != NULL); + pCFRunLoopSourceSignal(state->signal_source); + pCFRunLoopWakeUp(state->loop); + + return 0; +} + + +/* Runs in UV loop to initialize handle */ +int uv__fsevents_init(uv_fs_event_t* handle) { + int err; + uv__cf_loop_state_t* state; + + err = uv__fsevents_loop_init(handle->loop); + if (err) + return err; + + /* Get absolute path to file */ + handle->realpath = realpath(handle->path, NULL); + if (handle->realpath == NULL) + return -errno; + handle->realpath_len = strlen(handle->realpath); + + /* Initialize event queue */ + QUEUE_INIT(&handle->cf_events); + handle->cf_error = 0; + + /* + * Events will occur in other thread. 
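+ * (the CF run loop thread, which hands them over via + * uv__fsevents_push_event() and uv_async_send()).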
+ * Initialize callback for getting them back into event loop's thread + */ + handle->cf_cb = uv__malloc(sizeof(*handle->cf_cb)); + if (handle->cf_cb == NULL) { + err = -ENOMEM; + goto fail_cf_cb_malloc; + } + + handle->cf_cb->data = handle; + uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb); + handle->cf_cb->flags |= UV__HANDLE_INTERNAL; + uv_unref((uv_handle_t*) handle->cf_cb); + + err = uv_mutex_init(&handle->cf_mutex); + if (err) + goto fail_cf_mutex_init; + + /* Insert handle into the list */ + state = handle->loop->cf_state; + uv_mutex_lock(&state->fsevent_mutex); + QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member); + state->fsevent_handle_count++; + state->fsevent_need_reschedule = 1; + uv_mutex_unlock(&state->fsevent_mutex); + + /* Reschedule FSEventStream */ + assert(handle != NULL); + err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalRegular); + if (err) + goto fail_loop_signal; + + return 0; + +fail_loop_signal: + uv_mutex_destroy(&handle->cf_mutex); + +fail_cf_mutex_init: + uv__free(handle->cf_cb); + handle->cf_cb = NULL; + +fail_cf_cb_malloc: + uv__free(handle->realpath); + handle->realpath = NULL; + handle->realpath_len = 0; + + return err; +} + + +/* Runs in UV loop to de-initialize handle */ +int uv__fsevents_close(uv_fs_event_t* handle) { + int err; + uv__cf_loop_state_t* state; + + if (handle->cf_cb == NULL) + return -EINVAL; + + /* Remove handle from the list */ + state = handle->loop->cf_state; + uv_mutex_lock(&state->fsevent_mutex); + QUEUE_REMOVE(&handle->cf_member); + state->fsevent_handle_count--; + state->fsevent_need_reschedule = 1; + uv_mutex_unlock(&state->fsevent_mutex); + + /* Reschedule FSEventStream */ + assert(handle != NULL); + err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalClosing); + if (err) + return -err; + + /* Wait for deinitialization */ + uv_sem_wait(&state->fsevent_sem); + + uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) uv__free); + handle->cf_cb = NULL; + + /* Free data in queue */ + UV__FSEVENTS_PROCESS(handle, { + /* NOP */ + }); + + uv_mutex_destroy(&handle->cf_mutex); + uv__free(handle->realpath); + handle->realpath = NULL; + handle->realpath_len = 0; + + return 0; +} + +#endif /* TARGET_OS_IPHONE */ diff --git a/Utilities/cmlibuv/src/unix/getaddrinfo.c b/Utilities/cmlibuv/src/unix/getaddrinfo.c new file mode 100644 index 0000000..2049aea --- /dev/null +++ b/Utilities/cmlibuv/src/unix/getaddrinfo.c @@ -0,0 +1,202 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* Expose glibc-specific EAI_* error codes. Needs to be defined before we + * include any headers. + */ +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + +#include "uv.h" +#include "internal.h" + +#include <errno.h> +#include <stddef.h> /* NULL */ +#include <stdlib.h> +#include <string.h> + +/* EAI_* constants. */ +#include <netdb.h> + + +int uv__getaddrinfo_translate_error(int sys_err) { + switch (sys_err) { + case 0: return 0; +#if defined(EAI_ADDRFAMILY) + case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY; +#endif +#if defined(EAI_AGAIN) + case EAI_AGAIN: return UV_EAI_AGAIN; +#endif +#if defined(EAI_BADFLAGS) + case EAI_BADFLAGS: return UV_EAI_BADFLAGS; +#endif +#if defined(EAI_BADHINTS) + case EAI_BADHINTS: return UV_EAI_BADHINTS; +#endif +#if defined(EAI_CANCELED) + case EAI_CANCELED: return UV_EAI_CANCELED; +#endif +#if defined(EAI_FAIL) + case EAI_FAIL: return UV_EAI_FAIL; +#endif +#if defined(EAI_FAMILY) + case EAI_FAMILY: return UV_EAI_FAMILY; +#endif +#if defined(EAI_MEMORY) + case EAI_MEMORY: return UV_EAI_MEMORY; +#endif +#if defined(EAI_NODATA) + case EAI_NODATA: return UV_EAI_NODATA; +#endif +#if defined(EAI_NONAME) +# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME + case EAI_NONAME: return UV_EAI_NONAME; +# endif +#endif +#if defined(EAI_OVERFLOW) + case EAI_OVERFLOW: return UV_EAI_OVERFLOW; +#endif +#if defined(EAI_PROTOCOL) + case EAI_PROTOCOL: return UV_EAI_PROTOCOL; +#endif +#if defined(EAI_SERVICE) + case EAI_SERVICE: return UV_EAI_SERVICE; +#endif +#if defined(EAI_SOCKTYPE) + case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE; +#endif +#if defined(EAI_SYSTEM) + case EAI_SYSTEM: return -errno; +#endif + } + assert(!"unknown EAI_* error code"); + abort(); + return 0; /* Pacify compiler. */ +} + + +static void uv__getaddrinfo_work(struct uv__work* w) { + uv_getaddrinfo_t* req; + int err; + + req = container_of(w, uv_getaddrinfo_t, work_req); + err = getaddrinfo(req->hostname, req->service, req->hints, &req->addrinfo); + req->retcode = uv__getaddrinfo_translate_error(err); +} + + +static void uv__getaddrinfo_done(struct uv__work* w, int status) { + uv_getaddrinfo_t* req; + + req = container_of(w, uv_getaddrinfo_t, work_req); + uv__req_unregister(req->loop, req); + + /* See initialization in uv_getaddrinfo(). */ + if (req->hints) + uv__free(req->hints); + else if (req->service) + uv__free(req->service); + else if (req->hostname) + uv__free(req->hostname); + else + assert(0); + + req->hints = NULL; + req->service = NULL; + req->hostname = NULL; + + if (status == -ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + } + + if (req->cb) + req->cb(req, req->retcode, req->addrinfo); +} + + +int uv_getaddrinfo(uv_loop_t* loop, + uv_getaddrinfo_t* req, + uv_getaddrinfo_cb cb, + const char* hostname, + const char* service, + const struct addrinfo* hints) { + size_t hostname_len; + size_t service_len; + size_t hints_len; + size_t len; + char* buf; + + if (req == NULL || (hostname == NULL && service == NULL)) + return -EINVAL; + + hostname_len = hostname ? strlen(hostname) + 1 : 0; + service_len = service ? strlen(service) + 1 : 0; + hints_len = hints ? 
sizeof(*hints) : 0; + buf = uv__malloc(hostname_len + service_len + hints_len); + + if (buf == NULL) + return -ENOMEM; + + uv__req_init(loop, req, UV_GETADDRINFO); + req->loop = loop; + req->cb = cb; + req->addrinfo = NULL; + req->hints = NULL; + req->service = NULL; + req->hostname = NULL; + req->retcode = 0; + + /* order matters, see uv_getaddrinfo_done() */ + len = 0; + + if (hints) { + req->hints = memcpy(buf + len, hints, sizeof(*hints)); + len += sizeof(*hints); + } + + if (service) { + req->service = memcpy(buf + len, service, service_len); + len += service_len; + } + + if (hostname) + req->hostname = memcpy(buf + len, hostname, hostname_len); + + if (cb) { + uv__work_submit(loop, + &req->work_req, + uv__getaddrinfo_work, + uv__getaddrinfo_done); + return 0; + } else { + uv__getaddrinfo_work(&req->work_req); + uv__getaddrinfo_done(&req->work_req, 0); + return req->retcode; + } +} + + +void uv_freeaddrinfo(struct addrinfo* ai) { + if (ai) + freeaddrinfo(ai); +} diff --git a/Utilities/cmlibuv/src/unix/getnameinfo.c b/Utilities/cmlibuv/src/unix/getnameinfo.c new file mode 100644 index 0000000..daa798a --- /dev/null +++ b/Utilities/cmlibuv/src/unix/getnameinfo.c @@ -0,0 +1,120 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), to +* deal in the Software without restriction, including without limitation the +* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +* sell copies of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +* IN THE SOFTWARE. 
+*/ + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> + +#include "uv.h" +#include "internal.h" + + +static void uv__getnameinfo_work(struct uv__work* w) { + uv_getnameinfo_t* req; + int err; + socklen_t salen; + + req = container_of(w, uv_getnameinfo_t, work_req); + + if (req->storage.ss_family == AF_INET) + salen = sizeof(struct sockaddr_in); + else if (req->storage.ss_family == AF_INET6) + salen = sizeof(struct sockaddr_in6); + else + abort(); + + err = getnameinfo((struct sockaddr*) &req->storage, + salen, + req->host, + sizeof(req->host), + req->service, + sizeof(req->service), + req->flags); + req->retcode = uv__getaddrinfo_translate_error(err); +} + +static void uv__getnameinfo_done(struct uv__work* w, int status) { + uv_getnameinfo_t* req; + char* host; + char* service; + + req = container_of(w, uv_getnameinfo_t, work_req); + uv__req_unregister(req->loop, req); + host = service = NULL; + + if (status == -ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + } else if (req->retcode == 0) { + host = req->host; + service = req->service; + } + + if (req->getnameinfo_cb) + req->getnameinfo_cb(req, req->retcode, host, service); +} + +/* +* Entry point for getnameinfo +* return 0 if a callback will be made +* return error code if validation fails +*/ +int uv_getnameinfo(uv_loop_t* loop, + uv_getnameinfo_t* req, + uv_getnameinfo_cb getnameinfo_cb, + const struct sockaddr* addr, + int flags) { + if (req == NULL || addr == NULL) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in)); + } else if (addr->sa_family == AF_INET6) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in6)); + } else { + return UV_EINVAL; + } + + uv__req_init(loop, (uv_req_t*)req, UV_GETNAMEINFO); + + req->getnameinfo_cb = getnameinfo_cb; + req->flags = flags; + req->type = UV_GETNAMEINFO; + req->loop = loop; + req->retcode = 0; + + if (getnameinfo_cb) { + uv__work_submit(loop, + &req->work_req, + uv__getnameinfo_work, + uv__getnameinfo_done); + return 0; + } else { + uv__getnameinfo_work(&req->work_req); + uv__getnameinfo_done(&req->work_req, 0); + return req->retcode; + } +} diff --git a/Utilities/cmlibuv/src/unix/internal.h b/Utilities/cmlibuv/src/unix/internal.h new file mode 100644 index 0000000..c7b6019 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/internal.h @@ -0,0 +1,325 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_UNIX_INTERNAL_H_ +#define UV_UNIX_INTERNAL_H_ + +#include "uv-common.h" + +#include <assert.h> +#include <stdlib.h> /* abort */ +#include <string.h> /* strrchr */ +#include <fcntl.h> /* O_CLOEXEC, may be */ +#include <stdio.h> + +#if defined(__STRICT_ANSI__) +# define inline __inline +#endif + +#if defined(__linux__) +# include "linux-syscalls.h" +#endif /* __linux__ */ + +#if defined(__sun) +# include <sys/port.h> +# include <port.h> +#endif /* __sun */ + +#if defined(_AIX) +# define reqevents events +# define rtnevents revents +# include <sys/poll.h> +#else +# include <poll.h> +#endif /* _AIX */ + +#if defined(__APPLE__) && !TARGET_OS_IPHONE +# include <CoreServices/CoreServices.h> +#endif + +#if defined(__ANDROID__) +int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset); +# ifdef pthread_sigmask +# undef pthread_sigmask +# endif +# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset) +#endif + +#define ACCESS_ONCE(type, var) \ + (*(volatile type*) &(var)) + +#define ROUND_UP(a, b) \ + ((a) % (b) ? ((a) + (b)) - ((a) % (b)) : (a)) + +#define UNREACHABLE() \ + do { \ + assert(0 && "unreachable code"); \ + abort(); \ + } \ + while (0) + +#define SAVE_ERRNO(block) \ + do { \ + int _saved_errno = errno; \ + do { block; } while (0); \ + errno = _saved_errno; \ + } \ + while (0) + +/* The __clang__ and __INTEL_COMPILER checks are superfluous because they + * define __GNUC__. They are here to convey to you, dear reader, that these + * macros are enabled when compiling with clang or icc. + */ +#if defined(__clang__) || \ + defined(__GNUC__) || \ + defined(__INTEL_COMPILER) || \ + defined(__SUNPRO_C) +# define UV_DESTRUCTOR(declaration) __attribute__((destructor)) declaration +# define UV_UNUSED(declaration) __attribute__((unused)) declaration +#else +# define UV_DESTRUCTOR(declaration) declaration +# define UV_UNUSED(declaration) declaration +#endif + +/* Leans on the fact that, on Linux, POLLRDHUP == EPOLLRDHUP. */ +#ifdef POLLRDHUP +# define UV__POLLRDHUP POLLRDHUP +#else +# define UV__POLLRDHUP 0x2000 +#endif + +#if !defined(O_CLOEXEC) && defined(__FreeBSD__) +/* + * It may be that we are just missing `__POSIX_VISIBLE >= 200809`. + * Try using a fixed constant value and give up if it doesn't work. + */ +# define O_CLOEXEC 0x00100000 +#endif + +typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t; + +/* handle flags */ +enum { + UV_CLOSING = 0x01, /* uv_close() called but not finished. */ + UV_CLOSED = 0x02, /* close(2) finished. */ + UV_STREAM_READING = 0x04, /* uv_read_start() called. */ + UV_STREAM_SHUTTING = 0x08, /* uv_shutdown() called but not complete. */ + UV_STREAM_SHUT = 0x10, /* Write side closed. */ + UV_STREAM_READABLE = 0x20, /* The stream is readable */ + UV_STREAM_WRITABLE = 0x40, /* The stream is writable */ + UV_STREAM_BLOCKING = 0x80, /* Synchronous writes. */ + UV_STREAM_READ_PARTIAL = 0x100, /* read(2) read less than requested. */ + UV_STREAM_READ_EOF = 0x200, /* read(2) read EOF. */ + UV_TCP_NODELAY = 0x400, /* Disable Nagle. */ + UV_TCP_KEEPALIVE = 0x800, /* Turn on keep-alive. */ + UV_TCP_SINGLE_ACCEPT = 0x1000, /* Only accept() when idle. */ + UV_HANDLE_IPV6 = 0x10000, /* Handle is bound to an IPv6 socket. 
*/ + UV_UDP_PROCESSING = 0x20000, /* Handle is running the send callback queue. */ + UV_HANDLE_BOUND = 0x40000 /* Handle is bound to an address and port */ +}; + +/* loop flags */ +enum { + UV_LOOP_BLOCK_SIGPROF = 1 +}; + +typedef enum { + UV_CLOCK_PRECISE = 0, /* Use the highest resolution clock available. */ + UV_CLOCK_FAST = 1 /* Use the fastest clock with <= 1ms granularity. */ +} uv_clocktype_t; + +struct uv__stream_queued_fds_s { + unsigned int size; + unsigned int offset; + int fds[1]; +}; + + +#if defined(_AIX) || \ + defined(__APPLE__) || \ + defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) || \ + defined(__linux__) +#define uv__cloexec uv__cloexec_ioctl +#define uv__nonblock uv__nonblock_ioctl +#else +#define uv__cloexec uv__cloexec_fcntl +#define uv__nonblock uv__nonblock_fcntl +#endif + +/* core */ +int uv__cloexec_ioctl(int fd, int set); +int uv__cloexec_fcntl(int fd, int set); +int uv__nonblock_ioctl(int fd, int set); +int uv__nonblock_fcntl(int fd, int set); +int uv__close(int fd); +int uv__close_nocheckstdio(int fd); +int uv__socket(int domain, int type, int protocol); +int uv__dup(int fd); +ssize_t uv__recvmsg(int fd, struct msghdr *msg, int flags); +void uv__make_close_pending(uv_handle_t* handle); +int uv__getiovmax(void); + +void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd); +void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events); +void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events); +void uv__io_close(uv_loop_t* loop, uv__io_t* w); +void uv__io_feed(uv_loop_t* loop, uv__io_t* w); +int uv__io_active(const uv__io_t* w, unsigned int events); +int uv__io_check_fd(uv_loop_t* loop, int fd); +void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */ + +/* async */ +void uv__async_send(struct uv__async* wa); +void uv__async_init(struct uv__async* wa); +int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb); +void uv__async_stop(uv_loop_t* loop, struct uv__async* wa); + +/* loop */ +void uv__run_idle(uv_loop_t* loop); +void uv__run_check(uv_loop_t* loop); +void uv__run_prepare(uv_loop_t* loop); + +/* stream */ +void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream, + uv_handle_type type); +int uv__stream_open(uv_stream_t*, int fd, int flags); +void uv__stream_destroy(uv_stream_t* stream); +#if defined(__APPLE__) +int uv__stream_try_select(uv_stream_t* stream, int* fd); +#endif /* defined(__APPLE__) */ +void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events); +int uv__accept(int sockfd); +int uv__dup2_cloexec(int oldfd, int newfd); +int uv__open_cloexec(const char* path, int flags); + +/* tcp */ +int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb); +int uv__tcp_nodelay(int fd, int on); +int uv__tcp_keepalive(int fd, int on, unsigned int delay); + +/* pipe */ +int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb); + +/* timer */ +void uv__run_timers(uv_loop_t* loop); +int uv__next_timeout(const uv_loop_t* loop); + +/* signal */ +void uv__signal_close(uv_signal_t* handle); +void uv__signal_global_once_init(void); +void uv__signal_loop_cleanup(uv_loop_t* loop); + +/* platform specific */ +uint64_t uv__hrtime(uv_clocktype_t type); +int uv__kqueue_init(uv_loop_t* loop); +int uv__platform_loop_init(uv_loop_t* loop); +void uv__platform_loop_delete(uv_loop_t* loop); +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd); + +/* various */ +void uv__async_close(uv_async_t* handle); +void uv__check_close(uv_check_t* handle); +void 
uv__fs_event_close(uv_fs_event_t* handle); +void uv__idle_close(uv_idle_t* handle); +void uv__pipe_close(uv_pipe_t* handle); +void uv__poll_close(uv_poll_t* handle); +void uv__prepare_close(uv_prepare_t* handle); +void uv__process_close(uv_process_t* handle); +void uv__stream_close(uv_stream_t* handle); +void uv__tcp_close(uv_tcp_t* handle); +void uv__timer_close(uv_timer_t* handle); +void uv__udp_close(uv_udp_t* handle); +void uv__udp_finish_close(uv_udp_t* handle); +uv_handle_type uv__handle_type(int fd); +FILE* uv__open_file(const char* path); +int uv__getpwuid_r(uv_passwd_t* pwd); + + +#if defined(__APPLE__) +int uv___stream_fd(const uv_stream_t* handle); +#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle))) +#else +#define uv__stream_fd(handle) ((handle)->io_watcher.fd) +#endif /* defined(__APPLE__) */ + +#ifdef UV__O_NONBLOCK +# define UV__F_NONBLOCK UV__O_NONBLOCK +#else +# define UV__F_NONBLOCK 1 +#endif + +int uv__make_socketpair(int fds[2], int flags); +int uv__make_pipe(int fds[2], int flags); + +#if defined(__APPLE__) + +int uv__fsevents_init(uv_fs_event_t* handle); +int uv__fsevents_close(uv_fs_event_t* handle); +void uv__fsevents_loop_delete(uv_loop_t* loop); + +/* OSX < 10.7 has no file events, polyfill them */ +#ifndef MAC_OS_X_VERSION_10_7 + +static const int kFSEventStreamCreateFlagFileEvents = 0x00000010; +static const int kFSEventStreamEventFlagItemCreated = 0x00000100; +static const int kFSEventStreamEventFlagItemRemoved = 0x00000200; +static const int kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400; +static const int kFSEventStreamEventFlagItemRenamed = 0x00000800; +static const int kFSEventStreamEventFlagItemModified = 0x00001000; +static const int kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000; +static const int kFSEventStreamEventFlagItemChangeOwner = 0x00004000; +static const int kFSEventStreamEventFlagItemXattrMod = 0x00008000; +static const int kFSEventStreamEventFlagItemIsFile = 0x00010000; +static const int kFSEventStreamEventFlagItemIsDir = 0x00020000; +static const int kFSEventStreamEventFlagItemIsSymlink = 0x00040000; + +#endif /* __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070 */ + +#endif /* defined(__APPLE__) */ + +UV_UNUSED(static void uv__req_init(uv_loop_t* loop, + uv_req_t* req, + uv_req_type type)) { + req->type = type; + uv__req_register(loop, req); +} +#define uv__req_init(loop, req, type) \ + uv__req_init((loop), (uv_req_t*)(req), (type)) + +UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) { + /* Use a fast time source if available. We only need millisecond precision. + */ + loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000; +} + +UV_UNUSED(static char* uv__basename_r(const char* path)) { + char* s; + + s = strrchr(path, '/'); + if (s == NULL) + return (char*) path; + + return s + 1; +} + +#endif /* UV_UNIX_INTERNAL_H_ */ diff --git a/Utilities/cmlibuv/src/unix/kqueue.c b/Utilities/cmlibuv/src/unix/kqueue.c new file mode 100644 index 0000000..fffd462 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/kqueue.c @@ -0,0 +1,463 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <assert.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> + +#include <sys/sysctl.h> +#include <sys/types.h> +#include <sys/event.h> +#include <sys/time.h> +#include <unistd.h> +#include <fcntl.h> +#include <time.h> + +static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags); + + +int uv__kqueue_init(uv_loop_t* loop) { + loop->backend_fd = kqueue(); + if (loop->backend_fd == -1) + return -errno; + + uv__cloexec(loop->backend_fd, 1); + + return 0; +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct kevent ev; + int rc; + + rc = 0; + EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0); + if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL)) + rc = -errno; + + EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0); + if (rc == 0) + if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL)) + abort(); + + return rc; +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + struct kevent events[1024]; + struct kevent* ev; + struct timespec spec; + unsigned int nevents; + unsigned int revents; + QUEUE* q; + uv__io_t* w; + sigset_t* pset; + sigset_t set; + uint64_t base; + uint64_t diff; + int have_signals; + int filter; + int fflags; + int count; + int nfds; + int fd; + int op; + int i; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + nevents = 0; + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + assert(w->fd >= 0); + assert(w->fd < (int) loop->nwatchers); + + if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) { + filter = EVFILT_READ; + fflags = 0; + op = EV_ADD; + + if (w->cb == uv__fs_event) { + filter = EVFILT_VNODE; + fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME + | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE; + op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. 
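+ * (kqueue deletes a one-shot event once it fires, so uv__fs_event()
+ * further down re-arms the watcher with a fresh EV_SET/kevent() call
+ * after the user callback has run.)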
*/ + } + + EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0); + + if (++nevents == ARRAY_SIZE(events)) { + if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL)) + abort(); + nevents = 0; + } + } + + if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) { + EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0); + + if (++nevents == ARRAY_SIZE(events)) { + if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL)) + abort(); + nevents = 0; + } + } + + w->events = w->pevents; + } + + pset = NULL; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + pset = &set; + sigemptyset(pset); + sigaddset(pset, SIGPROF); + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + + for (;; nevents = 0) { + if (timeout != -1) { + spec.tv_sec = timeout / 1000; + spec.tv_nsec = (timeout % 1000) * 1000000; + } + + if (pset != NULL) + pthread_sigmask(SIG_BLOCK, pset, NULL); + + nfds = kevent(loop->backend_fd, + events, + nevents, + events, + ARRAY_SIZE(events), + timeout == -1 ? NULL : &spec); + + if (pset != NULL) + pthread_sigmask(SIG_UNBLOCK, pset, NULL); + + /* Update loop->time unconditionally. It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (nfds == 0) { + assert(timeout != -1); + return; + } + + if (nfds == -1) { + if (errno != EINTR) + abort(); + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + + /* Interrupted by a signal. Update timeout and poll again. */ + goto update_timeout; + } + + have_signals = 0; + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + for (i = 0; i < nfds; i++) { + ev = events + i; + fd = ev->ident; + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (fd == -1) + continue; + w = loop->watchers[fd]; + + if (w == NULL) { + /* File descriptor that we've stopped watching, disarm it. */ + /* TODO batch up */ + struct kevent events[1]; + + EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); + if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) + if (errno != EBADF && errno != ENOENT) + abort(); + + continue; + } + + if (ev->filter == EVFILT_VNODE) { + assert(w->events == POLLIN); + assert(w->pevents == POLLIN); + w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */ + nevents++; + continue; + } + + revents = 0; + + if (ev->filter == EVFILT_READ) { + if (w->pevents & POLLIN) { + revents |= POLLIN; + w->rcount = ev->data; + } else { + /* TODO batch up */ + struct kevent events[1]; + EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); + if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) + if (errno != ENOENT) + abort(); + } + } + + if (ev->filter == EVFILT_WRITE) { + if (w->pevents & POLLOUT) { + revents |= POLLOUT; + w->wcount = ev->data; + } else { + /* TODO batch up */ + struct kevent events[1]; + EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); + if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) + if (errno != ENOENT) + abort(); + } + } + + if (ev->flags & EV_ERROR) + revents |= POLLERR; + + if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP)) + revents |= UV__POLLRDHUP; + + if (revents == 0) + continue; + + /* Run signal watchers last. 
This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) + have_signals = 1; + else + w->cb(loop, w, revents); + + nevents++; + } + + if (have_signals != 0) + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (have_signals != 0) + return; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. */ + timeout = 0; + continue; + } + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + diff = loop->time - base; + if (diff >= (uint64_t) timeout) + return; + + timeout -= diff; + } +} + + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct kevent* events; + uintptr_t i; + uintptr_t nfds; + + assert(loop->watchers != NULL); + + events = (struct kevent*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + if (events == NULL) + return; + + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].ident == fd) + events[i].ident = -1; +} + + +static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) { + uv_fs_event_t* handle; + struct kevent ev; + int events; + const char* path; +#if defined(F_GETPATH) + /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */ + char pathbuf[MAXPATHLEN]; +#endif + + handle = container_of(w, uv_fs_event_t, event_watcher); + + if (fflags & (NOTE_ATTRIB | NOTE_EXTEND)) + events = UV_CHANGE; + else + events = UV_RENAME; + + path = NULL; +#if defined(F_GETPATH) + /* Also works when the file has been unlinked from the file system. Passing + * in the path when the file has been deleted is arguably a little strange + * but it's consistent with what the inotify backend does. + */ + if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0) + path = uv__basename_r(pathbuf); +#endif + handle->cb(handle, path, events, 0); + + if (handle->event_watcher.fd == -1) + return; + + /* Watcher operates in one-shot mode, re-arm it. */ + fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME + | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE; + + EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0); + + if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL)) + abort(); +} + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags) { +#if defined(__APPLE__) + struct stat statbuf; +#endif /* defined(__APPLE__) */ + int fd; + + if (uv__is_active(handle)) + return -EINVAL; + + /* TODO open asynchronously - but how do we report back errors? 
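+ * As an editorial aside, not part of the upstream change: the public
+ * API implemented here is typically driven as sketched below. The
+ * callback name and loop variable are hypothetical and error checks
+ * are elided:
+ *
+ *   static void on_change(uv_fs_event_t* h, const char* filename,
+ *                         int events, int status) {
+ *     if (events & UV_RENAME) printf("renamed: %s\n", filename);
+ *     if (events & UV_CHANGE) printf("changed: %s\n", filename);
+ *   }
+ *
+ *   uv_fs_event_t watcher;
+ *   uv_fs_event_init(loop, &watcher);
+ *   uv_fs_event_start(&watcher, on_change, "some.file", 0);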
*/ + fd = open(path, O_RDONLY); + if (fd == -1) + return -errno; + + uv__handle_start(handle); + uv__io_init(&handle->event_watcher, uv__fs_event, fd); + handle->path = uv__strdup(path); + handle->cb = cb; + +#if defined(__APPLE__) + /* Nullify fields to perform checks later */ + handle->cf_cb = NULL; + handle->realpath = NULL; + handle->realpath_len = 0; + handle->cf_flags = flags; + + if (fstat(fd, &statbuf)) + goto fallback; + /* FSEvents works only with directories */ + if (!(statbuf.st_mode & S_IFDIR)) + goto fallback; + + /* The fallback fd is no longer needed */ + uv__close(fd); + handle->event_watcher.fd = -1; + + return uv__fsevents_init(handle); + +fallback: +#endif /* defined(__APPLE__) */ + + uv__io_start(handle->loop, &handle->event_watcher, POLLIN); + + return 0; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + if (!uv__is_active(handle)) + return 0; + + uv__handle_stop(handle); + +#if defined(__APPLE__) + if (uv__fsevents_close(handle)) +#endif /* defined(__APPLE__) */ + { + uv__io_close(handle->loop, &handle->event_watcher); + } + + uv__free(handle->path); + handle->path = NULL; + + if (handle->event_watcher.fd != -1) { + /* When FSEvents is used, we don't use the event_watcher's fd under certain + * conditions (see uv_fs_event_start). */ + uv__close(handle->event_watcher.fd); + handle->event_watcher.fd = -1; + } + + return 0; +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { + uv_fs_event_stop(handle); +} diff --git a/Utilities/cmlibuv/src/unix/linux-core.c b/Utilities/cmlibuv/src/unix/linux-core.c new file mode 100644 index 0000000..58dd813 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/linux-core.c @@ -0,0 +1,985 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their + * EPOLL* counterparts. We use the POLL* variants in this file because that + * is what libuv uses elsewhere and it avoids a dependency on <sys/epoll.h>. 
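+ * As an illustrative, editorial sketch (not upstream code), the claimed
+ * correspondence can be spot-checked in a standalone program where both
+ * headers may be included together:
+ *
+ *   #include <poll.h>
+ *   #include <sys/epoll.h>
+ *   #include <assert.h>
+ *
+ *   int main(void) {
+ *     assert(POLLIN == EPOLLIN && POLLOUT == EPOLLOUT);
+ *     assert(POLLERR == EPOLLERR && POLLHUP == EPOLLHUP);
+ *     return 0;
+ *   }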
+ */ + +#include "uv.h" +#include "internal.h" + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> +#include <errno.h> + +#include <net/if.h> +#include <sys/param.h> +#include <sys/prctl.h> +#include <sys/sysinfo.h> +#include <unistd.h> +#include <fcntl.h> +#include <time.h> + +#define HAVE_IFADDRS_H 1 + +#ifdef __UCLIBC__ +# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32 +# undef HAVE_IFADDRS_H +# endif +#endif + +#ifdef HAVE_IFADDRS_H +# if defined(__ANDROID__) +# include "android-ifaddrs.h" +# else +# include <ifaddrs.h> +# endif +# include <sys/socket.h> +# include <net/ethernet.h> +# include <netpacket/packet.h> +#endif /* HAVE_IFADDRS_H */ + +/* Available from 2.6.32 onwards. */ +#ifndef CLOCK_MONOTONIC_COARSE +# define CLOCK_MONOTONIC_COARSE 6 +#endif + +/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't + * include that file because it conflicts with <time.h>. We'll just have to + * define it ourselves. + */ +#ifndef CLOCK_BOOTTIME +# define CLOCK_BOOTTIME 7 +#endif + +static int read_models(unsigned int numcpus, uv_cpu_info_t* ci); +static int read_times(FILE* statfile_fp, + unsigned int numcpus, + uv_cpu_info_t* ci); +static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci); +static unsigned long read_cpufreq(unsigned int cpunum); + + +int uv__platform_loop_init(uv_loop_t* loop) { + int fd; + + fd = uv__epoll_create1(UV__EPOLL_CLOEXEC); + + /* epoll_create1() can fail either because it's not implemented (old kernel) + * or because it doesn't understand the EPOLL_CLOEXEC flag. + */ + if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) { + fd = uv__epoll_create(256); + + if (fd != -1) + uv__cloexec(fd, 1); + } + + loop->backend_fd = fd; + loop->inotify_fd = -1; + loop->inotify_watchers = NULL; + + if (fd == -1) + return -errno; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + if (loop->inotify_fd == -1) return; + uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN); + uv__close(loop->inotify_fd); + loop->inotify_fd = -1; +} + + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct uv__epoll_event* events; + struct uv__epoll_event dummy; + uintptr_t i; + uintptr_t nfds; + + assert(loop->watchers != NULL); + + events = (struct uv__epoll_event*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + if (events != NULL) + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].data == fd) + events[i].data = -1; + + /* Remove the file descriptor from the epoll. + * This avoids a problem where the same file description remains open + * in another process, causing repeated junk epoll events. + * + * We pass in a dummy epoll_event, to work around a bug in old kernels. + */ + if (loop->backend_fd >= 0) { + /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that + * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings. 
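+ * (The epoll_ctl(2) man page also notes that kernels before 2.6.9
+ * required a non-NULL event pointer for EPOLL_CTL_DEL; the zeroed dummy
+ * satisfies that requirement at the same time.)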
+ */ + memset(&dummy, 0, sizeof(dummy)); + uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &dummy); + } +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct uv__epoll_event e; + int rc; + + e.events = POLLIN; + e.data = -1; + + rc = 0; + if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_ADD, fd, &e)) + if (errno != EEXIST) + rc = -errno; + + if (rc == 0) + if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &e)) + abort(); + + return rc; +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes + * effectively infinite on 32 bits architectures. To avoid blocking + * indefinitely, we cap the timeout and poll again if necessary. + * + * Note that "30 minutes" is a simplification because it depends on + * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200, + * that being the largest value I have seen in the wild (and only once.) + */ + static const int max_safe_timeout = 1789569; + static int no_epoll_pwait; + static int no_epoll_wait; + struct uv__epoll_event events[1024]; + struct uv__epoll_event* pe; + struct uv__epoll_event e; + int real_timeout; + QUEUE* q; + uv__io_t* w; + sigset_t sigset; + uint64_t sigmask; + uint64_t base; + int have_signals; + int nevents; + int count; + int nfds; + int fd; + int op; + int i; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + assert(w->fd >= 0); + assert(w->fd < (int) loop->nwatchers); + + e.events = w->pevents; + e.data = w->fd; + + if (w->events == 0) + op = UV__EPOLL_CTL_ADD; + else + op = UV__EPOLL_CTL_MOD; + + /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching + * events, skip the syscall and squelch the events after epoll_wait(). + */ + if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) { + if (errno != EEXIST) + abort(); + + assert(op == UV__EPOLL_CTL_ADD); + + /* We've reactivated a file descriptor that's been watched before. */ + if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e)) + abort(); + } + + w->events = w->pevents; + } + + sigmask = 0; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + sigemptyset(&sigset); + sigaddset(&sigset, SIGPROF); + sigmask |= 1 << (SIGPROF - 1); + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + real_timeout = timeout; + + for (;;) { + /* See the comment for max_safe_timeout for an explanation of why + * this is necessary. Executive summary: kernel bug workaround. + */ + if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout) + timeout = max_safe_timeout; + + if (sigmask != 0 && no_epoll_pwait != 0) + if (pthread_sigmask(SIG_BLOCK, &sigset, NULL)) + abort(); + + if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) { + nfds = uv__epoll_pwait(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout, + sigmask); + if (nfds == -1 && errno == ENOSYS) + no_epoll_pwait = 1; + } else { + nfds = uv__epoll_wait(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout); + if (nfds == -1 && errno == ENOSYS) + no_epoll_wait = 1; + } + + if (sigmask != 0 && no_epoll_pwait != 0) + if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL)) + abort(); + + /* Update loop->time unconditionally. It's tempting to skip the update when + * timeout == 0 (i.e. 
non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (nfds == 0) { + assert(timeout != -1); + + if (timeout == 0) + return; + + /* We may have been inside the system call for longer than |timeout| + * milliseconds so we need to update the timestamp to avoid drift. + */ + goto update_timeout; + } + + if (nfds == -1) { + if (errno == ENOSYS) { + /* epoll_wait() or epoll_pwait() failed, try the other system call. */ + assert(no_epoll_wait == 0 || no_epoll_pwait == 0); + continue; + } + + if (errno != EINTR) + abort(); + + if (timeout == -1) + continue; + + if (timeout == 0) + return; + + /* Interrupted by a signal. Update timeout and poll again. */ + goto update_timeout; + } + + have_signals = 0; + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + for (i = 0; i < nfds; i++) { + pe = events + i; + fd = pe->data; + + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (fd == -1) + continue; + + assert(fd >= 0); + assert((unsigned) fd < loop->nwatchers); + + w = loop->watchers[fd]; + + if (w == NULL) { + /* File descriptor that we've stopped watching, disarm it. + * + * Ignore all errors because we may be racing with another thread + * when the file descriptor is closed. + */ + uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe); + continue; + } + + /* Give users only events they're interested in. Prevents spurious + * callbacks when a previous callback invocation in this loop has stopped + * the current watcher. Also, filters out events that the user has not + * requested us to watch. + */ + pe->events &= w->pevents | POLLERR | POLLHUP; + + /* Work around an epoll quirk where it sometimes reports just the + * EPOLLERR or EPOLLHUP event. In order to force the event loop to + * move forward, we merge in the read/write events that the watcher + * is interested in; uv__read() and uv__write() will then deal with + * the error or hangup in the usual fashion. + * + * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user + * reads the available data, calls uv_read_stop(), then sometime later + * calls uv_read_start() again. By then, libuv has forgotten about the + * hangup and the kernel won't report EPOLLIN again because there's + * nothing left to read. If anything, libuv is to blame here. The + * current hack is just a quick bandaid; to properly fix it, libuv + * needs to remember the error/hangup event. We should get that for + * free when we switch over to edge-triggered I/O. + */ + if (pe->events == POLLERR || pe->events == POLLHUP) + pe->events |= w->pevents & (POLLIN | POLLOUT); + + if (pe->events != 0) { + /* Run signal watchers last. This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) + have_signals = 1; + else + w->cb(loop, w, pe->events); + + nevents++; + } + } + + if (have_signals != 0) + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (have_signals != 0) + return; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. 
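+ * (count starts at 48 above, so after roughly 48 consecutive full
+ * batches the loop returns regardless, bounding how long a single
+ * uv__io_poll() call can monopolize the event loop.)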
*/ + timeout = 0; + continue; + } + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + real_timeout -= (loop->time - base); + if (real_timeout <= 0) + return; + + timeout = real_timeout; + } +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + static clock_t fast_clock_id = -1; + struct timespec t; + clock_t clock_id; + + /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has + * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is + * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may + * decide to make a costly system call. + */ + /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE + * when it has microsecond granularity or better (unlikely). + */ + if (type == UV_CLOCK_FAST && fast_clock_id == -1) { + if (clock_getres(CLOCK_MONOTONIC_COARSE, &t) == 0 && + t.tv_nsec <= 1 * 1000 * 1000) { + fast_clock_id = CLOCK_MONOTONIC_COARSE; + } else { + fast_clock_id = CLOCK_MONOTONIC; + } + } + + clock_id = CLOCK_MONOTONIC; + if (type == UV_CLOCK_FAST) + clock_id = fast_clock_id; + + if (clock_gettime(clock_id, &t)) + return 0; /* Not really possible. */ + + return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec; +} + + +void uv_loadavg(double avg[3]) { + struct sysinfo info; + + if (sysinfo(&info) < 0) return; + + avg[0] = (double) info.loads[0] / 65536.0; + avg[1] = (double) info.loads[1] / 65536.0; + avg[2] = (double) info.loads[2] / 65536.0; +} + + +int uv_exepath(char* buffer, size_t* size) { + ssize_t n; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + n = *size - 1; + if (n > 0) + n = readlink("/proc/self/exe", buffer, n); + + if (n == -1) + return -errno; + + buffer[n] = '\0'; + *size = n; + + return 0; +} + + +uint64_t uv_get_free_memory(void) { + struct sysinfo info; + + if (sysinfo(&info) == 0) + return (uint64_t) info.freeram * info.mem_unit; + return 0; +} + + +uint64_t uv_get_total_memory(void) { + struct sysinfo info; + + if (sysinfo(&info) == 0) + return (uint64_t) info.totalram * info.mem_unit; + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + char buf[1024]; + const char* s; + ssize_t n; + long val; + int fd; + int i; + + do + fd = open("/proc/self/stat", O_RDONLY); + while (fd == -1 && errno == EINTR); + + if (fd == -1) + return -errno; + + do + n = read(fd, buf, sizeof(buf) - 1); + while (n == -1 && errno == EINTR); + + uv__close(fd); + if (n == -1) + return -errno; + buf[n] = '\0'; + + s = strchr(buf, ' '); + if (s == NULL) + goto err; + + s += 1; + if (*s != '(') + goto err; + + s = strchr(s, ')'); + if (s == NULL) + goto err; + + for (i = 1; i <= 22; i++) { + s = strchr(s + 1, ' '); + if (s == NULL) + goto err; + } + + errno = 0; + val = strtol(s, NULL, 10); + if (errno != 0) + goto err; + if (val < 0) + goto err; + + *rss = val * getpagesize(); + return 0; + +err: + return -EINVAL; +} + + +int uv_uptime(double* uptime) { + static volatile int no_clock_boottime; + struct timespec now; + int r; + + /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available + * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system + * is suspended. 
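+ * (Control flow: on the first call, CLOCK_BOOTTIME is tried; if the
+ * kernel rejects it with EINVAL, no_clock_boottime is latched and we
+ * jump back to the retry label to re-read with CLOCK_MONOTONIC.
+ * Subsequent calls then take the CLOCK_MONOTONIC branch directly.)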
+ */ + if (no_clock_boottime) { + retry: r = clock_gettime(CLOCK_MONOTONIC, &now); + } + else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) { + no_clock_boottime = 1; + goto retry; + } + + if (r) + return -errno; + + *uptime = now.tv_sec; + return 0; +} + + +static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) { + unsigned int num; + char buf[1024]; + + if (!fgets(buf, sizeof(buf), statfile_fp)) + return -EIO; + + num = 0; + while (fgets(buf, sizeof(buf), statfile_fp)) { + if (strncmp(buf, "cpu", 3)) + break; + num++; + } + + if (num == 0) + return -EIO; + + *numcpus = num; + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + unsigned int numcpus; + uv_cpu_info_t* ci; + int err; + FILE* statfile_fp; + + *cpu_infos = NULL; + *count = 0; + + statfile_fp = uv__open_file("/proc/stat"); + if (statfile_fp == NULL) + return -errno; + + err = uv__cpu_num(statfile_fp, &numcpus); + if (err < 0) + goto out; + + err = -ENOMEM; + ci = uv__calloc(numcpus, sizeof(*ci)); + if (ci == NULL) + goto out; + + err = read_models(numcpus, ci); + if (err == 0) + err = read_times(statfile_fp, numcpus, ci); + + if (err) { + uv_free_cpu_info(ci, numcpus); + goto out; + } + + /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo. + * We don't check for errors here. Worst case, the field is left zero. + */ + if (ci[0].speed == 0) + read_speeds(numcpus, ci); + + *cpu_infos = ci; + *count = numcpus; + err = 0; + +out: + + if (fclose(statfile_fp)) + if (errno != EINTR && errno != EINPROGRESS) + abort(); + + return err; +} + + +static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) { + unsigned int num; + + for (num = 0; num < numcpus; num++) + ci[num].speed = read_cpufreq(num) / 1000; +} + + +/* Also reads the CPU frequency on x86. The other architectures only have + * a BogoMIPS field, which may not be very accurate. + * + * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup. + */ +static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) { + static const char model_marker[] = "model name\t: "; + static const char speed_marker[] = "cpu MHz\t\t: "; + const char* inferred_model; + unsigned int model_idx; + unsigned int speed_idx; + char buf[1024]; + char* model; + FILE* fp; + + /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */ + (void) &model_marker; + (void) &speed_marker; + (void) &speed_idx; + (void) &model; + (void) &buf; + (void) &fp; + + model_idx = 0; + speed_idx = 0; + +#if defined(__arm__) || \ + defined(__i386__) || \ + defined(__mips__) || \ + defined(__x86_64__) + fp = uv__open_file("/proc/cpuinfo"); + if (fp == NULL) + return -errno; + + while (fgets(buf, sizeof(buf), fp)) { + if (model_idx < numcpus) { + if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) { + model = buf + sizeof(model_marker) - 1; + model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */ + if (model == NULL) { + fclose(fp); + return -ENOMEM; + } + ci[model_idx++].model = model; + continue; + } + } +#if defined(__arm__) || defined(__mips__) + if (model_idx < numcpus) { +#if defined(__arm__) + /* Fallback for pre-3.8 kernels. */ + static const char model_marker[] = "Processor\t: "; +#else /* defined(__mips__) */ + static const char model_marker[] = "cpu model\t\t: "; +#endif + if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) { + model = buf + sizeof(model_marker) - 1; + model = uv__strndup(model, strlen(model) - 1); /* Strip newline. 
*/ + if (model == NULL) { + fclose(fp); + return -ENOMEM; + } + ci[model_idx++].model = model; + continue; + } + } +#else /* !__arm__ && !__mips__ */ + if (speed_idx < numcpus) { + if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) { + ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1); + continue; + } + } +#endif /* __arm__ || __mips__ */ + } + + fclose(fp); +#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */ + + /* Now we want to make sure that all the models contain *something* because + * it's not safe to leave them as null. Copy the last entry unless there + * isn't one, in that case we simply put "unknown" into everything. + */ + inferred_model = "unknown"; + if (model_idx > 0) + inferred_model = ci[model_idx - 1].model; + + while (model_idx < numcpus) { + model = uv__strndup(inferred_model, strlen(inferred_model)); + if (model == NULL) + return -ENOMEM; + ci[model_idx++].model = model; + } + + return 0; +} + + +static int read_times(FILE* statfile_fp, + unsigned int numcpus, + uv_cpu_info_t* ci) { + unsigned long clock_ticks; + struct uv_cpu_times_s ts; + unsigned long user; + unsigned long nice; + unsigned long sys; + unsigned long idle; + unsigned long dummy; + unsigned long irq; + unsigned int num; + unsigned int len; + char buf[1024]; + + clock_ticks = sysconf(_SC_CLK_TCK); + assert(clock_ticks != (unsigned long) -1); + assert(clock_ticks != 0); + + rewind(statfile_fp); + + if (!fgets(buf, sizeof(buf), statfile_fp)) + abort(); + + num = 0; + + while (fgets(buf, sizeof(buf), statfile_fp)) { + if (num >= numcpus) + break; + + if (strncmp(buf, "cpu", 3)) + break; + + /* skip "cpu<num> " marker */ + { + unsigned int n; + int r = sscanf(buf, "cpu%u ", &n); + assert(r == 1); + (void) r; /* silence build warning */ + for (len = sizeof("cpu0"); n /= 10; len++); + } + + /* Line contains user, nice, system, idle, iowait, irq, softirq, steal, + * guest, guest_nice but we're only interested in the first four + irq. + * + * Don't use %*s to skip fields or %ll to read straight into the uint64_t + * fields, they're not allowed in C89 mode. 
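+ * A typical input line, with illustrative values in USER_HZ ticks:
+ *
+ *   cpu0 4705 150 1120 1434136 1538 0 34 0 0 0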
+ */ + if (6 != sscanf(buf + len, + "%lu %lu %lu %lu %lu %lu", + &user, + &nice, + &sys, + &idle, + &dummy, + &irq)) + abort(); + + ts.user = clock_ticks * user; + ts.nice = clock_ticks * nice; + ts.sys = clock_ticks * sys; + ts.idle = clock_ticks * idle; + ts.irq = clock_ticks * irq; + ci[num++].cpu_times = ts; + } + assert(num == numcpus); + + return 0; +} + + +static unsigned long read_cpufreq(unsigned int cpunum) { + unsigned long val; + char buf[1024]; + FILE* fp; + + snprintf(buf, + sizeof(buf), + "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq", + cpunum); + + fp = uv__open_file(buf); + if (fp == NULL) + return 0; + + if (fscanf(fp, "%lu", &val) != 1) + val = 0; + + fclose(fp); + + return val; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, + int* count) { +#ifndef HAVE_IFADDRS_H + return -ENOSYS; +#else + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_ll *sll; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + *addresses = NULL; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family == PF_PACKET)) { + continue; + } + + (*count)++; + } + + if (*count == 0) + return 0; + + *addresses = uv__malloc(*count * sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + /* + * On Linux getifaddrs returns information related to the raw underlying + * devices. We're not interested in this information yet. + */ + if (ent->ifa_addr->sa_family == PF_PACKET) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != PF_PACKET)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sll = (struct sockaddr_ll*)ent->ifa_addr; + memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +#endif +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} + + +void uv__set_process_title(const char* title) { +#if defined(PR_SET_NAME) + prctl(PR_SET_NAME, title); /* Only copies first 16 characters. 
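+ * (The kernel-side buffer is TASK_COMM_LEN, 16 bytes including the
+ * terminating NUL, so at most 15 characters of the title survive.)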
*/ +#endif +} diff --git a/Utilities/cmlibuv/src/unix/linux-inotify.c b/Utilities/cmlibuv/src/unix/linux-inotify.c new file mode 100644 index 0000000..4708c05 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/linux-inotify.c @@ -0,0 +1,285 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "tree.h" +#include "internal.h" + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> +#include <errno.h> + +#include <sys/types.h> +#include <unistd.h> + +struct watcher_list { + RB_ENTRY(watcher_list) entry; + QUEUE watchers; + int iterating; + char* path; + int wd; +}; + +struct watcher_root { + struct watcher_list* rbh_root; +}; +#define CAST(p) ((struct watcher_root*)(p)) + + +static int compare_watchers(const struct watcher_list* a, + const struct watcher_list* b) { + if (a->wd < b->wd) return -1; + if (a->wd > b->wd) return 1; + return 0; +} + + +RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers) + + +static void uv__inotify_read(uv_loop_t* loop, + uv__io_t* w, + unsigned int revents); + + +static int new_inotify_fd(void) { + int err; + int fd; + + fd = uv__inotify_init1(UV__IN_NONBLOCK | UV__IN_CLOEXEC); + if (fd != -1) + return fd; + + if (errno != ENOSYS) + return -errno; + + fd = uv__inotify_init(); + if (fd == -1) + return -errno; + + err = uv__cloexec(fd, 1); + if (err == 0) + err = uv__nonblock(fd, 1); + + if (err) { + uv__close(fd); + return err; + } + + return fd; +} + + +static int init_inotify(uv_loop_t* loop) { + int err; + + if (loop->inotify_fd != -1) + return 0; + + err = new_inotify_fd(); + if (err < 0) + return err; + + loop->inotify_fd = err; + uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd); + uv__io_start(loop, &loop->inotify_read_watcher, POLLIN); + + return 0; +} + + +static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) { + struct watcher_list w; + w.wd = wd; + return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w); +} + +static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) { + /* if the watcher_list->watchers is being iterated over, we can't free it. */ + if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) { + /* No watchers left for this path. Clean up. 
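+ * (The return value of uv__inotify_rm_watch() is deliberately ignored;
+ * the kernel may already have dropped the watch, e.g. when the watched
+ * path itself was deleted.)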
*/ + RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w); + uv__inotify_rm_watch(loop->inotify_fd, w->wd); + uv__free(w); + } +} + +static void uv__inotify_read(uv_loop_t* loop, + uv__io_t* dummy, + unsigned int events) { + const struct uv__inotify_event* e; + struct watcher_list* w; + uv_fs_event_t* h; + QUEUE queue; + QUEUE* q; + const char* path; + ssize_t size; + const char *p; + /* needs to be large enough for sizeof(inotify_event) + strlen(path) */ + char buf[4096]; + + while (1) { + do + size = read(loop->inotify_fd, buf, sizeof(buf)); + while (size == -1 && errno == EINTR); + + if (size == -1) { + assert(errno == EAGAIN || errno == EWOULDBLOCK); + break; + } + + assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */ + + /* Now we have one or more inotify_event structs. */ + for (p = buf; p < buf + size; p += sizeof(*e) + e->len) { + e = (const struct uv__inotify_event*)p; + + events = 0; + if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY)) + events |= UV_CHANGE; + if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY)) + events |= UV_RENAME; + + w = find_watcher(loop, e->wd); + if (w == NULL) + continue; /* Stale event, no watchers left. */ + + /* inotify does not return the filename when monitoring a single file + * for modifications. Repurpose the filename for API compatibility. + * I'm not convinced this is a good thing, maybe it should go. + */ + path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path); + + /* We're about to iterate over the queue and call user's callbacks. + * What can go wrong? + * A callback could call uv_fs_event_stop() + * and the queue can change under our feet. + * So, we use QUEUE_MOVE() trick to safely iterate over the queue. + * And we don't free the watcher_list until we're done iterating. + * + * First, + * tell uv_fs_event_stop() (that could be called from a user's callback) + * not to free watcher_list. 
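+ * Second, move the whole queue aside with QUEUE_MOVE(); each handle is
+ * popped from the local copy and re-inserted into w->watchers before
+ * its callback runs, so a uv_fs_event_stop() from inside the callback
+ * merely unlinks the handle without invalidating our local queue head.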
+ */ + w->iterating = 1; + QUEUE_MOVE(&w->watchers, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + h = QUEUE_DATA(q, uv_fs_event_t, watchers); + + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&w->watchers, q); + + h->cb(h, path, events, 0); + } + /* done iterating, time to (maybe) free empty watcher_list */ + w->iterating = 0; + maybe_free_watcher_list(w, loop); + } + } +} + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags) { + struct watcher_list* w; + int events; + int err; + int wd; + + if (uv__is_active(handle)) + return -EINVAL; + + err = init_inotify(handle->loop); + if (err) + return err; + + events = UV__IN_ATTRIB + | UV__IN_CREATE + | UV__IN_MODIFY + | UV__IN_DELETE + | UV__IN_DELETE_SELF + | UV__IN_MOVE_SELF + | UV__IN_MOVED_FROM + | UV__IN_MOVED_TO; + + wd = uv__inotify_add_watch(handle->loop->inotify_fd, path, events); + if (wd == -1) + return -errno; + + w = find_watcher(handle->loop, wd); + if (w) + goto no_insert; + + w = uv__malloc(sizeof(*w) + strlen(path) + 1); + if (w == NULL) + return -ENOMEM; + + w->wd = wd; + w->path = strcpy((char*)(w + 1), path); + QUEUE_INIT(&w->watchers); + w->iterating = 0; + RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w); + +no_insert: + uv__handle_start(handle); + QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers); + handle->path = w->path; + handle->cb = cb; + handle->wd = wd; + + return 0; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + struct watcher_list* w; + + if (!uv__is_active(handle)) + return 0; + + w = find_watcher(handle->loop, handle->wd); + assert(w != NULL); + + handle->wd = -1; + handle->path = NULL; + uv__handle_stop(handle); + QUEUE_REMOVE(&handle->watchers); + + maybe_free_watcher_list(w, handle->loop); + + return 0; +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { + uv_fs_event_stop(handle); +} diff --git a/Utilities/cmlibuv/src/unix/linux-syscalls.c b/Utilities/cmlibuv/src/unix/linux-syscalls.c new file mode 100644 index 0000000..89998de --- /dev/null +++ b/Utilities/cmlibuv/src/unix/linux-syscalls.c @@ -0,0 +1,471 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "linux-syscalls.h" +#include <unistd.h> +#include <signal.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <errno.h> + +#if defined(__has_feature) +# if __has_feature(memory_sanitizer) +# define MSAN_ACTIVE 1 +# include <sanitizer/msan_interface.h> +# endif +#endif + +#if defined(__i386__) +# ifndef __NR_socketcall +# define __NR_socketcall 102 +# endif +#endif + +#if defined(__arm__) +# if defined(__thumb__) || defined(__ARM_EABI__) +# define UV_SYSCALL_BASE 0 +# else +# define UV_SYSCALL_BASE 0x900000 +# endif +#endif /* __arm__ */ + +#ifndef __NR_accept4 +# if defined(__x86_64__) +# define __NR_accept4 288 +# elif defined(__i386__) + /* Nothing. Handled through socketcall(). */ +# elif defined(__arm__) +# define __NR_accept4 (UV_SYSCALL_BASE + 366) +# endif +#endif /* __NR_accept4 */ + +#ifndef __NR_eventfd +# if defined(__x86_64__) +# define __NR_eventfd 284 +# elif defined(__i386__) +# define __NR_eventfd 323 +# elif defined(__arm__) +# define __NR_eventfd (UV_SYSCALL_BASE + 351) +# endif +#endif /* __NR_eventfd */ + +#ifndef __NR_eventfd2 +# if defined(__x86_64__) +# define __NR_eventfd2 290 +# elif defined(__i386__) +# define __NR_eventfd2 328 +# elif defined(__arm__) +# define __NR_eventfd2 (UV_SYSCALL_BASE + 356) +# endif +#endif /* __NR_eventfd2 */ + +#ifndef __NR_epoll_create +# if defined(__x86_64__) +# define __NR_epoll_create 213 +# elif defined(__i386__) +# define __NR_epoll_create 254 +# elif defined(__arm__) +# define __NR_epoll_create (UV_SYSCALL_BASE + 250) +# endif +#endif /* __NR_epoll_create */ + +#ifndef __NR_epoll_create1 +# if defined(__x86_64__) +# define __NR_epoll_create1 291 +# elif defined(__i386__) +# define __NR_epoll_create1 329 +# elif defined(__arm__) +# define __NR_epoll_create1 (UV_SYSCALL_BASE + 357) +# endif +#endif /* __NR_epoll_create1 */ + +#ifndef __NR_epoll_ctl +# if defined(__x86_64__) +# define __NR_epoll_ctl 233 /* used to be 214 */ +# elif defined(__i386__) +# define __NR_epoll_ctl 255 +# elif defined(__arm__) +# define __NR_epoll_ctl (UV_SYSCALL_BASE + 251) +# endif +#endif /* __NR_epoll_ctl */ + +#ifndef __NR_epoll_wait +# if defined(__x86_64__) +# define __NR_epoll_wait 232 /* used to be 215 */ +# elif defined(__i386__) +# define __NR_epoll_wait 256 +# elif defined(__arm__) +# define __NR_epoll_wait (UV_SYSCALL_BASE + 252) +# endif +#endif /* __NR_epoll_wait */ + +#ifndef __NR_epoll_pwait +# if defined(__x86_64__) +# define __NR_epoll_pwait 281 +# elif defined(__i386__) +# define __NR_epoll_pwait 319 +# elif defined(__arm__) +# define __NR_epoll_pwait (UV_SYSCALL_BASE + 346) +# endif +#endif /* __NR_epoll_pwait */ + +#ifndef __NR_inotify_init +# if defined(__x86_64__) +# define __NR_inotify_init 253 +# elif defined(__i386__) +# define __NR_inotify_init 291 +# elif defined(__arm__) +# define __NR_inotify_init (UV_SYSCALL_BASE + 316) +# endif +#endif /* __NR_inotify_init */ + +#ifndef __NR_inotify_init1 +# if defined(__x86_64__) +# define __NR_inotify_init1 294 +# elif defined(__i386__) +# define __NR_inotify_init1 332 +# elif defined(__arm__) +# define __NR_inotify_init1 (UV_SYSCALL_BASE + 360) +# endif +#endif /* __NR_inotify_init1 */ + +#ifndef __NR_inotify_add_watch +# if defined(__x86_64__) +# define __NR_inotify_add_watch 254 +# elif defined(__i386__) +# define __NR_inotify_add_watch 292 +# elif defined(__arm__) +# define __NR_inotify_add_watch (UV_SYSCALL_BASE + 317) +# endif +#endif /* __NR_inotify_add_watch */ + +#ifndef __NR_inotify_rm_watch +# if defined(__x86_64__) +# define __NR_inotify_rm_watch 
255
+# elif defined(__i386__)
+# define __NR_inotify_rm_watch 293
+# elif defined(__arm__)
+# define __NR_inotify_rm_watch (UV_SYSCALL_BASE + 318)
+# endif
+#endif /* __NR_inotify_rm_watch */
+
+#ifndef __NR_pipe2
+# if defined(__x86_64__)
+# define __NR_pipe2 293
+# elif defined(__i386__)
+# define __NR_pipe2 331
+# elif defined(__arm__)
+# define __NR_pipe2 (UV_SYSCALL_BASE + 359)
+# endif
+#endif /* __NR_pipe2 */
+
+#ifndef __NR_recvmmsg
+# if defined(__x86_64__)
+# define __NR_recvmmsg 299
+# elif defined(__i386__)
+# define __NR_recvmmsg 337
+# elif defined(__arm__)
+# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
+# endif
+#endif /* __NR_recvmmsg */
+
+#ifndef __NR_sendmmsg
+# if defined(__x86_64__)
+# define __NR_sendmmsg 307
+# elif defined(__i386__)
+# define __NR_sendmmsg 345
+# elif defined(__arm__)
+# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
+# endif
+#endif /* __NR_sendmmsg */
+
+#ifndef __NR_utimensat
+# if defined(__x86_64__)
+# define __NR_utimensat 280
+# elif defined(__i386__)
+# define __NR_utimensat 320
+# elif defined(__arm__)
+# define __NR_utimensat (UV_SYSCALL_BASE + 348)
+# endif
+#endif /* __NR_utimensat */
+
+#ifndef __NR_preadv
+# if defined(__x86_64__)
+# define __NR_preadv 295
+# elif defined(__i386__)
+# define __NR_preadv 333
+# elif defined(__arm__)
+# define __NR_preadv (UV_SYSCALL_BASE + 361)
+# endif
+#endif /* __NR_preadv */
+
+#ifndef __NR_pwritev
+# if defined(__x86_64__)
+# define __NR_pwritev 296
+# elif defined(__i386__)
+# define __NR_pwritev 334
+# elif defined(__arm__)
+# define __NR_pwritev (UV_SYSCALL_BASE + 362)
+# endif
+#endif /* __NR_pwritev */
+
+#ifndef __NR_dup3
+# if defined(__x86_64__)
+# define __NR_dup3 292
+# elif defined(__i386__)
+# define __NR_dup3 330
+# elif defined(__arm__)
+# define __NR_dup3 (UV_SYSCALL_BASE + 358)
+# endif
+#endif /* __NR_dup3 */
+
+
+int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags) {
+#if defined(__i386__)
+  unsigned long args[4];
+  int r;
+
+  args[0] = (unsigned long) fd;
+  args[1] = (unsigned long) addr;
+  args[2] = (unsigned long) addrlen;
+  args[3] = (unsigned long) flags;
+
+  r = syscall(__NR_socketcall, 18 /* SYS_ACCEPT4 */, args);
+
+  /* socketcall() raises EINVAL when SYS_ACCEPT4 is not supported but so does
+   * a bad flags argument. Try to distinguish between the two cases.
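+   *
+   * The remap below treats the EINVAL as ENOSYS only when every set flag is
+   * one this wrapper itself passes (UV__SOCK_CLOEXEC or UV__SOCK_NONBLOCK);
+   * any other bit means the flags themselves may have caused the EINVAL.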
+ */ + if (r == -1) + if (errno == EINVAL) + if ((flags & ~(UV__SOCK_CLOEXEC|UV__SOCK_NONBLOCK)) == 0) + errno = ENOSYS; + + return r; +#elif defined(__NR_accept4) + return syscall(__NR_accept4, fd, addr, addrlen, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__eventfd(unsigned int count) { +#if defined(__NR_eventfd) + return syscall(__NR_eventfd, count); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__eventfd2(unsigned int count, int flags) { +#if defined(__NR_eventfd2) + return syscall(__NR_eventfd2, count, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_create(int size) { +#if defined(__NR_epoll_create) + return syscall(__NR_epoll_create, size); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_create1(int flags) { +#if defined(__NR_epoll_create1) + return syscall(__NR_epoll_create1, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event* events) { +#if defined(__NR_epoll_ctl) + return syscall(__NR_epoll_ctl, epfd, op, fd, events); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_wait(int epfd, + struct uv__epoll_event* events, + int nevents, + int timeout) { +#if defined(__NR_epoll_wait) + int result; + result = syscall(__NR_epoll_wait, epfd, events, nevents, timeout); +#if MSAN_ACTIVE + if (result > 0) + __msan_unpoison(events, sizeof(events[0]) * result); +#endif + return result; +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_pwait(int epfd, + struct uv__epoll_event* events, + int nevents, + int timeout, + uint64_t sigmask) { +#if defined(__NR_epoll_pwait) + int result; + result = syscall(__NR_epoll_pwait, + epfd, + events, + nevents, + timeout, + &sigmask, + sizeof(sigmask)); +#if MSAN_ACTIVE + if (result > 0) + __msan_unpoison(events, sizeof(events[0]) * result); +#endif + return result; +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__inotify_init(void) { +#if defined(__NR_inotify_init) + return syscall(__NR_inotify_init); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__inotify_init1(int flags) { +#if defined(__NR_inotify_init1) + return syscall(__NR_inotify_init1, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__inotify_add_watch(int fd, const char* path, uint32_t mask) { +#if defined(__NR_inotify_add_watch) + return syscall(__NR_inotify_add_watch, fd, path, mask); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__inotify_rm_watch(int fd, int32_t wd) { +#if defined(__NR_inotify_rm_watch) + return syscall(__NR_inotify_rm_watch, fd, wd); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__pipe2(int pipefd[2], int flags) { +#if defined(__NR_pipe2) + int result; + result = syscall(__NR_pipe2, pipefd, flags); +#if MSAN_ACTIVE + if (!result) + __msan_unpoison(pipefd, sizeof(int[2])); +#endif + return result; +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__sendmmsg(int fd, + struct uv__mmsghdr* mmsg, + unsigned int vlen, + unsigned int flags) { +#if defined(__NR_sendmmsg) + return syscall(__NR_sendmmsg, fd, mmsg, vlen, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__recvmmsg(int fd, + struct uv__mmsghdr* mmsg, + unsigned int vlen, + unsigned int flags, + struct timespec* timeout) { +#if defined(__NR_recvmmsg) + return syscall(__NR_recvmmsg, fd, mmsg, vlen, flags, timeout); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__utimesat(int dirfd, + const char* path, + const struct timespec times[2], + int flags) 
+{ +#if defined(__NR_utimensat) + return syscall(__NR_utimensat, dirfd, path, times, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) { +#if defined(__NR_preadv) + return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32)); +#else + return errno = ENOSYS, -1; +#endif +} + + +ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) { +#if defined(__NR_pwritev) + return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32)); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__dup3(int oldfd, int newfd, int flags) { +#if defined(__NR_dup3) + return syscall(__NR_dup3, oldfd, newfd, flags); +#else + return errno = ENOSYS, -1; +#endif +} diff --git a/Utilities/cmlibuv/src/unix/linux-syscalls.h b/Utilities/cmlibuv/src/unix/linux-syscalls.h new file mode 100644 index 0000000..4c095e9 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/linux-syscalls.h @@ -0,0 +1,151 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef UV_LINUX_SYSCALL_H_ +#define UV_LINUX_SYSCALL_H_ + +#undef _GNU_SOURCE +#define _GNU_SOURCE + +#include <stdint.h> +#include <signal.h> +#include <sys/types.h> +#include <sys/time.h> +#include <sys/socket.h> + +#if defined(__alpha__) +# define UV__O_CLOEXEC 0x200000 +#elif defined(__hppa__) +# define UV__O_CLOEXEC 0x200000 +#elif defined(__sparc__) +# define UV__O_CLOEXEC 0x400000 +#else +# define UV__O_CLOEXEC 0x80000 +#endif + +#if defined(__alpha__) +# define UV__O_NONBLOCK 0x4 +#elif defined(__hppa__) +# define UV__O_NONBLOCK O_NONBLOCK +#elif defined(__mips__) +# define UV__O_NONBLOCK 0x80 +#elif defined(__sparc__) +# define UV__O_NONBLOCK 0x4000 +#else +# define UV__O_NONBLOCK 0x800 +#endif + +#define UV__EFD_CLOEXEC UV__O_CLOEXEC +#define UV__EFD_NONBLOCK UV__O_NONBLOCK + +#define UV__IN_CLOEXEC UV__O_CLOEXEC +#define UV__IN_NONBLOCK UV__O_NONBLOCK + +#define UV__SOCK_CLOEXEC UV__O_CLOEXEC +#if defined(SOCK_NONBLOCK) +# define UV__SOCK_NONBLOCK SOCK_NONBLOCK +#else +# define UV__SOCK_NONBLOCK UV__O_NONBLOCK +#endif + +/* epoll flags */ +#define UV__EPOLL_CLOEXEC UV__O_CLOEXEC +#define UV__EPOLL_CTL_ADD 1 +#define UV__EPOLL_CTL_DEL 2 +#define UV__EPOLL_CTL_MOD 3 + +/* inotify flags */ +#define UV__IN_ACCESS 0x001 +#define UV__IN_MODIFY 0x002 +#define UV__IN_ATTRIB 0x004 +#define UV__IN_CLOSE_WRITE 0x008 +#define UV__IN_CLOSE_NOWRITE 0x010 +#define UV__IN_OPEN 0x020 +#define UV__IN_MOVED_FROM 0x040 +#define UV__IN_MOVED_TO 0x080 +#define UV__IN_CREATE 0x100 +#define UV__IN_DELETE 0x200 +#define UV__IN_DELETE_SELF 0x400 +#define UV__IN_MOVE_SELF 0x800 + +#if defined(__x86_64__) +struct uv__epoll_event { + uint32_t events; + uint64_t data; +} __attribute__((packed)); +#else +struct uv__epoll_event { + uint32_t events; + uint64_t data; +}; +#endif + +struct uv__inotify_event { + int32_t wd; + uint32_t mask; + uint32_t cookie; + uint32_t len; + /* char name[0]; */ +}; + +struct uv__mmsghdr { + struct msghdr msg_hdr; + unsigned int msg_len; +}; + +int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags); +int uv__eventfd(unsigned int count); +int uv__epoll_create(int size); +int uv__epoll_create1(int flags); +int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event *ev); +int uv__epoll_wait(int epfd, + struct uv__epoll_event* events, + int nevents, + int timeout); +int uv__epoll_pwait(int epfd, + struct uv__epoll_event* events, + int nevents, + int timeout, + uint64_t sigmask); +int uv__eventfd2(unsigned int count, int flags); +int uv__inotify_init(void); +int uv__inotify_init1(int flags); +int uv__inotify_add_watch(int fd, const char* path, uint32_t mask); +int uv__inotify_rm_watch(int fd, int32_t wd); +int uv__pipe2(int pipefd[2], int flags); +int uv__recvmmsg(int fd, + struct uv__mmsghdr* mmsg, + unsigned int vlen, + unsigned int flags, + struct timespec* timeout); +int uv__sendmmsg(int fd, + struct uv__mmsghdr* mmsg, + unsigned int vlen, + unsigned int flags); +int uv__utimesat(int dirfd, + const char* path, + const struct timespec times[2], + int flags); +ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset); +ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset); +int uv__dup3(int oldfd, int newfd, int flags); + +#endif /* UV_LINUX_SYSCALL_H_ */ diff --git a/Utilities/cmlibuv/src/unix/loop-watcher.c b/Utilities/cmlibuv/src/unix/loop-watcher.c new file mode 100644 index 0000000..340bb0d --- /dev/null +++ b/Utilities/cmlibuv/src/unix/loop-watcher.c @@ -0,0 +1,68 @@ +/* Copyright Joyent, 
Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#define UV_LOOP_WATCHER_DEFINE(name, type) \ + int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \ + uv__handle_init(loop, (uv_handle_t*)handle, UV_##type); \ + handle->name##_cb = NULL; \ + return 0; \ + } \ + \ + int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \ + if (uv__is_active(handle)) return 0; \ + if (cb == NULL) return -EINVAL; \ + QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \ + handle->name##_cb = cb; \ + uv__handle_start(handle); \ + return 0; \ + } \ + \ + int uv_##name##_stop(uv_##name##_t* handle) { \ + if (!uv__is_active(handle)) return 0; \ + QUEUE_REMOVE(&handle->queue); \ + uv__handle_stop(handle); \ + return 0; \ + } \ + \ + void uv__run_##name(uv_loop_t* loop) { \ + uv_##name##_t* h; \ + QUEUE queue; \ + QUEUE* q; \ + QUEUE_MOVE(&loop->name##_handles, &queue); \ + while (!QUEUE_EMPTY(&queue)) { \ + q = QUEUE_HEAD(&queue); \ + h = QUEUE_DATA(q, uv_##name##_t, queue); \ + QUEUE_REMOVE(q); \ + QUEUE_INSERT_TAIL(&loop->name##_handles, q); \ + h->name##_cb(h); \ + } \ + } \ + \ + void uv__##name##_close(uv_##name##_t* handle) { \ + uv_##name##_stop(handle); \ + } + +UV_LOOP_WATCHER_DEFINE(prepare, PREPARE) +UV_LOOP_WATCHER_DEFINE(check, CHECK) +UV_LOOP_WATCHER_DEFINE(idle, IDLE) diff --git a/Utilities/cmlibuv/src/unix/loop.c b/Utilities/cmlibuv/src/unix/loop.c new file mode 100644 index 0000000..bd63c2f --- /dev/null +++ b/Utilities/cmlibuv/src/unix/loop.c @@ -0,0 +1,159 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "tree.h" +#include "internal.h" +#include "heap-inl.h" +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +int uv_loop_init(uv_loop_t* loop) { + void* saved_data; + int err; + + uv__signal_global_once_init(); + + saved_data = loop->data; + memset(loop, 0, sizeof(*loop)); + loop->data = saved_data; + + heap_init((struct heap*) &loop->timer_heap); + QUEUE_INIT(&loop->wq); + QUEUE_INIT(&loop->active_reqs); + QUEUE_INIT(&loop->idle_handles); + QUEUE_INIT(&loop->async_handles); + QUEUE_INIT(&loop->check_handles); + QUEUE_INIT(&loop->prepare_handles); + QUEUE_INIT(&loop->handle_queue); + + loop->nfds = 0; + loop->watchers = NULL; + loop->nwatchers = 0; + QUEUE_INIT(&loop->pending_queue); + QUEUE_INIT(&loop->watcher_queue); + + loop->closing_handles = NULL; + uv__update_time(loop); + uv__async_init(&loop->async_watcher); + loop->signal_pipefd[0] = -1; + loop->signal_pipefd[1] = -1; + loop->backend_fd = -1; + loop->emfile_fd = -1; + + loop->timer_counter = 0; + loop->stop_flag = 0; + + err = uv__platform_loop_init(loop); + if (err) + return err; + + err = uv_signal_init(loop, &loop->child_watcher); + if (err) + goto fail_signal_init; + + uv__handle_unref(&loop->child_watcher); + loop->child_watcher.flags |= UV__HANDLE_INTERNAL; + QUEUE_INIT(&loop->process_handles); + + err = uv_rwlock_init(&loop->cloexec_lock); + if (err) + goto fail_rwlock_init; + + err = uv_mutex_init(&loop->wq_mutex); + if (err) + goto fail_mutex_init; + + err = uv_async_init(loop, &loop->wq_async, uv__work_done); + if (err) + goto fail_async_init; + + uv__handle_unref(&loop->wq_async); + loop->wq_async.flags |= UV__HANDLE_INTERNAL; + + return 0; + +fail_async_init: + uv_mutex_destroy(&loop->wq_mutex); + +fail_mutex_init: + uv_rwlock_destroy(&loop->cloexec_lock); + +fail_rwlock_init: + uv__signal_loop_cleanup(loop); + +fail_signal_init: + uv__platform_loop_delete(loop); + + return err; +} + + +void uv__loop_close(uv_loop_t* loop) { + uv__signal_loop_cleanup(loop); + uv__platform_loop_delete(loop); + uv__async_stop(loop, &loop->async_watcher); + + if (loop->emfile_fd != -1) { + uv__close(loop->emfile_fd); + loop->emfile_fd = -1; + } + + if (loop->backend_fd != -1) { + uv__close(loop->backend_fd); + loop->backend_fd = -1; + } + + uv_mutex_lock(&loop->wq_mutex); + assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!"); + assert(!uv__has_active_reqs(loop)); + uv_mutex_unlock(&loop->wq_mutex); + uv_mutex_destroy(&loop->wq_mutex); + + /* + * Note that all thread pool stuff is finished at this point and + * it is safe to just destroy rw lock + */ + uv_rwlock_destroy(&loop->cloexec_lock); + +#if 0 + assert(QUEUE_EMPTY(&loop->pending_queue)); + assert(QUEUE_EMPTY(&loop->watcher_queue)); + assert(loop->nfds == 0); +#endif + + uv__free(loop->watchers); + loop->watchers = NULL; + loop->nwatchers = 0; +} + + +int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) { + if (option != UV_LOOP_BLOCK_SIGNAL) + return UV_ENOSYS; + + if (va_arg(ap, int) != SIGPROF) + return 
UV_EINVAL; + + loop->flags |= UV_LOOP_BLOCK_SIGPROF; + return 0; +} diff --git a/Utilities/cmlibuv/src/unix/netbsd.c b/Utilities/cmlibuv/src/unix/netbsd.c new file mode 100644 index 0000000..4a9e6cb --- /dev/null +++ b/Utilities/cmlibuv/src/unix/netbsd.c @@ -0,0 +1,380 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <assert.h> +#include <string.h> +#include <errno.h> + +#include <kvm.h> +#include <paths.h> +#include <ifaddrs.h> +#include <unistd.h> +#include <time.h> +#include <stdlib.h> +#include <fcntl.h> + +#include <net/if.h> +#include <net/if_dl.h> +#include <sys/resource.h> +#include <sys/types.h> +#include <sys/sysctl.h> +#include <uvm/uvm_extern.h> + +#include <unistd.h> +#include <time.h> + +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) + +static char *process_title; + + +int uv__platform_loop_init(uv_loop_t* loop) { + return uv__kqueue_init(loop); +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec); +} + + +void uv_loadavg(double avg[3]) { + struct loadavg info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_LOADAVG}; + + if (sysctl(which, 2, &info, &size, NULL, 0) == -1) return; + + avg[0] = (double) info.ldavg[0] / info.fscale; + avg[1] = (double) info.ldavg[1] / info.fscale; + avg[2] = (double) info.ldavg[2] / info.fscale; +} + + +int uv_exepath(char* buffer, size_t* size) { + int mib[4]; + size_t cb; + pid_t mypid; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + mypid = getpid(); + mib[0] = CTL_KERN; + mib[1] = KERN_PROC_ARGS; + mib[2] = mypid; + mib[3] = KERN_PROC_ARGV; + + cb = *size; + if (sysctl(mib, 4, buffer, &cb, NULL, 0)) + return -errno; + *size = strlen(buffer); + + return 0; +} + + +uint64_t uv_get_free_memory(void) { + struct uvmexp info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_UVMEXP}; + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info.free * sysconf(_SC_PAGESIZE); +} + + +uint64_t uv_get_total_memory(void) { +#if defined(HW_PHYSMEM64) + uint64_t info; + int which[] = {CTL_HW, HW_PHYSMEM64}; +#else + unsigned int info; + int which[] = {CTL_HW, HW_PHYSMEM}; +#endif + size_t size = sizeof(info); + + if (sysctl(which, 2, &info, &size, 
NULL, 0))
+    return -errno;
+
+  return (uint64_t) info;
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+  process_title = argc ? uv__strdup(argv[0]) : NULL;
+  return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+  if (process_title) uv__free(process_title);
+
+  process_title = uv__strdup(title);
+  setproctitle("%s", title);
+
+  return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+  size_t len;
+
+  if (buffer == NULL || size == 0)
+    return -EINVAL;
+
+  if (process_title) {
+    /* len counts the terminating NUL, which memcpy() copies as well. */
+    len = strlen(process_title) + 1;
+
+    if (size < len)
+      return -ENOBUFS;
+
+    memcpy(buffer, process_title, len);
+  } else {
+    buffer[0] = '\0';
+  }
+
+  return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  kvm_t *kd = NULL;
+  struct kinfo_proc2 *kinfo = NULL;
+  pid_t pid;
+  int nprocs;
+  int max_size = sizeof(struct kinfo_proc2);
+  int page_size;
+
+  page_size = getpagesize();
+  pid = getpid();
+
+  kd = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, "kvm_open");
+
+  if (kd == NULL) goto error;
+
+  kinfo = kvm_getproc2(kd, KERN_PROC_PID, pid, max_size, &nprocs);
+  if (kinfo == NULL) goto error;
+
+  *rss = kinfo->p_vm_rssize * page_size;
+
+  kvm_close(kd);
+
+  return 0;
+
+error:
+  if (kd) kvm_close(kd);
+  return -EPERM;
+}
+
+
+int uv_uptime(double* uptime) {
+  time_t now;
+  struct timeval info;
+  size_t size = sizeof(info);
+  static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  now = time(NULL);
+
+  *uptime = (double)(now - info.tv_sec);
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK);
+  unsigned int multiplier = ((uint64_t)1000L / ticks);
+  unsigned int cur = 0;
+  uv_cpu_info_t* cpu_info;
+  u_int64_t* cp_times;
+  char model[512];
+  u_int64_t cpuspeed;
+  int numcpus;
+  size_t size;
+  int i;
+
+  size = sizeof(model);
+  if (sysctlbyname("machdep.cpu_brand", &model, &size, NULL, 0) &&
+      sysctlbyname("hw.model", &model, &size, NULL, 0)) {
+    return -errno;
+  }
+
+  size = sizeof(numcpus);
+  if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
+    return -errno;
+  *count = numcpus;
+
+  /* Only i386 and amd64 have machdep.tsc_freq */
+  size = sizeof(cpuspeed);
+  if (sysctlbyname("machdep.tsc_freq", &cpuspeed, &size, NULL, 0))
+    cpuspeed = 0;
+
+  size = numcpus * CPUSTATES * sizeof(*cp_times);
+  cp_times = uv__malloc(size);
+  if (cp_times == NULL)
+    return -ENOMEM;
+
+  if (sysctlbyname("kern.cp_time", cp_times, &size, NULL, 0)) {
+    uv__free(cp_times);  /* don't leak cp_times on this error path */
+    return -errno;
+  }
+
+  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+  if (!(*cpu_infos)) {
+    uv__free(cp_times);
+    return -ENOMEM;
+  }
+
+  for (i = 0; i < numcpus; i++) {
+    cpu_info = &(*cpu_infos)[i];
+    cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
+    cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
+    cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
+    cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
+    cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
+    cpu_info->model = uv__strdup(model);
+    cpu_info->speed = (int)(cpuspeed/(uint64_t) 1e6);
+    cur += CPUSTATES;
+  }
+  uv__free(cp_times);
+  return 0;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int*
count) { + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_dl *sa_addr; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != PF_INET)) { + continue; + } + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + if (ent->ifa_addr->sa_family != PF_INET) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != AF_LINK)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/Utilities/cmlibuv/src/unix/openbsd.c b/Utilities/cmlibuv/src/unix/openbsd.c new file mode 100644 index 0000000..909288c --- /dev/null +++ b/Utilities/cmlibuv/src/unix/openbsd.c @@ -0,0 +1,396 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "internal.h" + +#include <sys/types.h> +#include <sys/param.h> +#include <sys/resource.h> +#include <sys/sched.h> +#include <sys/time.h> +#include <sys/sysctl.h> + +#include <ifaddrs.h> +#include <net/if.h> +#include <net/if_dl.h> + +#include <errno.h> +#include <fcntl.h> +#include <kvm.h> +#include <paths.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) + + +static char *process_title; + + +int uv__platform_loop_init(uv_loop_t* loop) { + return uv__kqueue_init(loop); +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec); +} + + +void uv_loadavg(double avg[3]) { + struct loadavg info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_LOADAVG}; + + if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return; + + avg[0] = (double) info.ldavg[0] / info.fscale; + avg[1] = (double) info.ldavg[1] / info.fscale; + avg[2] = (double) info.ldavg[2] / info.fscale; +} + + +int uv_exepath(char* buffer, size_t* size) { + int mib[4]; + char **argsbuf = NULL; + char **argsbuf_tmp; + size_t argsbuf_size = 100U; + size_t exepath_size; + pid_t mypid; + int err; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + mypid = getpid(); + for (;;) { + err = -ENOMEM; + argsbuf_tmp = uv__realloc(argsbuf, argsbuf_size); + if (argsbuf_tmp == NULL) + goto out; + argsbuf = argsbuf_tmp; + mib[0] = CTL_KERN; + mib[1] = KERN_PROC_ARGS; + mib[2] = mypid; + mib[3] = KERN_PROC_ARGV; + if (sysctl(mib, 4, argsbuf, &argsbuf_size, NULL, 0) == 0) { + break; + } + if (errno != ENOMEM) { + err = -errno; + goto out; + } + argsbuf_size *= 2U; + } + + if (argsbuf[0] == NULL) { + err = -EINVAL; /* FIXME(bnoordhuis) More appropriate error. */ + goto out; + } + + *size -= 1; + exepath_size = strlen(argsbuf[0]); + if (*size > exepath_size) + *size = exepath_size; + + memcpy(buffer, argsbuf[0], *size); + buffer[*size] = '\0'; + err = 0; + +out: + uv__free(argsbuf); + + return err; +} + + +uint64_t uv_get_free_memory(void) { + struct uvmexp info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_UVMEXP}; + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info.free * sysconf(_SC_PAGESIZE); +} + + +uint64_t uv_get_total_memory(void) { + uint64_t info; + int which[] = {CTL_HW, HW_PHYSMEM64}; + size_t size = sizeof(info); + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info; +} + + +char** uv_setup_args(int argc, char** argv) { + process_title = argc ? 
uv__strdup(argv[0]) : NULL;
+  return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+  uv__free(process_title);
+  process_title = uv__strdup(title);
+  setproctitle("%s", title);  /* never pass title itself as the format */
+  return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+  size_t len;
+
+  if (buffer == NULL || size == 0)
+    return -EINVAL;
+
+  if (process_title) {
+    /* len counts the terminating NUL, which memcpy() copies as well. */
+    len = strlen(process_title) + 1;
+
+    if (size < len)
+      return -ENOBUFS;
+
+    memcpy(buffer, process_title, len);
+  } else {
+    buffer[0] = '\0';
+  }
+
+  return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  struct kinfo_proc kinfo;
+  size_t page_size = getpagesize();
+  size_t size = sizeof(struct kinfo_proc);
+  int mib[6];
+
+  mib[0] = CTL_KERN;
+  mib[1] = KERN_PROC;
+  mib[2] = KERN_PROC_PID;
+  mib[3] = getpid();
+  mib[4] = sizeof(struct kinfo_proc);
+  mib[5] = 1;
+
+  if (sysctl(mib, 6, &kinfo, &size, NULL, 0) < 0)
+    return -errno;
+
+  *rss = kinfo.p_vm_rssize * page_size;
+  return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+  time_t now;
+  struct timeval info;
+  size_t size = sizeof(info);
+  static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  now = time(NULL);
+
+  *uptime = (double)(now - info.tv_sec);
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+               multiplier = ((uint64_t)1000L / ticks), cpuspeed;
+  uint64_t info[CPUSTATES];
+  char model[512];
+  int numcpus = 1;
+  int which[] = {CTL_HW,HW_MODEL,0};
+  size_t size;
+  int i;
+  uv_cpu_info_t* cpu_info;
+
+  size = sizeof(model);
+  if (sysctl(which, 2, &model, &size, NULL, 0))
+    return -errno;
+
+  which[1] = HW_NCPU;
+  size = sizeof(numcpus);
+  if (sysctl(which, 2, &numcpus, &size, NULL, 0))
+    return -errno;
+
+  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+  if (!(*cpu_infos))
+    return -ENOMEM;
+
+  *count = numcpus;
+
+  which[1] = HW_CPUSPEED;
+  size = sizeof(cpuspeed);
+  if (sysctl(which, 2, &cpuspeed, &size, NULL, 0)) {
+    uv__free(*cpu_infos);
+    return -errno;
+  }
+
+  size = sizeof(info);
+  which[0] = CTL_KERN;
+  which[1] = KERN_CPTIME2;
+  for (i = 0; i < numcpus; i++) {
+    which[2] = i;
+    size = sizeof(info);
+    if (sysctl(which, 3, &info, &size, NULL, 0)) {
+      uv__free(*cpu_infos);
+      return -errno;
+    }
+
+    cpu_info = &(*cpu_infos)[i];
+
+    cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier;
+    cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier;
+    cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier;
+    cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier;
+    cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier;
+
+    cpu_info->model = uv__strdup(model);
+    cpu_info->speed = cpuspeed;
+  }
+
+  return 0;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses,
+                           int* count) {
+  struct ifaddrs *addrs, *ent;
+  uv_interface_address_t* address;
+  int i;
+  struct sockaddr_dl *sa_addr;
+
+  if (getifaddrs(&addrs) != 0)
+    return -errno;
+
+  *count = 0;
+
+  /* Count the number of interfaces */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != PF_INET)) {
+      continue;
+    }
+    (*count)++;
+  }
+
+  *addresses = uv__malloc(*count * sizeof(**addresses));
+
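+  /* uv__malloc() returns NULL on allocation failure; the check below frees
+   * the ifaddrs list before bailing out so this error path does not leak. */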
+ if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + if (ent->ifa_addr->sa_family != PF_INET) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != AF_LINK)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/Utilities/cmlibuv/src/unix/os390.c b/Utilities/cmlibuv/src/unix/os390.c new file mode 100644 index 0000000..bcdbc4b --- /dev/null +++ b/Utilities/cmlibuv/src/unix/os390.c @@ -0,0 +1,42 @@ +/* Copyright libuv project contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "internal.h" + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct pollfd p[1]; + int rv; + + p[0].fd = fd; + p[0].events = POLLIN; + + do + rv = poll(p, 1, 0); + while (rv == -1 && errno == EINTR); + + if (rv == -1) + abort(); + + if (p[0].revents & POLLNVAL) + return -1; + + return 0; +} diff --git a/Utilities/cmlibuv/src/unix/pipe.c b/Utilities/cmlibuv/src/unix/pipe.c new file mode 100644 index 0000000..b73994c --- /dev/null +++ b/Utilities/cmlibuv/src/unix/pipe.c @@ -0,0 +1,298 @@ +/* Copyright Joyent, Inc. 
and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <assert.h> +#include <errno.h> +#include <string.h> +#include <sys/un.h> +#include <unistd.h> +#include <stdlib.h> + + +int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) { + uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE); + handle->shutdown_req = NULL; + handle->connect_req = NULL; + handle->pipe_fname = NULL; + handle->ipc = ipc; + return 0; +} + + +int uv_pipe_bind(uv_pipe_t* handle, const char* name) { + struct sockaddr_un saddr; + const char* pipe_fname; + int sockfd; + int err; + + pipe_fname = NULL; + sockfd = -1; + + /* Already bound? */ + if (uv__stream_fd(handle) >= 0) + return -EINVAL; + + /* Make a copy of the file name, it outlives this function's scope. */ + pipe_fname = uv__strdup(name); + if (pipe_fname == NULL) + return -ENOMEM; + + /* We've got a copy, don't touch the original any more. */ + name = NULL; + + err = uv__socket(AF_UNIX, SOCK_STREAM, 0); + if (err < 0) + goto err_socket; + sockfd = err; + + memset(&saddr, 0, sizeof saddr); + strncpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path) - 1); + saddr.sun_path[sizeof(saddr.sun_path) - 1] = '\0'; + saddr.sun_family = AF_UNIX; + + if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) { + err = -errno; + /* Convert ENOENT to EACCES for compatibility with Windows. */ + if (err == -ENOENT) + err = -EACCES; + goto err_bind; + } + + /* Success. */ + handle->flags |= UV_HANDLE_BOUND; + handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */ + handle->io_watcher.fd = sockfd; + return 0; + +err_bind: + uv__close(sockfd); + +err_socket: + uv__free((void*)pipe_fname); + return err; +} + + +int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) { + if (uv__stream_fd(handle) == -1) + return -EINVAL; + +#if defined(__MVS__) + /* On zOS, backlog=0 has undefined behaviour */ + if (backlog == 0) + backlog = 1; + else if (backlog < 0) + backlog = SOMAXCONN; +#endif + + if (listen(uv__stream_fd(handle), backlog)) + return -errno; + + handle->connection_cb = cb; + handle->io_watcher.cb = uv__server_io; + uv__io_start(handle->loop, &handle->io_watcher, POLLIN); + return 0; +} + + +void uv__pipe_close(uv_pipe_t* handle) { + if (handle->pipe_fname) { + /* + * Unlink the file system entity before closing the file descriptor. 
+ * Doing it the other way around introduces a race where our process + * unlinks a socket with the same name that's just been created by + * another thread or process. + */ + unlink(handle->pipe_fname); + uv__free((void*)handle->pipe_fname); + handle->pipe_fname = NULL; + } + + uv__stream_close((uv_stream_t*)handle); +} + + +int uv_pipe_open(uv_pipe_t* handle, uv_file fd) { + int err; + + err = uv__nonblock(fd, 1); + if (err) + return err; + +#if defined(__APPLE__) + err = uv__stream_try_select((uv_stream_t*) handle, &fd); + if (err) + return err; +#endif /* defined(__APPLE__) */ + + return uv__stream_open((uv_stream_t*)handle, + fd, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); +} + + +void uv_pipe_connect(uv_connect_t* req, + uv_pipe_t* handle, + const char* name, + uv_connect_cb cb) { + struct sockaddr_un saddr; + int new_sock; + int err; + int r; + + new_sock = (uv__stream_fd(handle) == -1); + + if (new_sock) { + err = uv__socket(AF_UNIX, SOCK_STREAM, 0); + if (err < 0) + goto out; + handle->io_watcher.fd = err; + } + + memset(&saddr, 0, sizeof saddr); + strncpy(saddr.sun_path, name, sizeof(saddr.sun_path) - 1); + saddr.sun_path[sizeof(saddr.sun_path) - 1] = '\0'; + saddr.sun_family = AF_UNIX; + + do { + r = connect(uv__stream_fd(handle), + (struct sockaddr*)&saddr, sizeof saddr); + } + while (r == -1 && errno == EINTR); + + if (r == -1 && errno != EINPROGRESS) { + err = -errno; + goto out; + } + + err = 0; + if (new_sock) { + err = uv__stream_open((uv_stream_t*)handle, + uv__stream_fd(handle), + UV_STREAM_READABLE | UV_STREAM_WRITABLE); + } + + if (err == 0) + uv__io_start(handle->loop, &handle->io_watcher, POLLIN | POLLOUT); + +out: + handle->delayed_error = err; + handle->connect_req = req; + + uv__req_init(handle->loop, req, UV_CONNECT); + req->handle = (uv_stream_t*)handle; + req->cb = cb; + QUEUE_INIT(&req->queue); + + /* Force callback to run on next tick in case of error. 
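+   * uv__io_feed() merely queues the watcher in the loop's pending queue;
+   * the pending pass then invokes it with POLLOUT, so the connect callback
+   * still runs from the event loop rather than from inside
+   * uv_pipe_connect() itself.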
*/ + if (err) + uv__io_feed(handle->loop, &handle->io_watcher); + +} + + +typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*); + + +static int uv__pipe_getsockpeername(const uv_pipe_t* handle, + uv__peersockfunc func, + char* buffer, + size_t* size) { + struct sockaddr_un sa; + socklen_t addrlen; + int err; + + addrlen = sizeof(sa); + memset(&sa, 0, addrlen); + err = func(uv__stream_fd(handle), (struct sockaddr*) &sa, &addrlen); + if (err < 0) { + *size = 0; + return -errno; + } + +#if defined(__linux__) + if (sa.sun_path[0] == 0) + /* Linux abstract namespace */ + addrlen -= offsetof(struct sockaddr_un, sun_path); + else +#endif + addrlen = strlen(sa.sun_path); + + + if (addrlen >= *size) { + *size = addrlen + 1; + return UV_ENOBUFS; + } + + memcpy(buffer, sa.sun_path, addrlen); + *size = addrlen; + + /* only null-terminate if it's not an abstract socket */ + if (buffer[0] != '\0') + buffer[addrlen] = '\0'; + + return 0; +} + + +int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) { + return uv__pipe_getsockpeername(handle, getsockname, buffer, size); +} + + +int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) { + return uv__pipe_getsockpeername(handle, getpeername, buffer, size); +} + + +void uv_pipe_pending_instances(uv_pipe_t* handle, int count) { +} + + +int uv_pipe_pending_count(uv_pipe_t* handle) { + uv__stream_queued_fds_t* queued_fds; + + if (!handle->ipc) + return 0; + + if (handle->accepted_fd == -1) + return 0; + + if (handle->queued_fds == NULL) + return 1; + + queued_fds = handle->queued_fds; + return queued_fds->offset + 1; +} + + +uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) { + if (!handle->ipc) + return UV_UNKNOWN_HANDLE; + + if (handle->accepted_fd == -1) + return UV_UNKNOWN_HANDLE; + else + return uv__handle_type(handle->accepted_fd); +} diff --git a/Utilities/cmlibuv/src/unix/poll.c b/Utilities/cmlibuv/src/unix/poll.c new file mode 100644 index 0000000..4c0d478 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/poll.c @@ -0,0 +1,130 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "internal.h" + +#include <unistd.h> +#include <assert.h> +#include <errno.h> + + +static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + uv_poll_t* handle; + int pevents; + + handle = container_of(w, uv_poll_t, io_watcher); + + if (events & POLLERR) { + uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP); + uv__handle_stop(handle); + handle->poll_cb(handle, -EBADF, 0); + return; + } + + pevents = 0; + if (events & POLLIN) + pevents |= UV_READABLE; + if (events & POLLOUT) + pevents |= UV_WRITABLE; + if (events & UV__POLLRDHUP) + pevents |= UV_DISCONNECT; + + handle->poll_cb(handle, 0, pevents); +} + + +int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) { + int err; + + err = uv__io_check_fd(loop, fd); + if (err) + return err; + + /* If ioctl(FIONBIO) reports ENOTTY, try fcntl(F_GETFL) + fcntl(F_SETFL). + * Workaround for e.g. kqueue fds not supporting ioctls. + */ + err = uv__nonblock(fd, 1); + if (err == -ENOTTY) + if (uv__nonblock == uv__nonblock_ioctl) + err = uv__nonblock_fcntl(fd, 1); + + if (err) + return err; + + uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL); + uv__io_init(&handle->io_watcher, uv__poll_io, fd); + handle->poll_cb = NULL; + return 0; +} + + +int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle, + uv_os_sock_t socket) { + return uv_poll_init(loop, handle, socket); +} + + +static void uv__poll_stop(uv_poll_t* handle) { + uv__io_stop(handle->loop, + &handle->io_watcher, + POLLIN | POLLOUT | UV__POLLRDHUP); + uv__handle_stop(handle); +} + + +int uv_poll_stop(uv_poll_t* handle) { + assert(!(handle->flags & (UV_CLOSING | UV_CLOSED))); + uv__poll_stop(handle); + return 0; +} + + +int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) { + int events; + + assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0); + assert(!(handle->flags & (UV_CLOSING | UV_CLOSED))); + + uv__poll_stop(handle); + + if (pevents == 0) + return 0; + + events = 0; + if (pevents & UV_READABLE) + events |= POLLIN; + if (pevents & UV_WRITABLE) + events |= POLLOUT; + if (pevents & UV_DISCONNECT) + events |= UV__POLLRDHUP; + + uv__io_start(handle->loop, &handle->io_watcher, events); + uv__handle_start(handle); + handle->poll_cb = poll_cb; + + return 0; +} + + +void uv__poll_close(uv_poll_t* handle) { + uv__poll_stop(handle); +} diff --git a/Utilities/cmlibuv/src/unix/process.c b/Utilities/cmlibuv/src/unix/process.c new file mode 100644 index 0000000..45f5b45 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/process.c @@ -0,0 +1,563 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include <errno.h> + +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> +#include <fcntl.h> +#include <poll.h> + +#if defined(__APPLE__) && !TARGET_OS_IPHONE +# include <crt_externs.h> +# define environ (*_NSGetEnviron()) +#else +extern char **environ; +#endif + +#if defined(__linux__) || defined(__GLIBC__) +# include <grp.h> +#endif + + +static void uv__chld(uv_signal_t* handle, int signum) { + uv_process_t* process; + uv_loop_t* loop; + int exit_status; + int term_signal; + int status; + pid_t pid; + QUEUE pending; + QUEUE* q; + QUEUE* h; + + assert(signum == SIGCHLD); + + QUEUE_INIT(&pending); + loop = handle->loop; + + h = &loop->process_handles; + q = QUEUE_HEAD(h); + while (q != h) { + process = QUEUE_DATA(q, uv_process_t, queue); + q = QUEUE_NEXT(q); + + do + pid = waitpid(process->pid, &status, WNOHANG); + while (pid == -1 && errno == EINTR); + + if (pid == 0) + continue; + + if (pid == -1) { + if (errno != ECHILD) + abort(); + continue; + } + + process->status = status; + QUEUE_REMOVE(&process->queue); + QUEUE_INSERT_TAIL(&pending, &process->queue); + } + + h = &pending; + q = QUEUE_HEAD(h); + while (q != h) { + process = QUEUE_DATA(q, uv_process_t, queue); + q = QUEUE_NEXT(q); + + QUEUE_REMOVE(&process->queue); + QUEUE_INIT(&process->queue); + uv__handle_stop(process); + + if (process->exit_cb == NULL) + continue; + + exit_status = 0; + if (WIFEXITED(process->status)) + exit_status = WEXITSTATUS(process->status); + + term_signal = 0; + if (WIFSIGNALED(process->status)) + term_signal = WTERMSIG(process->status); + + process->exit_cb(process, exit_status, term_signal); + } + assert(QUEUE_EMPTY(&pending)); +} + + +int uv__make_socketpair(int fds[2], int flags) { +#if defined(__linux__) + static int no_cloexec; + + if (no_cloexec) + goto skip; + + if (socketpair(AF_UNIX, SOCK_STREAM | UV__SOCK_CLOEXEC | flags, 0, fds) == 0) + return 0; + + /* Retry on EINVAL, it means SOCK_CLOEXEC is not supported. + * Anything else is a genuine error. + */ + if (errno != EINVAL) + return -errno; + + no_cloexec = 1; + +skip: +#endif + + if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) + return -errno; + + uv__cloexec(fds[0], 1); + uv__cloexec(fds[1], 1); + + if (flags & UV__F_NONBLOCK) { + uv__nonblock(fds[0], 1); + uv__nonblock(fds[1], 1); + } + + return 0; +} + + +int uv__make_pipe(int fds[2], int flags) { +#if defined(__linux__) + static int no_pipe2; + + if (no_pipe2) + goto skip; + + if (uv__pipe2(fds, flags | UV__O_CLOEXEC) == 0) + return 0; + + if (errno != ENOSYS) + return -errno; + + no_pipe2 = 1; + +skip: +#endif + + if (pipe(fds)) + return -errno; + + uv__cloexec(fds[0], 1); + uv__cloexec(fds[1], 1); + + if (flags & UV__F_NONBLOCK) { + uv__nonblock(fds[0], 1); + uv__nonblock(fds[1], 1); + } + + return 0; +} + + +/* + * Used for initializing stdio streams like options.stdin_stream. Returns + * zero on success. See also the cleanup section in uv_spawn(). 
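+ *
+ * On success with UV_CREATE_PIPE, fds[0] is the end the parent keeps and
+ * fds[1] the end handed to the child; with UV_INHERIT_FD or
+ * UV_INHERIT_STREAM only fds[1] is filled in with the inherited fd.
+ *
+ * Illustrative sketch of a typical caller, not taken from this file
+ * (pipe_handle is a placeholder uv_pipe_t):
+ *
+ *   uv_stdio_container_t stdio[3];
+ *   stdio[0].flags = UV_IGNORE;          (child's fd 0 gets /dev/null)
+ *   stdio[1].flags = UV_CREATE_PIPE | UV_WRITABLE_PIPE;
+ *   stdio[1].data.stream = (uv_stream_t*) &pipe_handle;
+ *   stdio[2].flags = UV_INHERIT_FD;      (child shares our stderr)
+ *   stdio[2].data.fd = 2;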
+ */ +static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) { + int mask; + int fd; + + mask = UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | UV_INHERIT_STREAM; + + switch (container->flags & mask) { + case UV_IGNORE: + return 0; + + case UV_CREATE_PIPE: + assert(container->data.stream != NULL); + if (container->data.stream->type != UV_NAMED_PIPE) + return -EINVAL; + else + return uv__make_socketpair(fds, 0); + + case UV_INHERIT_FD: + case UV_INHERIT_STREAM: + if (container->flags & UV_INHERIT_FD) + fd = container->data.fd; + else + fd = uv__stream_fd(container->data.stream); + + if (fd == -1) + return -EINVAL; + + fds[1] = fd; + return 0; + + default: + assert(0 && "Unexpected flags"); + return -EINVAL; + } +} + + +static int uv__process_open_stream(uv_stdio_container_t* container, + int pipefds[2], + int writable) { + int flags; + int err; + + if (!(container->flags & UV_CREATE_PIPE) || pipefds[0] < 0) + return 0; + + err = uv__close(pipefds[1]); + if (err != 0) + abort(); + + pipefds[1] = -1; + uv__nonblock(pipefds[0], 1); + + if (container->data.stream->type == UV_NAMED_PIPE && + ((uv_pipe_t*)container->data.stream)->ipc) + flags = UV_STREAM_READABLE | UV_STREAM_WRITABLE; + else if (writable) + flags = UV_STREAM_WRITABLE; + else + flags = UV_STREAM_READABLE; + + return uv__stream_open(container->data.stream, pipefds[0], flags); +} + + +static void uv__process_close_stream(uv_stdio_container_t* container) { + if (!(container->flags & UV_CREATE_PIPE)) return; + uv__stream_close((uv_stream_t*)container->data.stream); +} + + +static void uv__write_int(int fd, int val) { + ssize_t n; + + do + n = write(fd, &val, sizeof(val)); + while (n == -1 && errno == EINTR); + + if (n == -1 && errno == EPIPE) + return; /* parent process has quit */ + + assert(n == sizeof(val)); +} + + +#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)) +/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be + * avoided. Since this isn't called on those targets, the function + * doesn't even need to be defined for them. + */ +static void uv__process_child_init(const uv_process_options_t* options, + int stdio_count, + int (*pipes)[2], + int error_fd) { + int close_fd; + int use_fd; + int fd; + + if (options->flags & UV_PROCESS_DETACHED) + setsid(); + + /* First duplicate low numbered fds, since it's not safe to duplicate them, + * they could get replaced. Example: swapping stdout and stderr; without + * this fd 2 (stderr) would be duplicated into fd 1, thus making both + * stdout and stderr go to the same fd, which was not the intention. */ + for (fd = 0; fd < stdio_count; fd++) { + use_fd = pipes[fd][1]; + if (use_fd < 0 || use_fd >= fd) + continue; + pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count); + if (pipes[fd][1] == -1) { + uv__write_int(error_fd, -errno); + _exit(127); + } + } + + for (fd = 0; fd < stdio_count; fd++) { + close_fd = pipes[fd][0]; + use_fd = pipes[fd][1]; + + if (use_fd < 0) { + if (fd >= 3) + continue; + else { + /* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is + * set + */ + use_fd = open("/dev/null", fd == 0 ? 
O_RDONLY : O_RDWR); + close_fd = use_fd; + + if (use_fd == -1) { + uv__write_int(error_fd, -errno); + _exit(127); + } + } + } + + if (fd == use_fd) + uv__cloexec(use_fd, 0); + else + fd = dup2(use_fd, fd); + + if (fd == -1) { + uv__write_int(error_fd, -errno); + _exit(127); + } + + if (fd <= 2) + uv__nonblock(fd, 0); + + if (close_fd >= stdio_count) + uv__close(close_fd); + } + + for (fd = 0; fd < stdio_count; fd++) { + use_fd = pipes[fd][1]; + + if (use_fd >= stdio_count) + uv__close(use_fd); + } + + if (options->cwd != NULL && chdir(options->cwd)) { + uv__write_int(error_fd, -errno); + _exit(127); + } + + if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) { + /* When dropping privileges from root, the `setgroups` call will + * remove any extraneous groups. If we don't call this, then + * even though our uid has dropped, we may still have groups + * that enable us to do super-user things. This will fail if we + * aren't root, so don't bother checking the return value, this + * is just done as an optimistic privilege dropping function. + */ + SAVE_ERRNO(setgroups(0, NULL)); + } + + if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) { + uv__write_int(error_fd, -errno); + _exit(127); + } + + if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) { + uv__write_int(error_fd, -errno); + _exit(127); + } + + if (options->env != NULL) { + environ = options->env; + } + + execvp(options->file, options->args); + uv__write_int(error_fd, -errno); + _exit(127); +} +#endif + + +int uv_spawn(uv_loop_t* loop, + uv_process_t* process, + const uv_process_options_t* options) { +#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH) + /* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */ + return -ENOSYS; +#else + int signal_pipe[2] = { -1, -1 }; + int (*pipes)[2]; + int stdio_count; + ssize_t r; + pid_t pid; + int err; + int exec_errorno; + int i; + int status; + + assert(options->file != NULL); + assert(!(options->flags & ~(UV_PROCESS_DETACHED | + UV_PROCESS_SETGID | + UV_PROCESS_SETUID | + UV_PROCESS_WINDOWS_HIDE | + UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS))); + + uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS); + QUEUE_INIT(&process->queue); + + stdio_count = options->stdio_count; + if (stdio_count < 3) + stdio_count = 3; + + err = -ENOMEM; + pipes = uv__malloc(stdio_count * sizeof(*pipes)); + if (pipes == NULL) + goto error; + + for (i = 0; i < stdio_count; i++) { + pipes[i][0] = -1; + pipes[i][1] = -1; + } + + for (i = 0; i < options->stdio_count; i++) { + err = uv__process_init_stdio(options->stdio + i, pipes[i]); + if (err) + goto error; + } + + /* This pipe is used by the parent to wait until + * the child has called `execve()`. We need this + * to avoid the following race condition: + * + * if ((pid = fork()) > 0) { + * kill(pid, SIGTERM); + * } + * else if (pid == 0) { + * execve("/bin/cat", argp, envp); + * } + * + * The parent sends a signal immediately after forking. + * Since the child may not have called `execve()` yet, + * there is no telling what process receives the signal, + * our fork or /bin/cat. + * + * To avoid ambiguity, we create a pipe with both ends + * marked close-on-exec. Then, after the call to `fork()`, + * the parent polls the read end until it EOFs or errors with EPIPE. 
+ */ + err = uv__make_pipe(signal_pipe, 0); + if (err) + goto error; + + uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD); + + /* Acquire write lock to prevent opening new fds in worker threads */ + uv_rwlock_wrlock(&loop->cloexec_lock); + pid = fork(); + + if (pid == -1) { + err = -errno; + uv_rwlock_wrunlock(&loop->cloexec_lock); + uv__close(signal_pipe[0]); + uv__close(signal_pipe[1]); + goto error; + } + + if (pid == 0) { + uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]); + abort(); + } + + /* Release lock in parent process */ + uv_rwlock_wrunlock(&loop->cloexec_lock); + uv__close(signal_pipe[1]); + + process->status = 0; + exec_errorno = 0; + do + r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno)); + while (r == -1 && errno == EINTR); + + if (r == 0) + ; /* okay, EOF */ + else if (r == sizeof(exec_errorno)) { + do + err = waitpid(pid, &status, 0); /* okay, read errorno */ + while (err == -1 && errno == EINTR); + assert(err == pid); + } else if (r == -1 && errno == EPIPE) { + do + err = waitpid(pid, &status, 0); /* okay, got EPIPE */ + while (err == -1 && errno == EINTR); + assert(err == pid); + } else + abort(); + + uv__close_nocheckstdio(signal_pipe[0]); + + for (i = 0; i < options->stdio_count; i++) { + err = uv__process_open_stream(options->stdio + i, pipes[i], i == 0); + if (err == 0) + continue; + + while (i--) + uv__process_close_stream(options->stdio + i); + + goto error; + } + + /* Only activate this handle if exec() happened successfully */ + if (exec_errorno == 0) { + QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue); + uv__handle_start(process); + } + + process->pid = pid; + process->exit_cb = options->exit_cb; + + uv__free(pipes); + return exec_errorno; + +error: + if (pipes != NULL) { + for (i = 0; i < stdio_count; i++) { + if (i < options->stdio_count) + if (options->stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM)) + continue; + if (pipes[i][0] != -1) + uv__close_nocheckstdio(pipes[i][0]); + if (pipes[i][1] != -1) + uv__close_nocheckstdio(pipes[i][1]); + } + uv__free(pipes); + } + + return err; +#endif +} + + +int uv_process_kill(uv_process_t* process, int signum) { + return uv_kill(process->pid, signum); +} + + +int uv_kill(int pid, int signum) { + if (kill(pid, signum)) + return -errno; + else + return 0; +} + + +void uv__process_close(uv_process_t* handle) { + QUEUE_REMOVE(&handle->queue); + uv__handle_stop(handle); + if (QUEUE_EMPTY(&handle->loop->process_handles)) + uv_signal_stop(&handle->loop->child_watcher); +} diff --git a/Utilities/cmlibuv/src/unix/proctitle.c b/Utilities/cmlibuv/src/unix/proctitle.c new file mode 100644 index 0000000..08d875f --- /dev/null +++ b/Utilities/cmlibuv/src/unix/proctitle.c @@ -0,0 +1,105 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
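uv_spawn() above, together with uv__chld() and the signal-pipe handshake, is the whole of the Unix process backend. For orientation, a minimal sketch of the public API it serves (hypothetical caller; error handling mostly elided):

    #include <stdio.h>
    #include <uv.h>

    static void on_exit_cb(uv_process_t* req, int64_t exit_status, int term_signal) {
      /* Invoked from the SIGCHLD path in uv__chld() above. */
      fprintf(stderr, "exited: status %lld, signal %d\n",
              (long long) exit_status, term_signal);
      uv_close((uv_handle_t*) req, NULL);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_process_t child;
      uv_process_options_t options = {0};
      char* args[] = { "date", NULL };

      options.file = "date";  /* Resolved via execvp(), i.e. through $PATH. */
      options.args = args;
      options.exit_cb = on_exit_cb;
      /* With no stdio containers, the child gets /dev/null on fds 0-2,
       * per uv__process_child_init() above. */

      if (uv_spawn(loop, &child, &options))
        return 1;

      return uv_run(loop, UV_RUN_DEFAULT);
    }
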
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <stdlib.h> +#include <string.h> + +extern void uv__set_process_title(const char* title); + +static void* args_mem; + +static struct { + char* str; + size_t len; +} process_title; + + +char** uv_setup_args(int argc, char** argv) { + char** new_argv; + size_t size; + char* s; + int i; + + if (argc <= 0) + return argv; + + /* Calculate how much memory we need for the argv strings. */ + size = 0; + for (i = 0; i < argc; i++) + size += strlen(argv[i]) + 1; + + process_title.str = argv[0]; + process_title.len = argv[argc - 1] + strlen(argv[argc - 1]) - argv[0]; + assert(process_title.len + 1 == size); /* argv memory should be adjacent. */ + + /* Add space for the argv pointers. */ + size += (argc + 1) * sizeof(char*); + + new_argv = uv__malloc(size); + if (new_argv == NULL) + return argv; + args_mem = new_argv; + + /* Copy over the strings and set up the pointer table. */ + s = (char*) &new_argv[argc + 1]; + for (i = 0; i < argc; i++) { + size = strlen(argv[i]) + 1; + memcpy(s, argv[i], size); + new_argv[i] = s; + s += size; + } + new_argv[i] = NULL; + + return new_argv; +} + + +int uv_set_process_title(const char* title) { + if (process_title.len == 0) + return 0; + + /* No need to terminate, byte after is always '\0'. */ + strncpy(process_title.str, title, process_title.len); + uv__set_process_title(title); + + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + if (buffer == NULL || size == 0) + return -EINVAL; + else if (size <= process_title.len) + return -ENOBUFS; + + memcpy(buffer, process_title.str, process_title.len + 1); + buffer[process_title.len] = '\0'; + + return 0; +} + + +UV_DESTRUCTOR(static void free_args_mem(void)) { + uv__free(args_mem); /* Keep valgrind happy. */ + args_mem = NULL; +} diff --git a/Utilities/cmlibuv/src/unix/pthread-barrier.c b/Utilities/cmlibuv/src/unix/pthread-barrier.c new file mode 100644 index 0000000..f57bf25 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/pthread-barrier.c @@ -0,0 +1,120 @@ +/* +Copyright (c) 2016, Kari Tristan Helgason <kthelgason@gmail.com> + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
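The process-title code only has something to overwrite if the application routes argv through uv_setup_args() first; uv_set_process_title() then rewrites the original, contiguous argv block in place, truncating longer titles. A hedged usage sketch:

    #include <uv.h>

    int main(int argc, char** argv) {
      char title[64];

      /* uv_setup_args() copies argv aside and records the writable region;
       * the returned vector must be used in place of the original argv. */
      argv = uv_setup_args(argc, argv);

      uv_set_process_title("myapp: worker");  /* Truncated to the argv region. */
      uv_get_process_title(title, sizeof(title));

      return 0;
    }
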
+*/
+#include "uv-common.h"
+#include "pthread-barrier.h"
+
+#include <stdlib.h>
+#include <assert.h>
+
+/* TODO: support barrier_attr */
+int pthread_barrier_init(pthread_barrier_t* barrier,
+                         const void* barrier_attr,
+                         unsigned count) {
+  int rc;
+  _uv_barrier* b;
+
+  if (barrier == NULL || count == 0)
+    return EINVAL;
+
+  if (barrier_attr != NULL)
+    return ENOTSUP;
+
+  b = uv__malloc(sizeof(*b));
+  if (b == NULL)
+    return ENOMEM;
+
+  b->in = 0;
+  b->out = 0;
+  b->threshold = count;
+
+  if ((rc = pthread_mutex_init(&b->mutex, NULL)) != 0)
+    goto error2;
+  if ((rc = pthread_cond_init(&b->cond, NULL)) != 0)
+    goto error;
+
+  barrier->b = b;
+  return 0;
+
+error:
+  pthread_mutex_destroy(&b->mutex);
+error2:
+  uv__free(b);
+  return rc;
+}
+
+int pthread_barrier_wait(pthread_barrier_t* barrier) {
+  int rc;
+  _uv_barrier* b;
+
+  if (barrier == NULL || barrier->b == NULL)
+    return EINVAL;
+
+  b = barrier->b;
+  /* Lock the mutex */
+  if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
+    return rc;
+
+  /* Increment the count. If this is the first thread to reach the threshold,
+     wake up waiters, unlock the mutex, then return
+     PTHREAD_BARRIER_SERIAL_THREAD. */
+  if (++b->in == b->threshold) {
+    b->in = 0;
+    b->out = b->threshold - 1;
+    rc = pthread_cond_signal(&b->cond);
+    assert(rc == 0);  /* Signal outside assert() so -DNDEBUG can't elide it. */
+
+    pthread_mutex_unlock(&b->mutex);
+    return PTHREAD_BARRIER_SERIAL_THREAD;
+  }
+  /* Otherwise, wait for other threads until in is set to 0,
+     then return 0 to indicate this is not the first thread. */
+  do {
+    if ((rc = pthread_cond_wait(&b->cond, &b->mutex)) != 0)
+      break;
+  } while (b->in != 0);
+
+  /* mark thread exit */
+  b->out--;
+  pthread_cond_signal(&b->cond);
+  pthread_mutex_unlock(&b->mutex);
+  return rc;
+}
+
+int pthread_barrier_destroy(pthread_barrier_t* barrier) {
+  int rc;
+  _uv_barrier* b;
+
+  if (barrier == NULL || barrier->b == NULL)
+    return EINVAL;
+
+  b = barrier->b;
+
+  if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
+    return rc;
+
+  if (b->in > 0 || b->out > 0)
+    rc = EBUSY;
+
+  pthread_mutex_unlock(&b->mutex);
+
+  if (rc)
+    return rc;
+
+  pthread_cond_destroy(&b->cond);
+  pthread_mutex_destroy(&b->mutex);
+  uv__free(barrier->b);
+  barrier->b = NULL;
+  return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/pthread-fixes.c b/Utilities/cmlibuv/src/unix/pthread-fixes.c
new file mode 100644
index 0000000..fb17995
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/pthread-fixes.c
@@ -0,0 +1,56 @@
+/* Copyright (c) 2013, Sony Mobile Communications AB
+ * Copyright (c) 2012, Google Inc.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+     * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+     * Neither the name of Google Inc. nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED.
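Returning to the barrier shim above: it mirrors POSIX semantics, releasing exactly one waiter per cycle with PTHREAD_BARRIER_SERIAL_THREAD. A small sketch of the intended use, written against plain pthreads:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_barrier_t barrier;

    static void* worker(void* arg) {
      /* ... per-thread setup ... */
      if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
        puts("last thread in: all workers are now initialized");
      return arg;
    }

    int main(void) {
      pthread_t threads[4];
      int i;

      pthread_barrier_init(&barrier, NULL, 4);  /* Released in groups of 4. */
      for (i = 0; i < 4; i++)
        pthread_create(&threads[i], NULL, worker, NULL);
      for (i = 0; i < 4; i++)
        pthread_join(threads[i], NULL);
      pthread_barrier_destroy(&barrier);
      return 0;
    }
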
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/* Android versions < 4.1 have a broken pthread_sigmask. */ +#include <errno.h> +#include <pthread.h> +#include <signal.h> + +int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) { + static int workaround; + int err; + + if (workaround) { + return sigprocmask(how, set, oset); + } else { + err = pthread_sigmask(how, set, oset); + if (err) { + if (err == EINVAL && sigprocmask(how, set, oset) == 0) { + workaround = 1; + return 0; + } else { + return -1; + } + } + } + + return 0; +} diff --git a/Utilities/cmlibuv/src/unix/signal.c b/Utilities/cmlibuv/src/unix/signal.c new file mode 100644 index 0000000..d82b9b7 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/signal.c @@ -0,0 +1,467 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "internal.h" + +#include <assert.h> +#include <errno.h> +#include <signal.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + + +typedef struct { + uv_signal_t* handle; + int signum; +} uv__signal_msg_t; + +RB_HEAD(uv__signal_tree_s, uv_signal_s); + + +static int uv__signal_unlock(void); +static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events); +static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2); +static void uv__signal_stop(uv_signal_t* handle); + + +static pthread_once_t uv__signal_global_init_guard = PTHREAD_ONCE_INIT; +static struct uv__signal_tree_s uv__signal_tree = + RB_INITIALIZER(uv__signal_tree); +static int uv__signal_lock_pipefd[2]; + + +RB_GENERATE_STATIC(uv__signal_tree_s, + uv_signal_s, tree_entry, + uv__signal_compare) + + +static void uv__signal_global_init(void) { + if (uv__make_pipe(uv__signal_lock_pipefd, 0)) + abort(); + + if (uv__signal_unlock()) + abort(); +} + + +void uv__signal_global_once_init(void) { + pthread_once(&uv__signal_global_init_guard, uv__signal_global_init); +} + + + +static int uv__signal_lock(void) { + int r; + char data; + + do { + r = read(uv__signal_lock_pipefd[0], &data, sizeof data); + } while (r < 0 && errno == EINTR); + + return (r < 0) ? -1 : 0; +} + + +static int uv__signal_unlock(void) { + int r; + char data = 42; + + do { + r = write(uv__signal_lock_pipefd[1], &data, sizeof data); + } while (r < 0 && errno == EINTR); + + return (r < 0) ? -1 : 0; +} + + +static void uv__signal_block_and_lock(sigset_t* saved_sigmask) { + sigset_t new_mask; + + if (sigfillset(&new_mask)) + abort(); + + if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask)) + abort(); + + if (uv__signal_lock()) + abort(); +} + + +static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) { + if (uv__signal_unlock()) + abort(); + + if (pthread_sigmask(SIG_SETMASK, saved_sigmask, NULL)) + abort(); +} + + +static uv_signal_t* uv__signal_first_handle(int signum) { + /* This function must be called with the signal lock held. */ + uv_signal_t lookup; + uv_signal_t* handle; + + lookup.signum = signum; + lookup.loop = NULL; + + handle = RB_NFIND(uv__signal_tree_s, &uv__signal_tree, &lookup); + + if (handle != NULL && handle->signum == signum) + return handle; + + return NULL; +} + + +static void uv__signal_handler(int signum) { + uv__signal_msg_t msg; + uv_signal_t* handle; + int saved_errno; + + saved_errno = errno; + memset(&msg, 0, sizeof msg); + + if (uv__signal_lock()) { + errno = saved_errno; + return; + } + + for (handle = uv__signal_first_handle(signum); + handle != NULL && handle->signum == signum; + handle = RB_NEXT(uv__signal_tree_s, &uv__signal_tree, handle)) { + int r; + + msg.signum = signum; + msg.handle = handle; + + /* write() should be atomic for small data chunks, so the entire message + * should be written at once. In theory the pipe could become full, in + * which case the user is out of luck. + */ + do { + r = write(handle->loop->signal_pipefd[1], &msg, sizeof msg); + } while (r == -1 && errno == EINTR); + + assert(r == sizeof msg || + (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))); + + if (r != -1) + handle->caught_signals++; + } + + uv__signal_unlock(); + errno = saved_errno; +} + + +static int uv__signal_register_handler(int signum) { + /* When this function is called, the signal lock must be held. */ + struct sigaction sa; + + /* XXX use a separate signal stack? 
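uv__signal_lock() and uv__signal_unlock() above exploit the fact that read() and write() are async-signal-safe while pthread mutexes are not: a pipe primed with a single byte behaves as a binary semaphore that may be taken inside a signal handler. The idea in isolation (hypothetical helper names):

    #include <unistd.h>

    static int lock_pipe[2];

    static void sem_give(void) {  /* Return the token, releasing the lock. */
      char c = 42;
      while (write(lock_pipe[1], &c, 1) != 1)
        ;
    }

    static void sem_take(void) {  /* Blocks while another holder has the byte. */
      char c;
      while (read(lock_pipe[0], &c, 1) != 1)
        ;
    }

    static void sem_init_pipe(void) {
      pipe(lock_pipe);
      sem_give();  /* Prime the pipe with the single token. */
    }
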
*/
+  memset(&sa, 0, sizeof(sa));
+  if (sigfillset(&sa.sa_mask))
+    abort();
+  sa.sa_handler = uv__signal_handler;
+
+  /* XXX save old action so we can restore it later on? */
+  if (sigaction(signum, &sa, NULL))
+    return -errno;
+
+  return 0;
+}
+
+
+static void uv__signal_unregister_handler(int signum) {
+  /* When this function is called, the signal lock must be held. */
+  struct sigaction sa;
+
+  memset(&sa, 0, sizeof(sa));
+  sa.sa_handler = SIG_DFL;
+
+  /* sigaction can only fail with EINVAL or EFAULT; an attempt to deregister a
+   * signal implies that it was successfully registered earlier, so EINVAL
+   * should never happen.
+   */
+  if (sigaction(signum, &sa, NULL))
+    abort();
+}
+
+
+static int uv__signal_loop_once_init(uv_loop_t* loop) {
+  int err;
+
+  /* Return if already initialized. */
+  if (loop->signal_pipefd[0] != -1)
+    return 0;
+
+  err = uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK);
+  if (err)
+    return err;
+
+  uv__io_init(&loop->signal_io_watcher,
+              uv__signal_event,
+              loop->signal_pipefd[0]);
+  uv__io_start(loop, &loop->signal_io_watcher, POLLIN);
+
+  return 0;
+}
+
+
+void uv__signal_loop_cleanup(uv_loop_t* loop) {
+  QUEUE* q;
+
+  /* Stop all the signal watchers that are still attached to this loop. This
+   * ensures that the (shared) signal tree doesn't contain any invalid
+   * entries, and that signal handlers are removed when appropriate.
+   * It's safe to use QUEUE_FOREACH here because the handles and the handle
+   * queue are not modified by uv__signal_stop().
+   */
+  QUEUE_FOREACH(q, &loop->handle_queue) {
+    uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue);
+
+    if (handle->type == UV_SIGNAL)
+      uv__signal_stop((uv_signal_t*) handle);
+  }
+
+  if (loop->signal_pipefd[0] != -1) {
+    uv__close(loop->signal_pipefd[0]);
+    loop->signal_pipefd[0] = -1;
+  }
+
+  if (loop->signal_pipefd[1] != -1) {
+    uv__close(loop->signal_pipefd[1]);
+    loop->signal_pipefd[1] = -1;
+  }
+}
+
+
+int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
+  int err;
+
+  err = uv__signal_loop_once_init(loop);
+  if (err)
+    return err;
+
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
+  handle->signum = 0;
+  handle->caught_signals = 0;
+  handle->dispatched_signals = 0;
+
+  return 0;
+}
+
+
+void uv__signal_close(uv_signal_t* handle) {
+
+  uv__signal_stop(handle);
+
+  /* If there are any caught signals "trapped" in the signal pipe, we can't
+   * call the close callback yet. Otherwise, add the handle to the finish_close
+   * queue.
+   */
+  if (handle->caught_signals == handle->dispatched_signals) {
+    uv__make_close_pending((uv_handle_t*) handle);
+  }
+}
+
+
+int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
+  sigset_t saved_sigmask;
+  int err;
+
+  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
+
+  /* If the user supplies signum == 0, return an error right away. If the
+   * signum is otherwise invalid then uv__signal_register_handler will find
+   * out eventually.
+   */
+  if (signum == 0)
+    return -EINVAL;
+
+  /* Short circuit: if the signal watcher is already watching {signum} don't
+   * go through the process of deregistering and registering the handler.
+   * Additionally, this avoids pending signals getting lost in the small
+   * time frame that handle->signum == 0.
+   */
+  if (signum == handle->signum) {
+    handle->signal_cb = signal_cb;
+    return 0;
+  }
+
+  /* If the signal handler was already active, stop it first. */
+  if (handle->signum != 0) {
+    uv__signal_stop(handle);
+  }
+
+  uv__signal_block_and_lock(&saved_sigmask);
+
+  /* If at this point there are no active signal watchers for this signum (in
+   * any of the loops), it's time to try and register a handler for it here.
+   */
+  if (uv__signal_first_handle(signum) == NULL) {
+    err = uv__signal_register_handler(signum);
+    if (err) {
+      /* Registering the signal handler failed. Must be an invalid signal. */
+      uv__signal_unlock_and_unblock(&saved_sigmask);
+      return err;
+    }
+  }
+
+  handle->signum = signum;
+  RB_INSERT(uv__signal_tree_s, &uv__signal_tree, handle);
+
+  uv__signal_unlock_and_unblock(&saved_sigmask);
+
+  handle->signal_cb = signal_cb;
+  uv__handle_start(handle);
+
+  return 0;
+}
+
+
+static void uv__signal_event(uv_loop_t* loop,
+                             uv__io_t* w,
+                             unsigned int events) {
+  uv__signal_msg_t* msg;
+  uv_signal_t* handle;
+  char buf[sizeof(uv__signal_msg_t) * 32];
+  size_t bytes, end, i;
+  int r;
+
+  bytes = 0;
+  end = 0;
+
+  do {
+    r = read(loop->signal_pipefd[0], buf + bytes, sizeof(buf) - bytes);
+
+    if (r == -1 && errno == EINTR)
+      continue;
+
+    if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+      /* If there are bytes in the buffer already (which really is extremely
+       * unlikely if possible at all) we can't exit the function here. We'll
+       * spin until more bytes are read instead.
+       */
+      if (bytes > 0)
+        continue;
+
+      /* Otherwise, there was nothing there. */
+      return;
+    }
+
+    /* Other errors really should never happen. */
+    if (r == -1)
+      abort();
+
+    bytes += r;
+
+    /* `end` is rounded down to a multiple of sizeof(uv__signal_msg_t). */
+    end = (bytes / sizeof(uv__signal_msg_t)) * sizeof(uv__signal_msg_t);
+
+    for (i = 0; i < end; i += sizeof(uv__signal_msg_t)) {
+      msg = (uv__signal_msg_t*) (buf + i);
+      handle = msg->handle;
+
+      if (msg->signum == handle->signum) {
+        assert(!(handle->flags & UV_CLOSING));
+        handle->signal_cb(handle, handle->signum);
+      }
+
+      handle->dispatched_signals++;
+
+      /* If uv_close was called while there were caught signals that were not
+       * yet dispatched, the uv__finish_close was deferred. Make close pending
+       * now if this has happened.
+       */
+      if ((handle->flags & UV_CLOSING) &&
+          (handle->caught_signals == handle->dispatched_signals)) {
+        uv__make_close_pending((uv_handle_t*) handle);
+      }
+    }
+
+    bytes -= end;
+
+    /* If there are any "partial" messages left, move them to the start of
+     * the buffer, and spin. This should not happen.
+     */
+    if (bytes) {
+      memmove(buf, buf + end, bytes);
+      continue;
+    }
+  } while (end == sizeof buf);
+}
+
+
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
+  /* Compare signums first so all watchers with the same signum end up
+   * adjacent.
+   */
+  if (w1->signum < w2->signum) return -1;
+  if (w1->signum > w2->signum) return 1;
+
+  /* Sort by loop pointer, so we can easily look up the first item after
+   * { .signum = x, .loop = NULL }.
+   */
+  if (w1->loop < w2->loop) return -1;
+  if (w1->loop > w2->loop) return 1;
+
+  if (w1 < w2) return -1;
+  if (w1 > w2) return 1;
+
+  return 0;
+}
+
+
+int uv_signal_stop(uv_signal_t* handle) {
+  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
+  uv__signal_stop(handle);
+  return 0;
+}
+
+
+static void uv__signal_stop(uv_signal_t* handle) {
+  uv_signal_t* removed_handle;
+  sigset_t saved_sigmask;
+
+  /* If the watcher wasn't started, this is a no-op.
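From the caller's perspective the machinery above collapses to three calls. A hedged sketch using the default loop and SIGINT:

    #include <signal.h>
    #include <stdio.h>
    #include <uv.h>

    static void on_signal(uv_signal_t* handle, int signum) {
      printf("caught signal %d\n", signum);
      uv_signal_stop(handle);  /* One-shot: unregister after the first hit. */
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_signal_t sig;

      uv_signal_init(loop, &sig);  /* Sets up the loop's signal pipe. */
      uv_signal_start(&sig, on_signal, SIGINT);

      return uv_run(loop, UV_RUN_DEFAULT);
    }
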
*/ + if (handle->signum == 0) + return; + + uv__signal_block_and_lock(&saved_sigmask); + + removed_handle = RB_REMOVE(uv__signal_tree_s, &uv__signal_tree, handle); + assert(removed_handle == handle); + (void) removed_handle; + + /* Check if there are other active signal watchers observing this signal. If + * not, unregister the signal handler. + */ + if (uv__signal_first_handle(handle->signum) == NULL) + uv__signal_unregister_handler(handle->signum); + + uv__signal_unlock_and_unblock(&saved_sigmask); + + handle->signum = 0; + uv__handle_stop(handle); +} diff --git a/Utilities/cmlibuv/src/unix/spinlock.h b/Utilities/cmlibuv/src/unix/spinlock.h new file mode 100644 index 0000000..a20c83c --- /dev/null +++ b/Utilities/cmlibuv/src/unix/spinlock.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef UV_SPINLOCK_H_ +#define UV_SPINLOCK_H_ + +#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */ +#include "atomic-ops.h" + +#define UV_SPINLOCK_INITIALIZER { 0 } + +typedef struct { + int lock; +} uv_spinlock_t; + +UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)); +UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)); +UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)); +UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)); + +UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) { + ACCESS_ONCE(int, spinlock->lock) = 0; +} + +UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) { + while (!uv_spinlock_trylock(spinlock)) cpu_relax(); +} + +UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) { + ACCESS_ONCE(int, spinlock->lock) = 0; +} + +UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) { + /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing. + * Not really critical until we have locks that are (frequently) contended + * for by several threads. + */ + return 0 == cmpxchgi(&spinlock->lock, 0, 1); +} + +#endif /* UV_SPINLOCK_H_ */ diff --git a/Utilities/cmlibuv/src/unix/stream.c b/Utilities/cmlibuv/src/unix/stream.c new file mode 100644 index 0000000..d20d0bc --- /dev/null +++ b/Utilities/cmlibuv/src/unix/stream.c @@ -0,0 +1,1638 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
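uv_spinlock_t above is intended for short internal critical sections where a full mutex would be overkill. A minimal, illustrative sketch of its use (`counter` and `bump()` are invented names):

    #include "spinlock.h"  /* the internal header above */

    static uv_spinlock_t lock = UV_SPINLOCK_INITIALIZER;
    static int counter;

    static void bump(void) {
      uv_spinlock_lock(&lock);    /* Spins on cmpxchgi() until 0 -> 1 wins. */
      counter++;
      uv_spinlock_unlock(&lock);  /* Releases by storing 0. */
    }
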
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> +#include <errno.h> + +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/uio.h> +#include <sys/un.h> +#include <unistd.h> +#include <limits.h> /* IOV_MAX */ + +#if defined(__APPLE__) +# include <sys/event.h> +# include <sys/time.h> +# include <sys/select.h> + +/* Forward declaration */ +typedef struct uv__stream_select_s uv__stream_select_t; + +struct uv__stream_select_s { + uv_stream_t* stream; + uv_thread_t thread; + uv_sem_t close_sem; + uv_sem_t async_sem; + uv_async_t async; + int events; + int fake_fd; + int int_fd; + int fd; + fd_set* sread; + size_t sread_sz; + fd_set* swrite; + size_t swrite_sz; +}; +#endif /* defined(__APPLE__) */ + +static void uv__stream_connect(uv_stream_t*); +static void uv__write(uv_stream_t* stream); +static void uv__read(uv_stream_t* stream); +static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events); +static void uv__write_callbacks(uv_stream_t* stream); +static size_t uv__write_req_size(uv_write_t* req); + + +void uv__stream_init(uv_loop_t* loop, + uv_stream_t* stream, + uv_handle_type type) { + int err; + + uv__handle_init(loop, (uv_handle_t*)stream, type); + stream->read_cb = NULL; + stream->alloc_cb = NULL; + stream->close_cb = NULL; + stream->connection_cb = NULL; + stream->connect_req = NULL; + stream->shutdown_req = NULL; + stream->accepted_fd = -1; + stream->queued_fds = NULL; + stream->delayed_error = 0; + QUEUE_INIT(&stream->write_queue); + QUEUE_INIT(&stream->write_completed_queue); + stream->write_queue_size = 0; + + if (loop->emfile_fd == -1) { + err = uv__open_cloexec("/dev/null", O_RDONLY); + if (err < 0) + /* In the rare case that "/dev/null" isn't mounted open "/" + * instead. 
+ */ + err = uv__open_cloexec("/", O_RDONLY); + if (err >= 0) + loop->emfile_fd = err; + } + +#if defined(__APPLE__) + stream->select = NULL; +#endif /* defined(__APPLE_) */ + + uv__io_init(&stream->io_watcher, uv__stream_io, -1); +} + + +static void uv__stream_osx_interrupt_select(uv_stream_t* stream) { +#if defined(__APPLE__) + /* Notify select() thread about state change */ + uv__stream_select_t* s; + int r; + + s = stream->select; + if (s == NULL) + return; + + /* Interrupt select() loop + * NOTE: fake_fd and int_fd are socketpair(), thus writing to one will + * emit read event on other side + */ + do + r = write(s->fake_fd, "x", 1); + while (r == -1 && errno == EINTR); + + assert(r == 1); +#else /* !defined(__APPLE__) */ + /* No-op on any other platform */ +#endif /* !defined(__APPLE__) */ +} + + +#if defined(__APPLE__) +static void uv__stream_osx_select(void* arg) { + uv_stream_t* stream; + uv__stream_select_t* s; + char buf[1024]; + int events; + int fd; + int r; + int max_fd; + + stream = arg; + s = stream->select; + fd = s->fd; + + if (fd > s->int_fd) + max_fd = fd; + else + max_fd = s->int_fd; + + while (1) { + /* Terminate on semaphore */ + if (uv_sem_trywait(&s->close_sem) == 0) + break; + + /* Watch fd using select(2) */ + memset(s->sread, 0, s->sread_sz); + memset(s->swrite, 0, s->swrite_sz); + + if (uv__io_active(&stream->io_watcher, POLLIN)) + FD_SET(fd, s->sread); + if (uv__io_active(&stream->io_watcher, POLLOUT)) + FD_SET(fd, s->swrite); + FD_SET(s->int_fd, s->sread); + + /* Wait indefinitely for fd events */ + r = select(max_fd + 1, s->sread, s->swrite, NULL, NULL); + if (r == -1) { + if (errno == EINTR) + continue; + + /* XXX: Possible?! */ + abort(); + } + + /* Ignore timeouts */ + if (r == 0) + continue; + + /* Empty socketpair's buffer in case of interruption */ + if (FD_ISSET(s->int_fd, s->sread)) + while (1) { + r = read(s->int_fd, buf, sizeof(buf)); + + if (r == sizeof(buf)) + continue; + + if (r != -1) + break; + + if (errno == EAGAIN || errno == EWOULDBLOCK) + break; + + if (errno == EINTR) + continue; + + abort(); + } + + /* Handle events */ + events = 0; + if (FD_ISSET(fd, s->sread)) + events |= POLLIN; + if (FD_ISSET(fd, s->swrite)) + events |= POLLOUT; + + assert(events != 0 || FD_ISSET(s->int_fd, s->sread)); + if (events != 0) { + ACCESS_ONCE(int, s->events) = events; + + uv_async_send(&s->async); + uv_sem_wait(&s->async_sem); + + /* Should be processed at this stage */ + assert((s->events == 0) || (stream->flags & UV_CLOSING)); + } + } +} + + +static void uv__stream_osx_select_cb(uv_async_t* handle) { + uv__stream_select_t* s; + uv_stream_t* stream; + int events; + + s = container_of(handle, uv__stream_select_t, async); + stream = s->stream; + + /* Get and reset stream's events */ + events = s->events; + ACCESS_ONCE(int, s->events) = 0; + + assert(events != 0); + assert(events == (events & (POLLIN | POLLOUT))); + + /* Invoke callback on event-loop */ + if ((events & POLLIN) && uv__io_active(&stream->io_watcher, POLLIN)) + uv__stream_io(stream->loop, &stream->io_watcher, POLLIN); + + if ((events & POLLOUT) && uv__io_active(&stream->io_watcher, POLLOUT)) + uv__stream_io(stream->loop, &stream->io_watcher, POLLOUT); + + if (stream->flags & UV_CLOSING) + return; + + /* NOTE: It is important to do it here, otherwise `select()` might be called + * before the actual `uv__read()`, leading to the blocking syscall + */ + uv_sem_post(&s->async_sem); +} + + +static void uv__stream_osx_cb_close(uv_handle_t* async) { + uv__stream_select_t* s; + + s = container_of(async, 
uv__stream_select_t, async); + uv__free(s); +} + + +int uv__stream_try_select(uv_stream_t* stream, int* fd) { + /* + * kqueue doesn't work with some files from /dev mount on osx. + * select(2) in separate thread for those fds + */ + + struct kevent filter[1]; + struct kevent events[1]; + struct timespec timeout; + uv__stream_select_t* s; + int fds[2]; + int err; + int ret; + int kq; + int old_fd; + int max_fd; + size_t sread_sz; + size_t swrite_sz; + + kq = kqueue(); + if (kq == -1) { + perror("(libuv) kqueue()"); + return -errno; + } + + EV_SET(&filter[0], *fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0); + + /* Use small timeout, because we only want to capture EINVALs */ + timeout.tv_sec = 0; + timeout.tv_nsec = 1; + + do + ret = kevent(kq, filter, 1, events, 1, &timeout); + while (ret == -1 && errno == EINTR); + + uv__close(kq); + + if (ret == -1) + return -errno; + + if (ret == 0 || (events[0].flags & EV_ERROR) == 0 || events[0].data != EINVAL) + return 0; + + /* At this point we definitely know that this fd won't work with kqueue */ + + /* + * Create fds for io watcher and to interrupt the select() loop. + * NOTE: do it ahead of malloc below to allocate enough space for fd_sets + */ + if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) + return -errno; + + max_fd = *fd; + if (fds[1] > max_fd) + max_fd = fds[1]; + + sread_sz = ROUND_UP(max_fd + 1, sizeof(uint32_t) * NBBY) / NBBY; + swrite_sz = sread_sz; + + s = uv__malloc(sizeof(*s) + sread_sz + swrite_sz); + if (s == NULL) { + err = -ENOMEM; + goto failed_malloc; + } + + s->events = 0; + s->fd = *fd; + s->sread = (fd_set*) ((char*) s + sizeof(*s)); + s->sread_sz = sread_sz; + s->swrite = (fd_set*) ((char*) s->sread + sread_sz); + s->swrite_sz = swrite_sz; + + err = uv_async_init(stream->loop, &s->async, uv__stream_osx_select_cb); + if (err) + goto failed_async_init; + + s->async.flags |= UV__HANDLE_INTERNAL; + uv__handle_unref(&s->async); + + err = uv_sem_init(&s->close_sem, 0); + if (err != 0) + goto failed_close_sem_init; + + err = uv_sem_init(&s->async_sem, 0); + if (err != 0) + goto failed_async_sem_init; + + s->fake_fd = fds[0]; + s->int_fd = fds[1]; + + old_fd = *fd; + s->stream = stream; + stream->select = s; + *fd = s->fake_fd; + + err = uv_thread_create(&s->thread, uv__stream_osx_select, stream); + if (err != 0) + goto failed_thread_create; + + return 0; + +failed_thread_create: + s->stream = NULL; + stream->select = NULL; + *fd = old_fd; + + uv_sem_destroy(&s->async_sem); + +failed_async_sem_init: + uv_sem_destroy(&s->close_sem); + +failed_close_sem_init: + uv__close(fds[0]); + uv__close(fds[1]); + uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close); + return err; + +failed_async_init: + uv__free(s); + +failed_malloc: + uv__close(fds[0]); + uv__close(fds[1]); + + return err; +} +#endif /* defined(__APPLE__) */ + + +int uv__stream_open(uv_stream_t* stream, int fd, int flags) { +#if defined(__APPLE__) + int enable; +#endif + + if (!(stream->io_watcher.fd == -1 || stream->io_watcher.fd == fd)) + return -EBUSY; + + assert(fd >= 0); + stream->flags |= flags; + + if (stream->type == UV_TCP) { + if ((stream->flags & UV_TCP_NODELAY) && uv__tcp_nodelay(fd, 1)) + return -errno; + + /* TODO Use delay the user passed in. 
*/ + if ((stream->flags & UV_TCP_KEEPALIVE) && uv__tcp_keepalive(fd, 1, 60)) + return -errno; + } + +#if defined(__APPLE__) + enable = 1; + if (setsockopt(fd, SOL_SOCKET, SO_OOBINLINE, &enable, sizeof(enable)) && + errno != ENOTSOCK && + errno != EINVAL) { + return -errno; + } +#endif + + stream->io_watcher.fd = fd; + + return 0; +} + + +void uv__stream_flush_write_queue(uv_stream_t* stream, int error) { + uv_write_t* req; + QUEUE* q; + while (!QUEUE_EMPTY(&stream->write_queue)) { + q = QUEUE_HEAD(&stream->write_queue); + QUEUE_REMOVE(q); + + req = QUEUE_DATA(q, uv_write_t, queue); + req->error = error; + + QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); + } +} + + +void uv__stream_destroy(uv_stream_t* stream) { + assert(!uv__io_active(&stream->io_watcher, POLLIN | POLLOUT)); + assert(stream->flags & UV_CLOSED); + + if (stream->connect_req) { + uv__req_unregister(stream->loop, stream->connect_req); + stream->connect_req->cb(stream->connect_req, -ECANCELED); + stream->connect_req = NULL; + } + + uv__stream_flush_write_queue(stream, -ECANCELED); + uv__write_callbacks(stream); + + if (stream->shutdown_req) { + /* The ECANCELED error code is a lie, the shutdown(2) syscall is a + * fait accompli at this point. Maybe we should revisit this in v0.11. + * A possible reason for leaving it unchanged is that it informs the + * callee that the handle has been destroyed. + */ + uv__req_unregister(stream->loop, stream->shutdown_req); + stream->shutdown_req->cb(stream->shutdown_req, -ECANCELED); + stream->shutdown_req = NULL; + } + + assert(stream->write_queue_size == 0); +} + + +/* Implements a best effort approach to mitigating accept() EMFILE errors. + * We have a spare file descriptor stashed away that we close to get below + * the EMFILE limit. Next, we accept all pending connections and close them + * immediately to signal the clients that we're overloaded - and we are, but + * we still keep on trucking. + * + * There is one caveat: it's not reliable in a multi-threaded environment. + * The file descriptor limit is per process. Our party trick fails if another + * thread opens a file or creates a socket in the time window between us + * calling close() and accept(). + */ +static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) { + int err; + int emfile_fd; + + if (loop->emfile_fd == -1) + return -EMFILE; + + uv__close(loop->emfile_fd); + loop->emfile_fd = -1; + + do { + err = uv__accept(accept_fd); + if (err >= 0) + uv__close(err); + } while (err >= 0 || err == -EINTR); + + emfile_fd = uv__open_cloexec("/", O_RDONLY); + if (emfile_fd >= 0) + loop->emfile_fd = emfile_fd; + + return err; +} + + +#if defined(UV_HAVE_KQUEUE) +# define UV_DEC_BACKLOG(w) w->rcount--; +#else +# define UV_DEC_BACKLOG(w) /* no-op */ +#endif /* defined(UV_HAVE_KQUEUE) */ + + +void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + uv_stream_t* stream; + int err; + + stream = container_of(w, uv_stream_t, io_watcher); + assert(events == POLLIN); + assert(stream->accepted_fd == -1); + assert(!(stream->flags & UV_CLOSING)); + + uv__io_start(stream->loop, &stream->io_watcher, POLLIN); + + /* connection_cb can close the server socket while we're + * in the loop so check it on each iteration. 
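The uv__emfile_trick() above depends on the spare descriptor parked on /dev/null in uv__stream_init(). Outside libuv, the same best-effort pattern looks roughly like this (hypothetical names; assumes a non-blocking listen socket and a per-process fd limit):

    #include <errno.h>
    #include <fcntl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int spare_fd = -1;

    void reserve_spare_fd(void) {  /* Call once at startup. */
      spare_fd = open("/dev/null", O_RDONLY);
    }

    /* On EMFILE: give the reserved descriptor back, then accept-and-close
     * every pending connection to tell clients we are overloaded, then
     * re-arm the spare fd for the next overload. */
    void shed_pending(int server_fd) {
      int fd;

      if (spare_fd == -1)
        return;
      close(spare_fd);
      spare_fd = -1;

      while ((fd = accept(server_fd, NULL, NULL)) >= 0)
        close(fd);

      reserve_spare_fd();
    }
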
+   */
+  while (uv__stream_fd(stream) != -1) {
+    assert(stream->accepted_fd == -1);
+
+#if defined(UV_HAVE_KQUEUE)
+    if (w->rcount <= 0)
+      return;
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+    err = uv__accept(uv__stream_fd(stream));
+    if (err < 0) {
+      if (err == -EAGAIN || err == -EWOULDBLOCK)
+        return;  /* Not an error. */
+
+      if (err == -ECONNABORTED)
+        continue;  /* Ignore. Nothing we can do about that. */
+
+      if (err == -EMFILE || err == -ENFILE) {
+        err = uv__emfile_trick(loop, uv__stream_fd(stream));
+        if (err == -EAGAIN || err == -EWOULDBLOCK)
+          break;
+      }
+
+      stream->connection_cb(stream, err);
+      continue;
+    }
+
+    UV_DEC_BACKLOG(w)
+    stream->accepted_fd = err;
+    stream->connection_cb(stream, 0);
+
+    if (stream->accepted_fd != -1) {
+      /* The user hasn't yet called uv_accept(). */
+      uv__io_stop(loop, &stream->io_watcher, POLLIN);
+      return;
+    }
+
+    if (stream->type == UV_TCP && (stream->flags & UV_TCP_SINGLE_ACCEPT)) {
+      /* Give other processes a chance to accept connections. */
+      struct timespec timeout = { 0, 1 };
+      nanosleep(&timeout, NULL);
+    }
+  }
+}
+
+
+#undef UV_DEC_BACKLOG
+
+
+int uv_accept(uv_stream_t* server, uv_stream_t* client) {
+  int err;
+
+  assert(server->loop == client->loop);
+
+  if (server->accepted_fd == -1)
+    return -EAGAIN;
+
+  switch (client->type) {
+    case UV_NAMED_PIPE:
+    case UV_TCP:
+      err = uv__stream_open(client,
+                            server->accepted_fd,
+                            UV_STREAM_READABLE | UV_STREAM_WRITABLE);
+      if (err) {
+        /* TODO handle error */
+        uv__close(server->accepted_fd);
+        goto done;
+      }
+      break;
+
+    case UV_UDP:
+      err = uv_udp_open((uv_udp_t*) client, server->accepted_fd);
+      if (err) {
+        uv__close(server->accepted_fd);
+        goto done;
+      }
+      break;
+
+    default:
+      return -EINVAL;
+  }
+
+  client->flags |= UV_HANDLE_BOUND;
+
+done:
+  /* Process queued fds */
+  if (server->queued_fds != NULL) {
+    uv__stream_queued_fds_t* queued_fds;
+
+    queued_fds = server->queued_fds;
+
+    /* Read first */
+    server->accepted_fd = queued_fds->fds[0];
+
+    /* All read, free */
+    assert(queued_fds->offset > 0);
+    if (--queued_fds->offset == 0) {
+      uv__free(queued_fds);
+      server->queued_fds = NULL;
+    } else {
+      /* Shift rest */
+      memmove(queued_fds->fds,
+              queued_fds->fds + 1,
+              queued_fds->offset * sizeof(*queued_fds->fds));
+    }
+  } else {
+    server->accepted_fd = -1;
+    if (err == 0)
+      uv__io_start(server->loop, &server->io_watcher, POLLIN);
+  }
+  return err;
+}
+
+
+int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
+  int err;
+
+  switch (stream->type) {
+  case UV_TCP:
+    err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
+    break;
+
+  case UV_NAMED_PIPE:
+    err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
+    break;
+
+  default:
+    err = -EINVAL;
+  }
+
+  if (err == 0)
+    uv__handle_start(stream);
+
+  return err;
+}
+
+
+static void uv__drain(uv_stream_t* stream) {
+  uv_shutdown_t* req;
+  int err;
+
+  assert(QUEUE_EMPTY(&stream->write_queue));
+  uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+  uv__stream_osx_interrupt_select(stream);
+
+  /* Shutdown?
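uv_listen() and uv_accept() are the public faces of the accept path above. A hedged TCP server skeleton (port 7000 is arbitrary; per-connection cleanup elided):

    #include <stdlib.h>
    #include <uv.h>

    static uv_tcp_t server;

    static void on_connection(uv_stream_t* srv, int status) {
      uv_tcp_t* client;

      if (status < 0)
        return;  /* e.g. the EMFILE path above reporting an error. */

      client = malloc(sizeof(*client));
      uv_tcp_init(srv->loop, client);

      if (uv_accept(srv, (uv_stream_t*) client) == 0) {
        /* ... uv_read_start()/uv_write() on `client` ... */
      } else {
        uv_close((uv_handle_t*) client, NULL);  /* Cleanup elided. */
      }
    }

    int main(void) {
      struct sockaddr_in addr;

      uv_tcp_init(uv_default_loop(), &server);
      uv_ip4_addr("0.0.0.0", 7000, &addr);
      uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0);
      uv_listen((uv_stream_t*) &server, 128, on_connection);

      return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    }
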
*/ + if ((stream->flags & UV_STREAM_SHUTTING) && + !(stream->flags & UV_CLOSING) && + !(stream->flags & UV_STREAM_SHUT)) { + assert(stream->shutdown_req); + + req = stream->shutdown_req; + stream->shutdown_req = NULL; + stream->flags &= ~UV_STREAM_SHUTTING; + uv__req_unregister(stream->loop, req); + + err = 0; + if (shutdown(uv__stream_fd(stream), SHUT_WR)) + err = -errno; + + if (err == 0) + stream->flags |= UV_STREAM_SHUT; + + if (req->cb != NULL) + req->cb(req, err); + } +} + + +static size_t uv__write_req_size(uv_write_t* req) { + size_t size; + + assert(req->bufs != NULL); + size = uv__count_bufs(req->bufs + req->write_index, + req->nbufs - req->write_index); + assert(req->handle->write_queue_size >= size); + + return size; +} + + +static void uv__write_req_finish(uv_write_t* req) { + uv_stream_t* stream = req->handle; + + /* Pop the req off tcp->write_queue. */ + QUEUE_REMOVE(&req->queue); + + /* Only free when there was no error. On error, we touch up write_queue_size + * right before making the callback. The reason we don't do that right away + * is that a write_queue_size > 0 is our only way to signal to the user that + * they should stop writing - which they should if we got an error. Something + * to revisit in future revisions of the libuv API. + */ + if (req->error == 0) { + if (req->bufs != req->bufsml) + uv__free(req->bufs); + req->bufs = NULL; + } + + /* Add it to the write_completed_queue where it will have its + * callback called in the near future. + */ + QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); + uv__io_feed(stream->loop, &stream->io_watcher); +} + + +static int uv__handle_fd(uv_handle_t* handle) { + switch (handle->type) { + case UV_NAMED_PIPE: + case UV_TCP: + return ((uv_stream_t*) handle)->io_watcher.fd; + + case UV_UDP: + return ((uv_udp_t*) handle)->io_watcher.fd; + + default: + return -1; + } +} + +static void uv__write(uv_stream_t* stream) { + struct iovec* iov; + QUEUE* q; + uv_write_t* req; + int iovmax; + int iovcnt; + ssize_t n; + +start: + + assert(uv__stream_fd(stream) >= 0); + + if (QUEUE_EMPTY(&stream->write_queue)) + return; + + q = QUEUE_HEAD(&stream->write_queue); + req = QUEUE_DATA(q, uv_write_t, queue); + assert(req->handle == stream); + + /* + * Cast to iovec. We had to have our own uv_buf_t instead of iovec + * because Windows's WSABUF is not an iovec. + */ + assert(sizeof(uv_buf_t) == sizeof(struct iovec)); + iov = (struct iovec*) &(req->bufs[req->write_index]); + iovcnt = req->nbufs - req->write_index; + + iovmax = uv__getiovmax(); + + /* Limit iov count to avoid EINVALs from writev() */ + if (iovcnt > iovmax) + iovcnt = iovmax; + + /* + * Now do the actual writev. Note that we've been updating the pointers + * inside the iov each time we write. So there is no need to offset it. 
+ */ + + if (req->send_handle) { + struct msghdr msg; + struct cmsghdr *cmsg; + int fd_to_send = uv__handle_fd((uv_handle_t*) req->send_handle); + char scratch[64] = {0}; + + assert(fd_to_send >= 0); + + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_iov = iov; + msg.msg_iovlen = iovcnt; + msg.msg_flags = 0; + + msg.msg_control = (void*) scratch; + msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send)); + + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send)); + + /* silence aliasing warning */ + { + void* pv = CMSG_DATA(cmsg); + int* pi = pv; + *pi = fd_to_send; + } + + do { + n = sendmsg(uv__stream_fd(stream), &msg, 0); + } +#if defined(__APPLE__) + /* + * Due to a possible kernel bug at least in OS X 10.10 "Yosemite", + * EPROTOTYPE can be returned while trying to write to a socket that is + * shutting down. If we retry the write, we should get the expected EPIPE + * instead. + */ + while (n == -1 && (errno == EINTR || errno == EPROTOTYPE)); +#else + while (n == -1 && errno == EINTR); +#endif + } else { + do { + if (iovcnt == 1) { + n = write(uv__stream_fd(stream), iov[0].iov_base, iov[0].iov_len); + } else { + n = writev(uv__stream_fd(stream), iov, iovcnt); + } + } +#if defined(__APPLE__) + /* + * Due to a possible kernel bug at least in OS X 10.10 "Yosemite", + * EPROTOTYPE can be returned while trying to write to a socket that is + * shutting down. If we retry the write, we should get the expected EPIPE + * instead. + */ + while (n == -1 && (errno == EINTR || errno == EPROTOTYPE)); +#else + while (n == -1 && errno == EINTR); +#endif + } + + if (n < 0) { + if (errno != EAGAIN && errno != EWOULDBLOCK) { + /* Error */ + req->error = -errno; + uv__write_req_finish(req); + uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); + if (!uv__io_active(&stream->io_watcher, POLLIN)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + return; + } else if (stream->flags & UV_STREAM_BLOCKING) { + /* If this is a blocking stream, try again. */ + goto start; + } + } else { + /* Successful write */ + + while (n >= 0) { + uv_buf_t* buf = &(req->bufs[req->write_index]); + size_t len = buf->len; + + assert(req->write_index < req->nbufs); + + if ((size_t)n < len) { + buf->base += n; + buf->len -= n; + stream->write_queue_size -= n; + n = 0; + + /* There is more to write. */ + if (stream->flags & UV_STREAM_BLOCKING) { + /* + * If we're blocking then we should not be enabling the write + * watcher - instead we need to try again. + */ + goto start; + } else { + /* Break loop and ensure the watcher is pending. */ + break; + } + + } else { + /* Finished writing the buf at index req->write_index. */ + req->write_index++; + + assert((size_t)n >= len); + n -= len; + + assert(stream->write_queue_size >= len); + stream->write_queue_size -= len; + + if (req->write_index == req->nbufs) { + /* Then we're done! */ + assert(n == 0); + uv__write_req_finish(req); + /* TODO: start trying to write the next request. */ + return; + } + } + } + } + + /* Either we've counted n down to zero or we've got EAGAIN. */ + assert(n == 0 || n == -1); + + /* Only non-blocking streams should use the write_watcher. */ + assert(!(stream->flags & UV_STREAM_BLOCKING)); + + /* We're not done. 
*/ + uv__io_start(stream->loop, &stream->io_watcher, POLLOUT); + + /* Notify select() thread about state change */ + uv__stream_osx_interrupt_select(stream); +} + + +static void uv__write_callbacks(uv_stream_t* stream) { + uv_write_t* req; + QUEUE* q; + + while (!QUEUE_EMPTY(&stream->write_completed_queue)) { + /* Pop a req off write_completed_queue. */ + q = QUEUE_HEAD(&stream->write_completed_queue); + req = QUEUE_DATA(q, uv_write_t, queue); + QUEUE_REMOVE(q); + uv__req_unregister(stream->loop, req); + + if (req->bufs != NULL) { + stream->write_queue_size -= uv__write_req_size(req); + if (req->bufs != req->bufsml) + uv__free(req->bufs); + req->bufs = NULL; + } + + /* NOTE: call callback AFTER freeing the request data. */ + if (req->cb) + req->cb(req, req->error); + } + + assert(QUEUE_EMPTY(&stream->write_completed_queue)); +} + + +uv_handle_type uv__handle_type(int fd) { + struct sockaddr_storage ss; + socklen_t sslen; + socklen_t len; + int type; + + memset(&ss, 0, sizeof(ss)); + sslen = sizeof(ss); + + if (getsockname(fd, (struct sockaddr*)&ss, &sslen)) + return UV_UNKNOWN_HANDLE; + + len = sizeof type; + + if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len)) + return UV_UNKNOWN_HANDLE; + + if (type == SOCK_STREAM) { +#if defined(_AIX) || defined(__DragonFly__) + /* on AIX/DragonFly the getsockname call returns an empty sa structure + * for sockets of type AF_UNIX. For all other types it will + * return a properly filled in structure. + */ + if (sslen == 0) + return UV_NAMED_PIPE; +#endif + switch (ss.ss_family) { + case AF_UNIX: + return UV_NAMED_PIPE; + case AF_INET: + case AF_INET6: + return UV_TCP; + } + } + + if (type == SOCK_DGRAM && + (ss.ss_family == AF_INET || ss.ss_family == AF_INET6)) + return UV_UDP; + + return UV_UNKNOWN_HANDLE; +} + + +static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) { + stream->flags |= UV_STREAM_READ_EOF; + uv__io_stop(stream->loop, &stream->io_watcher, POLLIN); + if (!uv__io_active(&stream->io_watcher, POLLOUT)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + stream->read_cb(stream, UV_EOF, buf); + stream->flags &= ~UV_STREAM_READING; +} + + +static int uv__stream_queue_fd(uv_stream_t* stream, int fd) { + uv__stream_queued_fds_t* queued_fds; + unsigned int queue_size; + + queued_fds = stream->queued_fds; + if (queued_fds == NULL) { + queue_size = 8; + queued_fds = uv__malloc((queue_size - 1) * sizeof(*queued_fds->fds) + + sizeof(*queued_fds)); + if (queued_fds == NULL) + return -ENOMEM; + queued_fds->size = queue_size; + queued_fds->offset = 0; + stream->queued_fds = queued_fds; + + /* Grow */ + } else if (queued_fds->size == queued_fds->offset) { + queue_size = queued_fds->size + 8; + queued_fds = uv__realloc(queued_fds, + (queue_size - 1) * sizeof(*queued_fds->fds) + + sizeof(*queued_fds)); + + /* + * Allocation failure, report back. 
+ * NOTE: if it is fatal - sockets will be closed in uv__stream_close + */ + if (queued_fds == NULL) + return -ENOMEM; + queued_fds->size = queue_size; + stream->queued_fds = queued_fds; + } + + /* Put fd in a queue */ + queued_fds->fds[queued_fds->offset++] = fd; + + return 0; +} + + +#define UV__CMSG_FD_COUNT 64 +#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int)) + + +static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) { + struct cmsghdr* cmsg; + + for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { + char* start; + char* end; + int err; + void* pv; + int* pi; + unsigned int i; + unsigned int count; + + if (cmsg->cmsg_type != SCM_RIGHTS) { + fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n", + cmsg->cmsg_type); + continue; + } + + /* silence aliasing warning */ + pv = CMSG_DATA(cmsg); + pi = pv; + + /* Count available fds */ + start = (char*) cmsg; + end = (char*) cmsg + cmsg->cmsg_len; + count = 0; + while (start + CMSG_LEN(count * sizeof(*pi)) < end) + count++; + assert(start + CMSG_LEN(count * sizeof(*pi)) == end); + + for (i = 0; i < count; i++) { + /* Already has accepted fd, queue now */ + if (stream->accepted_fd != -1) { + err = uv__stream_queue_fd(stream, pi[i]); + if (err != 0) { + /* Close rest */ + for (; i < count; i++) + uv__close(pi[i]); + return err; + } + } else { + stream->accepted_fd = pi[i]; + } + } + } + + return 0; +} + + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-folding-constant" +#endif + +static void uv__read(uv_stream_t* stream) { + uv_buf_t buf; + ssize_t nread; + struct msghdr msg; + char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)]; + int count; + int err; + int is_ipc; + + stream->flags &= ~UV_STREAM_READ_PARTIAL; + + /* Prevent loop starvation when the data comes in as fast as (or faster than) + * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O. + */ + count = 32; + + is_ipc = stream->type == UV_NAMED_PIPE && ((uv_pipe_t*) stream)->ipc; + + /* XXX: Maybe instead of having UV_STREAM_READING we just test if + * tcp->read_cb is NULL or not? + */ + while (stream->read_cb + && (stream->flags & UV_STREAM_READING) + && (count-- > 0)) { + assert(stream->alloc_cb != NULL); + + buf = uv_buf_init(NULL, 0); + stream->alloc_cb((uv_handle_t*)stream, 64 * 1024, &buf); + if (buf.base == NULL || buf.len == 0) { + /* User indicates it can't or won't handle the read. */ + stream->read_cb(stream, UV_ENOBUFS, &buf); + return; + } + + assert(buf.base != NULL); + assert(uv__stream_fd(stream) >= 0); + + if (!is_ipc) { + do { + nread = read(uv__stream_fd(stream), buf.base, buf.len); + } + while (nread < 0 && errno == EINTR); + } else { + /* ipc uses recvmsg */ + msg.msg_flags = 0; + msg.msg_iov = (struct iovec*) &buf; + msg.msg_iovlen = 1; + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Set up to receive a descriptor even if one isn't in the message */ + msg.msg_controllen = sizeof(cmsg_space); + msg.msg_control = cmsg_space; + + do { + nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0); + } + while (nread < 0 && errno == EINTR); + } + + if (nread < 0) { + /* Error */ + if (errno == EAGAIN || errno == EWOULDBLOCK) { + /* Wait for the next one. */ + if (stream->flags & UV_STREAM_READING) { + uv__io_start(stream->loop, &stream->io_watcher, POLLIN); + uv__stream_osx_interrupt_select(stream); + } + stream->read_cb(stream, 0, &buf); + } else { + /* Error. User should call uv_close(). 
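uv__read() fixes the allocation contract for every stream: alloc_cb is asked for a buffer before each read (64 KiB suggested above), and handing back an empty buffer cancels the read with UV_ENOBUFS. A sketch of matching user callbacks:

    #include <stdlib.h>
    #include <uv.h>

    static void alloc_cb(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
      /* `suggested` is 64 KiB above; smaller buffers just mean more reads. */
      buf->base = malloc(suggested);
      buf->len = buf->base == NULL ? 0 : suggested;  /* Empty => UV_ENOBUFS. */
    }

    static void read_cb(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
      if (nread < 0) {  /* UV_EOF or a real error: stop and close. */
        uv_close((uv_handle_t*) stream, NULL);
      } else {
        /* ... consume nread bytes from buf->base (0 means "try again") ... */
      }
      free(buf->base);
    }

    /* Usage: uv_read_start(stream, alloc_cb, read_cb); */
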
*/ + stream->read_cb(stream, -errno, &buf); + if (stream->flags & UV_STREAM_READING) { + stream->flags &= ~UV_STREAM_READING; + uv__io_stop(stream->loop, &stream->io_watcher, POLLIN); + if (!uv__io_active(&stream->io_watcher, POLLOUT)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + } + } + return; + } else if (nread == 0) { + uv__stream_eof(stream, &buf); + return; + } else { + /* Successful read */ + ssize_t buflen = buf.len; + + if (is_ipc) { + err = uv__stream_recv_cmsg(stream, &msg); + if (err != 0) { + stream->read_cb(stream, err, &buf); + return; + } + } + stream->read_cb(stream, nread, &buf); + + /* Return if we didn't fill the buffer, there is no more data to read. */ + if (nread < buflen) { + stream->flags |= UV_STREAM_READ_PARTIAL; + return; + } + } + } +} + + +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + +#undef UV__CMSG_FD_COUNT +#undef UV__CMSG_FD_SIZE + + +int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) { + assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) && + "uv_shutdown (unix) only supports uv_handle_t right now"); + + if (!(stream->flags & UV_STREAM_WRITABLE) || + stream->flags & UV_STREAM_SHUT || + stream->flags & UV_STREAM_SHUTTING || + stream->flags & UV_CLOSED || + stream->flags & UV_CLOSING) { + return -ENOTCONN; + } + + assert(uv__stream_fd(stream) >= 0); + + /* Initialize request */ + uv__req_init(stream->loop, req, UV_SHUTDOWN); + req->handle = stream; + req->cb = cb; + stream->shutdown_req = req; + stream->flags |= UV_STREAM_SHUTTING; + + uv__io_start(stream->loop, &stream->io_watcher, POLLOUT); + uv__stream_osx_interrupt_select(stream); + + return 0; +} + + +static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + uv_stream_t* stream; + + stream = container_of(w, uv_stream_t, io_watcher); + + assert(stream->type == UV_TCP || + stream->type == UV_NAMED_PIPE || + stream->type == UV_TTY); + assert(!(stream->flags & UV_CLOSING)); + + if (stream->connect_req) { + uv__stream_connect(stream); + return; + } + + assert(uv__stream_fd(stream) >= 0); + + /* Ignore POLLHUP here. Even if it's set, there may still be data to read. */ + if (events & (POLLIN | POLLERR | POLLHUP)) + uv__read(stream); + + if (uv__stream_fd(stream) == -1) + return; /* read_cb closed stream. */ + + /* Short-circuit iff POLLHUP is set, the user is still interested in read + * events and uv__read() reported a partial read but not EOF. If the EOF + * flag is set, uv__read() called read_cb with err=UV_EOF and we don't + * have to do anything. If the partial read flag is not set, we can't + * report the EOF yet because there is still data to read. + */ + if ((events & POLLHUP) && + (stream->flags & UV_STREAM_READING) && + (stream->flags & UV_STREAM_READ_PARTIAL) && + !(stream->flags & UV_STREAM_READ_EOF)) { + uv_buf_t buf = { NULL, 0 }; + uv__stream_eof(stream, &buf); + } + + if (uv__stream_fd(stream) == -1) + return; /* read_cb closed stream. */ + + if (events & (POLLOUT | POLLERR | POLLHUP)) { + uv__write(stream); + uv__write_callbacks(stream); + + /* Write queue drained. */ + if (QUEUE_EMPTY(&stream->write_queue)) + uv__drain(stream); + } +} + + +/** + * We get called here directly following a call to connect(2). + * In order to determine whether we've errored out or succeeded we must + * call getsockopt.
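+ *
+ * Editor's illustration of the standard non-blocking connect protocol
+ * that this function completes (just the shape of it, not new code):
+ *
+ *   connect(fd, ...)                  returns -1 with errno == EINPROGRESS
+ *   wait for POLLOUT on fd            uv__stream_io() dispatches here
+ *   getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len)
+ *   err == 0 means connected; any other value is the real error.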
+ */ +static void uv__stream_connect(uv_stream_t* stream) { + int error; + uv_connect_t* req = stream->connect_req; + socklen_t errorsize = sizeof(int); + + assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE); + assert(req); + + if (stream->delayed_error) { + /* To smooth over the differences between unixes errors that + * were reported synchronously on the first connect can be delayed + * until the next tick--which is now. + */ + error = stream->delayed_error; + stream->delayed_error = 0; + } else { + /* Normal situation: we need to get the socket error from the kernel. */ + assert(uv__stream_fd(stream) >= 0); + getsockopt(uv__stream_fd(stream), + SOL_SOCKET, + SO_ERROR, + &error, + &errorsize); + error = -error; + } + + if (error == -EINPROGRESS) + return; + + stream->connect_req = NULL; + uv__req_unregister(stream->loop, req); + + if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) { + uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); + } + + if (req->cb) + req->cb(req, error); + + if (uv__stream_fd(stream) == -1) + return; + + if (error < 0) { + uv__stream_flush_write_queue(stream, -ECANCELED); + uv__write_callbacks(stream); + } +} + + +int uv_write2(uv_write_t* req, + uv_stream_t* stream, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_stream_t* send_handle, + uv_write_cb cb) { + int empty_queue; + + assert(nbufs > 0); + assert((stream->type == UV_TCP || + stream->type == UV_NAMED_PIPE || + stream->type == UV_TTY) && + "uv_write (unix) does not yet support other types of streams"); + + if (uv__stream_fd(stream) < 0) + return -EBADF; + + if (send_handle) { + if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc) + return -EINVAL; + + /* XXX We abuse uv_write2() to send over UDP handles to child processes. + * Don't call uv__stream_fd() on those handles, it's a macro that on OS X + * evaluates to a function that operates on a uv_stream_t with a couple of + * OS X specific fields. On other Unices it does (handle)->io_watcher.fd, + * which works but only by accident. + */ + if (uv__handle_fd((uv_handle_t*) send_handle) < 0) + return -EBADF; + } + + /* It's legal for write_queue_size > 0 even when the write_queue is empty; + * it means there are error-state requests in the write_completed_queue that + * will touch up write_queue_size later, see also uv__write_req_finish(). + * We could check that write_queue is empty instead but that implies making + * a write() syscall when we know that the handle is in error mode. + */ + empty_queue = (stream->write_queue_size == 0); + + /* Initialize the req */ + uv__req_init(stream->loop, req, UV_WRITE); + req->cb = cb; + req->handle = stream; + req->error = 0; + req->send_handle = send_handle; + QUEUE_INIT(&req->queue); + + req->bufs = req->bufsml; + if (nbufs > ARRAY_SIZE(req->bufsml)) + req->bufs = uv__malloc(nbufs * sizeof(bufs[0])); + + if (req->bufs == NULL) + return -ENOMEM; + + memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0])); + req->nbufs = nbufs; + req->write_index = 0; + stream->write_queue_size += uv__count_bufs(bufs, nbufs); + + /* Append the request to write_queue. */ + QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue); + + /* If the queue was empty when this function began, we should attempt to + * do the write immediately. Otherwise start the write_watcher and wait + * for the fd to become writable. + */ + if (stream->connect_req) { + /* Still connecting, do nothing. */ + } + else if (empty_queue) { + uv__write(stream); + } + else { + /* + * blocking streams should never have anything in the queue. 
+ * if this assert fires then somehow the blocking stream isn't being + * sufficiently flushed in uv__write. + */ + assert(!(stream->flags & UV_STREAM_BLOCKING)); + uv__io_start(stream->loop, &stream->io_watcher, POLLOUT); + uv__stream_osx_interrupt_select(stream); + } + + return 0; +} + + +/* The buffers to be written must remain valid until the callback is called. + * This is not required for the uv_buf_t array. + */ +int uv_write(uv_write_t* req, + uv_stream_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_write_cb cb) { + return uv_write2(req, handle, bufs, nbufs, NULL, cb); +} + + +void uv_try_write_cb(uv_write_t* req, int status) { + /* Should not be called */ + abort(); +} + + +int uv_try_write(uv_stream_t* stream, + const uv_buf_t bufs[], + unsigned int nbufs) { + int r; + int has_pollout; + size_t written; + size_t req_size; + uv_write_t req; + + /* Connecting or already writing some data */ + if (stream->connect_req != NULL || stream->write_queue_size != 0) + return -EAGAIN; + + has_pollout = uv__io_active(&stream->io_watcher, POLLOUT); + + r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb); + if (r != 0) + return r; + + /* Remove the bytes that were not written from the write queue size */ + written = uv__count_bufs(bufs, nbufs); + if (req.bufs != NULL) + req_size = uv__write_req_size(&req); + else + req_size = 0; + written -= req_size; + stream->write_queue_size -= req_size; + + /* Unqueue the request, regardless of whether it completed immediately */ + QUEUE_REMOVE(&req.queue); + uv__req_unregister(stream->loop, &req); + if (req.bufs != req.bufsml) + uv__free(req.bufs); + req.bufs = NULL; + + /* Do not poll for writable if we weren't before calling this */ + if (!has_pollout) { + uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); + uv__stream_osx_interrupt_select(stream); + } + + if (written == 0 && req_size != 0) + return -EAGAIN; + else + return written; +} + + +int uv_read_start(uv_stream_t* stream, + uv_alloc_cb alloc_cb, + uv_read_cb read_cb) { + assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE || + stream->type == UV_TTY); + + if (stream->flags & UV_CLOSING) + return -EINVAL; + + /* The UV_STREAM_READING flag is independent of the state of the tcp - it + * just expresses the desired state of the user. + */ + stream->flags |= UV_STREAM_READING; + + /* TODO: try to do the read inline? */ + /* TODO: keep track of tcp state. If we've gotten an EOF then we should + * not start the IO watcher.
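+ *
+ * Editor's sketch of typical caller code (on_alloc and on_read are
+ * hypothetical names); returning a NULL base or a zero len from the
+ * alloc_cb makes uv__read() above report UV_ENOBUFS:
+ *
+ *   static void on_alloc(uv_handle_t* h, size_t hint, uv_buf_t* buf) {
+ *     buf->base = malloc(hint);
+ *     buf->len = buf->base == NULL ? 0 : hint;
+ *   }
+ *
+ *   uv_read_start((uv_stream_t*) &tcp, on_alloc, on_read);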
+ */ + assert(uv__stream_fd(stream) >= 0); + assert(alloc_cb); + + stream->read_cb = read_cb; + stream->alloc_cb = alloc_cb; + + uv__io_start(stream->loop, &stream->io_watcher, POLLIN); + uv__handle_start(stream); + uv__stream_osx_interrupt_select(stream); + + return 0; +} + + +int uv_read_stop(uv_stream_t* stream) { + if (!(stream->flags & UV_STREAM_READING)) + return 0; + + stream->flags &= ~UV_STREAM_READING; + uv__io_stop(stream->loop, &stream->io_watcher, POLLIN); + if (!uv__io_active(&stream->io_watcher, POLLOUT)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + + stream->read_cb = NULL; + stream->alloc_cb = NULL; + return 0; +} + + +int uv_is_readable(const uv_stream_t* stream) { + return !!(stream->flags & UV_STREAM_READABLE); +} + + +int uv_is_writable(const uv_stream_t* stream) { + return !!(stream->flags & UV_STREAM_WRITABLE); +} + + +#if defined(__APPLE__) +int uv___stream_fd(const uv_stream_t* handle) { + const uv__stream_select_t* s; + + assert(handle->type == UV_TCP || + handle->type == UV_TTY || + handle->type == UV_NAMED_PIPE); + + s = handle->select; + if (s != NULL) + return s->fd; + + return handle->io_watcher.fd; +} +#endif /* defined(__APPLE__) */ + + +void uv__stream_close(uv_stream_t* handle) { + unsigned int i; + uv__stream_queued_fds_t* queued_fds; + +#if defined(__APPLE__) + /* Terminate select loop first */ + if (handle->select != NULL) { + uv__stream_select_t* s; + + s = handle->select; + + uv_sem_post(&s->close_sem); + uv_sem_post(&s->async_sem); + uv__stream_osx_interrupt_select(handle); + uv_thread_join(&s->thread); + uv_sem_destroy(&s->close_sem); + uv_sem_destroy(&s->async_sem); + uv__close(s->fake_fd); + uv__close(s->int_fd); + uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close); + + handle->select = NULL; + } +#endif /* defined(__APPLE__) */ + + uv__io_close(handle->loop, &handle->io_watcher); + uv_read_stop(handle); + uv__handle_stop(handle); + + if (handle->io_watcher.fd != -1) { + /* Don't close stdio file descriptors. Nothing good comes from it. */ + if (handle->io_watcher.fd > STDERR_FILENO) + uv__close(handle->io_watcher.fd); + handle->io_watcher.fd = -1; + } + + if (handle->accepted_fd != -1) { + uv__close(handle->accepted_fd); + handle->accepted_fd = -1; + } + + /* Close all queued fds */ + if (handle->queued_fds != NULL) { + queued_fds = handle->queued_fds; + for (i = 0; i < queued_fds->offset; i++) + uv__close(queued_fds->fds[i]); + uv__free(handle->queued_fds); + handle->queued_fds = NULL; + } + + assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT)); +} + + +int uv_stream_set_blocking(uv_stream_t* handle, int blocking) { + /* Don't need to check the file descriptor, uv__nonblock() + * will fail with EBADF if it's not valid. + */ + return uv__nonblock(uv__stream_fd(handle), !blocking); +} diff --git a/Utilities/cmlibuv/src/unix/sunos.c b/Utilities/cmlibuv/src/unix/sunos.c new file mode 100644 index 0000000..3e7a759 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/sunos.c @@ -0,0 +1,821 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> +#include <errno.h> + +#ifndef SUNOS_NO_IFADDRS +# include <ifaddrs.h> +#endif +#include <net/if.h> +#include <net/if_dl.h> +#include <net/if_arp.h> +#include <sys/sockio.h> + +#include <sys/loadavg.h> +#include <sys/time.h> +#include <unistd.h> +#include <kstat.h> +#include <fcntl.h> + +#include <sys/port.h> +#include <port.h> + +#define PORT_FIRED 0x69 +#define PORT_UNUSED 0x0 +#define PORT_LOADED 0x99 +#define PORT_DELETED -1 + +#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64) +#define PROCFS_FILE_OFFSET_BITS_HACK 1 +#undef _FILE_OFFSET_BITS +#else +#define PROCFS_FILE_OFFSET_BITS_HACK 0 +#endif + +#include <procfs.h> + +#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1) +#define _FILE_OFFSET_BITS 64 +#endif + + +int uv__platform_loop_init(uv_loop_t* loop) { + int err; + int fd; + + loop->fs_fd = -1; + loop->backend_fd = -1; + + fd = port_create(); + if (fd == -1) + return -errno; + + err = uv__cloexec(fd, 1); + if (err) { + uv__close(fd); + return err; + } + loop->backend_fd = fd; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + if (loop->fs_fd != -1) { + uv__close(loop->fs_fd); + loop->fs_fd = -1; + } + + if (loop->backend_fd != -1) { + uv__close(loop->backend_fd); + loop->backend_fd = -1; + } +} + + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct port_event* events; + uintptr_t i; + uintptr_t nfds; + + assert(loop->watchers != NULL); + + events = (struct port_event*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + if (events == NULL) + return; + + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].portev_object == fd) + events[i].portev_object = -1; +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0)) + return -errno; + + if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) + abort(); + + return 0; +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + struct port_event events[1024]; + struct port_event* pe; + struct timespec spec; + QUEUE* q; + uv__io_t* w; + sigset_t* pset; + sigset_t set; + uint64_t base; + uint64_t diff; + unsigned int nfds; + unsigned int i; + int saved_errno; + int have_signals; + int nevents; + int count; + int err; + int 
fd; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + + if (port_associate(loop->backend_fd, PORT_SOURCE_FD, w->fd, w->pevents, 0)) + abort(); + + w->events = w->pevents; + } + + pset = NULL; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + pset = &set; + sigemptyset(pset); + sigaddset(pset, SIGPROF); + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + + for (;;) { + if (timeout != -1) { + spec.tv_sec = timeout / 1000; + spec.tv_nsec = (timeout % 1000) * 1000000; + } + + /* Work around a kernel bug where nfds is not updated. */ + events[0].portev_source = 0; + + nfds = 1; + saved_errno = 0; + + if (pset != NULL) + pthread_sigmask(SIG_BLOCK, pset, NULL); + + err = port_getn(loop->backend_fd, + events, + ARRAY_SIZE(events), + &nfds, + timeout == -1 ? NULL : &spec); + + if (pset != NULL) + pthread_sigmask(SIG_UNBLOCK, pset, NULL); + + if (err) { + /* Work around another kernel bug: port_getn() may return events even + * on error. + */ + if (errno == EINTR || errno == ETIME) + saved_errno = errno; + else + abort(); + } + + /* Update loop->time unconditionally. It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (events[0].portev_source == 0) { + if (timeout == 0) + return; + + if (timeout == -1) + continue; + + goto update_timeout; + } + + if (nfds == 0) { + assert(timeout != -1); + return; + } + + have_signals = 0; + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + for (i = 0; i < nfds; i++) { + pe = events + i; + fd = pe->portev_object; + + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (fd == -1) + continue; + + assert(fd >= 0); + assert((unsigned) fd < loop->nwatchers); + + w = loop->watchers[fd]; + + /* File descriptor that we've stopped watching, ignore. */ + if (w == NULL) + continue; + + /* Run signal watchers last. This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) + have_signals = 1; + else + w->cb(loop, w, pe->portev_events); + + nevents++; + + if (w != loop->watchers[fd]) + continue; /* Disabled by callback. */ + + /* Events Ports operates in oneshot mode, rearm timer on next run. */ + if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) + QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); + } + + if (have_signals != 0) + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (have_signals != 0) + return; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. 
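+ *
+ * Editor's worked note: with ARRAY_SIZE(events) == 1024 and count
+ * starting at 48, at most 48 * 1024 == 49152 events are drained per
+ * uv__io_poll() pass before control returns to the loop, so a busy
+ * pollset cannot starve timers and close callbacks indefinitely.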
*/ + timeout = 0; + continue; + } + return; + } + + if (saved_errno == ETIME) { + assert(timeout != -1); + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + diff = loop->time - base; + if (diff >= (uint64_t) timeout) + return; + + timeout -= diff; + } +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + return gethrtime(); +} + + +/* + * We could use a static buffer for the path manipulations that we need outside + * of the function, but this function could be called by multiple consumers and + * we don't want to potentially create a race condition in the use of snprintf. + */ +int uv_exepath(char* buffer, size_t* size) { + ssize_t res; + char buf[128]; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid()); + + res = *size - 1; + if (res > 0) + res = readlink(buf, buffer, res); + + if (res == -1) + return -errno; + + buffer[res] = '\0'; + *size = res; + return 0; +} + + +uint64_t uv_get_free_memory(void) { + return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES); +} + + +uint64_t uv_get_total_memory(void) { + return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES); +} + + +void uv_loadavg(double avg[3]) { + (void) getloadavg(avg, 3); +} + + +#if defined(PORT_SOURCE_FILE) + +static int uv__fs_event_rearm(uv_fs_event_t *handle) { + if (handle->fd == -1) + return -EBADF; + + if (port_associate(handle->loop->fs_fd, + PORT_SOURCE_FILE, + (uintptr_t) &handle->fo, + FILE_ATTRIB | FILE_MODIFIED, + handle) == -1) { + return -errno; + } + handle->fd = PORT_LOADED; + + return 0; +} + + +static void uv__fs_event_read(uv_loop_t* loop, + uv__io_t* w, + unsigned int revents) { + uv_fs_event_t *handle = NULL; + timespec_t timeout; + port_event_t pe; + int events; + int r; + + (void) w; + (void) revents; + + do { + uint_t n = 1; + + /* + * Note that our use of port_getn() here (and not port_get()) is deliberate: + * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout + * causes port_get() to return success instead of ETIME when there aren't + * actually any events (!); by using port_getn() in lieu of port_get(), + * we can at least workaround the bug by checking for zero returned events + * and treating it as we would ETIME. 
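+ *
+ * Editor's restatement: with the bug, a zeroed timeout can yield
+ * r == 0 with n left at 0 delivered events; the `n == 0` test just
+ * below treats that case exactly like r == -1 && errno == ETIME.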
+ */ + do { + memset(&timeout, 0, sizeof timeout); + r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout); + } + while (r == -1 && errno == EINTR); + + if ((r == -1 && errno == ETIME) || n == 0) + break; + + handle = (uv_fs_event_t*) pe.portev_user; + assert((r == 0) && "unexpected port_get() error"); + + events = 0; + if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED)) + events |= UV_CHANGE; + if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED)) + events |= UV_RENAME; + assert(events != 0); + handle->fd = PORT_FIRED; + handle->cb(handle, NULL, events, 0); + + if (handle->fd != PORT_DELETED) { + r = uv__fs_event_rearm(handle); + if (r != 0) + handle->cb(handle, NULL, 0, r); + } + } + while (handle->fd != PORT_DELETED); +} + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags) { + int portfd; + int first_run; + int err; + + if (uv__is_active(handle)) + return -EINVAL; + + first_run = 0; + if (handle->loop->fs_fd == -1) { + portfd = port_create(); + if (portfd == -1) + return -errno; + handle->loop->fs_fd = portfd; + first_run = 1; + } + + uv__handle_start(handle); + handle->path = uv__strdup(path); + handle->fd = PORT_UNUSED; + handle->cb = cb; + + memset(&handle->fo, 0, sizeof handle->fo); + handle->fo.fo_name = handle->path; + err = uv__fs_event_rearm(handle); + if (err != 0) + return err; + + if (first_run) { + uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd); + uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN); + } + + return 0; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + if (!uv__is_active(handle)) + return 0; + + if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) { + port_dissociate(handle->loop->fs_fd, + PORT_SOURCE_FILE, + (uintptr_t) &handle->fo); + } + + handle->fd = PORT_DELETED; + uv__free(handle->path); + handle->path = NULL; + handle->fo.fo_name = NULL; + uv__handle_stop(handle); + + return 0; +} + +void uv__fs_event_close(uv_fs_event_t* handle) { + uv_fs_event_stop(handle); +} + +#else /* !defined(PORT_SOURCE_FILE) */ + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + return -ENOSYS; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* filename, + unsigned int flags) { + return -ENOSYS; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + return -ENOSYS; +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { + UNREACHABLE(); +} + +#endif /* defined(PORT_SOURCE_FILE) */ + + +char** uv_setup_args(int argc, char** argv) { + return argv; +} + + +int uv_set_process_title(const char* title) { + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + if (buffer == NULL || size == 0) + return -EINVAL; + + buffer[0] = '\0'; + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + psinfo_t psinfo; + int err; + int fd; + + fd = open("/proc/self/psinfo", O_RDONLY); + if (fd == -1) + return -errno; + + /* FIXME(bnoordhuis) Handle EINTR. 
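+ *
+ * Editor's sketch of what handling it could look like, mirroring the
+ * EINTR retry loops used elsewhere in these files:
+ *
+ *   ssize_t n;
+ *   do
+ *     n = read(fd, &psinfo, sizeof(psinfo));
+ *   while (n == -1 && errno == EINTR);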
*/ + err = -EINVAL; + if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) { + *rss = (size_t)psinfo.pr_rssize * 1024; + err = 0; + } + uv__close(fd); + + return err; +} + + +int uv_uptime(double* uptime) { + kstat_ctl_t *kc; + kstat_t *ksp; + kstat_named_t *knp; + + long hz = sysconf(_SC_CLK_TCK); + + kc = kstat_open(); + if (kc == NULL) + return -EPERM; + + ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc"); + if (kstat_read(kc, ksp, NULL) == -1) { + *uptime = -1; + } else { + knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr"); + *uptime = knp->value.ul / hz; + } + kstat_close(kc); + + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + int lookup_instance; + kstat_ctl_t *kc; + kstat_t *ksp; + kstat_named_t *knp; + uv_cpu_info_t* cpu_info; + + kc = kstat_open(); + if (kc == NULL) + return -EPERM; + + /* Get count of cpus */ + lookup_instance = 0; + while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) { + lookup_instance++; + } + + *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos)); + if (!(*cpu_infos)) { + kstat_close(kc); + return -ENOMEM; + } + + *count = lookup_instance; + + cpu_info = *cpu_infos; + lookup_instance = 0; + while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) { + if (kstat_read(kc, ksp, NULL) == -1) { + cpu_info->speed = 0; + cpu_info->model = NULL; + } else { + knp = kstat_data_lookup(ksp, (char*) "clock_MHz"); + assert(knp->data_type == KSTAT_DATA_INT32 || + knp->data_type == KSTAT_DATA_INT64); + cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32 + : knp->value.i64; + + knp = kstat_data_lookup(ksp, (char*) "brand"); + assert(knp->data_type == KSTAT_DATA_STRING); + cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp)); + } + + lookup_instance++; + cpu_info++; + } + + cpu_info = *cpu_infos; + lookup_instance = 0; + for (;;) { + ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys"); + + if (ksp == NULL) + break; + + if (kstat_read(kc, ksp, NULL) == -1) { + cpu_info->cpu_times.user = 0; + cpu_info->cpu_times.nice = 0; + cpu_info->cpu_times.sys = 0; + cpu_info->cpu_times.idle = 0; + cpu_info->cpu_times.irq = 0; + } else { + knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user"); + assert(knp->data_type == KSTAT_DATA_UINT64); + cpu_info->cpu_times.user = knp->value.ui64; + + knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel"); + assert(knp->data_type == KSTAT_DATA_UINT64); + cpu_info->cpu_times.sys = knp->value.ui64; + + knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle"); + assert(knp->data_type == KSTAT_DATA_UINT64); + cpu_info->cpu_times.idle = knp->value.ui64; + + knp = kstat_data_lookup(ksp, (char*) "intr"); + assert(knp->data_type == KSTAT_DATA_UINT64); + cpu_info->cpu_times.irq = knp->value.ui64; + cpu_info->cpu_times.nice = 0; + } + + lookup_instance++; + cpu_info++; + } + + kstat_close(kc); + + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + +/* + * Inspired By: + * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris + * http://www.pauliesworld.org/project/getmac.c + */ +static int uv__set_phys_addr(uv_interface_address_t* address, + struct ifaddrs* ent) { + + struct sockaddr_dl* sa_addr; + int sockfd; + int i; + struct arpreq arpreq; + + /* This appears to only work as root */ + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + 
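/* Editor's note: LLADDR() points at the link-layer (MAC) address bytes
+  * stored after the sockaddr_dl header; if they read back as all zeroes
+  * (common for unprivileged callers, per the root-only remark above) the
+  * SIOCGARP ioctl below serves as a fallback. */
+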
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + for (i = 0; i < sizeof(address->phys_addr); i++) { + if (address->phys_addr[i] != 0) + return 0; + } + memset(&arpreq, 0, sizeof(arpreq)); + if (address->address.address4.sin_family == AF_INET) { + struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa); + sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr; + } else if (address->address.address4.sin_family == AF_INET6) { + struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa); + memcpy(sin->sin6_addr.s6_addr, + address->address.address6.sin6_addr.s6_addr, + sizeof(address->address.address6.sin6_addr.s6_addr)); + } else { + return 0; + } + + sockfd = socket(AF_INET, SOCK_DGRAM, 0); + if (sockfd < 0) + return -errno; + + if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) { + uv__close(sockfd); + return -errno; + } + memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr)); + uv__close(sockfd); + return 0; +} + +int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { +#ifdef SUNOS_NO_IFADDRS + return -ENOSYS; +#else + uv_interface_address_t* address; + struct ifaddrs* addrs; + struct ifaddrs* ent; + int i; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family == PF_PACKET)) { + continue; + } + + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) || + (ent->ifa_flags & IFF_LOOPBACK)); + + uv__set_phys_addr(address, ent); + address++; + } + + freeifaddrs(addrs); + + return 0; +#endif /* SUNOS_NO_IFADDRS */ +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/Utilities/cmlibuv/src/unix/tcp.c b/Utilities/cmlibuv/src/unix/tcp.c new file mode 100644 index 0000000..c423dcb --- /dev/null +++ b/Utilities/cmlibuv/src/unix/tcp.c @@ -0,0 +1,395 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <stdlib.h> +#include <unistd.h> +#include <assert.h> +#include <errno.h> + + +static int maybe_new_socket(uv_tcp_t* handle, int domain, int flags) { + int sockfd; + int err; + + if (domain == AF_UNSPEC || uv__stream_fd(handle) != -1) { + handle->flags |= flags; + return 0; + } + + err = uv__socket(domain, SOCK_STREAM, 0); + if (err < 0) + return err; + sockfd = err; + + err = uv__stream_open((uv_stream_t*) handle, sockfd, flags); + if (err) { + uv__close(sockfd); + return err; + } + + return 0; +} + + +int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) { + int domain; + + /* Use the lower 8 bits for the domain */ + domain = flags & 0xFF; + if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC) + return -EINVAL; + + if (flags & ~0xFF) + return -EINVAL; + + uv__stream_init(loop, (uv_stream_t*)tcp, UV_TCP); + + /* If anything fails beyond this point we need to remove the handle from + * the handle queue, since it was added by uv__handle_init in uv_stream_init. + */ + + if (domain != AF_UNSPEC) { + int err = maybe_new_socket(tcp, domain, 0); + if (err) { + QUEUE_REMOVE(&tcp->handle_queue); + return err; + } + } + + return 0; +} + + +int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) { + return uv_tcp_init_ex(loop, tcp, AF_UNSPEC); +} + + +int uv__tcp_bind(uv_tcp_t* tcp, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags) { + int err; + int on; + + /* Cannot set IPv6-only mode on non-IPv6 socket. */ + if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6) + return -EINVAL; + + err = maybe_new_socket(tcp, + addr->sa_family, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); + if (err) + return err; + + on = 1; + if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on))) + return -errno; + +#ifdef IPV6_V6ONLY + if (addr->sa_family == AF_INET6) { + on = (flags & UV_TCP_IPV6ONLY) != 0; + if (setsockopt(tcp->io_watcher.fd, + IPPROTO_IPV6, + IPV6_V6ONLY, + &on, + sizeof on) == -1) { +#if defined(__MVS__) + if (errno == EOPNOTSUPP) + return -EINVAL; +#endif + return -errno; + } + } +#endif + + errno = 0; + if (bind(tcp->io_watcher.fd, addr, addrlen) && errno != EADDRINUSE) { + if (errno == EAFNOSUPPORT) + /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a + * socket created with AF_INET to an AF_INET6 address or vice versa. 
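+ *
+ * Editor's note: EADDRINUSE is deliberately not reported here; the
+ * errno is parked in tcp->delayed_error just below and surfaces from a
+ * later call instead - uv_tcp_listen(), uv_tcp_getsockname() and
+ * uv_tcp_getpeername() all return the stored delayed_error first.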
*/ + return -EINVAL; + return -errno; + } + tcp->delayed_error = -errno; + + tcp->flags |= UV_HANDLE_BOUND; + if (addr->sa_family == AF_INET6) + tcp->flags |= UV_HANDLE_IPV6; + + return 0; +} + + +int uv__tcp_connect(uv_connect_t* req, + uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + uv_connect_cb cb) { + int err; + int r; + + assert(handle->type == UV_TCP); + + if (handle->connect_req != NULL) + return -EALREADY; /* FIXME(bnoordhuis) -EINVAL or maybe -EBUSY. */ + + err = maybe_new_socket(handle, + addr->sa_family, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); + if (err) + return err; + + handle->delayed_error = 0; + + do { + errno = 0; + r = connect(uv__stream_fd(handle), addr, addrlen); + } while (r == -1 && errno == EINTR); + + /* We not only check the return value, but also check the errno != 0. + * Because in rare cases connect() will return -1 but the errno + * is 0 (for example, on Android 4.3, OnePlus phone A0001_12_150227) + * and actually the tcp three-way handshake is completed. + */ + if (r == -1 && errno != 0) { + if (errno == EINPROGRESS) + ; /* not an error */ + else if (errno == ECONNREFUSED) + /* If we get a ECONNREFUSED wait until the next tick to report the + * error. Solaris wants to report immediately--other unixes want to + * wait. + */ + handle->delayed_error = -errno; + else + return -errno; + } + + uv__req_init(handle->loop, req, UV_CONNECT); + req->cb = cb; + req->handle = (uv_stream_t*) handle; + QUEUE_INIT(&req->queue); + handle->connect_req = req; + + uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); + + if (handle->delayed_error) + uv__io_feed(handle->loop, &handle->io_watcher); + + return 0; +} + + +int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) { + int err; + + err = uv__nonblock(sock, 1); + if (err) + return err; + + return uv__stream_open((uv_stream_t*)handle, + sock, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); +} + + +int uv_tcp_getsockname(const uv_tcp_t* handle, + struct sockaddr* name, + int* namelen) { + socklen_t socklen; + + if (handle->delayed_error) + return handle->delayed_error; + + if (uv__stream_fd(handle) < 0) + return -EINVAL; /* FIXME(bnoordhuis) -EBADF */ + + /* sizeof(socklen_t) != sizeof(int) on some systems. */ + socklen = (socklen_t) *namelen; + + if (getsockname(uv__stream_fd(handle), name, &socklen)) + return -errno; + + *namelen = (int) socklen; + return 0; +} + + +int uv_tcp_getpeername(const uv_tcp_t* handle, + struct sockaddr* name, + int* namelen) { + socklen_t socklen; + + if (handle->delayed_error) + return handle->delayed_error; + + if (uv__stream_fd(handle) < 0) + return -EINVAL; /* FIXME(bnoordhuis) -EBADF */ + + /* sizeof(socklen_t) != sizeof(int) on some systems. */ + socklen = (socklen_t) *namelen; + + if (getpeername(uv__stream_fd(handle), name, &socklen)) + return -errno; + + *namelen = (int) socklen; + return 0; +} + + +int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) { + static int single_accept = -1; + int err; + + if (tcp->delayed_error) + return tcp->delayed_error; + + if (single_accept == -1) { + const char* val = getenv("UV_TCP_SINGLE_ACCEPT"); + single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */ + } + + if (single_accept) + tcp->flags |= UV_TCP_SINGLE_ACCEPT; + + err = maybe_new_socket(tcp, AF_INET, UV_STREAM_READABLE); + if (err) + return err; + +#ifdef __MVS__ + /* on zOS the listen call does not bind automatically + if the socket is unbound. 
Hence binding to + an arbitrary port must be done manually + */ + + if (!(tcp->flags & UV_HANDLE_BOUND)) { + struct sockaddr_storage saddr; + socklen_t slen = sizeof(saddr); + memset(&saddr, 0, sizeof(saddr)); + + if (getsockname(tcp->io_watcher.fd, (struct sockaddr*) &saddr, &slen)) + return -errno; + + if (bind(tcp->io_watcher.fd, (struct sockaddr*) &saddr, slen)) + return -errno; + + tcp->flags |= UV_HANDLE_BOUND; + } +#endif + + if (listen(tcp->io_watcher.fd, backlog)) + return -errno; + + tcp->connection_cb = cb; + tcp->flags |= UV_HANDLE_BOUND; + + /* Start listening for connections. */ + tcp->io_watcher.cb = uv__server_io; + uv__io_start(tcp->loop, &tcp->io_watcher, POLLIN); + + return 0; +} + + +int uv__tcp_nodelay(int fd, int on) { + if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on))) + return -errno; + return 0; +} + + +int uv__tcp_keepalive(int fd, int on, unsigned int delay) { + if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on))) + return -errno; + +#ifdef TCP_KEEPIDLE + if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay))) + return -errno; +#endif + + /* Solaris/SmartOS, if you don't support keep-alive, + * then don't advertise it in your system headers... + */ + /* FIXME(bnoordhuis) That's possibly because sizeof(delay) should be 1. */ +#if defined(TCP_KEEPALIVE) && !defined(__sun) + if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay))) + return -errno; +#endif + + return 0; +} + + +int uv_tcp_nodelay(uv_tcp_t* handle, int on) { + int err; + + if (uv__stream_fd(handle) != -1) { + err = uv__tcp_nodelay(uv__stream_fd(handle), on); + if (err) + return err; + } + + if (on) + handle->flags |= UV_TCP_NODELAY; + else + handle->flags &= ~UV_TCP_NODELAY; + + return 0; +} + + +int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) { + int err; + + if (uv__stream_fd(handle) != -1) { + err = uv__tcp_keepalive(uv__stream_fd(handle), on, delay); + if (err) + return err; + } + + if (on) + handle->flags |= UV_TCP_KEEPALIVE; + else + handle->flags &= ~UV_TCP_KEEPALIVE; + + /* TODO Store delay if uv__stream_fd(handle) == -1 but we don't want to + * enlarge uv_tcp_t with an int that's almost never used... + */ + + return 0; +} + + +int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) { + if (enable) + handle->flags &= ~UV_TCP_SINGLE_ACCEPT; + else + handle->flags |= UV_TCP_SINGLE_ACCEPT; + return 0; +} + + +void uv__tcp_close(uv_tcp_t* handle) { + uv__stream_close((uv_stream_t*)handle); +} diff --git a/Utilities/cmlibuv/src/unix/thread.c b/Utilities/cmlibuv/src/unix/thread.c new file mode 100644 index 0000000..52989f7 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/thread.c @@ -0,0 +1,605 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <pthread.h> +#include <assert.h> +#include <errno.h> + +#include <sys/time.h> +#include <sys/resource.h> /* getrlimit() */ +#include <unistd.h> /* getpagesize() */ + +#include <limits.h> + +#ifdef __MVS__ +#include <sys/ipc.h> +#include <sys/sem.h> +#endif + +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) + +struct thread_ctx { + void (*entry)(void* arg); + void* arg; +}; + + +static void* uv__thread_start(void *arg) +{ + struct thread_ctx *ctx_p; + struct thread_ctx ctx; + + ctx_p = arg; + ctx = *ctx_p; + uv__free(ctx_p); + ctx.entry(ctx.arg); + + return 0; +} + + +int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { + struct thread_ctx* ctx; + int err; + pthread_attr_t* attr; +#if defined(__APPLE__) + pthread_attr_t attr_storage; + struct rlimit lim; +#endif + + ctx = uv__malloc(sizeof(*ctx)); + if (ctx == NULL) + return UV_ENOMEM; + + ctx->entry = entry; + ctx->arg = arg; + + /* On OSX threads other than the main thread are created with a reduced stack + * size by default, adjust it to RLIMIT_STACK. + */ +#if defined(__APPLE__) + if (getrlimit(RLIMIT_STACK, &lim)) + abort(); + + attr = &attr_storage; + if (pthread_attr_init(attr)) + abort(); + + if (lim.rlim_cur != RLIM_INFINITY) { + /* pthread_attr_setstacksize() expects page-aligned values. 
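+ *
+ * Editor's worked example: with a 16384-byte page size and an
+ * RLIMIT_STACK of 8 MB plus one byte, 8388609 % 16384 == 1, so one
+ * byte is shaved off and the page-aligned 8388608 is what gets passed
+ * to pthread_attr_setstacksize() below.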
*/ + lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize(); + + if (lim.rlim_cur >= PTHREAD_STACK_MIN) + if (pthread_attr_setstacksize(attr, lim.rlim_cur)) + abort(); + } +#else + attr = NULL; +#endif + + err = pthread_create(tid, attr, uv__thread_start, ctx); + + if (attr != NULL) + pthread_attr_destroy(attr); + + if (err) + uv__free(ctx); + + return -err; +} + + +uv_thread_t uv_thread_self(void) { + return pthread_self(); +} + +int uv_thread_join(uv_thread_t *tid) { + return -pthread_join(*tid, NULL); +} + + +int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) { + return pthread_equal(*t1, *t2); +} + + +int uv_mutex_init(uv_mutex_t* mutex) { +#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK) + return -pthread_mutex_init(mutex, NULL); +#else + pthread_mutexattr_t attr; + int err; + + if (pthread_mutexattr_init(&attr)) + abort(); + + if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)) + abort(); + + err = pthread_mutex_init(mutex, &attr); + + if (pthread_mutexattr_destroy(&attr)) + abort(); + + return -err; +#endif +} + + +void uv_mutex_destroy(uv_mutex_t* mutex) { + if (pthread_mutex_destroy(mutex)) + abort(); +} + + +void uv_mutex_lock(uv_mutex_t* mutex) { + if (pthread_mutex_lock(mutex)) + abort(); +} + + +int uv_mutex_trylock(uv_mutex_t* mutex) { + int err; + + err = pthread_mutex_trylock(mutex); + if (err) { + if (err != EBUSY && err != EAGAIN) + abort(); + return -EBUSY; + } + + return 0; +} + + +void uv_mutex_unlock(uv_mutex_t* mutex) { + if (pthread_mutex_unlock(mutex)) + abort(); +} + + +int uv_rwlock_init(uv_rwlock_t* rwlock) { + return -pthread_rwlock_init(rwlock, NULL); +} + + +void uv_rwlock_destroy(uv_rwlock_t* rwlock) { + if (pthread_rwlock_destroy(rwlock)) + abort(); +} + + +void uv_rwlock_rdlock(uv_rwlock_t* rwlock) { + if (pthread_rwlock_rdlock(rwlock)) + abort(); +} + + +int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) { + int err; + + err = pthread_rwlock_tryrdlock(rwlock); + if (err) { + if (err != EBUSY && err != EAGAIN) + abort(); + return -EBUSY; + } + + return 0; +} + + +void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) { + if (pthread_rwlock_unlock(rwlock)) + abort(); +} + + +void uv_rwlock_wrlock(uv_rwlock_t* rwlock) { + if (pthread_rwlock_wrlock(rwlock)) + abort(); +} + + +int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) { + int err; + + err = pthread_rwlock_trywrlock(rwlock); + if (err) { + if (err != EBUSY && err != EAGAIN) + abort(); + return -EBUSY; + } + + return 0; +} + + +void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) { + if (pthread_rwlock_unlock(rwlock)) + abort(); +} + + +void uv_once(uv_once_t* guard, void (*callback)(void)) { + if (pthread_once(guard, callback)) + abort(); +} + +#if defined(__APPLE__) && defined(__MACH__) + +int uv_sem_init(uv_sem_t* sem, unsigned int value) { + kern_return_t err; + + err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value); + if (err == KERN_SUCCESS) + return 0; + if (err == KERN_INVALID_ARGUMENT) + return -EINVAL; + if (err == KERN_RESOURCE_SHORTAGE) + return -ENOMEM; + + abort(); + return -EINVAL; /* Satisfy the compiler. 
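+ *
+ * Editor's sketch of typical usage of this uv_sem API (the contract is
+ * identical across the platform branches in this file):
+ *
+ *   uv_sem_t sem;
+ *   if (uv_sem_init(&sem, 0)) abort();
+ *   uv_sem_post(&sem);      from one thread
+ *   uv_sem_wait(&sem);      blocks another thread until posted
+ *   uv_sem_destroy(&sem);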
*/ +} + + +void uv_sem_destroy(uv_sem_t* sem) { + if (semaphore_destroy(mach_task_self(), *sem)) + abort(); +} + + +void uv_sem_post(uv_sem_t* sem) { + if (semaphore_signal(*sem)) + abort(); +} + + +void uv_sem_wait(uv_sem_t* sem) { + int r; + + do + r = semaphore_wait(*sem); + while (r == KERN_ABORTED); + + if (r != KERN_SUCCESS) + abort(); +} + + +int uv_sem_trywait(uv_sem_t* sem) { + mach_timespec_t interval; + kern_return_t err; + + interval.tv_sec = 0; + interval.tv_nsec = 0; + + err = semaphore_timedwait(*sem, interval); + if (err == KERN_SUCCESS) + return 0; + if (err == KERN_OPERATION_TIMED_OUT) + return -EAGAIN; + + abort(); + return -EINVAL; /* Satisfy the compiler. */ +} + +#elif defined(__MVS__) + +int uv_sem_init(uv_sem_t* sem, unsigned int value) { + uv_sem_t semid; + struct sembuf buf; + int err; + + buf.sem_num = 0; + buf.sem_op = value; + buf.sem_flg = 0; + + semid = semget(IPC_PRIVATE, 1, S_IRUSR | S_IWUSR); + if (semid == -1) + return -errno; + + if (-1 == semop(semid, &buf, 1)) { + err = errno; + if (-1 == semctl(*sem, 0, IPC_RMID)) + abort(); + return -err; + } + + *sem = semid; + return 0; +} + +void uv_sem_destroy(uv_sem_t* sem) { + if (-1 == semctl(*sem, 0, IPC_RMID)) + abort(); +} + +void uv_sem_post(uv_sem_t* sem) { + struct sembuf buf; + + buf.sem_num = 0; + buf.sem_op = 1; + buf.sem_flg = 0; + + if (-1 == semop(*sem, &buf, 1)) + abort(); +} + +void uv_sem_wait(uv_sem_t* sem) { + struct sembuf buf; + int op_status; + + buf.sem_num = 0; + buf.sem_op = -1; + buf.sem_flg = 0; + + do + op_status = semop(*sem, &buf, 1); + while (op_status == -1 && errno == EINTR); + + if (op_status) + abort(); +} + +int uv_sem_trywait(uv_sem_t* sem) { + struct sembuf buf; + int op_status; + + buf.sem_num = 0; + buf.sem_op = -1; + buf.sem_flg = IPC_NOWAIT; + + do + op_status = semop(*sem, &buf, 1); + while (op_status == -1 && errno == EINTR); + + if (op_status) { + if (errno == EAGAIN) + return -EAGAIN; + abort(); + } + + return 0; +} + +#else /* !(defined(__APPLE__) && defined(__MACH__)) */ + +int uv_sem_init(uv_sem_t* sem, unsigned int value) { + if (sem_init(sem, 0, value)) + return -errno; + return 0; +} + + +void uv_sem_destroy(uv_sem_t* sem) { + if (sem_destroy(sem)) + abort(); +} + + +void uv_sem_post(uv_sem_t* sem) { + if (sem_post(sem)) + abort(); +} + + +void uv_sem_wait(uv_sem_t* sem) { + int r; + + do + r = sem_wait(sem); + while (r == -1 && errno == EINTR); + + if (r) + abort(); +} + + +int uv_sem_trywait(uv_sem_t* sem) { + int r; + + do + r = sem_trywait(sem); + while (r == -1 && errno == EINTR); + + if (r) { + if (errno == EAGAIN) + return -EAGAIN; + abort(); + } + + return 0; +} + +#endif /* defined(__APPLE__) && defined(__MACH__) */ + + +#if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__) + +int uv_cond_init(uv_cond_t* cond) { + return -pthread_cond_init(cond, NULL); +} + +#else /* !(defined(__APPLE__) && defined(__MACH__)) */ + +int uv_cond_init(uv_cond_t* cond) { + pthread_condattr_t attr; + int err; + + err = pthread_condattr_init(&attr); + if (err) + return -err; + +#if !(defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)) + err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); + if (err) + goto error2; +#endif + + err = pthread_cond_init(cond, &attr); + if (err) + goto error2; + + err = pthread_condattr_destroy(&attr); + if (err) + goto error; + + return 0; + +error: + pthread_cond_destroy(cond); +error2: + pthread_condattr_destroy(&attr); + return -err; +} + +#endif /* defined(__APPLE__) && defined(__MACH__) */ + +void 
uv_cond_destroy(uv_cond_t* cond) { +#if defined(__APPLE__) && defined(__MACH__) + /* It has been reported that destroying condition variables that have been + * signalled but not waited on can sometimes result in application crashes. + * See https://codereview.chromium.org/1323293005. + */ + pthread_mutex_t mutex; + struct timespec ts; + int err; + + if (pthread_mutex_init(&mutex, NULL)) + abort(); + + if (pthread_mutex_lock(&mutex)) + abort(); + + ts.tv_sec = 0; + ts.tv_nsec = 1; + + err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts); + if (err != 0 && err != ETIMEDOUT) + abort(); + + if (pthread_mutex_unlock(&mutex)) + abort(); + + if (pthread_mutex_destroy(&mutex)) + abort(); +#endif /* defined(__APPLE__) && defined(__MACH__) */ + + if (pthread_cond_destroy(cond)) + abort(); +} + +void uv_cond_signal(uv_cond_t* cond) { + if (pthread_cond_signal(cond)) + abort(); +} + +void uv_cond_broadcast(uv_cond_t* cond) { + if (pthread_cond_broadcast(cond)) + abort(); +} + +void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) { + if (pthread_cond_wait(cond, mutex)) + abort(); +} + + +int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) { + int r; + struct timespec ts; + +#if defined(__APPLE__) && defined(__MACH__) + ts.tv_sec = timeout / NANOSEC; + ts.tv_nsec = timeout % NANOSEC; + r = pthread_cond_timedwait_relative_np(cond, mutex, &ts); +#else + timeout += uv__hrtime(UV_CLOCK_PRECISE); + ts.tv_sec = timeout / NANOSEC; + ts.tv_nsec = timeout % NANOSEC; +#if defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC) + /* + * The bionic pthread implementation doesn't support CLOCK_MONOTONIC, + * but has this alternative function instead. + */ + r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts); +#else + r = pthread_cond_timedwait(cond, mutex, &ts); +#endif /* __ANDROID__ */ +#endif + + + if (r == 0) + return 0; + + if (r == ETIMEDOUT) + return -ETIMEDOUT; + + abort(); + return -EINVAL; /* Satisfy the compiler. */ +} + + +int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) { + return -pthread_barrier_init(barrier, NULL, count); +} + + +void uv_barrier_destroy(uv_barrier_t* barrier) { + if (pthread_barrier_destroy(barrier)) + abort(); +} + + +int uv_barrier_wait(uv_barrier_t* barrier) { + int r = pthread_barrier_wait(barrier); + if (r && r != PTHREAD_BARRIER_SERIAL_THREAD) + abort(); + return r == PTHREAD_BARRIER_SERIAL_THREAD; +} + + +int uv_key_create(uv_key_t* key) { + return -pthread_key_create(key, NULL); +} + + +void uv_key_delete(uv_key_t* key) { + if (pthread_key_delete(*key)) + abort(); +} + + +void* uv_key_get(uv_key_t* key) { + return pthread_getspecific(*key); +} + + +void uv_key_set(uv_key_t* key, void* value) { + if (pthread_setspecific(*key, value)) + abort(); +} diff --git a/Utilities/cmlibuv/src/unix/timer.c b/Utilities/cmlibuv/src/unix/timer.c new file mode 100644 index 0000000..f46bdf4 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/timer.c @@ -0,0 +1,172 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" +#include "heap-inl.h" + +#include <assert.h> +#include <limits.h> + + +static int timer_less_than(const struct heap_node* ha, + const struct heap_node* hb) { + const uv_timer_t* a; + const uv_timer_t* b; + + a = container_of(ha, uv_timer_t, heap_node); + b = container_of(hb, uv_timer_t, heap_node); + + if (a->timeout < b->timeout) + return 1; + if (b->timeout < a->timeout) + return 0; + + /* Compare start_id when both have the same timeout. start_id is + * allocated with loop->timer_counter in uv_timer_start(). + */ + if (a->start_id < b->start_id) + return 1; + if (b->start_id < a->start_id) + return 0; + + return 0; +} + + +int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER); + handle->timer_cb = NULL; + handle->repeat = 0; + return 0; +} + + +int uv_timer_start(uv_timer_t* handle, + uv_timer_cb cb, + uint64_t timeout, + uint64_t repeat) { + uint64_t clamped_timeout; + + if (cb == NULL) + return -EINVAL; + + if (uv__is_active(handle)) + uv_timer_stop(handle); + + clamped_timeout = handle->loop->time + timeout; + if (clamped_timeout < timeout) + clamped_timeout = (uint64_t) -1; + + handle->timer_cb = cb; + handle->timeout = clamped_timeout; + handle->repeat = repeat; + /* start_id is the second index to be compared in uv__timer_cmp() */ + handle->start_id = handle->loop->timer_counter++; + + heap_insert((struct heap*) &handle->loop->timer_heap, + (struct heap_node*) &handle->heap_node, + timer_less_than); + uv__handle_start(handle); + + return 0; +} + + +int uv_timer_stop(uv_timer_t* handle) { + if (!uv__is_active(handle)) + return 0; + + heap_remove((struct heap*) &handle->loop->timer_heap, + (struct heap_node*) &handle->heap_node, + timer_less_than); + uv__handle_stop(handle); + + return 0; +} + + +int uv_timer_again(uv_timer_t* handle) { + if (handle->timer_cb == NULL) + return -EINVAL; + + if (handle->repeat) { + uv_timer_stop(handle); + uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat); + } + + return 0; +} + + +void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) { + handle->repeat = repeat; +} + + +uint64_t uv_timer_get_repeat(const uv_timer_t* handle) { + return handle->repeat; +} + + +int uv__next_timeout(const uv_loop_t* loop) { + const struct heap_node* heap_node; + const uv_timer_t* handle; + uint64_t diff; + + heap_node = heap_min((const struct heap*) 
&loop->timer_heap); + if (heap_node == NULL) + return -1; /* block indefinitely */ + + handle = container_of(heap_node, uv_timer_t, heap_node); + if (handle->timeout <= loop->time) + return 0; + + diff = handle->timeout - loop->time; + if (diff > INT_MAX) + diff = INT_MAX; + + return diff; +} + + +void uv__run_timers(uv_loop_t* loop) { + struct heap_node* heap_node; + uv_timer_t* handle; + + for (;;) { + heap_node = heap_min((struct heap*) &loop->timer_heap); + if (heap_node == NULL) + break; + + handle = container_of(heap_node, uv_timer_t, heap_node); + if (handle->timeout > loop->time) + break; + + uv_timer_stop(handle); + uv_timer_again(handle); + handle->timer_cb(handle); + } +} + + +void uv__timer_close(uv_timer_t* handle) { + uv_timer_stop(handle); +} diff --git a/Utilities/cmlibuv/src/unix/tty.c b/Utilities/cmlibuv/src/unix/tty.c new file mode 100644 index 0000000..b2d37f4 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/tty.c @@ -0,0 +1,336 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" +#include "spinlock.h" + +#include <stdlib.h> +#include <assert.h> +#include <unistd.h> +#include <termios.h> +#include <errno.h> +#include <sys/ioctl.h> + +#if defined(__MVS__) && !defined(IMAXBEL) +#define IMAXBEL 0 +#endif + +static int orig_termios_fd = -1; +static struct termios orig_termios; +static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER; + +static int uv__tty_is_slave(const int fd) { + int result; +#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + int dummy; + + result = ioctl(fd, TIOCGPTN, &dummy) != 0; +#elif defined(__APPLE__) + char dummy[256]; + + result = ioctl(fd, TIOCPTYGNAME, &dummy) != 0; +#else + /* Fallback to ptsname + */ + result = ptsname(fd) == NULL; +#endif + return result; +} + +int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) { + uv_handle_type type; + int flags; + int newfd; + int r; + int saved_flags; + char path[256]; + + /* File descriptors that refer to files cannot be monitored with epoll. + * That restriction also applies to character devices like /dev/random + * (but obviously not /dev/tty.) + */ + type = uv_guess_handle(fd); + if (type == UV_FILE || type == UV_UNKNOWN_HANDLE) + return -EINVAL; + + flags = 0; + newfd = -1; + + /* Reopen the file descriptor when it refers to a tty. 
This lets us put the + * tty in non-blocking mode without affecting other processes that share it + * with us. + * + * Example: `node | cat` - if we put our fd 0 in non-blocking mode, it also + * affects fd 1 of `cat` because both file descriptors refer to the same + * struct file in the kernel. When we reopen our fd 0, it points to a + * different struct file, hence changing its properties doesn't affect + * other processes. + */ + if (type == UV_TTY) { + /* Reopening a pty in master mode won't work either because the reopened + * pty will be in slave mode (*BSD) or reopening will allocate a new + * master/slave pair (Linux). Therefore check if the fd points to a + * slave device. + */ + if (uv__tty_is_slave(fd) && ttyname_r(fd, path, sizeof(path)) == 0) + r = uv__open_cloexec(path, O_RDWR); + else + r = -1; + + if (r < 0) { + /* fallback to using blocking writes */ + if (!readable) + flags |= UV_STREAM_BLOCKING; + goto skip; + } + + newfd = r; + + r = uv__dup2_cloexec(newfd, fd); + if (r < 0 && r != -EINVAL) { + /* EINVAL means newfd == fd which could conceivably happen if another + * thread called close(fd) between our calls to isatty() and open(). + * That's a rather unlikely event but let's handle it anyway. + */ + uv__close(newfd); + return r; + } + + fd = newfd; + } + +#if defined(__APPLE__) + /* Save the fd flags in case we need to restore them due to an error. */ + do + saved_flags = fcntl(fd, F_GETFL); + while (saved_flags == -1 && errno == EINTR); + + if (saved_flags == -1) { + if (newfd != -1) + uv__close(newfd); + return -errno; + } +#endif + + /* Pacify the compiler. */ + (void) &saved_flags; + +skip: + uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY); + + /* If anything fails beyond this point we need to remove the handle from + * the handle queue, since it was added by uv__handle_init in uv_stream_init. + */ + + if (!(flags & UV_STREAM_BLOCKING)) + uv__nonblock(fd, 1); + +#if defined(__APPLE__) + r = uv__stream_try_select((uv_stream_t*) tty, &fd); + if (r) { + int rc = r; + if (newfd != -1) + uv__close(newfd); + QUEUE_REMOVE(&tty->handle_queue); + do + r = fcntl(fd, F_SETFL, saved_flags); + while (r == -1 && errno == EINTR); + return rc; + } +#endif + + if (readable) + flags |= UV_STREAM_READABLE; + else + flags |= UV_STREAM_WRITABLE; + + uv__stream_open((uv_stream_t*) tty, fd, flags); + tty->mode = UV_TTY_MODE_NORMAL; + + return 0; +} + +static void uv__tty_make_raw(struct termios* tio) { + assert(tio != NULL); + +#if defined __sun || defined __MVS__ + /* + * This implementation of cfmakeraw for Solaris and derivatives is taken from + * http://www.perkin.org.uk/posts/solaris-portability-cfmakeraw.html. 
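+ * The flag twiddling below closely mirrors cfmakeraw(3): the same
+ * input, output, and local mode bits are cleared, and c_cflag is forced
+ * to 8-bit characters (CS8) with parity disabled. For instance, a
+ * c_lflag of (ECHO | ICANON | ISIG) comes out as 0, disabling echo,
+ * canonical line buffering, and signal generation in one step.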
+ */ + tio->c_iflag &= ~(IMAXBEL | IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | + IGNCR | ICRNL | IXON); + tio->c_oflag &= ~OPOST; + tio->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); + tio->c_cflag &= ~(CSIZE | PARENB); + tio->c_cflag |= CS8; +#else + cfmakeraw(tio); +#endif /* #ifdef __sun */ +} + +int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) { + struct termios tmp; + int fd; + + if (tty->mode == (int) mode) + return 0; + + fd = uv__stream_fd(tty); + if (tty->mode == UV_TTY_MODE_NORMAL && mode != UV_TTY_MODE_NORMAL) { + if (tcgetattr(fd, &tty->orig_termios)) + return -errno; + + /* This is used for uv_tty_reset_mode() */ + uv_spinlock_lock(&termios_spinlock); + if (orig_termios_fd == -1) { + orig_termios = tty->orig_termios; + orig_termios_fd = fd; + } + uv_spinlock_unlock(&termios_spinlock); + } + + tmp = tty->orig_termios; + switch (mode) { + case UV_TTY_MODE_NORMAL: + break; + case UV_TTY_MODE_RAW: + tmp.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); + tmp.c_oflag |= (ONLCR); + tmp.c_cflag |= (CS8); + tmp.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); + tmp.c_cc[VMIN] = 1; + tmp.c_cc[VTIME] = 0; + break; + case UV_TTY_MODE_IO: + uv__tty_make_raw(&tmp); + break; + } + + /* Apply changes after draining */ + if (tcsetattr(fd, TCSADRAIN, &tmp)) + return -errno; + + tty->mode = mode; + return 0; +} + + +int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) { + struct winsize ws; + int err; + + do + err = ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws); + while (err == -1 && errno == EINTR); + + if (err == -1) + return -errno; + + *width = ws.ws_col; + *height = ws.ws_row; + + return 0; +} + + +uv_handle_type uv_guess_handle(uv_file file) { + struct sockaddr sa; + struct stat s; + socklen_t len; + int type; + + if (file < 0) + return UV_UNKNOWN_HANDLE; + + if (isatty(file)) + return UV_TTY; + + if (fstat(file, &s)) + return UV_UNKNOWN_HANDLE; + + if (S_ISREG(s.st_mode)) + return UV_FILE; + + if (S_ISCHR(s.st_mode)) + return UV_FILE; /* XXX UV_NAMED_PIPE? */ + + if (S_ISFIFO(s.st_mode)) + return UV_NAMED_PIPE; + + if (!S_ISSOCK(s.st_mode)) + return UV_UNKNOWN_HANDLE; + + len = sizeof(type); + if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len)) + return UV_UNKNOWN_HANDLE; + + len = sizeof(sa); + if (getsockname(file, &sa, &len)) + return UV_UNKNOWN_HANDLE; + + if (type == SOCK_DGRAM) + if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6) + return UV_UDP; + + if (type == SOCK_STREAM) { +#if defined(_AIX) || defined(__DragonFly__) + /* on AIX/DragonFly the getsockname call returns an empty sa structure + * for sockets of type AF_UNIX. For all other types it will + * return a properly filled in structure. + */ + if (len == 0) + return UV_NAMED_PIPE; +#endif /* defined(_AIX) || defined(__DragonFly__) */ + + if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6) + return UV_TCP; + if (sa.sa_family == AF_UNIX) + return UV_NAMED_PIPE; + } + + return UV_UNKNOWN_HANDLE; +} + + +/* This function is async signal-safe, meaning that it's safe to call from + * inside a signal handler _unless_ execution was inside uv_tty_set_mode()'s + * critical section when the signal was raised. + */ +int uv_tty_reset_mode(void) { + int saved_errno; + int err; + + saved_errno = errno; + if (!uv_spinlock_trylock(&termios_spinlock)) + return -EBUSY; /* In uv_tty_set_mode(). 
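+ * The caller may simply retry later. An illustrative (hypothetical)
+ * SIGINT handler built on this function:
+ *
+ *   static void on_sigint(int signo) {
+ *     uv_tty_reset_mode();   -- best effort, may return -EBUSY
+ *     _exit(128 + signo);    -- _exit() is async-signal-safe too
+ *   }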
*/ + + err = 0; + if (orig_termios_fd != -1) + if (tcsetattr(orig_termios_fd, TCSANOW, &orig_termios)) + err = -errno; + + uv_spinlock_unlock(&termios_spinlock); + errno = saved_errno; + + return err; +} diff --git a/Utilities/cmlibuv/src/unix/udp.c b/Utilities/cmlibuv/src/unix/udp.c new file mode 100644 index 0000000..1cd4925 --- /dev/null +++ b/Utilities/cmlibuv/src/unix/udp.c @@ -0,0 +1,895 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include <assert.h> +#include <string.h> +#include <errno.h> +#include <stdlib.h> +#include <unistd.h> +#if defined(__MVS__) +#include <xti.h> +#endif + +#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP) +# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP +#endif + +#if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP) +# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP +#endif + + +static void uv__udp_run_completed(uv_udp_t* handle); +static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents); +static void uv__udp_recvmsg(uv_udp_t* handle); +static void uv__udp_sendmsg(uv_udp_t* handle); +static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, + int domain, + unsigned int flags); + + +void uv__udp_close(uv_udp_t* handle) { + uv__io_close(handle->loop, &handle->io_watcher); + uv__handle_stop(handle); + + if (handle->io_watcher.fd != -1) { + uv__close(handle->io_watcher.fd); + handle->io_watcher.fd = -1; + } +} + + +void uv__udp_finish_close(uv_udp_t* handle) { + uv_udp_send_t* req; + QUEUE* q; + + assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT)); + assert(handle->io_watcher.fd == -1); + + while (!QUEUE_EMPTY(&handle->write_queue)) { + q = QUEUE_HEAD(&handle->write_queue); + QUEUE_REMOVE(q); + + req = QUEUE_DATA(q, uv_udp_send_t, queue); + req->status = -ECANCELED; + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); + } + + uv__udp_run_completed(handle); + + assert(handle->send_queue_size == 0); + assert(handle->send_queue_count == 0); + + /* Now tear down the handle. 
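+ * Every send request that was still queued has just been completed
+ * with status -ECANCELED, so an illustrative send callback would see:
+ *
+ *   static void on_send(uv_udp_send_t* req, int status) {
+ *     if (status == UV_ECANCELED)
+ *       ;  -- handle was closed before the datagram went out
+ *   }
+ *
+ * Only the read callbacks are cleared below; close_cb still runs.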
*/ + handle->recv_cb = NULL; + handle->alloc_cb = NULL; + /* but _do not_ touch close_cb */ +} + + +static void uv__udp_run_completed(uv_udp_t* handle) { + uv_udp_send_t* req; + QUEUE* q; + + assert(!(handle->flags & UV_UDP_PROCESSING)); + handle->flags |= UV_UDP_PROCESSING; + + while (!QUEUE_EMPTY(&handle->write_completed_queue)) { + q = QUEUE_HEAD(&handle->write_completed_queue); + QUEUE_REMOVE(q); + + req = QUEUE_DATA(q, uv_udp_send_t, queue); + uv__req_unregister(handle->loop, req); + + handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs); + handle->send_queue_count--; + + if (req->bufs != req->bufsml) + uv__free(req->bufs); + req->bufs = NULL; + + if (req->send_cb == NULL) + continue; + + /* req->status >= 0 == bytes written + * req->status < 0 == errno + */ + if (req->status >= 0) + req->send_cb(req, 0); + else + req->send_cb(req, req->status); + } + + if (QUEUE_EMPTY(&handle->write_queue)) { + /* Pending queue and completion queue empty, stop watcher. */ + uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT); + if (!uv__io_active(&handle->io_watcher, POLLIN)) + uv__handle_stop(handle); + } + + handle->flags &= ~UV_UDP_PROCESSING; +} + + +static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) { + uv_udp_t* handle; + + handle = container_of(w, uv_udp_t, io_watcher); + assert(handle->type == UV_UDP); + + if (revents & POLLIN) + uv__udp_recvmsg(handle); + + if (revents & POLLOUT) { + uv__udp_sendmsg(handle); + uv__udp_run_completed(handle); + } +} + + +static void uv__udp_recvmsg(uv_udp_t* handle) { + struct sockaddr_storage peer; + struct msghdr h; + ssize_t nread; + uv_buf_t buf; + int flags; + int count; + + assert(handle->recv_cb != NULL); + assert(handle->alloc_cb != NULL); + + /* Prevent loop starvation when the data comes in as fast as (or faster than) + * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O. + */ + count = 32; + + memset(&h, 0, sizeof(h)); + h.msg_name = &peer; + + do { + buf = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, 64 * 1024, &buf); + if (buf.base == NULL || buf.len == 0) { + handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0); + return; + } + assert(buf.base != NULL); + + h.msg_namelen = sizeof(peer); + h.msg_iov = (void*) &buf; + h.msg_iovlen = 1; + + do { + nread = recvmsg(handle->io_watcher.fd, &h, 0); + } + while (nread == -1 && errno == EINTR); + + if (nread == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + handle->recv_cb(handle, 0, &buf, NULL, 0); + else + handle->recv_cb(handle, -errno, &buf, NULL, 0); + } + else { + const struct sockaddr *addr; + if (h.msg_namelen == 0) + addr = NULL; + else + addr = (const struct sockaddr*) &peer; + + flags = 0; + if (h.msg_flags & MSG_TRUNC) + flags |= UV_UDP_PARTIAL; + + handle->recv_cb(handle, nread, &buf, addr, flags); + } + } + /* recv_cb callback may decide to pause or close the handle */ + while (nread != -1 + && count-- > 0 + && handle->io_watcher.fd != -1 + && handle->recv_cb != NULL); +} + + +static void uv__udp_sendmsg(uv_udp_t* handle) { + uv_udp_send_t* req; + QUEUE* q; + struct msghdr h; + ssize_t size; + + while (!QUEUE_EMPTY(&handle->write_queue)) { + q = QUEUE_HEAD(&handle->write_queue); + assert(q != NULL); + + req = QUEUE_DATA(q, uv_udp_send_t, queue); + assert(req != NULL); + + memset(&h, 0, sizeof h); + h.msg_name = &req->addr; + h.msg_namelen = (req->addr.ss_family == AF_INET6 ? 
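+ /* pick the sockaddr size that matches the family uv__udp_send() stored */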
+ sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in)); + h.msg_iov = (struct iovec*) req->bufs; + h.msg_iovlen = req->nbufs; + + do { + size = sendmsg(handle->io_watcher.fd, &h, 0); + } while (size == -1 && errno == EINTR); + + if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) + break; + + req->status = (size == -1 ? -errno : size); + + /* Sending a datagram is an atomic operation: either all data + * is written or nothing is (and EMSGSIZE is raised). That is + * why we don't handle partial writes. Just pop the request + * off the write queue and onto the completed queue, done. + */ + QUEUE_REMOVE(&req->queue); + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); + uv__io_feed(handle->loop, &handle->io_watcher); + } +} + + +/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional + * refinements for programs that use multicast. + * + * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that + * are different from the BSDs: it _shares_ the port rather than steal it + * from the current listener. While useful, it's not something we can emulate + * on other platforms so we don't enable it. + */ +static int uv__set_reuse(int fd) { + int yes; + +#if defined(SO_REUSEPORT) && !defined(__linux__) + yes = 1; + if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes))) + return -errno; +#else + yes = 1; + if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes))) + return -errno; +#endif + + return 0; +} + + +int uv__udp_bind(uv_udp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags) { + int err; + int yes; + int fd; + + /* Check for bad flags. */ + if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR)) + return -EINVAL; + + /* Cannot set IPv6-only mode on non-IPv6 socket. */ + if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6) + return -EINVAL; + + fd = handle->io_watcher.fd; + if (fd == -1) { + err = uv__socket(addr->sa_family, SOCK_DGRAM, 0); + if (err < 0) + return err; + fd = err; + handle->io_watcher.fd = fd; + } + + if (flags & UV_UDP_REUSEADDR) { + err = uv__set_reuse(fd); + if (err) + goto out; + } + + if (flags & UV_UDP_IPV6ONLY) { +#ifdef IPV6_V6ONLY + yes = 1; + if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) { + err = -errno; + goto out; + } +#else + err = -ENOTSUP; + goto out; +#endif + } + + if (bind(fd, addr, addrlen)) { + err = -errno; + if (errno == EAFNOSUPPORT) + /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a + * socket created with AF_INET to an AF_INET6 address or vice versa. 
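+ * The mismatch is reported as -EINVAL instead, so callers see one error
+ * code on every platform. Illustrative trigger, assuming the handle's
+ * socket was already created as AF_INET (e.g. via uv_udp_init_ex()):
+ *
+ *   struct sockaddr_in6 a6;
+ *   uv_ip6_addr("::1", 9000, &a6);
+ *   uv_udp_bind(h, (const struct sockaddr*) &a6, 0);  -- UV_EINVAL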
*/ + err = -EINVAL; + goto out; + } + + if (addr->sa_family == AF_INET6) + handle->flags |= UV_HANDLE_IPV6; + + handle->flags |= UV_HANDLE_BOUND; + + return 0; + +out: + uv__close(handle->io_watcher.fd); + handle->io_watcher.fd = -1; + return err; +} + + +static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, + int domain, + unsigned int flags) { + unsigned char taddr[sizeof(struct sockaddr_in6)]; + socklen_t addrlen; + + if (handle->io_watcher.fd != -1) + return 0; + + switch (domain) { + case AF_INET: + { + struct sockaddr_in* addr = (void*)&taddr; + memset(addr, 0, sizeof *addr); + addr->sin_family = AF_INET; + addr->sin_addr.s_addr = INADDR_ANY; + addrlen = sizeof *addr; + break; + } + case AF_INET6: + { + struct sockaddr_in6* addr = (void*)&taddr; + memset(addr, 0, sizeof *addr); + addr->sin6_family = AF_INET6; + addr->sin6_addr = in6addr_any; + addrlen = sizeof *addr; + break; + } + default: + assert(0 && "unsupported address family"); + abort(); + } + + return uv__udp_bind(handle, (const struct sockaddr*) &taddr, addrlen, flags); +} + + +int uv__udp_send(uv_udp_send_t* req, + uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen, + uv_udp_send_cb send_cb) { + int err; + int empty_queue; + + assert(nbufs > 0); + + err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0); + if (err) + return err; + + /* It's legal for send_queue_count > 0 even when the write_queue is empty; + * it means there are error-state requests in the write_completed_queue that + * will touch up send_queue_size/count later. + */ + empty_queue = (handle->send_queue_count == 0); + + uv__req_init(handle->loop, req, UV_UDP_SEND); + assert(addrlen <= sizeof(req->addr)); + memcpy(&req->addr, addr, addrlen); + req->send_cb = send_cb; + req->handle = handle; + req->nbufs = nbufs; + + req->bufs = req->bufsml; + if (nbufs > ARRAY_SIZE(req->bufsml)) + req->bufs = uv__malloc(nbufs * sizeof(bufs[0])); + + if (req->bufs == NULL) { + uv__req_unregister(handle->loop, req); + return -ENOMEM; + } + + memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0])); + handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs); + handle->send_queue_count++; + QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue); + uv__handle_start(handle); + + if (empty_queue && !(handle->flags & UV_UDP_PROCESSING)) { + uv__udp_sendmsg(handle); + } else { + uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); + } + + return 0; +} + + +int uv__udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen) { + int err; + struct msghdr h; + ssize_t size; + + assert(nbufs > 0); + + /* already sending a message */ + if (handle->send_queue_count != 0) + return -EAGAIN; + + err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0); + if (err) + return err; + + memset(&h, 0, sizeof h); + h.msg_name = (struct sockaddr*) addr; + h.msg_namelen = addrlen; + h.msg_iov = (struct iovec*) bufs; + h.msg_iovlen = nbufs; + + do { + size = sendmsg(handle->io_watcher.fd, &h, 0); + } while (size == -1 && errno == EINTR); + + if (size == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return -EAGAIN; + else + return -errno; + } + + return size; +} + + +static int uv__udp_set_membership4(uv_udp_t* handle, + const struct sockaddr_in* multicast_addr, + const char* interface_addr, + uv_membership membership) { + struct ip_mreq mreq; + int optname; + int err; + + memset(&mreq, 0, sizeof mreq); + + if (interface_addr) { + err = 
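+ /* parse the dotted-quad interface address, e.g. "192.168.1.5" */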
uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr); + if (err) + return err; + } else { + mreq.imr_interface.s_addr = htonl(INADDR_ANY); + } + + mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr; + + switch (membership) { + case UV_JOIN_GROUP: + optname = IP_ADD_MEMBERSHIP; + break; + case UV_LEAVE_GROUP: + optname = IP_DROP_MEMBERSHIP; + break; + default: + return -EINVAL; + } + + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IP, + optname, + &mreq, + sizeof(mreq))) { +#if defined(__MVS__) + if (errno == ENXIO) + return -ENODEV; +#endif + return -errno; + } + + return 0; +} + + +static int uv__udp_set_membership6(uv_udp_t* handle, + const struct sockaddr_in6* multicast_addr, + const char* interface_addr, + uv_membership membership) { + int optname; + struct ipv6_mreq mreq; + struct sockaddr_in6 addr6; + + memset(&mreq, 0, sizeof mreq); + + if (interface_addr) { + if (uv_ip6_addr(interface_addr, 0, &addr6)) + return -EINVAL; + mreq.ipv6mr_interface = addr6.sin6_scope_id; + } else { + mreq.ipv6mr_interface = 0; + } + + mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr; + + switch (membership) { + case UV_JOIN_GROUP: + optname = IPV6_ADD_MEMBERSHIP; + break; + case UV_LEAVE_GROUP: + optname = IPV6_DROP_MEMBERSHIP; + break; + default: + return -EINVAL; + } + + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IPV6, + optname, + &mreq, + sizeof(mreq))) { +#if defined(__MVS__) + if (errno == ENXIO) + return -ENODEV; +#endif + return -errno; + } + + return 0; +} + + +int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned int flags) { + int domain; + int err; + int fd; + + /* Use the lower 8 bits for the domain */ + domain = flags & 0xFF; + if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC) + return -EINVAL; + + if (flags & ~0xFF) + return -EINVAL; + + if (domain != AF_UNSPEC) { + err = uv__socket(domain, SOCK_DGRAM, 0); + if (err < 0) + return err; + fd = err; + } else { + fd = -1; + } + + uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP); + handle->alloc_cb = NULL; + handle->recv_cb = NULL; + handle->send_queue_size = 0; + handle->send_queue_count = 0; + uv__io_init(&handle->io_watcher, uv__udp_io, fd); + QUEUE_INIT(&handle->write_queue); + QUEUE_INIT(&handle->write_completed_queue); + return 0; +} + + +int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) { + return uv_udp_init_ex(loop, handle, AF_UNSPEC); +} + + +int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) { + int err; + + /* Check for already active socket. 
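+ * uv_udp_open() adopts a datagram socket created outside libuv, e.g.
+ * (illustrative):
+ *
+ *   int s = socket(AF_INET, SOCK_DGRAM, 0);
+ *   err = uv_udp_open(handle, s);  -- handle now owns s
+ *
+ * A second call, or a call on an already bound handle, fails with the
+ * -EBUSY below.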
*/ + if (handle->io_watcher.fd != -1) + return -EBUSY; + + err = uv__nonblock(sock, 1); + if (err) + return err; + + err = uv__set_reuse(sock); + if (err) + return err; + + handle->io_watcher.fd = sock; + return 0; +} + + +int uv_udp_set_membership(uv_udp_t* handle, + const char* multicast_addr, + const char* interface_addr, + uv_membership membership) { + int err; + struct sockaddr_in addr4; + struct sockaddr_in6 addr6; + + if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) { + err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR); + if (err) + return err; + return uv__udp_set_membership4(handle, &addr4, interface_addr, membership); + } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) { + err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR); + if (err) + return err; + return uv__udp_set_membership6(handle, &addr6, interface_addr, membership); + } else { + return -EINVAL; + } +} + +static int uv__setsockopt(uv_udp_t* handle, + int option4, + int option6, + const void* val, + size_t size) { + int r; + + if (handle->flags & UV_HANDLE_IPV6) + r = setsockopt(handle->io_watcher.fd, + IPPROTO_IPV6, + option6, + val, + size); + else + r = setsockopt(handle->io_watcher.fd, + IPPROTO_IP, + option4, + val, + size); + if (r) + return -errno; + + return 0; +} + +static int uv__setsockopt_maybe_char(uv_udp_t* handle, + int option4, + int option6, + int val) { +#if defined(__sun) || defined(_AIX) || defined(__MVS__) + char arg = val; +#elif defined(__OpenBSD__) + unsigned char arg = val; +#else + int arg = val; +#endif + + if (val < 0 || val > 255) + return -EINVAL; + + return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg)); +} + + +int uv_udp_set_broadcast(uv_udp_t* handle, int on) { + if (setsockopt(handle->io_watcher.fd, + SOL_SOCKET, + SO_BROADCAST, + &on, + sizeof(on))) { + return -errno; + } + + return 0; +} + + +int uv_udp_set_ttl(uv_udp_t* handle, int ttl) { + if (ttl < 1 || ttl > 255) + return -EINVAL; + +#if defined(__MVS__) + if (!(handle->flags & UV_HANDLE_IPV6)) + return -ENOTSUP; /* zOS does not support setting ttl for IPv4 */ +#endif + +/* + * On Solaris and derivatives such as SmartOS, the length of socket options + * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS, + * so hardcode the size of these options on this platform, + * and use the general uv__setsockopt_maybe_char call on other platforms. + */ +#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \ + defined(__MVS__) + + return uv__setsockopt(handle, + IP_TTL, + IPV6_UNICAST_HOPS, + &ttl, + sizeof(ttl)); +#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) || + defined(__MVS__) */ + + return uv__setsockopt_maybe_char(handle, + IP_TTL, + IPV6_UNICAST_HOPS, + ttl); +} + + +int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) { +/* + * On Solaris and derivatives such as SmartOS, the length of socket options + * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for + * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case, + * and use the general uv__setsockopt_maybe_char call otherwise. 
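+ * On those platforms a char-sized IPV6_MULTICAST_HOPS value is rejected
+ * by the kernel, hence the int-sized setsockopt in the IPv6 branch.
+ * The caller's view is unchanged either way; e.g. (illustrative)
+ * uv_udp_set_multicast_ttl(handle, 1) keeps multicast traffic on the
+ * local segment on every platform.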
+ */ +#if defined(__sun) || defined(_AIX) || defined(__MVS__) + if (handle->flags & UV_HANDLE_IPV6) + return uv__setsockopt(handle, + IP_MULTICAST_TTL, + IPV6_MULTICAST_HOPS, + &ttl, + sizeof(ttl)); +#endif /* defined(__sun) || defined(_AIX) || defined(__MVS__) */ + + return uv__setsockopt_maybe_char(handle, + IP_MULTICAST_TTL, + IPV6_MULTICAST_HOPS, + ttl); +} + + +int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) { +/* + * On Solaris and derivatives such as SmartOS, the length of socket options + * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for + * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case, + * and use the general uv__setsockopt_maybe_char call otherwise. + */ +#if defined(__sun) || defined(_AIX) || defined(__MVS__) + if (handle->flags & UV_HANDLE_IPV6) + return uv__setsockopt(handle, + IP_MULTICAST_LOOP, + IPV6_MULTICAST_LOOP, + &on, + sizeof(on)); +#endif /* defined(__sun) || defined(_AIX) || defined(__MVS__) */ + + return uv__setsockopt_maybe_char(handle, + IP_MULTICAST_LOOP, + IPV6_MULTICAST_LOOP, + on); +} + +int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) { + struct sockaddr_storage addr_st; + struct sockaddr_in* addr4; + struct sockaddr_in6* addr6; + + addr4 = (struct sockaddr_in*) &addr_st; + addr6 = (struct sockaddr_in6*) &addr_st; + + if (!interface_addr) { + memset(&addr_st, 0, sizeof addr_st); + if (handle->flags & UV_HANDLE_IPV6) { + addr_st.ss_family = AF_INET6; + addr6->sin6_scope_id = 0; + } else { + addr_st.ss_family = AF_INET; + addr4->sin_addr.s_addr = htonl(INADDR_ANY); + } + } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) { + /* nothing, address was parsed */ + } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) { + /* nothing, address was parsed */ + } else { + return -EINVAL; + } + + if (addr_st.ss_family == AF_INET) { + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IP, + IP_MULTICAST_IF, + (void*) &addr4->sin_addr, + sizeof(addr4->sin_addr)) == -1) { + return -errno; + } + } else if (addr_st.ss_family == AF_INET6) { + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IPV6, + IPV6_MULTICAST_IF, + &addr6->sin6_scope_id, + sizeof(addr6->sin6_scope_id)) == -1) { + return -errno; + } + } else { + assert(0 && "unexpected address family"); + abort(); + } + + return 0; +} + + +int uv_udp_getsockname(const uv_udp_t* handle, + struct sockaddr* name, + int* namelen) { + socklen_t socklen; + + if (handle->io_watcher.fd == -1) + return -EINVAL; /* FIXME(bnoordhuis) -EBADF */ + + /* sizeof(socklen_t) != sizeof(int) on some systems. */ + socklen = (socklen_t) *namelen; + + if (getsockname(handle->io_watcher.fd, name, &socklen)) + return -errno; + + *namelen = (int) socklen; + return 0; +} + + +int uv__udp_recv_start(uv_udp_t* handle, + uv_alloc_cb alloc_cb, + uv_udp_recv_cb recv_cb) { + int err; + + if (alloc_cb == NULL || recv_cb == NULL) + return -EINVAL; + + if (uv__io_active(&handle->io_watcher, POLLIN)) + return -EALREADY; /* FIXME(bnoordhuis) Should be -EBUSY. */ + + err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0); + if (err) + return err; + + handle->alloc_cb = alloc_cb; + handle->recv_cb = recv_cb; + + uv__io_start(handle->loop, &handle->io_watcher, POLLIN); + uv__handle_start(handle); + + return 0; +} + + +int uv__udp_recv_stop(uv_udp_t* handle) { + uv__io_stop(handle->loop, &handle->io_watcher, POLLIN); + + if (!uv__io_active(&handle->io_watcher, POLLOUT)) + uv__handle_stop(handle); + + handle->alloc_cb = NULL; + handle->recv_cb = NULL; + + return 0; +} |
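For orientation, the UDP path above is driven from a small amount of caller code. A minimal receive loop, as an illustrative sketch (not part of the commit; the port and callback names are hypothetical and error checks are elided):

#include <uv.h>

static char slab[64 * 1024];  /* matches the 64 kB reads in uv__udp_recvmsg() */

static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  (void) handle;
  (void) suggested;
  *buf = uv_buf_init(slab, sizeof(slab));
}

static void on_recv(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf,
                    const struct sockaddr* addr, unsigned int flags) {
  (void) handle; (void) flags;
  /* nread == 0 with addr == NULL just means "nothing left to read" */
  if (nread > 0 && addr != NULL)
    ;  /* a datagram of nread bytes is in buf->base */
}

int main(void) {
  uv_udp_t handle;
  struct sockaddr_in addr;

  uv_udp_init(uv_default_loop(), &handle);
  uv_ip4_addr("0.0.0.0", 9123, &addr);
  uv_udp_bind(&handle, (const struct sockaddr*) &addr, UV_UDP_REUSEADDR);
  uv_udp_recv_start(&handle, on_alloc, on_recv);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}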