path: root/Utilities/cmlibuv/src/unix/fs.c
Diffstat (limited to 'Utilities/cmlibuv/src/unix/fs.c')
-rw-r--r--  Utilities/cmlibuv/src/unix/fs.c  94
1 file changed, 68 insertions(+), 26 deletions(-)
diff --git a/Utilities/cmlibuv/src/unix/fs.c b/Utilities/cmlibuv/src/unix/fs.c
index f37749c..6d57cee 100644
--- a/Utilities/cmlibuv/src/unix/fs.c
+++ b/Utilities/cmlibuv/src/unix/fs.c
@@ -79,7 +79,11 @@
defined(__NetBSD__)
# include <sys/param.h>
# include <sys/mount.h>
-#elif defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__)
+#elif defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
# include <sys/statvfs.h>
#else
# include <sys/statfs.h>
@@ -229,11 +233,7 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
-#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
- return utimensat(req->file, NULL, ts, 0);
-#else
return futimens(req->file, ts);
-#endif
#elif defined(__APPLE__) \
|| defined(__DragonFly__) \
|| defined(__FreeBSD__) \
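
With the Android API < 21 special case gone, the futime path is a single futimens(2) call on every platform that reaches this branch. A minimal sketch of that path, assuming a conversion helper equivalent to uv__fs_to_timespec (the helper below is hypothetical and ignores negative timestamps):

#include <time.h>
#include <sys/stat.h>  /* futimens() */

/* Hypothetical stand-in for uv__fs_to_timespec: split a double count of
 * seconds into whole seconds and nanoseconds. */
static struct timespec to_timespec(double sec) {
  struct timespec ts;
  ts.tv_sec  = (time_t) sec;
  ts.tv_nsec = (long) ((sec - (double) ts.tv_sec) * 1e9);
  return ts;
}

static int set_file_times(int fd, double atime, double mtime) {
  struct timespec ts[2];
  ts[0] = to_timespec(atime);
  ts[1] = to_timespec(mtime);
  return futimens(fd, ts);  /* -1 with errno set on failure */
}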
@@ -320,13 +320,14 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
if (path_length < pattern_size ||
strcmp(path + path_length - pattern_size, pattern)) {
errno = EINVAL;
- return -1;
+ r = -1;
+ goto clobber;
}
uv_once(&once, uv__mkostemp_initonce);
#ifdef O_CLOEXEC
- if (no_cloexec_support == 0 && uv__mkostemp != NULL) {
+ if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
r = uv__mkostemp(path, O_CLOEXEC);
if (r >= 0)
@@ -335,11 +336,11 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
/* If mkostemp() returns EINVAL, it means the kernel doesn't
support O_CLOEXEC, so we just fallback to mkstemp() below. */
if (errno != EINVAL)
- return r;
+ goto clobber;
/* We set the static variable so that next calls don't even
try to use mkostemp. */
- no_cloexec_support = 1;
+ uv__store_relaxed(&no_cloexec_support, 1);
}
#endif /* O_CLOEXEC */
@@ -361,6 +362,9 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
if (req->cb != NULL)
uv_rwlock_rdunlock(&req->loop->cloexec_lock);
+clobber:
+ if (r < 0)
+ path[0] = '\0';
return r;
}
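
The mkstemp changes route every failure through the new clobber label so the caller's path buffer is emptied instead of being left with a half-expanded template, while no_cloexec_support is now read and written through libuv's relaxed atomic helpers. A simplified sketch of the overall pattern, assuming mkostemp(3) is called directly (libuv actually resolves it at runtime through uv__mkostemp, and mkostemp may require _GNU_SOURCE on glibc):

#define _GNU_SOURCE   /* mkostemp() on glibc */
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>

/* Sticky "the kernel rejected O_CLOEXEC here" flag; the real code keeps
 * this in no_cloexec_support behind relaxed atomics. */
static int no_cloexec;

static int make_temp(char* path) {   /* path ends in "XXXXXX" */
  int fd = -1;

  if (!no_cloexec) {
    fd = mkostemp(path, O_CLOEXEC);
    if (fd >= 0)
      return fd;
    if (errno != EINVAL)
      goto clobber;      /* real error: report it, but clear the path */
    no_cloexec = 1;      /* EINVAL: this kernel ignores O_CLOEXEC */
  }

  fd = mkstemp(path);    /* fallback without O_CLOEXEC */

clobber:
  if (fd < 0)
    path[0] = '\0';      /* mirror the diff: never hand back a bogus template */
  return fd;
}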
@@ -470,7 +474,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
- if (no_preadv) retry:
+ if (uv__load_relaxed(&no_preadv)) retry:
# endif
{
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
@@ -482,7 +486,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
req->nbufs,
req->off);
if (result == -1 && errno == ENOSYS) {
- no_preadv = 1;
+ uv__store_relaxed(&no_preadv, 1);
goto retry;
}
}
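
uv__load_relaxed() and uv__store_relaxed() are libuv's wrappers around relaxed atomic loads and stores; using them on no_preadv makes the "remember that the syscall is missing" flag safe to touch from several threadpool threads at once. The sketch below shows the same probe-once pattern with plain C11 atomics as an approximation of the real macros; the readv-based emulation is only stubbed out:

#include <errno.h>
#include <stdatomic.h>
#include <unistd.h>
#include <sys/uio.h>   /* preadv(); may need _DEFAULT_SOURCE on glibc */

static _Atomic int no_preadv;

static ssize_t read_at(int fd, struct iovec* iov, int cnt, off_t off) {
  ssize_t r;

  if (atomic_load_explicit(&no_preadv, memory_order_relaxed))
    goto emulate;

  r = preadv(fd, iov, cnt, off);
  if (r != -1 || errno != ENOSYS)
    return r;

  /* Old kernel: remember that preadv() is missing and emulate instead. */
  atomic_store_explicit(&no_preadv, 1, memory_order_relaxed);

emulate:
  /* An lseek()+readv() emulation (uv__fs_preadv in libuv) would go here. */
  errno = ENOSYS;
  return -1;
}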
@@ -639,7 +643,11 @@ static int uv__fs_closedir(uv_fs_t* req) {
static int uv__fs_statfs(uv_fs_t* req) {
uv_statfs_t* stat_fs;
-#if defined(__sun) || defined(__MVS__) || defined(__NetBSD__) || defined(__HAIKU__)
+#if defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
struct statvfs buf;
if (0 != statvfs(req->path, &buf))
@@ -656,7 +664,12 @@ static int uv__fs_statfs(uv_fs_t* req) {
return -1;
}
-#if defined(__sun) || defined(__MVS__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__HAIKU__)
+#if defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
stat_fs->f_type = 0; /* f_type is not supported. */
#else
stat_fs->f_type = buf.f_type;
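
As with the include block at the top of the file, QNX joins the platforms that report filesystem statistics through statvfs(2) rather than statfs(2), and statvfs carries no filesystem-type field, which is why f_type is zeroed on those systems. A rough sketch of that branch, using a stand-in struct with uv_statfs_t-like field names:

#include <stdint.h>
#include <sys/statvfs.h>

/* Stand-in for uv_statfs_t with the fields the statvfs() branch fills. */
struct fs_info {
  uint64_t f_type, f_bsize, f_blocks, f_bfree, f_bavail, f_files, f_ffree;
};

static int query_fs(const char* path, struct fs_info* out) {
  struct statvfs buf;

  if (statvfs(path, &buf) != 0)
    return -1;

  out->f_type   = 0;  /* statvfs() has no filesystem-type field */
  out->f_bsize  = buf.f_bsize;
  out->f_blocks = buf.f_blocks;
  out->f_bfree  = buf.f_bfree;
  out->f_bavail = buf.f_bavail;
  out->f_files  = buf.f_files;
  out->f_ffree  = buf.f_ffree;
  return 0;
}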
@@ -897,8 +910,27 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
ssize_t r;
off = req->off;
+
+#ifdef __linux__
+ {
+ static int copy_file_range_support = 1;
+
+ if (copy_file_range_support) {
+ r = uv__fs_copy_file_range(in_fd, NULL, out_fd, &off, req->bufsml[0].len, 0);
+
+ if (r == -1 && errno == ENOSYS) {
+ errno = 0;
+ copy_file_range_support = 0;
+ } else {
+ goto ok;
+ }
+ }
+ }
+#endif
+
r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);
+ok:
/* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
* it still writes out data. Fortunately, we can detect it by checking if
* the offset has been updated.
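
On Linux the sendfile path now tries copy_file_range(2) first, through libuv's internal uv__fs_copy_file_range() wrapper, and only falls back to sendfile(2) when the kernel reports ENOSYS, caching that answer in a static flag so later requests skip the probe. The sketch below calls the glibc copy_file_range() wrapper directly, which assumes glibc 2.27+, kernel 4.5+, and a 64-bit off_t; the flag is a plain int here rather than an atomic:

#define _GNU_SOURCE   /* copy_file_range() via glibc 2.27+ */
#include <errno.h>
#include <unistd.h>
#include <sys/sendfile.h>

/* 1 until the kernel says otherwise; the diff keeps this in a
 * function-local static called copy_file_range_support. */
static int have_copy_file_range = 1;

static ssize_t copy_chunk(int in_fd, int out_fd, off_t* off, size_t len) {
  ssize_t r;

  if (have_copy_file_range) {
    r = copy_file_range(in_fd, NULL, out_fd, off, len, 0);
    if (r != -1 || errno != ENOSYS)
      return r;          /* success, or a real error worth reporting */
    errno = 0;           /* the diff clears errno before falling back */
    have_copy_file_range = 0;
  }

  return sendfile(out_fd, in_fd, off, len);
}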
@@ -1141,7 +1173,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
goto out;
}
- dst_flags = O_WRONLY | O_CREAT | O_TRUNC;
+ dst_flags = O_WRONLY | O_CREAT;
if (req->flags & UV_FS_COPYFILE_EXCL)
dst_flags |= O_EXCL;
@@ -1160,16 +1192,26 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
goto out;
}
- /* Get the destination file's mode. */
- if (fstat(dstfd, &dst_statsbuf)) {
- err = UV__ERR(errno);
- goto out;
- }
+ /* If the file is not being opened exclusively, verify that the source and
+ destination are not the same file. If they are the same, bail out early. */
+ if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
+ /* Get the destination file's mode. */
+ if (fstat(dstfd, &dst_statsbuf)) {
+ err = UV__ERR(errno);
+ goto out;
+ }
- /* Check if srcfd and dstfd refer to the same file */
- if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
- src_statsbuf.st_ino == dst_statsbuf.st_ino) {
- goto out;
+ /* Check if srcfd and dstfd refer to the same file */
+ if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
+ src_statsbuf.st_ino == dst_statsbuf.st_ino) {
+ goto out;
+ }
+
+ /* Truncate the file in case the destination already existed. */
+ if (ftruncate(dstfd, 0) != 0) {
+ err = UV__ERR(errno);
+ goto out;
+ }
}
if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
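
Taken together with the earlier removal of O_TRUNC from dst_flags, the copyfile logic now defers truncation until after it has confirmed that source and destination are distinct files, so copying a file onto itself no longer wipes out the data before the same-file check can run. A condensed sketch of the reordered steps, with error handling reduced to -1/errno and a hypothetical helper name:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

/* Hypothetical helper condensing the reordered uv__fs_copyfile steps:
 * open without O_TRUNC, refuse to copy a file onto itself, then truncate. */
static int open_copy_dst(int srcfd, const char* dst, mode_t mode) {
  struct stat src_st, dst_st;
  int dstfd;

  if (fstat(srcfd, &src_st) != 0)
    return -1;

  dstfd = open(dst, O_WRONLY | O_CREAT, mode);  /* note: no O_TRUNC */
  if (dstfd < 0)
    return -1;

  if (fstat(dstfd, &dst_st) != 0)
    goto fail;

  /* Same device and inode: the real code treats this as "nothing to do"
   * and returns without copying; here we just report it to the caller. */
  if (src_st.st_dev == dst_st.st_dev && src_st.st_ino == dst_st.st_ino) {
    errno = EEXIST;
    goto fail;
  }

  /* The destination may already have contents; drop them only now that
   * we know it is a different file from the source. */
  if (ftruncate(dstfd, 0) != 0)
    goto fail;

  return dstfd;

fail:
  close(dstfd);
  return -1;
}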
@@ -1365,7 +1407,7 @@ static int uv__fs_statx(int fd,
int mode;
int rc;
- if (no_statx)
+ if (uv__load_relaxed(&no_statx))
return UV_ENOSYS;
dirfd = AT_FDCWD;
@@ -1398,7 +1440,7 @@ static int uv__fs_statx(int fd,
* implemented, rc might return 1 with 0 set as the error code in which
* case we return ENOSYS.
*/
- no_statx = 1;
+ uv__store_relaxed(&no_statx, 1);
return UV_ENOSYS;
}
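
The statx fallback keeps its existing shape: the first ENOSYS from the kernel sets a flag and every later stat request goes straight to the classic stat()/fstat()/lstat() path; the diff only changes the flag accesses to relaxed atomics. A sketch of that caching, assuming the glibc 2.28+ statx() wrapper and using a plain int where the real code uses uv__load_relaxed()/uv__store_relaxed():

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>     /* AT_FDCWD */
#include <sys/stat.h>  /* statx(), struct statx: glibc 2.28+ */

/* Sticky "statx() is unavailable" flag; no_statx in the real code. */
static int no_statx;

static int try_statx(const char* path, struct statx* buf) {
  int rc;

  if (no_statx)
    return -ENOSYS;  /* caller falls back to stat()/fstat()/lstat() */

  rc = statx(AT_FDCWD, path, 0, STATX_BASIC_STATS, buf);
  if (rc == -1 && errno == ENOSYS) {
    no_statx = 1;    /* old kernel or seccomp filter: remember it */
    return -ENOSYS;
  }

  return rc;
}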
@@ -2041,7 +2083,7 @@ void uv_fs_req_cleanup(uv_fs_t* req) {
/* Only necessary for asychronous requests, i.e., requests with a callback.
* Synchronous ones don't copy their arguments and have req->path and
- * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
+ * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
* UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
*/
if (req->path != NULL &&