@@ -82,29 +82,12 @@ static int read_times(FILE* statfile_fp,
 static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
 static uint64_t read_cpufreq(unsigned int cpunum);
 
-
 int uv__platform_loop_init(uv_loop_t* loop) {
-  int fd;
-  fd = epoll_create1(O_CLOEXEC);
-
-  /* epoll_create1() can fail either because it's not implemented (old kernel)
-   * or because it doesn't understand the O_CLOEXEC flag.
-   */
-  if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
-    fd = epoll_create(256);
-
-    if (fd != -1)
-      uv__cloexec(fd, 1);
-  }
-
-  loop->backend_fd = fd;
+
   loop->inotify_fd = -1;
   loop->inotify_watchers = NULL;
 
-  if (fd == -1)
-    return UV__ERR(errno);
-
-  return 0;
+  return uv__epoll_init(loop);
 }
 
 
@@ -134,380 +117,6 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
 }
 
 
-void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
-  struct epoll_event* events;
-  struct epoll_event dummy;
-  uintptr_t i;
-  uintptr_t nfds;
-
-  assert(loop->watchers != NULL);
-  assert(fd >= 0);
-
-  events = (struct epoll_event*) loop->watchers[loop->nwatchers];
-  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
-  if (events != NULL)
-    /* Invalidate events with same file descriptor */
-    for (i = 0; i < nfds; i++)
-      if (events[i].data.fd == fd)
-        events[i].data.fd = -1;
-
-  /* Remove the file descriptor from the epoll.
-   * This avoids a problem where the same file description remains open
-   * in another process, causing repeated junk epoll events.
-   *
-   * We pass in a dummy epoll_event, to work around a bug in old kernels.
-   */
-  if (loop->backend_fd >= 0) {
-    /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
-     * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
-     */
-    memset(&dummy, 0, sizeof(dummy));
-    epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
-  }
-}
-
-
-int uv__io_check_fd(uv_loop_t* loop, int fd) {
-  struct epoll_event e;
-  int rc;
-
-  memset(&e, 0, sizeof(e));
-  e.events = POLLIN;
-  e.data.fd = -1;
-
-  rc = 0;
-  if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
-    if (errno != EEXIST)
-      rc = UV__ERR(errno);
-
-  if (rc == 0)
-    if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
-      abort();
-
-  return rc;
-}
-
-
-void uv__io_poll(uv_loop_t* loop, int timeout) {
-  /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
-   * effectively infinite on 32 bits architectures. To avoid blocking
-   * indefinitely, we cap the timeout and poll again if necessary.
-   *
-   * Note that "30 minutes" is a simplification because it depends on
-   * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
-   * that being the largest value I have seen in the wild (and only once.)
-   */
-  static const int max_safe_timeout = 1789569;
-  static int no_epoll_pwait_cached;
-  static int no_epoll_wait_cached;
-  int no_epoll_pwait;
-  int no_epoll_wait;
-  struct epoll_event events[1024];
-  struct epoll_event* pe;
-  struct epoll_event e;
-  int real_timeout;
-  QUEUE* q;
-  uv__io_t* w;
-  sigset_t sigset;
-  uint64_t sigmask;
-  uint64_t base;
-  int have_signals;
-  int nevents;
-  int count;
-  int nfds;
-  int fd;
-  int op;
-  int i;
-  int user_timeout;
-  int reset_timeout;
-
-  if (loop->nfds == 0) {
-    assert(QUEUE_EMPTY(&loop->watcher_queue));
-    return;
-  }
-
-  memset(&e, 0, sizeof(e));
-
-  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
-    q = QUEUE_HEAD(&loop->watcher_queue);
-    QUEUE_REMOVE(q);
-    QUEUE_INIT(q);
-
-    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
-    assert(w->pevents != 0);
-    assert(w->fd >= 0);
-    assert(w->fd < (int) loop->nwatchers);
-
-    e.events = w->pevents;
-    e.data.fd = w->fd;
-
-    if (w->events == 0)
-      op = EPOLL_CTL_ADD;
-    else
-      op = EPOLL_CTL_MOD;
-
-    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
-     * events, skip the syscall and squelch the events after epoll_wait().
-     */
-    if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
-      if (errno != EEXIST)
-        abort();
-
-      assert(op == EPOLL_CTL_ADD);
-
-      /* We've reactivated a file descriptor that's been watched before. */
-      if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
-        abort();
-    }
-
-    w->events = w->pevents;
-  }
-
-  sigmask = 0;
-  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
-    sigemptyset(&sigset);
-    sigaddset(&sigset, SIGPROF);
-    sigmask |= 1 << (SIGPROF - 1);
-  }
-
-  assert(timeout >= -1);
-  base = loop->time;
-  count = 48; /* Benchmarks suggest this gives the best throughput. */
-  real_timeout = timeout;
-
-  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
-    reset_timeout = 1;
-    user_timeout = timeout;
-    timeout = 0;
-  } else {
-    reset_timeout = 0;
-    user_timeout = 0;
-  }
-
-  /* You could argue there is a dependency between these two but
-   * ultimately we don't care about their ordering with respect
-   * to one another. Worst case, we make a few system calls that
-   * could have been avoided because another thread already knows
-   * they fail with ENOSYS. Hardly the end of the world.
-   */
-  no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
-  no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
-
-  for (;;) {
-    /* Only need to set the provider_entry_time if timeout != 0. The function
-     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
-     */
-    if (timeout != 0)
-      uv__metrics_set_provider_entry_time(loop);
-
-    /* See the comment for max_safe_timeout for an explanation of why
-     * this is necessary. Executive summary: kernel bug workaround.
-     */
-    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
-      timeout = max_safe_timeout;
-
-    if (sigmask != 0 && no_epoll_pwait != 0)
-      if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
-        abort();
-
-    if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
-      nfds = epoll_pwait(loop->backend_fd,
-                         events,
-                         ARRAY_SIZE(events),
-                         timeout,
-                         &sigset);
-      if (nfds == -1 && errno == ENOSYS) {
-        uv__store_relaxed(&no_epoll_pwait_cached, 1);
-        no_epoll_pwait = 1;
-      }
-    } else {
-      nfds = epoll_wait(loop->backend_fd,
-                        events,
-                        ARRAY_SIZE(events),
-                        timeout);
-      if (nfds == -1 && errno == ENOSYS) {
-        uv__store_relaxed(&no_epoll_wait_cached, 1);
-        no_epoll_wait = 1;
-      }
-    }
-
-    if (sigmask != 0 && no_epoll_pwait != 0)
-      if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
-        abort();
-
-    /* Update loop->time unconditionally. It's tempting to skip the update when
-     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
-     * operating system didn't reschedule our process while in the syscall.
-     */
-    SAVE_ERRNO(uv__update_time(loop));
-
-    if (nfds == 0) {
-      assert(timeout != -1);
-
-      if (reset_timeout != 0) {
-        timeout = user_timeout;
-        reset_timeout = 0;
-      }
-
-      if (timeout == -1)
-        continue;
-
-      if (timeout == 0)
-        return;
-
-      /* We may have been inside the system call for longer than |timeout|
-       * milliseconds so we need to update the timestamp to avoid drift.
-       */
-      goto update_timeout;
-    }
-
-    if (nfds == -1) {
-      if (errno == ENOSYS) {
-        /* epoll_wait() or epoll_pwait() failed, try the other system call. */
-        assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
-        continue;
-      }
-
-      if (errno != EINTR)
-        abort();
-
-      if (reset_timeout != 0) {
-        timeout = user_timeout;
-        reset_timeout = 0;
-      }
-
-      if (timeout == -1)
-        continue;
-
-      if (timeout == 0)
-        return;
-
-      /* Interrupted by a signal. Update timeout and poll again. */
-      goto update_timeout;
-    }
-
-    have_signals = 0;
-    nevents = 0;
-
-    {
-      /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
-      union {
-        struct epoll_event* events;
-        uv__io_t* watchers;
-      } x;
-
-      x.events = events;
-      assert(loop->watchers != NULL);
-      loop->watchers[loop->nwatchers] = x.watchers;
-      loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
-    }
-
-    for (i = 0; i < nfds; i++) {
-      pe = events + i;
-      fd = pe->data.fd;
-
-      /* Skip invalidated events, see uv__platform_invalidate_fd */
-      if (fd == -1)
-        continue;
-
-      assert(fd >= 0);
-      assert((unsigned) fd < loop->nwatchers);
-
-      w = loop->watchers[fd];
-
-      if (w == NULL) {
-        /* File descriptor that we've stopped watching, disarm it.
-         *
-         * Ignore all errors because we may be racing with another thread
-         * when the file descriptor is closed.
-         */
-        epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
-        continue;
-      }
-
-      /* Give users only events they're interested in. Prevents spurious
-       * callbacks when previous callback invocation in this loop has stopped
-       * the current watcher. Also, filters out events that users has not
-       * requested us to watch.
-       */
-      pe->events &= w->pevents | POLLERR | POLLHUP;
-
-      /* Work around an epoll quirk where it sometimes reports just the
-       * EPOLLERR or EPOLLHUP event. In order to force the event loop to
-       * move forward, we merge in the read/write events that the watcher
-       * is interested in; uv__read() and uv__write() will then deal with
-       * the error or hangup in the usual fashion.
-       *
-       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
-       * reads the available data, calls uv_read_stop(), then sometime later
-       * calls uv_read_start() again. By then, libuv has forgotten about the
-       * hangup and the kernel won't report EPOLLIN again because there's
-       * nothing left to read. If anything, libuv is to blame here. The
-       * current hack is just a quick bandaid; to properly fix it, libuv
-       * needs to remember the error/hangup event. We should get that for
-       * free when we switch over to edge-triggered I/O.
-       */
-      if (pe->events == POLLERR || pe->events == POLLHUP)
-        pe->events |=
-          w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
-
-      if (pe->events != 0) {
-        /* Run signal watchers last. This also affects child process watchers
-         * because those are implemented in terms of signal watchers.
-         */
-        if (w == &loop->signal_io_watcher) {
-          have_signals = 1;
-        } else {
-          uv__metrics_update_idle_time(loop);
-          w->cb(loop, w, pe->events);
-        }
-
-        nevents++;
-      }
-    }
-
-    if (reset_timeout != 0) {
-      timeout = user_timeout;
-      reset_timeout = 0;
-    }
-
-    if (have_signals != 0) {
-      uv__metrics_update_idle_time(loop);
-      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
-    }
-
-    loop->watchers[loop->nwatchers] = NULL;
-    loop->watchers[loop->nwatchers + 1] = NULL;
-
-    if (have_signals != 0)
-      return; /* Event loop should cycle now so don't poll again. */
-
-    if (nevents != 0) {
-      if (nfds == ARRAY_SIZE(events) && --count != 0) {
-        /* Poll for more events but don't block this time. */
-        timeout = 0;
-        continue;
-      }
-      return;
-    }
-
-    if (timeout == 0)
-      return;
-
-    if (timeout == -1)
-      continue;
-
-update_timeout:
-    assert(timeout > 0);
-
-    real_timeout -= (loop->time - base);
-    if (real_timeout <= 0)
-      return;
-
-    timeout = real_timeout;
-  }
-}
-
 
 uint64_t uv__hrtime(uv_clocktype_t type) {
   static clock_t fast_clock_id = -1;
@@ -602,22 +211,53 @@ err:
   return UV_EINVAL;
 }
 
+static int uv__slurp(const char* filename, char* buf, size_t len) {
+  ssize_t n;
+  int fd;
+
+  assert(len > 0);
+
+  fd = uv__open_cloexec(filename, O_RDONLY);
+  if (fd < 0)
+    return fd;
+
+  do
+    n = read(fd, buf, len - 1);
+  while (n == -1 && errno == EINTR);
+
+  if (uv__close_nocheckstdio(fd))
+    abort();
+
+  if (n < 0)
+    return UV__ERR(errno);
+
+  buf[n] = '\0';
+
+  return 0;
+}
 
 int uv_uptime(double* uptime) {
   static volatile int no_clock_boottime;
+  char buf[128];
   struct timespec now;
   int r;
 
+  /* Try /proc/uptime first, then fallback to clock_gettime(). */
+
+  if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
+    if (1 == sscanf(buf, "%lf", uptime))
+      return 0;
+
   /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
    * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
    * is suspended.
    */
   if (no_clock_boottime) {
-    retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
+    retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now);
   }
   else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
     no_clock_boottime = 1;
-    goto retry;
+    goto retry_clock_gettime;
   }
 
   if (r)
@@ -709,35 +349,47 @@ static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
 }
 
 
-/* Also reads the CPU frequency on x86. The other architectures only have
- * a BogoMIPS field, which may not be very accurate.
+/* Also reads the CPU frequency on ppc and x86. The other architectures only
+ * have a BogoMIPS field, which may not be very accurate.
  *
  * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
  */
 static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
+#if defined(__PPC__)
+  static const char model_marker[] = "cpu\t\t: ";
+  static const char speed_marker[] = "clock\t\t: ";
+#else
   static const char model_marker[] = "model name\t: ";
   static const char speed_marker[] = "cpu MHz\t\t: ";
+#endif
   const char* inferred_model;
   unsigned int model_idx;
   unsigned int speed_idx;
+  unsigned int part_idx;
   char buf[1024];
   char* model;
   FILE* fp;
+  int model_id;
 
   /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
   (void) &model_marker;
   (void) &speed_marker;
   (void) &speed_idx;
+  (void) &part_idx;
   (void) &model;
   (void) &buf;
   (void) &fp;
+  (void) &model_id;
 
   model_idx = 0;
   speed_idx = 0;
+  part_idx = 0;
 
 #if defined(__arm__) || \
     defined(__i386__) || \
    defined(__mips__) || \
+    defined(__aarch64__) || \
+    defined(__PPC__) || \
     defined(__x86_64__)
   fp = uv__open_file("/proc/cpuinfo");
   if (fp == NULL)
@@ -756,11 +408,96 @@ static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
         continue;
       }
     }
-#if defined(__arm__) || defined(__mips__)
+#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
     if (model_idx < numcpus) {
 #if defined(__arm__)
       /* Fallback for pre-3.8 kernels. */
       static const char model_marker[] = "Processor\t: ";
+#elif defined(__aarch64__)
+      static const char part_marker[] = "CPU part\t: ";
+
+      /* Adapted from: https://github.com/karelzak/util-linux */
+      struct vendor_part {
+        const int id;
+        const char* name;
+      };
+
+      static const struct vendor_part arm_chips[] = {
+        { 0x811, "ARM810" },
+        { 0x920, "ARM920" },
+        { 0x922, "ARM922" },
+        { 0x926, "ARM926" },
+        { 0x940, "ARM940" },
+        { 0x946, "ARM946" },
+        { 0x966, "ARM966" },
+        { 0xa20, "ARM1020" },
+        { 0xa22, "ARM1022" },
+        { 0xa26, "ARM1026" },
+        { 0xb02, "ARM11 MPCore" },
+        { 0xb36, "ARM1136" },
+        { 0xb56, "ARM1156" },
+        { 0xb76, "ARM1176" },
+        { 0xc05, "Cortex-A5" },
+        { 0xc07, "Cortex-A7" },
+        { 0xc08, "Cortex-A8" },
+        { 0xc09, "Cortex-A9" },
+        { 0xc0d, "Cortex-A17" }, /* Originally A12 */
+        { 0xc0f, "Cortex-A15" },
+        { 0xc0e, "Cortex-A17" },
+        { 0xc14, "Cortex-R4" },
+        { 0xc15, "Cortex-R5" },
+        { 0xc17, "Cortex-R7" },
+        { 0xc18, "Cortex-R8" },
+        { 0xc20, "Cortex-M0" },
+        { 0xc21, "Cortex-M1" },
+        { 0xc23, "Cortex-M3" },
+        { 0xc24, "Cortex-M4" },
+        { 0xc27, "Cortex-M7" },
+        { 0xc60, "Cortex-M0+" },
+        { 0xd01, "Cortex-A32" },
+        { 0xd03, "Cortex-A53" },
+        { 0xd04, "Cortex-A35" },
+        { 0xd05, "Cortex-A55" },
+        { 0xd06, "Cortex-A65" },
+        { 0xd07, "Cortex-A57" },
+        { 0xd08, "Cortex-A72" },
+        { 0xd09, "Cortex-A73" },
+        { 0xd0a, "Cortex-A75" },
+        { 0xd0b, "Cortex-A76" },
+        { 0xd0c, "Neoverse-N1" },
+        { 0xd0d, "Cortex-A77" },
+        { 0xd0e, "Cortex-A76AE" },
+        { 0xd13, "Cortex-R52" },
+        { 0xd20, "Cortex-M23" },
+        { 0xd21, "Cortex-M33" },
+        { 0xd41, "Cortex-A78" },
+        { 0xd42, "Cortex-A78AE" },
+        { 0xd4a, "Neoverse-E1" },
+        { 0xd4b, "Cortex-A78C" },
+      };
+
+      if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
+        model = buf + sizeof(part_marker) - 1;
+
+        errno = 0;
+        model_id = strtol(model, NULL, 16);
+        if ((errno != 0) || model_id < 0) {
+          fclose(fp);
+          return UV_EINVAL;
+        }
+
+        for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
+          if (model_id == arm_chips[part_idx].id) {
+            model = uv__strdup(arm_chips[part_idx].name);
+            if (model == NULL) {
+              fclose(fp);
+              return UV_ENOMEM;
+            }
+            ci[model_idx++].model = model;
+            break;
+          }
+        }
+      }
 #else /* defined(__mips__) */
       static const char model_marker[] = "cpu model\t\t: ";
 #endif
@@ -775,18 +512,18 @@ static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
         continue;
       }
     }
-#else /* !__arm__ && !__mips__ */
+#else /* !__arm__ && !__mips__ && !__aarch64__ */
     if (speed_idx < numcpus) {
       if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
         ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
         continue;
       }
     }
-#endif /* __arm__ || __mips__ */
+#endif /* __arm__ || __mips__ || __aarch64__ */
   }
 
   fclose(fp);
-#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */
+#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch__ */
 
   /* Now we want to make sure that all the models contain *something* because
    * it's not safe to leave them as null. Copy the last entry unless there
@@ -824,9 +561,9 @@ static int read_times(FILE* statfile_fp,
   char buf[1024];
 
   ticks = (unsigned int)sysconf(_SC_CLK_TCK);
-  multiplier = ((uint64_t)1000L / ticks);
   assert(ticks != (unsigned int) -1);
   assert(ticks != 0);
+  multiplier = ((uint64_t)1000L / ticks);
 
   rewind(statfile_fp);
 
@@ -1025,32 +762,6 @@ void uv__set_process_title(const char* title) {
 }
 
 
-static int uv__slurp(const char* filename, char* buf, size_t len) {
-  ssize_t n;
-  int fd;
-
-  assert(len > 0);
-
-  fd = uv__open_cloexec(filename, O_RDONLY);
-  if (fd < 0)
-    return fd;
-
-  do
-    n = read(fd, buf, len - 1);
-  while (n == -1 && errno == EINTR);
-
-  if (uv__close_nocheckstdio(fd))
-    abort();
-
-  if (n < 0)
-    return UV__ERR(errno);
-
-  buf[n] = '\0';
-
-  return 0;
-}
-
-
 static uint64_t uv__read_proc_meminfo(const char* what) {
   uint64_t rc;
   char* p;