HardenedBSD src tree https://hardenedbsd.org/
/*
 *  top - a top users display for Unix
 *
 *  DESCRIPTION:
 *  Originally written for BSD4.4 system by Christos Zoulas.
 *  Ported to FreeBSD 2.x by Steven Wallace && Wolfram Schneider
 *  Order support hacked in from top-3.5beta6/machine/m_aix41.c
 *  by Monte Mitzelfelt (for latest top see http://www.groupsys.com/topinfo/)
 *
 *  AUTHOR:  Christos Zoulas <christos@ee.cornell.edu>
 *           Steven Wallace <swallace@FreeBSD.org>
 *           Wolfram Schneider <wosch@FreeBSD.org>
 *           Thomas Moestl <tmoestl@gmx.net>
 *           Eitan Adler <eadler@FreeBSD.org>
 *
 * $FreeBSD$
 */
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <sys/priority.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/user.h>

#include <assert.h>
#include <err.h>
#include <libgen.h>
#include <kvm.h>
#include <math.h>
#include <paths.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <vis.h>

#include "top.h"
#include "display.h"
#include "machine.h"
#include "loadavg.h"
#include "screen.h"
#include "utils.h"
#include "layout.h"

#define GETSYSCTL(name, var) getsysctl(name, &(var), sizeof(var))
extern struct timeval timeout;

static int smpmode;
enum displaymodes displaymode;
static const int namelength = 10;

/* TOP_JID_LEN based on max of 999999 */
#define TOP_JID_LEN 6
#define TOP_SWAP_LEN 5

/* get_process_info passes back a handle. This is what it looks like: */

struct handle {
    struct kinfo_proc **next_proc;  /* points to next valid proc pointer */
    int remaining;                  /* number of pointers remaining */
};

/* define what weighted cpu is. */
#define weighted_cpu(pct, pp) ((pp)->ki_swtime == 0 ? 0.0 : \
    ((pct) / (1.0 - exp((pp)->ki_swtime * logcpu))))
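/*
 * Note: logcpu is log(kern.ccpu), so exp(ki_swtime * logcpu) equals
 * ccpu^swtime.  The divisor (1 - ccpu^swtime) is the fraction of a
 * process's true CPU share that the kernel's exponentially decayed
 * ki_pctcpu average can have accumulated after being resident for only
 * ki_swtime seconds, so dividing by it rescales the raw percentage into
 * the long-run WCPU estimate.  Roughly: with the default ccpu of about
 * 0.95 and swtime = 1, a raw 5% would be shown as about 100% WCPU.
 */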
/* what we consider to be process size: */
#define PROCSIZE(pp) ((pp)->ki_size / 1024)

#define RU(pp) (&(pp)->ki_rusage)

#define PCTCPU(pp) (pcpu[pp - pbase])

/* process state names for the "STATE" column of the display */
/* the extra nulls in the string "run" are for adding a slash and
   the processor number when needed */

static const char *state_abbrev[] = {
    "", "START", "RUN\0\0\0", "SLEEP", "STOP", "ZOMB", "WAIT", "LOCK"
};
static kvm_t *kd;

/* values that we stash away in _init and use in later routines */

static double logcpu;

/* these are retrieved from the kernel in _init */

static load_avg ccpu;

/* these are used in the get_ functions */

static int lastpid;

/* these are for calculating cpu state percentages */

static long cp_time[CPUSTATES];
static long cp_old[CPUSTATES];
static long cp_diff[CPUSTATES];

/* these are for detailing the process states */

static const char *procstatenames[] = {
    "", " starting, ", " running, ", " sleeping, ", " stopped, ",
    " zombie, ", " waiting, ", " lock, ",
    NULL
};
static int process_states[nitems(procstatenames)];

/* these are for detailing the cpu states */

static int cpu_states[CPUSTATES];
static const char *cpustatenames[] = {
    "user", "nice", "system", "interrupt", "idle", NULL
};

/* these are for detailing the memory statistics */

static const char *memorynames[] = {
    "K Active, ", "K Inact, ", "K Laundry, ", "K Wired, ", "K Buf, ",
    "K Free", NULL
};
static int memory_stats[nitems(memorynames)];

static const char *arcnames[] = {
    "K Total, ", "K MFU, ", "K MRU, ", "K Anon, ", "K Header, ", "K Other",
    NULL
};
static int arc_stats[nitems(arcnames)];

static const char *carcnames[] = {
    "K Compressed, ", "K Uncompressed, ", ":1 Ratio, ",
    NULL
};
static int carc_stats[nitems(carcnames)];

static const char *swapnames[] = {
    "K Total, ", "K Used, ", "K Free, ", "% Inuse, ", "K In, ", "K Out",
    NULL
};
static int swap_stats[nitems(swapnames)];

static int has_swap;
/* these are for keeping track of the proc array */

static int nproc;
static int onproc = -1;
static int pref_len;
static struct kinfo_proc *pbase;
static struct kinfo_proc **pref;
static struct kinfo_proc *previous_procs;
static struct kinfo_proc **previous_pref;
static int previous_proc_count = 0;
static int previous_proc_count_max = 0;
static int previous_thread;

/* data used for recalculating pctcpu */
static double *pcpu;
static struct timespec proc_uptime;
static struct timeval proc_wall_time;
static struct timeval previous_wall_time;
static uint64_t previous_interval = 0;

/* total number of io operations */
static long total_inblock;
static long total_oublock;
static long total_majflt;

/* these are for getting the memory statistics */

static int arc_enabled;
static int carc_enabled;
static int pageshift;   /* log base 2 of the pagesize */

/* define pagetok in terms of pageshift */

#define pagetok(size) ((size) << pageshift)

/* swap usage */
#define ki_swap(kip) \
    ((kip)->ki_swrss > (kip)->ki_rssize ? (kip)->ki_swrss - (kip)->ki_rssize : 0)
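/*
 * ki_swrss records the resident set size at the time the process was last
 * swapped, so the difference above is only a rough estimate of how many of
 * the process's pages currently live in swap; it is clamped to zero when
 * the resident set has since grown back past that mark.
 */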
/*
 * Sorting orders.  The first element is the default.
 */
static const char *ordernames[] = {
    "cpu", "size", "res", "time", "pri", "threads",
    "total", "read", "write", "fault", "vcsw", "ivcsw",
    "jid", "swap", "pid", NULL
};

/* Per-cpu time states */
static int maxcpu;
static int maxid;
static int ncpus;
static unsigned long cpumask;
static long *times;
static long *pcpu_cp_time;
static long *pcpu_cp_old;
static long *pcpu_cp_diff;
static int *pcpu_cpu_states;

static int compare_swap(const void *a, const void *b);
static int compare_jid(const void *a, const void *b);
static int compare_pid(const void *a, const void *b);
static int compare_tid(const void *a, const void *b);
static const char *format_nice(const struct kinfo_proc *pp);
static void getsysctl(const char *name, void *ptr, size_t len);
static int swapmode(int *retavail, int *retfree);
static void update_layout(void);
static int find_uid(uid_t needle, int *haystack);
static int
find_uid(uid_t needle, int *haystack)
{
    size_t i = 0;

    for (; i < TOP_MAX_UIDS; ++i)
        if ((uid_t)haystack[i] == needle)
            return 1;
    return (0);
}

void
toggle_pcpustats(void)
{

    if (ncpus == 1)
        return;
    update_layout();
}
/* Adjust display based on ncpus and the ARC state. */
static void
update_layout(void)
{

    y_mem = 3;
    y_arc = 4;
    y_carc = 5;
    y_swap = 3 + arc_enabled + carc_enabled + has_swap;
    y_idlecursor = 4 + arc_enabled + carc_enabled + has_swap;
    y_message = 4 + arc_enabled + carc_enabled + has_swap;
    y_header = 5 + arc_enabled + carc_enabled + has_swap;
    y_procs = 6 + arc_enabled + carc_enabled + has_swap;
    Header_lines = 6 + arc_enabled + carc_enabled + has_swap;

    if (pcpu_stats) {
        y_mem += ncpus - 1;
        y_arc += ncpus - 1;
        y_carc += ncpus - 1;
        y_swap += ncpus - 1;
        y_idlecursor += ncpus - 1;
        y_message += ncpus - 1;
        y_header += ncpus - 1;
        y_procs += ncpus - 1;
        Header_lines += ncpus - 1;
    }
}
int
machine_init(struct statics *statics)
{
    int i, j, empty, pagesize;
    uint64_t arc_size;
    int carc_en, nswapdev;
    size_t size;

    size = sizeof(smpmode);
    if ((sysctlbyname("machdep.smp_active", &smpmode, &size,
        NULL, 0) != 0 &&
        sysctlbyname("kern.smp.active", &smpmode, &size,
        NULL, 0) != 0) ||
        size != sizeof(smpmode))
        smpmode = 0;

    size = sizeof(arc_size);
    if (sysctlbyname("kstat.zfs.misc.arcstats.size", &arc_size, &size,
        NULL, 0) == 0 && arc_size != 0)
        arc_enabled = 1;
    size = sizeof(carc_en);
    if (arc_enabled &&
        sysctlbyname("vfs.zfs.compressed_arc_enabled", &carc_en, &size,
        NULL, 0) == 0 && carc_en == 1)
        carc_enabled = 1;

    kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open");
    if (kd == NULL)
        return (-1);

    size = sizeof(nswapdev);
    if (sysctlbyname("vm.nswapdev", &nswapdev, &size, NULL,
        0) == 0 && nswapdev != 0)
        has_swap = 1;

    GETSYSCTL("kern.ccpu", ccpu);

    /* this is used in calculating WCPU -- calculate it ahead of time */
    logcpu = log(loaddouble(ccpu));

    pbase = NULL;
    pref = NULL;
    pcpu = NULL;
    nproc = 0;
    onproc = -1;

    /* get the page size and calculate pageshift from it */
    pagesize = getpagesize();
    pageshift = 0;
    while (pagesize > 1) {
        pageshift++;
        pagesize >>= 1;
    }

    /* we only need the amount of log(2)1024 for our conversion */
    pageshift -= LOG1024;
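    /*
     * Example: with 4 KiB pages, pageshift starts at 12; subtracting
     * LOG1024 (10) leaves 2, so pagetok() shifts page counts left by 2
     * and yields kilobytes directly.
     */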
    /* fill in the statics information */
    statics->procstate_names = procstatenames;
    statics->cpustate_names = cpustatenames;
    statics->memory_names = memorynames;
    if (arc_enabled)
        statics->arc_names = arcnames;
    else
        statics->arc_names = NULL;
    if (carc_enabled)
        statics->carc_names = carcnames;
    else
        statics->carc_names = NULL;
    if (has_swap)
        statics->swap_names = swapnames;
    else
        statics->swap_names = NULL;
    statics->order_names = ordernames;

    /* Allocate state for per-CPU stats. */
    cpumask = 0;
    ncpus = 0;
    GETSYSCTL("kern.smp.maxcpus", maxcpu);
    times = calloc(maxcpu * CPUSTATES, sizeof(long));
    if (times == NULL)
        err(1, "calloc for kern.smp.maxcpus");
    size = sizeof(long) * maxcpu * CPUSTATES;
    if (sysctlbyname("kern.cp_times", times, &size, NULL, 0) == -1)
        err(1, "sysctlbyname kern.cp_times");
    pcpu_cp_time = calloc(1, size);
    maxid = (size / CPUSTATES / sizeof(long)) - 1;
    for (i = 0; i <= maxid; i++) {
        empty = 1;
        for (j = 0; empty && j < CPUSTATES; j++) {
            if (times[i * CPUSTATES + j] != 0)
                empty = 0;
        }
        if (!empty) {
            cpumask |= (1ul << i);
            ncpus++;
        }
    }
    assert(ncpus > 0);
    pcpu_cp_old = calloc(ncpus * CPUSTATES, sizeof(long));
    pcpu_cp_diff = calloc(ncpus * CPUSTATES, sizeof(long));
    pcpu_cpu_states = calloc(ncpus * CPUSTATES, sizeof(int));
    statics->ncpus = ncpus;

    update_layout();

    /* all done! */
    return (0);
}
char *
format_header(const char *uname_field)
{
    static struct sbuf *header = NULL;

    /* clean up from last time. */
    if (header != NULL) {
        sbuf_clear(header);
    } else {
        header = sbuf_new_auto();
    }

    switch (displaymode) {
    case DISP_CPU: {
        sbuf_printf(header, " %s", ps.thread_id ? " THR" : "PID");
        sbuf_printf(header, "%*s", ps.jail ? TOP_JID_LEN : 0,
            ps.jail ? " JID" : "");
        sbuf_printf(header, " %-*.*s ", namelength, namelength, uname_field);
        if (!ps.thread) {
            sbuf_cat(header, "THR ");
        }
        sbuf_cat(header, "PRI NICE SIZE RES ");
        if (ps.swap) {
            sbuf_printf(header, "%*s ", TOP_SWAP_LEN - 1, "SWAP");
        }
        sbuf_cat(header, "STATE ");
        if (smpmode) {
            sbuf_cat(header, "C ");
        }
        sbuf_cat(header, "TIME ");
        sbuf_printf(header, " %6s ", ps.wcpu ? "WCPU" : "CPU");
        sbuf_cat(header, "COMMAND");
        sbuf_finish(header);
        break;
    }
    case DISP_IO: {
        sbuf_printf(header, " %s%*s %-*.*s",
            ps.thread_id ? " THR" : "PID",
            ps.jail ? TOP_JID_LEN : 0, ps.jail ? " JID" : "",
            namelength, namelength, uname_field);
        sbuf_cat(header, " VCSW IVCSW READ WRITE FAULT TOTAL PERCENT COMMAND");
        sbuf_finish(header);
        break;
    }
    case DISP_MAX:
        assert(!"displaymode must not be set to DISP_MAX");
    }

    return sbuf_data(header);
}
static int swappgsin = -1;
static int swappgsout = -1;

void
get_system_info(struct system_info *si)
{
    struct loadavg sysload;
    int mib[2];
    struct timeval boottime;
    uint64_t arc_stat, arc_stat2;
    int i, j;
    size_t size;

    /* get the CPU stats */
    size = (maxid + 1) * CPUSTATES * sizeof(long);
    if (sysctlbyname("kern.cp_times", pcpu_cp_time, &size, NULL, 0) == -1)
        err(1, "sysctlbyname kern.cp_times");
    GETSYSCTL("kern.cp_time", cp_time);
    GETSYSCTL("vm.loadavg", sysload);
    GETSYSCTL("kern.lastpid", lastpid);

    /* convert load averages to doubles */
    for (i = 0; i < 3; i++)
        si->load_avg[i] = (double)sysload.ldavg[i] / sysload.fscale;

    /* convert cp_time counts to percentages */
    for (i = j = 0; i <= maxid; i++) {
        if ((cpumask & (1ul << i)) == 0)
            continue;
        percentages(CPUSTATES, &pcpu_cpu_states[j * CPUSTATES],
            &pcpu_cp_time[j * CPUSTATES],
            &pcpu_cp_old[j * CPUSTATES],
            &pcpu_cp_diff[j * CPUSTATES]);
        j++;
    }
    percentages(CPUSTATES, cpu_states, cp_time, cp_old, cp_diff);
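    /*
     * Each percentages() call above diffs the current tick counters
     * against the previous sample kept in the *_old arrays, stashes the
     * raw deltas in the *_diff arrays, and converts them into per-state
     * percentages for the display; the loop handles each online CPU and
     * the final call handles the system-wide totals.
     */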
    /* sum memory & swap statistics */
    {
        static unsigned int swap_delay = 0;
        static int swapavail = 0;
        static int swapfree = 0;
        static long bufspace = 0;
        static uint64_t nspgsin, nspgsout;

        GETSYSCTL("vfs.bufspace", bufspace);
        GETSYSCTL("vm.stats.vm.v_active_count", memory_stats[0]);
        GETSYSCTL("vm.stats.vm.v_inactive_count", memory_stats[1]);
        GETSYSCTL("vm.stats.vm.v_laundry_count", memory_stats[2]);
        GETSYSCTL("vm.stats.vm.v_wire_count", memory_stats[3]);
        GETSYSCTL("vm.stats.vm.v_free_count", memory_stats[5]);
        GETSYSCTL("vm.stats.vm.v_swappgsin", nspgsin);
        GETSYSCTL("vm.stats.vm.v_swappgsout", nspgsout);

        /* convert memory stats to Kbytes */
        memory_stats[0] = pagetok(memory_stats[0]);
        memory_stats[1] = pagetok(memory_stats[1]);
        memory_stats[2] = pagetok(memory_stats[2]);
        memory_stats[3] = pagetok(memory_stats[3]);
        memory_stats[4] = bufspace / 1024;
        memory_stats[5] = pagetok(memory_stats[5]);
        memory_stats[6] = -1;

        /* first interval */
        if (swappgsin < 0) {
            swap_stats[4] = 0;
            swap_stats[5] = 0;
        }
        /* compute differences between old and new swap statistic */
        else {
            swap_stats[4] = pagetok(((nspgsin - swappgsin)));
            swap_stats[5] = pagetok(((nspgsout - swappgsout)));
        }

        swappgsin = nspgsin;
        swappgsout = nspgsout;

        /* call CPU heavy swapmode() only for changes */
        if (swap_stats[4] > 0 || swap_stats[5] > 0 || swap_delay == 0) {
            swap_stats[3] = swapmode(&swapavail, &swapfree);
            swap_stats[0] = swapavail;
            swap_stats[1] = swapavail - swapfree;
            swap_stats[2] = swapfree;
        }
        swap_delay = 1;
        swap_stats[6] = -1;
    }
    if (arc_enabled) {
        GETSYSCTL("kstat.zfs.misc.arcstats.size", arc_stat);
        arc_stats[0] = arc_stat >> 10;
        GETSYSCTL("vfs.zfs.mfu_size", arc_stat);
        arc_stats[1] = arc_stat >> 10;
        GETSYSCTL("vfs.zfs.mru_size", arc_stat);
        arc_stats[2] = arc_stat >> 10;
        GETSYSCTL("vfs.zfs.anon_size", arc_stat);
        arc_stats[3] = arc_stat >> 10;
        GETSYSCTL("kstat.zfs.misc.arcstats.hdr_size", arc_stat);
        GETSYSCTL("kstat.zfs.misc.arcstats.l2_hdr_size", arc_stat2);
        arc_stats[4] = (arc_stat + arc_stat2) >> 10;
        GETSYSCTL("kstat.zfs.misc.arcstats.bonus_size", arc_stat);
        arc_stats[5] = arc_stat >> 10;
        GETSYSCTL("kstat.zfs.misc.arcstats.dnode_size", arc_stat);
        arc_stats[5] += arc_stat >> 10;
        GETSYSCTL("kstat.zfs.misc.arcstats.dbuf_size", arc_stat);
        arc_stats[5] += arc_stat >> 10;
        si->arc = arc_stats;
    }
    if (carc_enabled) {
        GETSYSCTL("kstat.zfs.misc.arcstats.compressed_size", arc_stat);
        carc_stats[0] = arc_stat >> 10;
        carc_stats[2] = arc_stat >> 10; /* For ratio */
        GETSYSCTL("kstat.zfs.misc.arcstats.uncompressed_size", arc_stat);
        carc_stats[1] = arc_stat >> 10;
        si->carc = carc_stats;
    }

    /* set arrays and strings */
    if (pcpu_stats) {
        si->cpustates = pcpu_cpu_states;
        si->ncpus = ncpus;
    } else {
        si->cpustates = cpu_states;
        si->ncpus = 1;
    }
    si->memory = memory_stats;
    si->swap = swap_stats;

    if (lastpid > 0) {
        si->last_pid = lastpid;
    } else {
        si->last_pid = -1;
    }
    /*
     * Print how long the system has been up.
     * (Found by getting "boottime" from the kernel.)
     */
    mib[0] = CTL_KERN;
    mib[1] = KERN_BOOTTIME;
    size = sizeof(boottime);
    if (sysctl(mib, nitems(mib), &boottime, &size, NULL, 0) != -1 &&
        boottime.tv_sec != 0) {
        si->boottime = boottime;
    } else {
        si->boottime.tv_sec = -1;
    }
}
#define NOPROC ((void *)-1)

/*
 * We need to compare data from the old process entry with the new
 * process entry.
 * To facilitate doing this quickly we stash a pointer in the kinfo_proc
 * structure to cache the mapping.  We also use a negative cache pointer
 * of NOPROC to avoid duplicate lookups.
 * XXX: this could be done when the actual processes are fetched, we do
 * it here out of laziness.
 */
static const struct kinfo_proc *
get_old_proc(struct kinfo_proc *pp)
{
    const struct kinfo_proc * const *oldpp, *oldp;

    /*
     * If this is the first fetch of the kinfo_procs then we don't have
     * any previous entries.
     */
    if (previous_proc_count == 0)
        return (NULL);
    /* negative cache? */
    if (pp->ki_udata == NOPROC)
        return (NULL);
    /* cached? */
    if (pp->ki_udata != NULL)
        return (pp->ki_udata);
    /*
     * Not cached,
     * 1) look up based on pid.
     * 2) compare process start.
     * If we fail here, then setup a negative cache entry, otherwise
     * cache it.
     */
    oldpp = bsearch(&pp, previous_pref, previous_proc_count,
        sizeof(*previous_pref), ps.thread ? compare_tid : compare_pid);
    if (oldpp == NULL) {
        pp->ki_udata = NOPROC;
        return (NULL);
    }
    oldp = *oldpp;
    if (memcmp(&oldp->ki_start, &pp->ki_start, sizeof(pp->ki_start)) != 0) {
        pp->ki_udata = NOPROC;
        return (NULL);
    }
    pp->ki_udata = __DECONST(void *, oldp);
    return (oldp);
}
/*
 * Return the total amount of IO done in blocks in/out and faults.
 * Store the values individually in the pointers passed in.
 */
static long
get_io_stats(const struct kinfo_proc *pp, long *inp, long *oup, long *flp,
    long *vcsw, long *ivcsw)
{
    const struct kinfo_proc *oldp;
    static struct kinfo_proc dummy;
    long ret;

    oldp = get_old_proc(__DECONST(struct kinfo_proc *, pp));
    if (oldp == NULL) {
        memset(&dummy, 0, sizeof(dummy));
        oldp = &dummy;
    }
    *inp = RU(pp)->ru_inblock - RU(oldp)->ru_inblock;
    *oup = RU(pp)->ru_oublock - RU(oldp)->ru_oublock;
    *flp = RU(pp)->ru_majflt - RU(oldp)->ru_majflt;
    *vcsw = RU(pp)->ru_nvcsw - RU(oldp)->ru_nvcsw;
    *ivcsw = RU(pp)->ru_nivcsw - RU(oldp)->ru_nivcsw;
    ret =
        (RU(pp)->ru_inblock - RU(oldp)->ru_inblock) +
        (RU(pp)->ru_oublock - RU(oldp)->ru_oublock) +
        (RU(pp)->ru_majflt - RU(oldp)->ru_majflt);

    return (ret);
}
/*
 * If there was a previous update, use the delta in ki_runtime over
 * the previous interval to calculate pctcpu.  Otherwise, fall back
 * to using the kernel's ki_pctcpu.
 */
static double
proc_calc_pctcpu(struct kinfo_proc *pp)
{
    const struct kinfo_proc *oldp;

    if (previous_interval != 0) {
        oldp = get_old_proc(pp);
        if (oldp != NULL)
            return ((double)(pp->ki_runtime - oldp->ki_runtime)
                / previous_interval);

        /*
         * If this process/thread was created during the previous
         * interval, charge its total runtime to the previous
         * interval.
         */
        else if (pp->ki_start.tv_sec > previous_wall_time.tv_sec ||
            (pp->ki_start.tv_sec == previous_wall_time.tv_sec &&
            pp->ki_start.tv_usec >= previous_wall_time.tv_usec))
            return ((double)pp->ki_runtime / previous_interval);
    }
    return (pctdouble(pp->ki_pctcpu));
}
/*
 * Return true if this process has used any CPU time since the
 * previous update.
 */
static int
proc_used_cpu(struct kinfo_proc *pp)
{
    const struct kinfo_proc *oldp;

    oldp = get_old_proc(pp);
    if (oldp == NULL)
        return (PCTCPU(pp) != 0);
    return (pp->ki_runtime != oldp->ki_runtime ||
        RU(pp)->ru_nvcsw != RU(oldp)->ru_nvcsw ||
        RU(pp)->ru_nivcsw != RU(oldp)->ru_nivcsw);
}

/*
 * Return the total number of block in/out and faults by a process.
 */
static long
get_io_total(const struct kinfo_proc *pp)
{
    long dummy;

    return (get_io_stats(pp, &dummy, &dummy, &dummy, &dummy, &dummy));
}
static struct handle handle;

void *
get_process_info(struct system_info *si, struct process_select *sel,
    int (*compare)(const void *, const void *))
{
    int i;
    int total_procs;
    long p_io;
    long p_inblock, p_oublock, p_majflt, p_vcsw, p_ivcsw;
    long nsec;
    int active_procs;
    struct kinfo_proc **prefp;
    struct kinfo_proc *pp;
    struct timespec previous_proc_uptime;

    /*
     * If thread state was toggled, don't cache the previous processes.
     */
    if (previous_thread != sel->thread)
        nproc = 0;
    previous_thread = sel->thread;

    /*
     * Save the previous process info.
     */
    if (previous_proc_count_max < nproc) {
        free(previous_procs);
        previous_procs = calloc(nproc, sizeof(*previous_procs));
        free(previous_pref);
        previous_pref = calloc(nproc, sizeof(*previous_pref));
        if (previous_procs == NULL || previous_pref == NULL) {
            fprintf(stderr, "top: Out of memory.\n");
            quit(TOP_EX_SYS_ERROR);
        }
        previous_proc_count_max = nproc;
    }
    if (nproc) {
        for (i = 0; i < nproc; i++)
            previous_pref[i] = &previous_procs[i];
        memcpy(previous_procs, pbase, nproc * sizeof(*previous_procs));
        qsort(previous_pref, nproc, sizeof(*previous_pref),
            ps.thread ? compare_tid : compare_pid);
    }
    previous_proc_count = nproc;
    previous_proc_uptime = proc_uptime;
    previous_wall_time = proc_wall_time;
    previous_interval = 0;

    pbase = kvm_getprocs(kd, sel->thread ? KERN_PROC_ALL : KERN_PROC_PROC,
        0, &nproc);
    gettimeofday(&proc_wall_time, NULL);
    if (clock_gettime(CLOCK_UPTIME, &proc_uptime) != 0)
        memset(&proc_uptime, 0, sizeof(proc_uptime));
    else if (previous_proc_uptime.tv_sec != 0 &&
        previous_proc_uptime.tv_nsec != 0) {
        previous_interval = (proc_uptime.tv_sec -
            previous_proc_uptime.tv_sec) * 1000000;
        nsec = proc_uptime.tv_nsec - previous_proc_uptime.tv_nsec;
        if (nsec < 0) {
            previous_interval -= 1000000;
            nsec += 1000000000;
        }
        previous_interval += nsec / 1000;
    }
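    /*
     * At this point previous_interval holds the time since the last
     * sample in microseconds: the seconds delta scaled by 1e6 plus the
     * nanosecond remainder divided by 1000, with a borrow when the
     * nanosecond delta goes negative.  That matches the units of
     * ki_runtime, which proc_calc_pctcpu() divides by this value.
     */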
    if (nproc > onproc) {
        pref = realloc(pref, sizeof(*pref) * nproc);
        pcpu = realloc(pcpu, sizeof(*pcpu) * nproc);
        onproc = nproc;
    }
    if (pref == NULL || pbase == NULL || pcpu == NULL) {
        fprintf(stderr, "top: Out of memory.\n");
        quit(TOP_EX_SYS_ERROR);
    }
    /* get a pointer to the states summary array */
    si->procstates = process_states;

    /* count up process states and get pointers to interesting procs */
    total_procs = 0;
    active_procs = 0;
    total_inblock = 0;
    total_oublock = 0;
    total_majflt = 0;
    memset(process_states, 0, sizeof(process_states));
    prefp = pref;
    for (pp = pbase, i = 0; i < nproc; pp++, i++) {

        if (pp->ki_stat == 0)
            /* not in use */
            continue;

        if (!sel->self && pp->ki_pid == mypid && sel->pid == -1)
            /* skip self */
            continue;

        if (!sel->system && (pp->ki_flag & P_SYSTEM) && sel->pid == -1)
            /* skip system process */
            continue;

        p_io = get_io_stats(pp, &p_inblock, &p_oublock, &p_majflt,
            &p_vcsw, &p_ivcsw);
        total_inblock += p_inblock;
        total_oublock += p_oublock;
        total_majflt += p_majflt;
        total_procs++;
        process_states[(unsigned char)pp->ki_stat]++;

        if (pp->ki_stat == SZOMB)
            /* skip zombies */
            continue;

        if (!sel->kidle && pp->ki_tdflags & TDF_IDLETD && sel->pid == -1)
            /* skip kernel idle process */
            continue;

        PCTCPU(pp) = proc_calc_pctcpu(pp);
        if (sel->thread && PCTCPU(pp) > 1.0)
            PCTCPU(pp) = 1.0;

        if (displaymode == DISP_CPU && !sel->idle &&
            (!proc_used_cpu(pp) ||
            pp->ki_stat == SSTOP || pp->ki_stat == SIDL))
            /* skip idle or non-running processes */
            continue;

        if (displaymode == DISP_IO && !sel->idle && p_io == 0)
            /* skip processes that aren't doing I/O */
            continue;

        if (sel->jid != -1 && pp->ki_jid != sel->jid)
            /* skip proc. that don't belong to the selected JID */
            continue;

        if (sel->uid[0] != -1 && !find_uid(pp->ki_ruid, sel->uid))
            /* skip proc. that don't belong to the selected UID */
            continue;

        if (sel->pid != -1 && pp->ki_pid != sel->pid)
            continue;

        *prefp++ = pp;
        active_procs++;
    }

    /* if requested, sort the "interesting" processes */
    if (compare != NULL)
        qsort(pref, active_procs, sizeof(*pref), compare);

    /* remember active and total counts */
    si->p_total = total_procs;
    si->p_pactive = pref_len = active_procs;

    /* pass back a handle */
    handle.next_proc = pref;
    handle.remaining = active_procs;
    return (&handle);
}
char *
format_next_process(struct handle *xhandle, char *(*get_userid)(int), int flags)
{
    struct kinfo_proc *pp;
    const struct kinfo_proc *oldp;
    long cputime;
    char status[22];
    size_t state;
    struct rusage ru, *rup;
    long p_tot, s_tot;
    char *cmdbuf = NULL;
    char **args;
    static struct sbuf *procbuf = NULL;

    /* clean up from last time. */
    if (procbuf != NULL) {
        sbuf_clear(procbuf);
    } else {
        procbuf = sbuf_new_auto();
    }

    /* find and remember the next proc structure */
    pp = *(xhandle->next_proc++);
    xhandle->remaining--;

    /* get the process's command name */
    if ((pp->ki_flag & P_INMEM) == 0) {
        /*
         * Print swapped processes as <pname>
         */
        size_t len;

        len = strlen(pp->ki_comm);
        if (len > sizeof(pp->ki_comm) - 3)
            len = sizeof(pp->ki_comm) - 3;
        memmove(pp->ki_comm + 1, pp->ki_comm, len);
        pp->ki_comm[0] = '<';
        pp->ki_comm[len + 1] = '>';
        pp->ki_comm[len + 2] = '\0';
    }

    /*
     * Convert the process's runtime from microseconds to seconds.  This
     * time includes the interrupt time although that is not wanted here.
     * ps(1) is similarly sloppy.
     */
    cputime = (pp->ki_runtime + 500000) / 1000000;

    /* generate "STATE" field */
    switch (state = pp->ki_stat) {
    case SRUN:
        if (smpmode && pp->ki_oncpu != NOCPU)
            sprintf(status, "CPU%d", pp->ki_oncpu);
        else
            strcpy(status, "RUN");
        break;
    case SLOCK:
        if (pp->ki_kiflag & KI_LOCKBLOCK) {
            sprintf(status, "*%.6s", pp->ki_lockname);
            break;
        }
        /* fall through */
    case SSLEEP:
        sprintf(status, "%.6s", pp->ki_wmesg);
        break;
    default:
        if (state < nitems(state_abbrev)) {
            sprintf(status, "%.6s", state_abbrev[state]);
        } else {
            sprintf(status, "?%5zu", state);
        }
        break;
    }
    cmdbuf = calloc(screen_width + 1, 1);
    if (cmdbuf == NULL) {
        warn("calloc(%d)", screen_width + 1);
        return NULL;
    }

    if (!(flags & FMT_SHOWARGS)) {
        if (ps.thread && pp->ki_flag & P_HADTHREADS &&
            pp->ki_tdname[0]) {
            snprintf(cmdbuf, screen_width, "%s{%s%s}", pp->ki_comm,
                pp->ki_tdname, pp->ki_moretdname);
        } else {
            snprintf(cmdbuf, screen_width, "%s", pp->ki_comm);
        }
    } else {
        if (pp->ki_flag & P_SYSTEM ||
            (args = kvm_getargv(kd, pp, screen_width)) == NULL ||
            !(*args)) {
            if (ps.thread && pp->ki_flag & P_HADTHREADS &&
                pp->ki_tdname[0]) {
                snprintf(cmdbuf, screen_width,
                    "[%s{%s%s}]", pp->ki_comm, pp->ki_tdname,
                    pp->ki_moretdname);
            } else {
                snprintf(cmdbuf, screen_width,
                    "[%s]", pp->ki_comm);
            }
        } else {
            const char *src;
            char *dst, *argbuf;
            const char *cmd;
            size_t argbuflen;
            size_t len;

            argbuflen = screen_width * 4;
            argbuf = calloc(argbuflen + 1, 1);
            if (argbuf == NULL) {
                warn("calloc(%zu)", argbuflen + 1);
                free(cmdbuf);
                return NULL;
            }

            dst = argbuf;

            /* Extract cmd name from argv */
            cmd = basename(*args);

            for (; (src = *args++) != NULL; ) {
                if (*src == '\0')
                    continue;
                len = (argbuflen - (dst - argbuf) - 1) / 4;
                strvisx(dst, src,
                    MIN(strlen(src), len),
                    VIS_NL | VIS_CSTYLE);
                while (*dst != '\0')
                    dst++;
                if ((argbuflen - (dst - argbuf) - 1) / 4 > 0)
                    *dst++ = ' '; /* add delimiting space */
            }
            if (dst != argbuf && dst[-1] == ' ')
                dst--;
            *dst = '\0';

            if (strcmp(cmd, pp->ki_comm) != 0) {
                if (ps.thread && pp->ki_flag & P_HADTHREADS &&
                    pp->ki_tdname[0])
                    snprintf(cmdbuf, screen_width,
                        "%s (%s){%s%s}", argbuf,
                        pp->ki_comm, pp->ki_tdname,
                        pp->ki_moretdname);
                else
                    snprintf(cmdbuf, screen_width,
                        "%s (%s)", argbuf, pp->ki_comm);
            } else {
                if (ps.thread && pp->ki_flag & P_HADTHREADS &&
                    pp->ki_tdname[0])
                    snprintf(cmdbuf, screen_width,
                        "%s{%s%s}", argbuf, pp->ki_tdname,
                        pp->ki_moretdname);
                else
                    strlcpy(cmdbuf, argbuf, screen_width);
            }
            free(argbuf);
        }
    }
    if (displaymode == DISP_IO) {
        oldp = get_old_proc(pp);
        if (oldp != NULL) {
            ru.ru_inblock = RU(pp)->ru_inblock -
                RU(oldp)->ru_inblock;
            ru.ru_oublock = RU(pp)->ru_oublock -
                RU(oldp)->ru_oublock;
            ru.ru_majflt = RU(pp)->ru_majflt - RU(oldp)->ru_majflt;
            ru.ru_nvcsw = RU(pp)->ru_nvcsw - RU(oldp)->ru_nvcsw;
            ru.ru_nivcsw = RU(pp)->ru_nivcsw - RU(oldp)->ru_nivcsw;
            rup = &ru;
        } else {
            rup = RU(pp);
        }
        p_tot = rup->ru_inblock + rup->ru_oublock + rup->ru_majflt;
        s_tot = total_inblock + total_oublock + total_majflt;

        sbuf_printf(procbuf, "%5d ", (ps.thread_id) ? pp->ki_tid : pp->ki_pid);
        if (ps.jail) {
            sbuf_printf(procbuf, "%*d ", TOP_JID_LEN - 1, pp->ki_jid);
        }
        sbuf_printf(procbuf, "%-*.*s", namelength, namelength,
            (*get_userid)(pp->ki_ruid));
        sbuf_printf(procbuf, "%6ld ", rup->ru_nvcsw);
        sbuf_printf(procbuf, "%6ld ", rup->ru_nivcsw);
        sbuf_printf(procbuf, "%6ld ", rup->ru_inblock);
        sbuf_printf(procbuf, "%6ld ", rup->ru_oublock);
        sbuf_printf(procbuf, "%6ld ", rup->ru_majflt);
        sbuf_printf(procbuf, "%6ld ", p_tot);
        sbuf_printf(procbuf, "%6.2f%% ",
            s_tot == 0 ? 0.0 : (p_tot * 100.0 / s_tot));
    } else {
        sbuf_printf(procbuf, "%5d ", (ps.thread_id) ? pp->ki_tid : pp->ki_pid);
        if (ps.jail) {
            sbuf_printf(procbuf, "%*d ", TOP_JID_LEN - 1, pp->ki_jid);
        }
        sbuf_printf(procbuf, "%-*.*s ", namelength, namelength,
            (*get_userid)(pp->ki_ruid));

        if (!ps.thread) {
            sbuf_printf(procbuf, "%4d ", pp->ki_numthreads);
        } else {
            sbuf_printf(procbuf, " ");
        }

        sbuf_printf(procbuf, "%3d ", pp->ki_pri.pri_level - PZERO);
        sbuf_printf(procbuf, "%4s", format_nice(pp));
        sbuf_printf(procbuf, "%7s ", format_k(PROCSIZE(pp)));
        sbuf_printf(procbuf, "%6s ", format_k(pagetok(pp->ki_rssize)));
        if (ps.swap) {
            sbuf_printf(procbuf, "%*s ",
                TOP_SWAP_LEN - 1,
                format_k(pagetok(ki_swap(pp))));
        }
        sbuf_printf(procbuf, "%-6.6s ", status);
        if (smpmode) {
            int cpu;

            if (state == SRUN && pp->ki_oncpu != NOCPU) {
                cpu = pp->ki_oncpu;
            } else {
                cpu = pp->ki_lastcpu;
            }
            sbuf_printf(procbuf, "%3d ", cpu);
        }
        sbuf_printf(procbuf, "%6s ", format_time(cputime));
        sbuf_printf(procbuf, "%6.2f%% ", ps.wcpu ?
            100.0 * weighted_cpu(PCTCPU(pp), pp) : 100.0 * PCTCPU(pp));
    }
    sbuf_printf(procbuf, "%s", printable(cmdbuf));
    free(cmdbuf);
    return (sbuf_data(procbuf));
}
static void
getsysctl(const char *name, void *ptr, size_t len)
{
    size_t nlen = len;

    if (sysctlbyname(name, ptr, &nlen, NULL, 0) == -1) {
        fprintf(stderr, "top: sysctl(%s...) failed: %s\n", name,
            strerror(errno));
        quit(TOP_EX_SYS_ERROR);
    }
    if (nlen != len) {
        fprintf(stderr, "top: sysctl(%s...) expected %lu, got %lu\n",
            name, (unsigned long)len, (unsigned long)nlen);
        quit(TOP_EX_SYS_ERROR);
    }
}
static const char *
format_nice(const struct kinfo_proc *pp)
{
    const char *fifo, *kproc;
    int rtpri;
    static char nicebuf[4 + 1];

    fifo = PRI_NEED_RR(pp->ki_pri.pri_class) ? "" : "F";
    kproc = (pp->ki_flag & P_KPROC) ? "k" : "";
    switch (PRI_BASE(pp->ki_pri.pri_class)) {
    case PRI_ITHD:
        return ("-");
    case PRI_REALTIME:
        /*
         * XXX: the kernel doesn't tell us the original rtprio and
         * doesn't really know what it was, so to recover it we
         * must be more chummy with the implementation than the
         * implementation is with itself.  pri_user gives a
         * constant "base" priority, but is only initialized
         * properly for user threads.  pri_native gives what the
         * kernel calls the "base" priority, but it isn't constant
         * since it is changed by priority propagation.  pri_native
         * also isn't properly initialized for all threads, but it
         * is properly initialized for kernel realtime and idletime
         * threads.  Thus we use pri_user for the base priority of
         * user threads (it is always correct) and pri_native for
         * the base priority of kernel realtime and idletime threads
         * (there is nothing better, and it is usually correct).
         *
         * The field width and thus the buffer are too small for
         * values like "kr31F", but such values shouldn't occur,
         * and if they do then the trailing "F" is not displayed.
         */
        rtpri = ((pp->ki_flag & P_KPROC) ? pp->ki_pri.pri_native :
            pp->ki_pri.pri_user) - PRI_MIN_REALTIME;
        snprintf(nicebuf, sizeof(nicebuf), "%sr%d%s",
            kproc, rtpri, fifo);
        break;
    case PRI_TIMESHARE:
        if (pp->ki_flag & P_KPROC)
            return ("-");
        snprintf(nicebuf, sizeof(nicebuf), "%d", pp->ki_nice - NZERO);
        break;
    case PRI_IDLE:
        /* XXX: as above. */
        rtpri = ((pp->ki_flag & P_KPROC) ? pp->ki_pri.pri_native :
            pp->ki_pri.pri_user) - PRI_MIN_IDLE;
        snprintf(nicebuf, sizeof(nicebuf), "%si%d%s",
            kproc, rtpri, fifo);
        break;
    default:
        return ("?");
    }
    return (nicebuf);
}
/* comparison routines for qsort */

static int
compare_pid(const void *p1, const void *p2)
{
    const struct kinfo_proc * const *pp1 = p1;
    const struct kinfo_proc * const *pp2 = p2;

    assert((*pp2)->ki_pid >= 0 && (*pp1)->ki_pid >= 0);

    return ((*pp1)->ki_pid - (*pp2)->ki_pid);
}

static int
compare_tid(const void *p1, const void *p2)
{
    const struct kinfo_proc * const *pp1 = p1;
    const struct kinfo_proc * const *pp2 = p2;

    assert((*pp2)->ki_tid >= 0 && (*pp1)->ki_tid >= 0);

    return ((*pp1)->ki_tid - (*pp2)->ki_tid);
}
/*
 * proc_compare - comparison function for "qsort"
 *	Compares the resource consumption of two processes using five
 *	distinct keys.  The keys (in descending order of importance) are:
 *	percent cpu, cpu ticks, state, resident set size, total virtual
 *	memory usage.  The process states are ordered as follows (from least
 *	to most important):  WAIT, zombie, sleep, stop, start, run.  The
 *	array declaration below maps a process state index into a number
 *	that reflects this ordering.
 */

static int sorted_state[] = {
    0,  /* not used		*/
    3,  /* sleep		*/
    1,  /* ABANDONED (WAIT)	*/
    6,  /* run			*/
    5,  /* start		*/
    2,  /* zombie		*/
    4   /* stop			*/
};

#define ORDERKEY_PCTCPU(a, b) do { \
    double diff; \
    if (ps.wcpu) \
        diff = weighted_cpu(PCTCPU((b)), (b)) - \
            weighted_cpu(PCTCPU((a)), (a)); \
    else \
        diff = PCTCPU((b)) - PCTCPU((a)); \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)

#define ORDERKEY_CPTICKS(a, b) do { \
    int64_t diff = (int64_t)(b)->ki_runtime - (int64_t)(a)->ki_runtime; \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)

#define ORDERKEY_STATE(a, b) do { \
    int diff = sorted_state[(unsigned char)(b)->ki_stat] - \
        sorted_state[(unsigned char)(a)->ki_stat]; \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)

#define ORDERKEY_PRIO(a, b) do { \
    int diff = (int)(b)->ki_pri.pri_level - (int)(a)->ki_pri.pri_level; \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)

#define ORDERKEY_THREADS(a, b) do { \
    int diff = (int)(b)->ki_numthreads - (int)(a)->ki_numthreads; \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)

#define ORDERKEY_RSSIZE(a, b) do { \
    long diff = (long)(b)->ki_rssize - (long)(a)->ki_rssize; \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)

#define ORDERKEY_MEM(a, b) do { \
    long diff = (long)PROCSIZE((b)) - (long)PROCSIZE((a)); \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)

#define ORDERKEY_JID(a, b) do { \
    int diff = (int)(b)->ki_jid - (int)(a)->ki_jid; \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)

#define ORDERKEY_SWAP(a, b) do { \
    int diff = (int)ki_swap(b) - (int)ki_swap(a); \
    if (diff != 0) \
        return (diff > 0 ? 1 : -1); \
} while (0)
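/*
 * Each ORDERKEY_* macro compares a single key and returns from the
 * enclosing comparison function as soon as that key differs; the
 * compare_* routines below simply chain the macros, so ties on the
 * primary key fall through to the secondary keys and fully equal
 * entries compare as 0.
 */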
/* compare_cpu - the comparison function for sorting by cpu percentage */

static int
compare_cpu(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;

    ORDERKEY_PCTCPU(p1, p2);
    ORDERKEY_CPTICKS(p1, p2);
    ORDERKEY_STATE(p1, p2);
    ORDERKEY_PRIO(p1, p2);
    ORDERKEY_RSSIZE(p1, p2);
    ORDERKEY_MEM(p1, p2);

    return (0);
}

/* compare_size - the comparison function for sorting by total memory usage */

static int
compare_size(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;

    ORDERKEY_MEM(p1, p2);
    ORDERKEY_RSSIZE(p1, p2);
    ORDERKEY_PCTCPU(p1, p2);
    ORDERKEY_CPTICKS(p1, p2);
    ORDERKEY_STATE(p1, p2);
    ORDERKEY_PRIO(p1, p2);

    return (0);
}

/* compare_res - the comparison function for sorting by resident set size */

static int
compare_res(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;

    ORDERKEY_RSSIZE(p1, p2);
    ORDERKEY_MEM(p1, p2);
    ORDERKEY_PCTCPU(p1, p2);
    ORDERKEY_CPTICKS(p1, p2);
    ORDERKEY_STATE(p1, p2);
    ORDERKEY_PRIO(p1, p2);

    return (0);
}

/* compare_time - the comparison function for sorting by total cpu time */

static int
compare_time(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;

    ORDERKEY_CPTICKS(p1, p2);
    ORDERKEY_PCTCPU(p1, p2);
    ORDERKEY_STATE(p1, p2);
    ORDERKEY_PRIO(p1, p2);
    ORDERKEY_RSSIZE(p1, p2);
    ORDERKEY_MEM(p1, p2);

    return (0);
}

/* compare_prio - the comparison function for sorting by priority */

static int
compare_prio(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;

    ORDERKEY_PRIO(p1, p2);
    ORDERKEY_CPTICKS(p1, p2);
    ORDERKEY_PCTCPU(p1, p2);
    ORDERKEY_STATE(p1, p2);
    ORDERKEY_RSSIZE(p1, p2);
    ORDERKEY_MEM(p1, p2);

    return (0);
}

/* compare_threads - the comparison function for sorting by threads */

static int
compare_threads(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;

    ORDERKEY_THREADS(p1, p2);
    ORDERKEY_PCTCPU(p1, p2);
    ORDERKEY_CPTICKS(p1, p2);
    ORDERKEY_STATE(p1, p2);
    ORDERKEY_PRIO(p1, p2);
    ORDERKEY_RSSIZE(p1, p2);
    ORDERKEY_MEM(p1, p2);

    return (0);
}

/* compare_jid - the comparison function for sorting by jid */

static int
compare_jid(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;

    ORDERKEY_JID(p1, p2);
    ORDERKEY_PCTCPU(p1, p2);
    ORDERKEY_CPTICKS(p1, p2);
    ORDERKEY_STATE(p1, p2);
    ORDERKEY_PRIO(p1, p2);
    ORDERKEY_RSSIZE(p1, p2);
    ORDERKEY_MEM(p1, p2);

    return (0);
}

/* compare_swap - the comparison function for sorting by swap */

static int
compare_swap(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;

    ORDERKEY_SWAP(p1, p2);
    ORDERKEY_PCTCPU(p1, p2);
    ORDERKEY_CPTICKS(p1, p2);
    ORDERKEY_STATE(p1, p2);
    ORDERKEY_PRIO(p1, p2);
    ORDERKEY_RSSIZE(p1, p2);
    ORDERKEY_MEM(p1, p2);

    return (0);
}
/* assorted comparison functions for sorting by i/o */

static int
compare_iototal(const void *arg1, const void *arg2)
{
    const struct kinfo_proc * const p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc * const p2 = *(const struct kinfo_proc * const *)arg2;

    return (get_io_total(p2) - get_io_total(p1));
}

static int
compare_ioread(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;
    long dummy, inp1, inp2;

    (void) get_io_stats(p1, &inp1, &dummy, &dummy, &dummy, &dummy);
    (void) get_io_stats(p2, &inp2, &dummy, &dummy, &dummy, &dummy);

    return (inp2 - inp1);
}

static int
compare_iowrite(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;
    long dummy, oup1, oup2;

    (void) get_io_stats(p1, &dummy, &oup1, &dummy, &dummy, &dummy);
    (void) get_io_stats(p2, &dummy, &oup2, &dummy, &dummy, &dummy);

    return (oup2 - oup1);
}

static int
compare_iofault(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;
    long dummy, flp1, flp2;

    (void) get_io_stats(p1, &dummy, &dummy, &flp1, &dummy, &dummy);
    (void) get_io_stats(p2, &dummy, &dummy, &flp2, &dummy, &dummy);

    return (flp2 - flp1);
}

static int
compare_vcsw(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;
    long dummy, flp1, flp2;

    (void) get_io_stats(p1, &dummy, &dummy, &dummy, &flp1, &dummy);
    (void) get_io_stats(p2, &dummy, &dummy, &dummy, &flp2, &dummy);

    return (flp2 - flp1);
}

static int
compare_ivcsw(const void *arg1, const void *arg2)
{
    const struct kinfo_proc *p1 = *(const struct kinfo_proc * const *)arg1;
    const struct kinfo_proc *p2 = *(const struct kinfo_proc * const *)arg2;
    long dummy, flp1, flp2;

    (void) get_io_stats(p1, &dummy, &dummy, &dummy, &dummy, &flp1);
    (void) get_io_stats(p2, &dummy, &dummy, &dummy, &dummy, &flp2);

    return (flp2 - flp1);
}
int (*compares[])(const void *arg1, const void *arg2) = {
    compare_cpu,
    compare_size,
    compare_res,
    compare_time,
    compare_prio,
    compare_threads,
    compare_iototal,
    compare_ioread,
    compare_iowrite,
    compare_iofault,
    compare_vcsw,
    compare_ivcsw,
    compare_jid,
    compare_swap,
    NULL
};
static int
swapmode(int *retavail, int *retfree)
{
    int n;
    struct kvm_swap swapary[1];
    static int pagesize = 0;
    static unsigned long swap_maxpages = 0;

    *retavail = 0;
    *retfree = 0;

#define CONVERT(v) ((quad_t)(v) * pagesize / 1024)

    n = kvm_getswapinfo(kd, swapary, 1, 0);
    if (n < 0 || swapary[0].ksw_total == 0)
        return (0);

    if (pagesize == 0)
        pagesize = getpagesize();
    if (swap_maxpages == 0)
        GETSYSCTL("vm.swap_maxpages", swap_maxpages);
    /* ksw_total contains the total size of swap across all devices, which
       may exceed the maximum swap size allocatable in the system */
    if (swapary[0].ksw_total > swap_maxpages)
        swapary[0].ksw_total = swap_maxpages;

    *retavail = CONVERT(swapary[0].ksw_total);
    *retfree = CONVERT(swapary[0].ksw_total - swapary[0].ksw_used);

#undef CONVERT

    n = (int)(swapary[0].ksw_used * 100.0 / swapary[0].ksw_total);
    return (n);
}