Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c	(revision 271156)
+++ sys/vm/vm_pageout.c	(working copy)
@@ -76,6 +76,7 @@ __FBSDID("$FreeBSD$");
 
 #include "opt_vm.h"
+#include "opt_kdtrace.h"
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -89,6 +90,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/racct.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
+#include <sys/sdt.h>
 #include <sys/signalvar.h>
 #include <sys/smp.h>
 #include <sys/vnode.h>
@@ -133,6 +135,10 @@ static struct kproc_desc page_kp = {
 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
     &page_kp);
 
+SDT_PROVIDER_DEFINE(vm);
+SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
+SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
+
 #if !defined(NO_SWAPPING)
 /* the kernel process "vm_daemon"*/
 static void vm_daemon(void);
@@ -667,6 +673,7 @@ vm_pageout_grow_cache(int tries, vm_paddr_t low, v
 	 * may acquire locks and/or sleep, so they can only be invoked
 	 * when "tries" is greater than zero.
 	 */
+	SDT_PROBE0(vm, , , vm__lowmem_cache);
 	EVENTHANDLER_INVOKE(vm_lowmem, 0);
 
 	/*
@@ -899,7 +906,7 @@ vm_pageout_map_deactivate_pages(map, desired)
  *	pass 1 - Move inactive to cache or free
  *	pass 2 - Launder dirty pages
  */
-static void
+static void __used
 vm_pageout_scan(struct vm_domain *vmd, int pass)
 {
 	vm_page_t m, next;
@@ -920,6 +927,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 	/*
 	 * Decrease registered cache sizes.
 	 */
+	SDT_PROBE0(vm, , , vm__lowmem_scan);
 	EVENTHANDLER_INVOKE(vm_lowmem, 0);
 	/*
 	 * We do this explicitly after the caches have been
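
As a rough sketch of how the new probes could be exercised once the patch is applied (assuming a kernel built with KDTRACE_HOOKS and the DTrace/sdt modules loaded; the script name and aggregation variable below are purely illustrative, not part of the change), a small D script can tally how often each probe under the new vm provider fires while the system is under memory pressure:

/* vm_lowmem.d: count firings of each probe under the vm SDT provider. */
/* Run with: dtrace -s vm_lowmem.d, then apply memory pressure. */

vm:::
{
	/* Aggregate by probe name so the two vm_lowmem call sites stay distinct. */
	@events[probename] = count();
}

Because the grow_cache and pageout_scan call sites now fire differently named probes, probename alone is enough to tell which path invoked the vm_lowmem event handlers, which is the point of defining two probes instead of one.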