//
// Orcallator.se, a log-generating performance monitor.
//
// This program logs many different system quantities to a log file
// for later processing.
//
// Author: Blair Zajac.
//
// Portions copied from percollator.se written by Adrian Cockcroft.
//
// Version 1.12: Sep 14, 1999 Add the page scan rate as scanrate in
//                            measure_cpu.
// Version 1.11: Aug 13, 1999 Add the number of CPUs as ncpus.  Move
//                            measure_disk and measure_ram sooner in the
//                            list of subsystems to handle.  Increase the
//                            number of characters for each network
//                            interface from four to five.  Add new disk
//                            reads, writes, Kbytes read, and Kbytes
//                            written per second.  Add number of bytes
//                            of free memory in bytes as freememK.
// Version 1.10: Jul 28, 1999 Measure the process spawn rate if WATCH_CPU
//                            is defined and the user is root.
// Version 1.9:  Jun 2, 1999  If WATCH_YAHOO is defined, then process the
//                            access log as a Yahoo! style access log.
//                            Restructure the code to handle different
//                            web server access log formats.
// Version 1.8:  Jun 1, 1999  If the environment variable WEB_SERVER is
//                            defined, use its value as the name of the
//                            process to count for the number of web
//                            servers on the system.  If WEB_SERVER is
//                            not defined, then count number of httpd's.
// Version 1.7:  Mar 25, 1999 Simplify and speed up count_proc by 20%.
// Version 1.6:  Feb 23, 1999 Print pvm.user_time and system_time correctly.
// Version 1.5:  Feb 23, 1999 Always write header to a new file.
// Version 1.4:  Feb 19, 1999 Handle missing HTTP/1.x in access log.
// Version 1.3:  Feb 18, 1999 On busy machines httpops5 will be enlarged.
// Version 1.2:  Feb 18, 1999 Output data on integral multiples of interval.
// Version 1.1:  Feb 18, 1999 Integrate Squid log processing from SE 3.1.
// Version 1.0:  Sep 9, 1998  Initial version.
//

// The default sampling interval in seconds.
#define SAMPLE_INTERVAL 300

// The maximum number of columns of data.
#define MAX_COLUMNS 512

// Define the different parts of the system you want to examine.
#ifdef WATCH_OS
#define WATCH_CPU   1
#define WATCH_MUTEX 1
#define WATCH_NET   1
#define WATCH_TCP   1
#define WATCH_NFS   1
#define WATCH_DISK  1
#define WATCH_DNLC  1
#define WATCH_INODE 1
#define WATCH_RAM   1
#define WATCH_PAGES 1
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if WATCH_CPU || WATCH_WEB
#include

#ifdef WATCH_CPU
// This is the maximum pid on Solaris hosts.
#define DEFAULT_MAXPID 30000
#include
#endif

#ifdef WATCH_WEB
#include

// Define this macro which returns the size index for a file of a
// particular size.  This saves the overhead of a function call.
#define WWW_SIZE_INDEX(size, size_index) \
  if (size < 1024) { \
    size_index=0;               /* under 1KB */ \
  } else { \
    if (size < 10240) { \
      size_index=1;             /* under 10K */ \
    } else { \
      if (size < 102400) { \
        size_index=2;           /* under 100KB */ \
      } else { \
        if (size < 1048576) { \
          size_index=3;         /* under 1MB */ \
        } else { \
          size_index=4;         /* over 1MB */ \
        } \
      } \
    } \
  } \
  dwnld_size[size_index]++;

// Handle the reply code from the server.
#define WWW_REPLY_CODE(word) \
  if (word != nil) { \
    if (word == "304") { \
      httpop_condgets++; \
    } \
    else { \
      first_byte = word; \
      if (first_byte[0] == '4' || first_byte[0] == '5') { \
        httpop_errors++; \
      } \
    } \
  }
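// Illustrative usage sketch (not part of the original program logic): the
// two macros above are meant to be invoked from the access-log parser with
// a byte count and a reply-code string pulled off a log line.  The values
// below are hypothetical.
//
//   int  size_index;
//   long bytes = 5000;                 // example size taken from a log line
//
//   WWW_SIZE_INDEX(bytes, size_index)  // size_index becomes 1 ("under 10K")
//                                      // and dwnld_size[1] is incremented
//   WWW_REPLY_CODE("304")              // counts a conditional get
//   WWW_REPLY_CODE("404")              // first byte is '4', so counts an error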
// Handle the method of the object served.  This define only works with
// non-proxy servers.
#define WWW_METHOD1(word) \
  if (word != nil) { \
    switch (word) { \
      case "get": \
      case "GET": \
        httpop_gets++; \
        break; \
      case "post": \
      case "POST": \
        httpop_posts++; \
        break; \
      case "head": \
      case "HEAD": \
        ishead = 1; \
        httpop_condgets++; \
        break;

#ifdef WATCH_SQUID
#define WWW_METHOD2 \
      case "icp_query": \
      case "ICP_QUERY": \
        squid_icp_queries++; \
        break;
#else
#define WWW_METHOD2
#endif

#define WWW_METHOD_END \
      default: \
        break; \
    } \
  }

#define WWW_METHOD(word) WWW_METHOD1(word) WWW_METHOD2 WWW_METHOD_END
#endif
#endif

// Put all rules here so they can be accessed by the handle functions.
lr_cpu_t        lr_cpu$cpu;
lr_cpu_t        tmp_lrcpu;
lr_mutex_t      lr_mutex$m;
lr_mutex_t      tmp_mutex;
lr_net_t        lr_net$nr;
lr_net_t        tmp_nr;
lr_tcp_t        lr_tcp$tcp;
lr_tcp_t        tmp_lrtcp;
#ifdef WATCH_TCP
tcp             tcp$tcp;
tcp             tmp_tcp;
#endif
lr_rpcclient_t  lr_rpcclient$r;
lr_rpcclient_t  tmp_lrpcc;
lr_disk_t       lr_disk$dr;
lr_disk_t       tmp_dr;
lr_dnlc_t       lr_dnlc$dnlc;
lr_dnlc_t       tmp_lrdnlc;
lr_inode_t      lr_inode$inode;
lr_inode_t      tmp_lrinode;
lr_ram_t        lr_ram$ram;
lr_ram_t        tmp_lrram;
#ifdef WATCH_PAGES
ks_system_pages kstat$pages;
ks_system_pages tmp_kstat_pages;
#endif
lr_swapspace_t  lr_swapspace$s;
lr_swapspace_t  tmp_lrswap;
lr_kmem_t       lr_kmem$kmem;
lr_kmem_t       tmp_lrkmem;
ks_system_misc  kstat$misc;
ks_system_misc  tmp_kstat_misc;

// Put application globals here.
string nodename;                        // Name of this machine.
string program_name;                    // Name of this program.
int    hz;                              // Clock tick rate.
int    page_size;                       // Page size in bytes.
long   boot_time;                       // Boot time of the system.
long   interval = SAMPLE_INTERVAL;      // Sampling interval.

#ifdef WATCH_CPU
int    can_read_kernel = 0;             // If the kernel can be read.
long   kvm$mpid;                        // The last created PID.

// These variables store the mpid before and after the standard interval.
long      mpid_previous;
long      mpid_current;
ulonglong mpid_then;
ulonglong mpid_now;

// These variables store the mpid before and after 5 second intervals.
long      mpid5_previous;
long      mpid5_current;
ulonglong mpid5_then;
ulonglong mpid5_now;
double    mpid5_rate;
#endif

// Variables for handling the httpd access log.
#ifdef WATCH_WEB
string www_server_proc_name = getenv("WEB_SERVER");
string search_url           = getenv("SEARCHURL");
string www_log_filename     = getenv("WEB_LOG");
string www_gateway          = getenv("GATEWAY");
ulong  www_fd;
uint   www_gatelen;
stat_t www_stat[1];
ulong  www_ino;
long   www_size;
double www_interval;            // Hi-res interval time.
ulonglong www_then;
ulonglong www_now;
double www5_interval;           // Actual hi-res 5 second interval.
ulonglong www5_then;
ulonglong www5_now;
double httpops;
double httpops5;
double gateops;
double dtmp;
long   httpop_gets;
long   httpop_condgets;         // HEAD or code = 304 conditional get no data.
long   httpop_posts;
long   httpop_cgi_bins;
long   httpop_searches;
long   httpop_errors;
long   dwnld_size[5];           // [0] < 1K, [1] < 10K, [2] < 100K, [3] < 1M, [4] >= 1M
long   dwnld_totalz;            // total size counted from log

#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
// If we're watching a Yahoo log, then take the transfer time to be the
// processing time.
double www_dwnld_time_sum;              // transfer time
double www_dwnld_time_by_size[5];       // mean transfer time by size bin
#endif

#if WATCH_PROXY || WATCH_SQUID
long prxy_squid_indirect;       // number of hits that go via PROXY,SOCKS,parent
long prxy_squid_cache_hits;     // number of hits returned from cache
#endif

#ifdef WATCH_PROXY
long prxy_cache_writes;         // number of writes and updates to cache
long prxy_uncacheable;          // number of explicitly uncacheable httpops
                                // any extra is errors or incomplete ops
#endif

#ifdef WATCH_SQUID
long squid_cache_misses;
long squid_icp_requests;
long squid_icp_queries;
long squid_client_http;
#endif
#endif

// Variables for handling output.
string col_comment[MAX_COLUMNS];        // Comments for each column.
string col_data[MAX_COLUMNS];           // Data for each column.
int    current_column;                  // The current column.

// Reset the output data.
reset_output()
{
  current_column = 0;
}

// Add one column of comments and data to the buffers.
put_output(string comment, string data)
{
  if (current_column >= MAX_COLUMNS) {
    fprintf(stderr, "%s: too many columns (%d). Increase MAX_COLUMNS.\n",
            program_name, current_column);
    exit(1);
  }
  col_comment[current_column] = comment;
  col_data[current_column]    = data;
  ++current_column;
}

print_columns(ulong fd, string data[])
{
  int i;

  for (i=0; i

      = mpid5_previous) {
        mpid5_diff = mpid5_current - mpid5_previous;
      } else {
        mpid5_diff = mpid5_current + DEFAULT_MAXPID - mpid5_previous;
      }
      rate = mpid5_diff/mpid5_interval;
      if (rate > mpid5_rate) {
        mpid5_rate = rate;
      }
      mpid5_previous = mpid5_current;

      // Now take these results to measure the long interval rate.
      // Because the mpid may flip over DEFAULT_MAXPID more than once
      // in the long interval time span, use the difference between
      // the previous and current mpid over a 5 second interval to
      // calculate the long interval difference.
      mpid_current += mpid5_diff;
      mpid_now = mpid5_now;
    } else {
      sleep(sleep_till - now);
    }
#else
    sleep(sleep_till - now);
#endif
    now = time(0);
  }
}

#ifdef WATCH_CPU
measure_cpu()
{
  p_vmstat pvm;
  double   mpid_interval;
  double   mpid_rate;

  pvm = vmglobal_total();

  // In SE 3.0 user_time and system_time are int and in SE 3.1 they are
  // double, so cast everything to double using + 0.0.
  put_output(" usr%", sprintf("%5.1f", pvm.user_time + 0.0));
  put_output(" sys%", sprintf("%5.1f", pvm.system_time + 0.0));
  put_output(" 1runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_1min/256.0));
  put_output(" 5runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_5min/256.0));
  put_output("15runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_15min/256.0));
  put_output("#proc", sprintf("%5lu", tmp_kstat_misc.nproc));
  put_output("scanrate", sprintf("%8.3f", pvm.scan));

  // Calculate the rate of new process spawning.
  if (can_read_kernel != 0) {
    mpid_interval = (mpid_now - mpid_then) * 0.000000001;
    mpid_rate = (mpid_current - mpid_previous) / mpid_interval;
    put_output("#proc/s", sprintf("%7.3f", mpid_rate));
    put_output("#proc/p5s", sprintf("%9.4f", mpid5_rate));

    // Reset counters.
    mpid_then = mpid_now;
    mpid_previous = mpid_current;
    mpid5_rate = 0;
  }
}
#endif

#ifdef WATCH_MUTEX
measure_mutex()
{
  put_output(" smtx", sprintf("%5d", tmp_mutex.smtx));
  put_output("smtx/cpu", sprintf("%8d", tmp_mutex.smtx/tmp_mutex.ncpus));
  put_output("ncpus", sprintf("%5d", tmp_mutex.ncpus));
}
#endif

#ifdef WATCH_NET
measure_net()
{
  int i;

  for (i=0; i

    peak_disk_busy) {
      peak_disk_busy = GLOBAL_disk[i].run_percent;
    }
  }
  mean_disk_busy = mean_disk_busy/GLOBAL_disk[0].disk_count;
  put_output("disk_peak", sprintf("%9.3f", peak_disk_busy));
  put_output("disk_mean", sprintf("%9.3f", mean_disk_busy));
  put_output("disk_rd/s", sprintf("%9.1f", total_reads));
  put_output("disk_wr/s", sprintf("%9.1f", total_writes));
  put_output("disk_rK/s", sprintf("%9.1f", total_readk));
  put_output("disk_wK/s", sprintf("%9.1f", total_writek));
}
#endif

#ifdef WATCH_DNLC
measure_dnlc()
{
  put_output("dnlc_ref/s", sprintf("%10.3f", tmp_lrdnlc.refrate));
  put_output("dnlc_hit%", sprintf("%9.3f", tmp_lrdnlc.hitrate));
}
#endif

#ifdef WATCH_INODE
measure_inode()
{
  put_output("inod_ref/s", sprintf("%10.3f", tmp_lrinode.refrate));
  put_output("inod_hit%", sprintf("%9.3f", tmp_lrinode.hitrate));
  put_output("inod_stl/s", sprintf("%10.3f", tmp_lrinode.iprate));
}
#endif

#ifdef WATCH_RAM
measure_ram()
{
  put_output("swap_avail", sprintf("%10ld", GLOBAL_pvm[0].swap_avail));
  put_output("page_rstim", sprintf("%10d", tmp_lrram.restime));
  put_output(" freememK", sprintf("%10d", GLOBAL_pvm[0].freemem));
  put_output("free_pages", sprintf("%10d", (GLOBAL_pvm[0].freemem*1024)/page_size));
}
#endif

#ifdef WATCH_PAGES
measure_pages()
{
  put_output("pp_kernel", sprintf("%9lu", tmp_kstat_pages.pp_kernel));
  put_output("pagesfree", sprintf("%9lu", tmp_kstat_pages.pagesfree));
  put_output("pageslock", sprintf("%9lu", tmp_kstat_pages.pageslocked));
  put_output("pagesio", sprintf("%7lu", tmp_kstat_pages.pagesio));
  put_output("pagestotl", sprintf("%9lu", tmp_kstat_pages.pagestotal));
}
#endif

#ifdef WATCH_WEB
// Breakdown access log format.
accesslog(string buf)
{
  int    z;
  int    size_index;
  int    ishead;
  string word;
  char   first_byte[1];
#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
  double xf;
#ifdef WATCH_SQUID
  string logtag;
  string request;
#endif
#ifdef WATCH_YAHOO
  string arg;
  ulong  ptr;
  ulong  tmp;
  ulong  ulong_xf;
#endif
#endif

  ishead = 0;

#ifdef WATCH_YAHOO
  // Make sure that the input line has at least 32 bytes of data plus a new
  // line, for a total length of 33.
  if (strlen(buf) < 33) {
    return;
  }
  word = strtok(buf,"\05");
#else
  word = strtok(buf," ");
#endif
  if (word == nil) {
    return;
  }

#ifdef WATCH_SQUID
  // Word contains unix time in seconds.milliseconds.
  word = strtok(nil, " ");      // Elapsed request time in ms
  xf = atof(word)/1000.0;
  www_dwnld_time_sum += xf;
#ifdef DINKY
  printf("time: %s %f total %f\n", word, xf, xfer_sum);
#endif
  word = strtok(nil, " ");      // Client IP address
  logtag = strtok(nil, "/");    // Log tag
  word = strtok(nil, " ");      // Reply code
  WWW_REPLY_CODE(word)
  word = strtok(nil, " ");      // Size sent to client
  z = atoi(word);
  request = strtok(nil, " ");   // Request method
  word = strtok(nil, " ");      // URL
  if (word != nil) {
    if (word =~ "cgi-bin") {
      httpop_cgi_bins++;
    }
    if (word =~ search_url) {
      httpop_searches++;
    }
  }
  word = strtok(nil, " ");      // Optional user ident
  word = strtok(nil, "/");      // Hierarchy
  if (word != nil) {
    if (word =~ "DIRECT") {
      prxy_squid_indirect++;
    }
  }
  word = strtok(nil, " ");      // Hostname
  word = strtok(nil, " ");      // Content-type

  // Process the collected data.
  if (logtag =~ "TCP") {
    squid_client_http++;
  }
  if (logtag =~ "UDP") {
    squid_icp_requests++;
  }
  if (logtag =~ "HIT") {
    prxy_squid_cache_hits++;
  }
  if (logtag =~ "MISS") {
    squid_cache_misses++;
  }

  WWW_METHOD(request)

  // Do not add size if it's a HEAD.
  if (ishead == 0) {
    dwnld_totalz += z;
  }

  WWW_SIZE_INDEX(z, size_index)
  www_dwnld_time_by_size[size_index] += xf;

#elif WATCH_YAHOO
  // Yahoo log format.  Fields in square brackets will only appear in the
  // log file if the data actually exists (i.e. you will never see a null
  // Referrer field).  Further, fields labelled here with "(CONFIG)" will
  // only appear if they are enabled via the YahooLogOptions configuration
  // directive.
  //
  //   IP Address      (8 hex digits)
  //   Timestamp       (time_t as 8 hex digits)
  //   Processing Time (in microseconds, as 8 hex digits)
  //   Bytes Sent      (8 hex digits)
  //   URL
  //   [^Er referrer]     (CONFIG)
  //   [^Em method]       (CONFIG)
  //   [^Es status_code]
  //   ^Ed signature
  //   \n
  //
  // Ignore the IP address and timestamp.  Get the processing time, the
  // number of bytes sent and the URL.  For each portion of the line, split
  // it up into separate pieces.
  if (sscanf(word, "%8lx%8lx%8x%8x", &tmp, &tmp, &ulong_xf, &z) != 4) {
    return;
  }
  xf = ulong_xf/1000000.0;

  WWW_SIZE_INDEX(z, size_index)
  www_dwnld_time_sum += xf;
  www_dwnld_time_by_size[size_index] += xf;

  if (word =~ "cgi-bin") {
    httpop_cgi_bins++;
  }
  if (word =~ search_url) {
    httpop_searches++;
  }

  for (;;) {
    word = strtok(nil, "\05");
    if (word == nil) {
      break;
    }
    first_byte = word;
    ptr = &word + 1;
    arg = ((string) ptr);
    ptr = 0;
    switch (first_byte[0]) {
      case 'm':
        WWW_METHOD(arg)
        ptr = 1;
        break;
      case 's':
        WWW_REPLY_CODE(arg)
        break;
      default:
        break;
    }
  }

  // If no method was seen, then assume it was a GET.
  if (ptr == 0) {
    httpop_gets++;
  }

  // Do not add size if it's a HEAD.
  if (ishead == 0) {
    dwnld_totalz += z;
  }

#else /* common or netscape proxy formats */
  strtok(nil, " ");             // -
  strtok(nil, " ");             // -
  strtok(nil, " [");            // date
  strtok(nil, " ");             // zone]
  word = strtok(nil, " \"");    // GET or POST
  WWW_METHOD(word)
  word = strtok(nil, " ");      // URL
  if (word != nil) {
    if (word =~ "cgi-bin") {
      httpop_cgi_bins++;
    }
    if (word =~ search_url) {
      httpop_searches++;
    }
  }

  // Sometimes HTTP/1.x is not listed in the access log.  Skip over it
  // if it does exist.  Load the error/success code.
  word = strtok(nil, " ");
  if (word != nil && (word =~ "HTTP" || word =~ "http")) {
    word = strtok(nil, " ");
  }
  WWW_REPLY_CODE(word)

  word = strtok(nil, " ");      // Bytes transferred.
  if (word != nil) {
    z = atoi(word);
    if (ishead == 0) {          // Do not add size if it's a HEAD.
      dwnld_totalz += z;
    }
    WWW_SIZE_INDEX(z, size_index)
  }

#ifdef WATCH_PROXY
  word = strtok(nil, " ");      // status from server
  word = strtok(nil, " ");      // length from server
  word = strtok(nil, " ");      // length from client POST
  word = strtok(nil, " ");      // length POSTed to remote
  word = strtok(nil, " ");      // client header req
  word = strtok(nil, " ");      // proxy header resp
  word = strtok(nil, " ");      // proxy header req
  word = strtok(nil, " ");      // server header resp
  word = strtok(nil, " ");      // transfer total secs
  word = strtok(nil, " ");      // route
  // - DIRECT PROXY(host.domain:port) SOCKS
  if (word != nil) {
    if (strncmp(word, "PROXY", 5) == 0 || strncmp(word, "SOCKS", 5) == 0) {
      prxy_squid_indirect++;
    }
  }
  word = strtok(nil, " ");      // client finish status
  word = strtok(nil, " ");      // server finish status
  word = strtok(nil, " ");      // cache finish status
  // ERROR HOST-NOT-AVAILABLE                              = error or incomplete op
  // WRITTEN REFRESHED CL-MISMATCH(content length mismatch) = cache_writes
  // NO-CHECK UP-TO-DATE                                   = cache_hits
  // DO-NOT-CACHE NON-CACHEABLE                            = uncacheable
  if (word != nil) {
    switch(word) {
      case "WRITTEN":
      case "REFRESHED":
      case "CL-MISMATCH":
        prxy_cache_writes++;
        break;
      case "NO-CHECK":
      case "UP-TO-DATE":
        prxy_squid_cache_hits++;
        break;
      case "DO-NOT-CACHE":
      case "NON-CACHEABLE":
        prxy_uncacheable++;
        break;
      default:
        break;
    }
  }
  word = strtok(nil, " [");     // [transfer total time x.xxx
  if (word != nil) {
    xf = atof(word);
    www_dwnld_time_sum += xf;
    www_dwnld_time_by_size[size_index] += xf;
  }
#endif
#endif
}
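// A hypothetical common-log-format line, shown only to illustrate how
// accesslog() above tokenizes its input (the host, date, and URL are made
// up, not taken from any real log):
//
//   client.example.com - - [14/Sep/1999:12:00:00 -0700] "GET /cgi-bin/search?q=disk HTTP/1.0" 200 5120
//
// The strtok() calls peel off the two "-" fields, the date and time zone,
// the method (counted via WWW_METHOD), the URL (matched against "cgi-bin"
// and the SEARCHURL pattern), the optional HTTP/1.x token, the reply code
// (handled by WWW_REPLY_CODE), and the byte count (binned by WWW_SIZE_INDEX).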
measure_web(long sleep_till)
{
  double lastops = 0.0;
  char   buf[BUFSIZ];
  int    i;
  long   now;

  httpops  = 0.0;
  httpops5 = 0.0;
  gateops  = 0.0;
  httpop_gets     = 0;
  httpop_condgets = 0;
  httpop_posts    = 0;
  httpop_cgi_bins = 0;
  httpop_errors   = 0;
  httpop_searches = 0;
  for (i=0; i<5; i++) {
    dwnld_size[i] = 0;
#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
    www_dwnld_time_by_size[i] = 0.0;
#endif
  }
  dwnld_totalz = 0;
#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
  www_dwnld_time_sum = 0.0;
#endif
#if WATCH_PROXY || WATCH_SQUID
  prxy_squid_indirect   = 0;
  prxy_squid_cache_hits = 0;
#ifdef WATCH_PROXY
  prxy_cache_writes = 0;
  prxy_uncacheable  = 0;
#else
  squid_cache_misses = 0;
  squid_icp_requests = 0;
  squid_icp_queries  = 0;
  squid_client_http  = 0;
#endif
#endif

  if (www_log_filename != nil) {
    now = time(0);
    while (now < sleep_till) {
#ifdef WATCH_CPU
      sleep_till_and_count_new_proceses(now + 5);
#else
      sleep(5);
#endif
      now = time(0);
      if (www_fd != 0) {
        while (fgets(buf, BUFSIZ, www_fd) != nil) {
          httpops += 1.0;
          if (www_gatelen > 0) {
            if (strncmp(buf, www_gateway, www_gatelen) == 0) {
              gateops += 1.0;
            }
          }
          accesslog(buf);
        }
      }

      /* see if the file has been switched or truncated */
      stat(www_log_filename, www_stat);
      if (www_ino != www_stat[0].st_ino || www_size > www_stat[0].st_size) {
        if (www_fd != 0) {
          fclose(www_fd);       /* close the old log */
        }
        /* log file has changed, open the new one */
        www_fd = fopen(www_log_filename, "r");
        if (www_fd != 0) {
          www_ino = www_stat[0].st_ino;
          while(fgets(buf, BUFSIZ, www_fd) != nil) {
            httpops += 1.0;
            if (www_gatelen > 0) {
              if (strncmp(buf, www_gateway, www_gatelen) == 0) {
                gateops += 1.0;
              }
            }
            accesslog(buf);
          }
        }
      }

      www5_now = gethrtime();
      www5_interval = (www5_now - www5_then) * 0.000000001;
      www5_then = www5_now;
      dtmp = (httpops - lastops)/www5_interval;
      if (dtmp > httpops5) {
        httpops5 = dtmp;
      }
      lastops = httpops;

      // Remember size for next time.
      www_size = www_stat[0].st_size;
    }
  } else {
    sleep_till_and_count_new_proceses(sleep_till);
    www5_now = gethrtime();
  }

  www_now = www5_now;
  www_interval = (www_now - www_then) * 0.000000001;
  www_then = www_now;

  // Use dtmp to get percentages.
  if (httpops == 0.0) {
    dtmp = 0.0;
  } else {
    dtmp = 100.0 / httpops;
  }

#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
  for (i=0; i<5; ++i) {
    if (dwnld_size[i] == 0) {
      www_dwnld_time_by_size[i] = 0.0;
    } else {
      www_dwnld_time_by_size[i] = www_dwnld_time_by_size[i]/dwnld_size[i];
    }
  }
#endif
}

int count_proc(string name)
{
  int count;
  prpsinfo_t p;

  count = 0;
  for (p=first_proc(); p.pr_pid != -1; p=next_proc()) {
    if (p.pr_fname =~ name) {
      count++;
    }
  }
  return count;
}

put_httpd()
{
  put_output("#httpds", sprintf("%7d", count_proc(www_server_proc_name)));
  put_output("httpop/s", sprintf("%8.2f", httpops/www_interval));
  put_output("http/p5s", sprintf("%8.2f", httpops5));
  put_output("cndget/s", sprintf("%8.2f", httpop_condgets/www_interval));
  put_output("search/s", sprintf("%8.3f", httpop_searches/www_interval));
  put_output(" cgi/s", sprintf("%8.3f", httpop_cgi_bins/www_interval));
  put_output(" htErr/s", sprintf("%8.3f", httpop_errors/www_interval));
  put_output(" httpb/s", sprintf("%8.0f", dwnld_totalz/www_interval));
  put_output(" %to1KB", sprintf("%8.2f", dtmp*dwnld_size[0]));
  put_output(" %to10KB", sprintf("%8.2f", dtmp*dwnld_size[1]));
  put_output("%to100KB", sprintf("%8.2f", dtmp*dwnld_size[2]));
  put_output(" %to1MB", sprintf("%8.2f", dtmp*dwnld_size[3]));
  put_output("%over1MB", sprintf("%8.2f", dtmp*dwnld_size[4]));
  put_output(www_gateway, sprintf("%8.2f", gateops/www_interval));
#if WATCH_PROXY || WATCH_SQUID
  put_output(" %indir", sprintf("%8.2f", dtmp * prxy_squid_indirect));
  put_output("%cch_hit", sprintf("%8.2f", dtmp * prxy_squid_cache_hits));
#ifdef WATCH_PROXY
  put_output("%cch_wrt", sprintf("%8.2f", dtmp * prxy_cache_writes));
  put_output("%cch_unc", sprintf("%8.2f", dtmp * prxy_uncacheable));
#else
  put_output("%cch_mis", sprintf("%8.2f", dtmp * squid_cache_misses));
  put_output("%cch_req", sprintf("%8.2f", dtmp * squid_icp_requests));
  put_output("%cch_qry", sprintf("%8.2f", dtmp * squid_icp_queries));
#endif
  put_output(" xfr_t", sprintf("%8.2f", 0.01 * dtmp * www_dwnld_time_sum));
  put_output(" xfr1_t", sprintf("%8.2f", www_dwnld_time_by_size[0]));
  put_output(" xfr10_t", sprintf("%8.2f", www_dwnld_time_by_size[1]));
  put_output("xfr100_t", sprintf("%8.2f", www_dwnld_time_by_size[2]));
  put_output(" xfr1M_t", sprintf("%8.2f", www_dwnld_time_by_size[3]));
  put_output("xfro1M_t", sprintf("%8.2f", www_dwnld_time_by_size[4]));
#elif WATCH_YAHOO
  put_output(" wprc_t", sprintf("%9.5f", 0.01 * dtmp * www_dwnld_time_sum));
  put_output(" wprc1_t", sprintf("%9.5f", www_dwnld_time_by_size[0]));
  put_output(" wprc10_t", sprintf("%9.5f", www_dwnld_time_by_size[1]));
  put_output("wprc100_t", sprintf("%9.5f", www_dwnld_time_by_size[2]));
  put_output(" wprc1M_t", sprintf("%9.5f", www_dwnld_time_by_size[3]));
  put_output("wprco1M_t", sprintf("%9.5f", www_dwnld_time_by_size[4]));
#endif
}
#endif
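// Usage sketch (an assumption based on a typical SE toolkit installation,
// not taken from this file): the subsystems are selected with preprocessor
// defines when the script is handed to the SE interpreter, and the web
// related behavior is controlled through the environment variables read by
// the getenv() calls above (WEB_SERVER, WEB_LOG, SEARCHURL, GATEWAY).  The
// interpreter path and flag spelling below are assumptions, not part of
// this script.
//
//   WEB_SERVER=httpd WEB_LOG=/var/log/httpd/access_log \
//     /opt/RICHPse/bin/se -DWATCH_OS -DWATCH_WEB orcallator.se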