#!/usr/bin/python

# Script to statistically compare two sets of log files with -ftime-report
# output embedded within them.
# Contributed by Lawrence Crowl <crowl@google.com>
#
# Copyright (C) 2012 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING.  If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.

""" Compare two sets of compile-time performance numbers.

The intent of this script is to compare compile-time performance of two
different versions of the compiler.  Each version of the compiler must be
run at least three times with the -ftime-report option.  Each log file
represents a data point, or trial.  The set of trials for each compiler
version constitutes a sample.  The output of the script is a description
of the statistically significant difference between the two versions of
the compiler.

The parameters to the script are:

Two file patterns that each match a set of log files.  You will probably
need to quote the patterns before passing them to the script.
Each pattern corresponds to a version of the compiler.

A regular expression that finds interesting lines in the log files.
If you want to match the beginning of the line, you will need to add
the ^ operator.  The filtering uses Python regular expression syntax.
The default is "TOTAL".

All of the interesting lines in a single log file are summed to produce
a single trial (data point).

A desired statistical confidence within the range 60% to 99.9%.  Due to
the implementation, this confidence will be rounded down to one of 60%,
70%, 80%, 90%, 95%, 98%, 99%, 99.5%, 99.8%, and 99.9%.
The default is 95.

If the computed confidence is lower than desired, the script will
estimate the number of trials needed to meet the desired confidence.
This estimate is not very good, as the variance tends to change as
you increase the number of trials.

The most common use of the script is a total compile-time comparison between
log files stored in different directories.

    compare_two_ftime_report_sets "Log1/*perf" "Log2/*perf"

One can also look at parsing time, but expect a lower confidence.

    compare_two_ftime_report_sets "Log1/*perf" "Log2/*perf" "^phase parsing" 75

"""

import os
import sys
import fnmatch
import glob
import re
import math

####################################################################### Utility

def divide(dividend, divisor):
    """ Return the quotient, avoiding division by zero. """
    if divisor == 0:
        return sys.float_info.max
    else:
        return dividend / divisor

################################################################# File and Line

# Should you repurpose this script, this code might help.
#
#def find_files(topdir, filepat):
#    """ Find a set of file names, under a given directory,
#    matching a Unix shell file pattern.
#    Returns an iterator over the file names.
#    """
#    for path, dirlist, filelist in os.walk(topdir):
#        for name in fnmatch.filter(filelist, filepat):
#            yield os.path.join(path, name)

def match_files(fileglob):
    """ Find a set of file names matching a Unix shell glob pattern.
    Returns an iterator over the file names.
    """
    return glob.iglob(os.path.expanduser(fileglob))

def lines_in_file(filename):
    """ Return an iterator over lines in the named file. """
    filedesc = open(filename, "r")
    for line in filedesc:
        yield line
    filedesc.close()

def lines_containing_pattern(pattern, lines):
    """ Find lines by a Python regular-expression.
    Returns an iterator over lines containing the expression.
    """
    parser = re.compile(pattern)
    for line in lines:
        if parser.search(line):
            yield line

############################################################# Number Formatting

def strip_redundant_digits(numrep):
    if numrep.find(".") == -1:
        return numrep
    return numrep.rstrip("0").rstrip(".")

def text_number(number):
    return strip_redundant_digits("%g" % number)

def round_significant(digits, number):
    if number == 0:
        return 0
    magnitude = abs(number)
    significance = math.floor(math.log10(magnitude))
    least_position = int(significance - digits + 1)
    return round(number, -least_position)

def text_significant(digits, number):
    return text_number(round_significant(digits, number))

def text_percent(number):
    return text_significant(3, number*100) + "%"

################################################################ T-Distribution

# This section of code provides functions for using Student's t-distribution.
# The functions are implemented using table lookup
# to facilitate implementation of inverse functions.
# The table consists of row 0 listing the alpha values,
# column 0 listing the degree-of-freedom values,
# and the other entries listing the corresponding t-distribution values.
t_dist_table = [
  [  0, 0.200, 0.150, 0.100, 0.050, 0.025, 0.010, 0.005, .0025, 0.001, .0005],
  [  1, 1.376, 1.963, 3.078, 6.314, 12.71, 31.82, 63.66, 127.3, 318.3, 636.6],
  [  2, 1.061, 1.386, 1.886, 2.920, 4.303, 6.965, 9.925, 14.09, 22.33, 31.60],
  [  3, 0.978, 1.250, 1.638, 2.353, 3.182, 4.541, 5.841, 7.453, 10.21, 12.92],
  [  4, 0.941, 1.190, 1.533, 2.132, 2.776, 3.747, 4.604, 5.598, 7.173, 8.610],
  [  5, 0.920, 1.156, 1.476, 2.015, 2.571, 3.365, 4.032, 4.773, 5.894, 6.869],
  [  6, 0.906, 1.134, 1.440, 1.943, 2.447, 3.143, 3.707, 4.317, 5.208, 5.959],
  [  7, 0.896, 1.119, 1.415, 1.895, 2.365, 2.998, 3.499, 4.029, 4.785, 5.408],
  [  8, 0.889, 1.108, 1.397, 1.860, 2.306, 2.896, 3.355, 3.833, 4.501, 5.041],
  [  9, 0.883, 1.100, 1.383, 1.833, 2.262, 2.821, 3.250, 3.690, 4.297, 4.781],
  [ 10, 0.879, 1.093, 1.372, 1.812, 2.228, 2.764, 3.169, 3.581, 4.144, 4.587],
  [ 11, 0.876, 1.088, 1.363, 1.796, 2.201, 2.718, 3.106, 3.497, 4.025, 4.437],
  [ 12, 0.873, 1.083, 1.356, 1.782, 2.179, 2.681, 3.055, 3.428, 3.930, 4.318],
  [ 13, 0.870, 1.079, 1.350, 1.771, 2.160, 2.650, 3.012, 3.372, 3.852, 4.221],
  [ 14, 0.868, 1.076, 1.345, 1.761, 2.145, 2.624, 2.977, 3.326, 3.787, 4.140],
  [ 15, 0.866, 1.074, 1.341, 1.753, 2.131, 2.602, 2.947, 3.286, 3.733, 4.073],
  [ 16, 0.865, 1.071, 1.337, 1.746, 2.120, 2.583, 2.921, 3.252, 3.686, 4.015],
  [ 17, 0.863, 1.069, 1.333, 1.740, 2.110, 2.567, 2.898, 3.222, 3.646, 3.965],
  [ 18, 0.862, 1.067, 1.330, 1.734, 2.101, 2.552, 2.878, 3.197, 3.610, 3.922],
  [ 19, 0.861, 1.066, 1.328, 1.729, 2.093, 2.539, 2.861, 3.174, 3.579, 3.883],
  [ 20, 0.860, 1.064, 1.325, 1.725, 2.086, 2.528, 2.845, 3.153, 3.552, 3.850],
  [ 21, 0.859, 1.063, 1.323, 1.721, 2.080, 2.518, 2.831, 3.135, 3.527, 3.819],
  [ 22, 0.858, 1.061, 1.321, 1.717, 2.074, 2.508, 2.819, 3.119, 3.505, 3.792],
  [ 23, 0.858, 1.060, 1.319, 1.714, 2.069, 2.500, 2.807, 3.104, 3.485, 3.768],
  [ 24, 0.857, 1.059, 1.318, 1.711, 2.064, 2.492, 2.797, 3.091, 3.467, 3.745],
  [ 25, 0.856, 1.058, 1.316, 1.708, 2.060, 2.485, 2.787, 3.078, 3.450, 3.725],
  [ 26, 0.856, 1.058, 1.315, 1.706, 2.056, 2.479, 2.779, 3.067, 3.435, 3.707],
  [ 27, 0.855, 1.057, 1.314, 1.703, 2.052, 2.473, 2.771, 3.057, 3.421, 3.689],
  [ 28, 0.855, 1.056, 1.313, 1.701, 2.048, 2.467, 2.763, 3.047, 3.408, 3.674],
  [ 29, 0.854, 1.055, 1.311, 1.699, 2.045, 2.462, 2.756, 3.038, 3.396, 3.660],
  [ 30, 0.854, 1.055, 1.310, 1.697, 2.042, 2.457, 2.750, 3.030, 3.385, 3.646],
  [ 31, 0.853, 1.054, 1.309, 1.696, 2.040, 2.453, 2.744, 3.022, 3.375, 3.633],
  [ 32, 0.853, 1.054, 1.309, 1.694, 2.037, 2.449, 2.738, 3.015, 3.365, 3.622],
  [ 33, 0.853, 1.053, 1.308, 1.692, 2.035, 2.445, 2.733, 3.008, 3.356, 3.611],
  [ 34, 0.852, 1.052, 1.307, 1.691, 2.032, 2.441, 2.728, 3.002, 3.348, 3.601],
  [ 35, 0.852, 1.052, 1.306, 1.690, 2.030, 2.438, 2.724, 2.996, 3.340, 3.591],
  [ 36, 0.852, 1.052, 1.306, 1.688, 2.028, 2.434, 2.719, 2.990, 3.333, 3.582],
  [ 37, 0.851, 1.051, 1.305, 1.687, 2.026, 2.431, 2.715, 2.985, 3.326, 3.574],
  [ 38, 0.851, 1.051, 1.304, 1.686, 2.024, 2.429, 2.712, 2.980, 3.319, 3.566],
  [ 39, 0.851, 1.050, 1.304, 1.685, 2.023, 2.426, 2.708, 2.976, 3.313, 3.558],
  [ 40, 0.851, 1.050, 1.303, 1.684, 2.021, 2.423, 2.704, 2.971, 3.307, 3.551],
  [ 50, 0.849, 1.047, 1.299, 1.676, 2.009, 2.403, 2.678, 2.937, 3.261, 3.496],
  [ 60, 0.848, 1.045, 1.296, 1.671, 2.000, 2.390, 2.660, 2.915, 3.232, 3.460],
  [ 80, 0.846, 1.043, 1.292, 1.664, 1.990, 2.374, 2.639, 2.887, 3.195, 3.416],
  [100, 0.845, 1.042, 1.290, 1.660, 1.984, 2.364, 2.626, 2.871, 3.174, 3.390],
  [150, 0.844, 1.040, 1.287, 1.655, 1.976, 2.351, 2.609, 2.849, 3.145, 3.357] ]

# The functions use the following parameter name conventions:
#     alpha - the alpha parameter
#     degree - the degree-of-freedom parameter
#     value - the t-distribution value for some alpha and degree
#     deviations - a confidence interval radius,
#         expressed as a multiple of the standard deviation of the sample
#     ax - the alpha parameter index
#     dx - the degree-of-freedom parameter index
#
# The interface to this section of code is the last three functions,
# find_t_dist_value, find_t_dist_alpha, and find_t_dist_degree.

def t_dist_alpha_at_index(ax):
    if ax == 0:
        return .25  # effectively no confidence
    else:
        return t_dist_table[0][ax]

def t_dist_degree_at_index(dx):
    return t_dist_table[dx][0]

def t_dist_value_at_index(ax, dx):
    return t_dist_table[dx][ax]

def t_dist_index_of_degree(degree):
    limit = len(t_dist_table) - 1
    dx = 0
    while dx < limit and t_dist_degree_at_index(dx+1) <= degree:
        dx += 1
    return dx

def t_dist_index_of_alpha(alpha):
    limit = len(t_dist_table[0]) - 1
    ax = 0
    while ax < limit and t_dist_alpha_at_index(ax+1) >= alpha:
        ax += 1
    return ax

def t_dist_index_of_value(dx, value):
    limit = len(t_dist_table[dx]) - 1
    ax = 0
    while ax < limit and t_dist_value_at_index(ax+1, dx) < value:
        ax += 1
    return ax

def t_dist_value_within_deviations(dx, ax, deviations):
    degree = t_dist_degree_at_index(dx)
    count = degree + 1
    root = math.sqrt(count)
    value = t_dist_value_at_index(ax, dx)
    nominal = value / root
    comparison = nominal <= deviations
    return comparison

def t_dist_index_of_degree_for_deviations(ax, deviations):
    limit = len(t_dist_table) - 1
    dx = 1
    while dx < limit and not t_dist_value_within_deviations(dx, ax, deviations):
        dx += 1
    return dx

def find_t_dist_value(alpha, degree):
    """ Return the t-distribution value.
    The parameters are alpha and degree of freedom.
    """
    dx = t_dist_index_of_degree(degree)
    ax = t_dist_index_of_alpha(alpha)
    return t_dist_value_at_index(ax, dx)

def find_t_dist_alpha(value, degree):
    """ Return the alpha.
    The parameters are the t-distribution value for a given degree of freedom.
    """
    dx = t_dist_index_of_degree(degree)
    ax = t_dist_index_of_value(dx, value)
    return t_dist_alpha_at_index(ax)

def find_t_dist_degree(alpha, deviations):
    """ Return the degree-of-freedom.
    The parameters are the desired alpha and the number of standard deviations
    away from the mean that the degree should handle.
    """
    ax = t_dist_index_of_alpha(alpha)
    dx = t_dist_index_of_degree_for_deviations(ax, deviations)
    return t_dist_degree_at_index(dx)

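# Added note (not in the original script): the lookups are intentionally
# conservative.  t_dist_index_of_degree rounds the degree of freedom down to
# a table row (a smaller degree gives a larger t value), and
# t_dist_index_of_alpha keeps the smallest tabulated alpha that is still at
# least the requested alpha, which rounds the confidence down.  For example,
# find_t_dist_value(0.025, 10) selects the row for 10 degrees of freedom and
# the 0.025 column, returning 2.228.
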
############################################################## Core Statistical

# This section provides the core statistical classes and functions.

class Accumulator:
    """ An accumulator for statistical information using arithmetic mean. """

    def __init__(self):
        self.count = 0
        self.mean = 0
        self.sumsqdiff = 0

    def insert(self, value):
        self.count += 1
        diff = value - self.mean
        self.mean += diff / self.count
        self.sumsqdiff += (self.count - 1) * diff * diff / self.count

def fill_accumulator_from_values(values):
    accumulator = Accumulator()
    for value in values:
        accumulator.insert(value)
    return accumulator

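# Added note (not in the original script): Accumulator.insert applies
# Welford's online update, so the running mean and the sum of squared
# differences from the mean are maintained in a single pass over the values.
# For example, inserting 1.0, 2.0, and 3.0 leaves count == 3, mean == 2.0,
# and sumsqdiff == 2.0, giving a sample variance of
# sumsqdiff / (count - 1) == 1.0.
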
def alpha_from_confidence(confidence):
    scrubbed = min(99.99, max(confidence, 60))
    return (100.0 - scrubbed) / 200.0

def confidence_from_alpha(alpha):
    return 100 - 200 * alpha

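# Worked example (added note, not in the original script): the alpha used
# throughout is a per-tail probability, so a requested confidence of 95 maps
# to alpha_from_confidence(95) == (100.0 - 95) / 200.0 == 0.025, and
# confidence_from_alpha(0.025) == 100 - 200 * 0.025 == 95.
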
class Sample:
    """ A description of a sample using an arithmetic mean. """

    def __init__(self, accumulator, alpha):
        if accumulator.count < 3:
            sys.exit("Samples must contain at least three trials.")
        self.count = accumulator.count
        self.mean = accumulator.mean
        variance = accumulator.sumsqdiff / (self.count - 1)
        self.deviation = math.sqrt(variance)
        self.error = self.deviation / math.sqrt(self.count)
        self.alpha = alpha
        self.radius = find_t_dist_value(alpha, self.count - 1) * self.error

    def alpha_for_radius(self, radius):
        return find_t_dist_alpha(divide(radius, self.error), self.count)

    def degree_for_radius(self, radius):
        return find_t_dist_degree(self.alpha, divide(radius, self.deviation))

    def __str__(self):
        text = "trial count is " + text_number(self.count)
        text += ", mean is " + text_number(self.mean)
        text += " (" + text_number(confidence_from_alpha(self.alpha)) + "%"
        text += " confidence in " + text_number(self.mean - self.radius)
        text += " to " + text_number(self.mean + self.radius) + ")"
        text += ",\nstd.deviation is " + text_number(self.deviation)
        text += ", std.error is " + text_number(self.error)
        return text

def sample_from_values(values, alpha):
    accumulator = fill_accumulator_from_values(values)
    return Sample(accumulator, alpha)

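# Added note (not in the original script): self.radius above is the
# half-width of the confidence interval, t(alpha, count - 1) * standard
# error.  For example, a sample of 10 trials at 95% confidence (alpha 0.025)
# uses the t value 2.262 for 9 degrees of freedom, so a standard error of
# 0.1 gives a radius of about 0.2262 around the mean.
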
class Comparison:
    """ A comparison of two samples using arithmetic means. """

    def __init__(self, first, second, alpha):
        if first.mean > second.mean:
            self.upper = first
            self.lower = second
            self.larger = "first"
        else:
            self.upper = second
            self.lower = first
            self.larger = "second"
        self.a_wanted = alpha
        radius = self.upper.mean - self.lower.mean
        rising = self.lower.alpha_for_radius(radius)
        falling = self.upper.alpha_for_radius(radius)
        self.a_actual = max(rising, falling)
        rising = self.lower.degree_for_radius(radius)
        falling = self.upper.degree_for_radius(radius)
        self.count = max(rising, falling) + 1

    def __str__(self):
        message = "The " + self.larger + " sample appears to be "
        change = divide(self.upper.mean, self.lower.mean) - 1
        message += text_percent(change) + " larger,\n"
        confidence = confidence_from_alpha(self.a_actual)
        if confidence >= 60:
            message += "with " + text_number(confidence) + "% confidence"
            message += " of being larger."
        else:
            message += "but with no confidence of actually being larger."
        if self.a_actual > self.a_wanted:
            confidence = confidence_from_alpha(self.a_wanted)
            message += "\nTo reach " + text_number(confidence) + "% confidence,"
            if self.count < 100:
                message += " you need roughly " + text_number(self.count)
                message += " trials,\n"
                message += "assuming the standard deviation is stable,"
                message += " which is iffy."
            else:
                message += "\nyou need to reduce the larger deviation"
                message += " or increase the number of trials."
        return message

############################################################ Single Value Files

# This section provides functions to compare two raw data files,
# each containing a whole sample consisting of a single number per line.

# Should you repurpose this script, this code might help.
#
#def values_from_data_file(filename):
#    for line in lines_in_file(filename):
#        yield float(line)

# Should you repurpose this script, this code might help.
#
#def sample_from_data_file(filename, alpha):
#    confidence = confidence_from_alpha(alpha)
#    text = "\nArithmetic sample for data file\n\"" + filename + "\""
#    text += " with desired confidence " + text_number(confidence) + " is "
#    print text
#    values = values_from_data_file(filename)
#    sample = sample_from_values(values, alpha)
#    print sample
#    return sample

# Should you repurpose this script, this code might help.
#
#def compare_two_data_files(filename1, filename2, confidence):
#    alpha = alpha_from_confidence(confidence)
#    sample1 = sample_from_data_file(filename1, alpha)
#    sample2 = sample_from_data_file(filename2, alpha)
#    print
#    print Comparison(sample1, sample2, alpha)

# Should you repurpose this script, this code might help.
#
#def command_two_data_files():
#    argc = len(sys.argv)
#    if argc < 3 or 4 < argc:
#        message = "usage: " + sys.argv[0]
#        message += " file-name file-name [confidence]"
#        print message
#    else:
#        filename1 = sys.argv[1]
#        filename2 = sys.argv[2]
#        if len(sys.argv) >= 4:
#            confidence = int(sys.argv[3])
#        else:
#            confidence = 95
#        compare_two_data_files(filename1, filename2, confidence)

############################################### -ftime-report TimeVar Log Files

# This section provides functions to compare two sets of -ftime-report log
# files.  Each set is a sample, where each data point is derived from the
# sum of values in a single log file.

label = r"^ *([^:]*[^: ]) *:"
number = r" *([0-9.]*) *"
percent = r"\( *[0-9]*\%\)"
numpct = number + percent

total_format = label + number + number + number + number + " kB\n"
total_parser = re.compile(total_format)

tmvar_format = label + numpct + " usr" + numpct + " sys"
tmvar_format += numpct + " wall" + number + " kB " + percent + " ggc\n"
tmvar_parser = re.compile(tmvar_format)

replace = r"\2\t\3\t\4\t\5\t\1"

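# Added note (not in the original script): both regular expressions capture
# five groups, the row label plus four numeric columns (the usr, sys, and
# wall times and a kB figure).  The replacement string reorders a matching
# line into the tab-separated record "usr<TAB>sys<TAB>wall<TAB>kB<TAB>label",
# which split_time_report below splits into a tuple; extract_cpu_time then
# sums the first two fields (usr + sys).  As an illustration, a timevar line
# shaped like
#     "phase parsing  :  1.23 (10%) usr  0.45 ( 9%) sys  1.70 (10%) wall  1234 kB ( 5%) ggc"
# would contribute 1.23 + 0.45 CPU seconds to its log file's trial; exact
# column spacing varies between GCC versions, hence the loose " *" patterns.
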
def split_time_report(lines, pattern):
    if pattern == "TOTAL":
        parser = total_parser
    else:
        parser = tmvar_parser
    for line in lines:
        modified = parser.sub(replace, line)
        if modified != line:
            yield re.split("\t", modified)

def extract_cpu_time(tvtuples):
    for tuple in tvtuples:
        yield float(tuple[0]) + float(tuple[1])

def sum_values(values):
    sum = 0
    for value in values:
        sum += value
    return sum

def extract_time_for_timevar_log(filename, pattern):
    lines = lines_in_file(filename)
    tmvars = lines_containing_pattern(pattern, lines)
    tuples = split_time_report(tmvars, pattern)
    times = extract_cpu_time(tuples)
    return sum_values(times)

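# Added note (not in the original script): each log file produces exactly one
# trial.  The file's lines are filtered by the caller-supplied pattern,
# parsed into tuples, reduced to usr + sys CPU seconds per line, and summed
# into a single number for that file.
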
def extract_times_for_timevar_logs(filelist, pattern):
    for filename in filelist:
        yield extract_time_for_timevar_log(filename, pattern)

def sample_from_timevar_logs(fileglob, pattern, alpha):
    confidence = confidence_from_alpha(alpha)
    text = "\nArithmetic sample for timevar log files\n\"" + fileglob + "\""
    text += "\nand selecting lines containing \"" + pattern + "\""
    text += " with desired confidence " + text_number(confidence) + " is "
    print text
    filelist = match_files(fileglob)
    values = extract_times_for_timevar_logs(filelist, pattern)
    sample = sample_from_values(values, alpha)
    print sample
    return sample

def compare_two_timevar_logs(fileglob1, fileglob2, pattern, confidence):
    alpha = alpha_from_confidence(confidence)
    sample1 = sample_from_timevar_logs(fileglob1, pattern, alpha)
    sample2 = sample_from_timevar_logs(fileglob2, pattern, alpha)
    print
    print Comparison(sample1, sample2, alpha)

def command_two_timevar_logs():
    argc = len(sys.argv)
    if argc < 3 or 5 < argc:
        message = "usage: " + sys.argv[0]
        message += " file-pattern file-pattern [line-pattern [confidence]]"
        print message
    else:
        filepat1 = sys.argv[1]
        filepat2 = sys.argv[2]
        if len(sys.argv) >= 5:
            confidence = int(sys.argv[4])
        else:
            confidence = 95
        if len(sys.argv) >= 4:
            linepat = sys.argv[3]
        else:
            linepat = "TOTAL"
        compare_two_timevar_logs(filepat1, filepat2, linepat, confidence)

########################################################################## Main

# This section is the main code, implementing the command.

command_two_timevar_logs()