#!/usr/local/bin/python -O

""" A Python Benchmark Suite

"""
#
# Note: Please keep this module compatible with Python 1.5.2.
#
# Tests may include features in later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#
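# For instance (an illustrative sketch; the module name is
# hypothetical), such a guard in Setup.py looks like:
#
#     try:
#         from VersionSpecificTests import *
#     except ImportError:
#         pass
#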

# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)
Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)

All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.

THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""

import sys, time, operator, string, re
from CommandLine import *

try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

# Version number; for the version history, see the README file
__version__ = '2.0'

### Constants

# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6

# Percent unit
PERCENT = 100

# Horizontal line length
LINE = 79

# Minimum test run-time
MIN_TEST_RUNTIME = 1e-3

# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20

# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20

# Allow skipping calibration?
ALLOW_SKIPPING_CALIBRATION = 1

# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'

# Choose platform default timer
if sys.platform[:3] == 'win':
    # On WinXP this has 2.5ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
    # On Linux this has 1ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME

# Print debug information?
_debug = 0

### Helpers

def get_timer(timertype):

    if timertype == TIMER_TIME_TIME:
        return time.time
    elif timertype == TIMER_TIME_CLOCK:
        return time.clock
    elif timertype == TIMER_SYSTIMES_PROCESSTIME:
        import systimes
        return systimes.processtime
    else:
        raise TypeError('unknown timer type: %s' % timertype)
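
# Example (illustrative): get_timer(TIMER_TIME_TIME) returns the
# time.time function, so get_timer(TIMER_TIME_TIME)() reads the
# current timer value in seconds.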

def get_machine_details():

    import platform
    if _debug:
        print 'Getting machine details...'
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    if python > '2.0':
        try:
            unichr(100000)
        except ValueError:
            # UCS2 build (standard)
            unicode = 'UCS2'
        else:
            # UCS4 build (most recent Linux distros)
            unicode = 'UCS4'
    else:
        unicode = None
    bits, linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'python': python,
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unicode,
        'bits': bits,
        }

def print_machine_details(d, indent=''):

    l = ['Machine Details:',
         '   Platform ID:  %s' % d.get('platform', 'n/a'),
         '   Processor:    %s' % d.get('processor', 'n/a'),
         '',
         'Python:',
         '   Executable:   %s' % d.get('executable', 'n/a'),
         '   Version:      %s' % d.get('python', 'n/a'),
         '   Compiler:     %s' % d.get('compiler', 'n/a'),
         '   Bits:         %s' % d.get('bits', 'n/a'),
         '   Build:        %s (#%s)' % (d.get('builddate', 'n/a'),
                                        d.get('buildno', 'n/a')),
         '   Unicode:      %s' % d.get('unicode', 'n/a'),
         ]
    print indent + string.join(l, '\n' + indent) + '\n'

### Test baseclass

class Test:

    """ All tests must use this class as their baseclass. It provides
        the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 1-2 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundredths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.

    """
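
    # A minimal, hypothetical test implementation (for illustration
    # only; the actual tests are collected from the Setup.py module):
    #
    #     class IntegerAdds(Test):
    #
    #         version = 2.0
    #         operations = 3
    #         rounds = 100000
    #
    #         def test(self):
    #             # The measured work: 3 additions per round
    #             for i in xrange(self.rounds):
    #                 x = i + 1
    #                 x = x + 2
    #                 x = x + 3
    #
    #         def calibrate(self):
    #             # Same loop without the measured operations
    #             for i in xrange(self.rounds):
    #                 pass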

    ### Instance variables that each test should override

    # Version number of the test as float (x.yy); this is important
    # for comparisons of benchmark runs - tests with unequal version
    # number will not get compared.
    version = 2.0

    # The number of abstract operations done in each round of the
    # test. An operation is the basic unit of what you want to
    # measure. The benchmark will output the amount of run-time per
    # operation. Note that in order to raise the measured timings
    # significantly above noise level, it is often required to repeat
    # sets of operations more than once per test round. The measured
    # overhead per test round should be less than 1 second.
    operations = 1
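
    # Example: a test whose .test() method performs 3 operations per
    # round (as in the IntegerAdds sketch above) should set
    # operations = 3; pybench then reports run-time divided by
    # (.rounds * .operations) as the time per operation.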

    # Number of rounds to execute per test run. This should be
    # adjusted to a figure that results in a test run-time of between
    # 1-2 seconds.
    rounds = 100000

    ### Internal variables

    # Mark this class as implementing a test
    is_a_test = 1

    # Last timing: (real, run, overhead)
    last_timing = (0.0, 0.0, 0.0)

    # Warp factor to use for this test
    warp = 1

    # Number of calibration runs to use
    calibration_runs = CALIBRATION_RUNS

    # List of calibration timings
    overhead_times = None

    # List of test run timings
    times = []

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, warp=None, calibration_runs=None, timer=None):

        # Set parameters
        if warp is not None:
            self.rounds = int(self.rounds / warp)
            if self.rounds == 0:
                raise ValueError('warp factor set too high')
            self.warp = warp
        if calibration_runs is not None:
            if (not ALLOW_SKIPPING_CALIBRATION and
                calibration_runs < 1):
                raise ValueError('at least one calibration run is required')
            self.calibration_runs = calibration_runs
        if timer is not None:
            self.timer = timer

        # Init variables
        self.times = []
        self.overhead_times = []

        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds

    def get_timer(self):

        """ Return the timer function to use for the test.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the test is compatible
            with the other Test instance or not.

        """
        if self.version != other.version:
            return 0
        if self.rounds != other.rounds:
            return 0
        return 1

    def calibrate_test(self):

        if self.calibration_runs == 0:
            self.overhead_times = [0.0]
            return

        calibrate = self.calibrate
        timer = self.get_timer()
        calibration_loops = range(CALIBRATION_LOOPS)

        # Time the calibration loop overhead
        prep_times = []
        for i in range(self.calibration_runs):
            t = timer()
            for j in calibration_loops:
                pass
            t = timer() - t
            prep_times.append(t)
        min_prep_time = min(prep_times)
        if _debug:
            print
            print 'Calib. prep time     = %.6fms' % (
                min_prep_time * MILLI_SECONDS)

        # Time the calibration runs (doing CALIBRATION_LOOPS loops of
        # .calibrate() method calls each)
        for i in range(self.calibration_runs):
            t = timer()
            for j in calibration_loops:
                calibrate()
            t = timer() - t
            self.overhead_times.append(t / CALIBRATION_LOOPS
                                       - min_prep_time)

        # Check the measured times
        min_overhead = min(self.overhead_times)
        max_overhead = max(self.overhead_times)
        if _debug:
            print 'Calib. overhead time = %.6fms' % (
                min_overhead * MILLI_SECONDS)
        if min_overhead < 0.0:
            raise ValueError('calibration setup did not work')
        if max_overhead - min_overhead > 0.1:
            raise ValueError(
                'overhead calibration timing range too inaccurate: '
                '%r - %r' % (min_overhead, max_overhead))

    def run(self):

        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low w/r to the test timing.

        """
        test = self.test
        timer = self.get_timer()

        # Get calibration
        min_overhead = min(self.overhead_times)

        # Test run
        t = timer()
        test()
        t = timer() - t
        if t < MIN_TEST_RUNTIME:
            raise ValueError('warp factor too high: '
                             'test times are < 1ms')
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            set up and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.

        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds times, executing
            self.operations operations per round.

        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)

        """
        runs = len(self.times)
        if runs == 0:
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = reduce(operator.add, self.times, 0.0)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead

### Load Setup

# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.

import Setup

### Benchmark base class

class Benchmark:

    # Name of the benchmark
    name = ''

    # Number of benchmark rounds to run
    rounds = 1

    # Warp factor used to run the tests
    warp = 1

    # Average benchmark round time
    roundtime = 0

    # Benchmark version number as float x.yy
    version = 2.0

    # Produce verbose output?
    verbose = 0

    # Dictionary with the machine details
    machine_details = None

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    # Number of calibration runs to use per test
    calibration_runs = CALIBRATION_RUNS

    def __init__(self, name, verbose=None, timer=None, warp=None,
                 calibration_runs=None):

        if name:
            self.name = name
        else:
            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                        (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose
        if timer is not None:
            self.timer = timer
        if warp is not None:
            self.warp = warp
        if calibration_runs is not None:
            self.calibration_runs = calibration_runs

        # Init vars
        self.tests = {}
        if _debug:
            print 'Getting machine details...'
        self.machine_details = get_machine_details()

        # Make .version an instance attribute to have it saved in the
        # Benchmark pickle
        self.version = self.version

    def get_timer(self):

        """ Return the timer function to use for the benchmark.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the benchmark is
            compatible with the other Benchmark instance or not.

        """
        if self.version != other.version:
            return 0
        if (self.machine_details == other.machine_details and
            self.timer != other.timer):
            return 0
        if (self.calibration_runs == 0 and
            other.calibration_runs != 0):
            return 0
        if (self.calibration_runs != 0 and
            other.calibration_runs == 0):
            return 0
        return 1

    def load_tests(self, setupmod, limitnames=None):

        # Add tests
        if self.verbose:
            print 'Searching for tests ...'
            print '--------------------------------------'
        for testclass in setupmod.__dict__.values():
            if not hasattr(testclass, 'is_a_test'):
                continue
            name = testclass.__name__
            if name == 'Test':
                continue
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            self.tests[name] = testclass(
                warp=self.warp,
                calibration_runs=self.calibration_runs,
                timer=self.timer)
        l = self.tests.keys()
        l.sort()
        if self.verbose:
            for name in l:
                print '  %s' % name
            print '--------------------------------------'
            print '  %i tests found' % len(l)
            print

    def calibrate(self):

        print 'Calibrating tests. Please wait...'
        if self.verbose:
            print
            print 'Test                             min      max'
            print '-' * LINE
        tests = self.tests.items()
        tests.sort()
        for name, test in tests:
            test.calibrate_test()
            if self.verbose:
                print '%30s: %6.3fms %6.3fms' % \
                      (name,
                       min(test.overhead_times) * MILLI_SECONDS,
                       max(test.overhead_times) * MILLI_SECONDS)
        print

    def run(self):

        tests = self.tests.items()
        tests.sort()
        timer = self.get_timer()
        print 'Running %i round(s) of the suite at warp factor %i:' % \
              (self.rounds, self.warp)
        print
        self.roundtimes = []
        for i in range(self.rounds):
            if self.verbose:
                print ' Round %-25i  effective   absolute  overhead' % (i+1)
            total_eff_time = 0.0
            for name, test in tests:
                if self.verbose:
                    print '%30s:' % name,
                test.run()
                (eff_time, abs_time, min_overhead) = test.last_timing
                total_eff_time = total_eff_time + eff_time
                if self.verbose:
                    print '    %5.0fms    %5.0fms  %7.3fms' % \
                          (eff_time * MILLI_SECONDS,
                           abs_time * MILLI_SECONDS,
                           min_overhead * MILLI_SECONDS)
            self.roundtimes.append(total_eff_time)
            if self.verbose:
                print ('                              '
                       '    ------------------------------')
                print ('                              '
                       '    Totals:  %6.0fms' %
                       (total_eff_time * MILLI_SECONDS))
                print
            else:
                print '* Round %i done in %.3f seconds.' % (i+1,
                                                            total_eff_time)
        print

    def stat(self):

        """ Return benchmark run statistics as tuple:

            (minimum round time,
             average round time,
             maximum round time)

            XXX Currently not used, since the benchmark does test
                statistics across all rounds.

        """
        runs = len(self.roundtimes)
        if runs == 0:
            return 0.0, 0.0, 0.0
        min_time = min(self.roundtimes)
        total_time = reduce(operator.add, self.roundtimes, 0.0)
        avg_time = total_time / float(runs)
        max_time = max(self.roundtimes)
        return (min_time, avg_time, max_time)

    def print_header(self, title='Benchmark'):

        print '-' * LINE
        print '%s: %s' % (title, self.name)
        print '-' * LINE
        print
        print '    Rounds: %s' % self.rounds
        print '    Warp:   %s' % self.warp
        print '    Timer:  %s' % self.timer
        print
        if self.machine_details:
            print_machine_details(self.machine_details, indent='    ')
            print

    def print_benchmark(self, hidenoise=0, limitnames=None):

        print ('Test                          '
               '   minimum  average  operation  overhead')
        print '-' * LINE
        tests = self.tests.items()
        tests.sort()
        total_min_time = 0.0
        total_avg_time = 0.0
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            print '%30s:  %5.0fms  %5.0fms  %6.2fus  %7.3fms' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   avg_time * MILLI_SECONDS,
                   op_avg * MICRO_SECONDS,
                   min_overhead * MILLI_SECONDS)
        print '-' * LINE
        print ('Totals:                        '
               ' %6.0fms %6.0fms' %
               (total_min_time * MILLI_SECONDS,
                total_avg_time * MILLI_SECONDS,
                ))
        print

    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        # Check benchmark versions
        if compare_to.version != self.version:
            print ('* Benchmark versions differ: '
                   'cannot compare this benchmark to "%s" !' %
                   compare_to.name)
            print
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print ('Test                          '
               '   minimum run-time        average run-time')
        print ('                              '
               '   this    other   diff    this    other   diff')
        print '-' * LINE

        # Print test comparisons
        tests = self.tests.items()
        tests.sort()
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparable
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    if hidenoise and abs(min_diff) < 0.10:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 0.10:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparable
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
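            # The other benchmark's timings are scaled by
            # compare_to.warp/self.warp in the print below, so that
            # both columns refer to the same number of test rounds.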
            print '%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff)
        print '-' * LINE

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print ('Totals:                       '
               '  %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
               (total_min_time * MILLI_SECONDS,
                (other_total_min_time * compare_to.warp/self.warp
                 * MILLI_SECONDS),
                min_diff,
                total_avg_time * MILLI_SECONDS,
                (other_total_avg_time * compare_to.warp/self.warp
                 * MILLI_SECONDS),
                avg_diff
                ))
        print
        print '(this=%s, other=%s)' % (self.name,
                                       compare_to.name)
        print

class PyBenchCmdline(Application):

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""
    copyright = __copyright__

    def main(self):

        rounds = int(self.values['-n'])
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print '* limiting test names to ones matching "%s"' % \
                      limitnames
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = int(self.values['-C'])
        timer = self.values['--timer']

        print '-' * LINE
        print 'PYBENCH %s' % __version__
        print '-' * LINE
        print '* using Python %s' % (string.split(sys.version)[0])

        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print '* Python version doesn\'t support garbage collection'
            else:
                gc.disable()
                print '* disabled garbage collection'

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            sys.setcheckinterval(value)
            print '* system check interval set to maximum: %s' % value

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print '* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION
        else:
            print '* using timer: %s' % timer

        print

        if compare_to:
            try:
                f = open(compare_to, 'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                      repr(compare_to),
                      reason)
                compare_to = None

        if show_bench:
            try:
                f = open(show_bench, 'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                      repr(show_bench),
                      reason)
                print
            return

        if reportfile:
            print 'Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp)
            print

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print
            print '*** KeyboardInterrupt -- Aborting'
            print
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                f = open(reportfile, 'wb')
                bench.name = reportfile
                pickle.dump(bench, f)
                f.close()
            except IOError, reason:
                print '* Error opening/writing reportfile %s: %s' % (
                      reportfile,
                      reason)
                print

if __name__ == '__main__':
    PyBenchCmdline()