wlauto.workloads.spec2000 package

Module contents

class wlauto.workloads.spec2000.CommandSpec[source]

Bases: object

class wlauto.workloads.spec2000.Spec2000(device, **kwargs)[source]

Bases: wlauto.core.workload.Workload

aliases = AC(['<wlauto.core.extension.Alias object>'])
artifacts = AC([])
asset_file = 'spec2000-assets.tar.gz'
core_modules = []
description = '\n SPEC2000 benchmarks measuring processor, memory and compiler.\n\n http://www.spec.org/cpu2000/\n\n From the web site:\n\n SPEC CPU2000 is the next-generation industry-standardized CPU-intensive benchmark suite. SPEC\n designed CPU2000 to provide a comparative measure of compute intensive performance across the\n widest practical range of hardware. The implementation resulted in source code benchmarks\n developed from real user applications. These benchmarks measure the performance of the\n processor, memory and compiler on the tested system.\n\n .. note:: At the moment, this workload relies on pre-built SPEC binaries (included in an\n asset bundle). These binaries *must* be built according to rules outlined here::\n\n http://www.spec.org/cpu2000/docs/runrules.html#toc_2.0\n\n in order for the results to be valid SPEC2000 results.\n\n .. note:: This workload does not attempt to generate results in an admissible SPEC format. No\n metadata is provided (though some, but not all, of the required metadata is collected\n by WA elsewhere). It is up to the user to post-process results to generate a\n SPEC-admissible results file, if that is their intention.\n\n *base vs peak*\n\n SPEC2000 defines two build/test configurations: base and peak. Base is supposed to use basic\n configuration (e.g. default compiler flags) with no tuning, and peak is specifically optimized for\n a system. Since this workload uses externally-built binaries, there is no way for WA to be sure\n what configuration is used -- the user is expected to keep track of that. Be aware that\n base/peak also come with specific requirements for the way workloads are run (e.g. how many instances\n on multi-core systems)::\n\n http://www.spec.org/cpu2000/docs/runrules.html#toc_3\n\n These are not enforced by WA, so it is again up to the user to ensure that correct workload\n parameters are specified in the agenda, if they intend to collect "official" SPEC results. 
(Those\n interested in collecting official SPEC results should also note that setting runtime parameters\n would violate SPEC run rules that state that no configuration must be done to the platform\n after boot).\n\n *bundle structure*\n\n This workload expects the actual benchmark binaries to be provided in a tarball "bundle" that has\n a very specific structure. At the top level of the tarball, there should be two directories: "fp"\n and "int" -- for each of the SPEC2000 categories. Under those, there is a sub-directory per benchmark.\n Each benchmark sub-directory contains three sub-sub-directories:\n\n - "cpus" contains a subdirectory for each supported cpu (e.g. a15) with a single executable binary\n for that cpu, in addition to a "generic" subdirectory that has not been optimized for a specific\n cpu and should run on any ARM system.\n - "data" contains all additional files (input, configuration, etc) that the benchmark executable\n relies on.\n - "scripts" contains one or more one-liner shell scripts that invoke the benchmark binary with\n appropriate command line parameters. The name of the script must be in the format\n <benchmark name>[.<variant name>].sh, i.e. name of benchmark, optionally followed by variant\n name, followed by ".sh" extension. If there is more than one script, then all of them must\n have a variant; if there is only one script then it should not contain a variant.\n\n A typical bundle may look like this::\n\n |- fp\n | |-- ammp\n | | |-- cpus\n | | | |-- generic\n | | | | |-- ammp\n | | | |-- a15\n | | | | |-- ammp\n | | | |-- a7\n | | | | |-- ammp\n | | |-- data\n | | | |-- ammp.in\n | | |-- scripts\n | | | |-- ammp.sh\n | |-- applu\n . . .\n . . .\n . . .\n |- int\n .\n\n '
finalize(*args, **kwargs)
initialize(*args, **kwargs)
kind = 'workload'
launch_template = '{busybox} taskset {cpumask} {command} 1>/dev/null 2>&1'
loop_template = 'for i in $({busybox} seq 1 {threads}); do {launch_command} 1>/dev/null 2>&1 & done'
name = 'spec2000'
parameters = AC(["Param({'kind': <type 'list'>, 'mandatory': None, 'name': 'modules', 'constraint': None, 'default': None, 'allowed_values': None, 'global_alias': None, 'override': False})", "Param({'kind': <function list_or_string>, 'mandatory': None, 'name': 'benchmarks', 'constraint': None, 'default': None, 'allowed_values': None, 'global_alias': None, 'override': False})", "Param({'kind': <type 'str'>, 'mandatory': None, 'name': 'mode', 'constraint': None, 'default': 'speed', 'allowed_values': ['speed', 'rate'], 'global_alias': None, 'override': False})", "Param({'kind': <function integer>, 'mandatory': None, 'name': 'number_of_threads', 'constraint': None, 'default': None, 'allowed_values': None, 'global_alias': None, 'override': False})", "Param({'kind': <function boolean>, 'mandatory': None, 'name': 'force_extract_assets', 'constraint': None, 'default': False, 'allowed_values': None, 'global_alias': None, 'override': False})", "Param({'kind': <function boolean>, 'mandatory': None, 'name': 'force_push_assets', 'constraint': None, 'default': False, 'allowed_values': None, 'global_alias': None, 'override': False})", "Param({'kind': <function integer>, 'mandatory': None, 'name': 'timeout', 'constraint': None, 'default': 1200, 'allowed_values': None, 'global_alias': None, 'override': False})"])
rate_run_template = 'cd {datadir}; time ({loop}; wait)'
speed_run_template = 'cd {datadir}; time ({launch_command})'
summary_metrics = <wlauto.workloads.spec2000._SPECSummaryMetrics object>
timing_regex = <_sre.SRE_Pattern object>
validate(*args, **kwargs)
class wlauto.workloads.spec2000.SpecBenchmark[source]

Bases: object