author     mbligh <mbligh@592f7852-d20e-0410-864c-8624ca9c26a4>  2009-01-13 23:33:33 +0000
committer  mbligh <mbligh@592f7852-d20e-0410-864c-8624ca9c26a4>  2009-01-13 23:33:33 +0000
commit     9ebf4a80d2a9c72dc52648433d59821a416fbaba (patch)
tree       c60459ead22acde40196f3d1011f45b438ad857d
parent     cf825511fe28103d40085f4c5045d98b335ad5e4 (diff)
Change all tabs to space. somehow we lost this change from tko/ ?
Somewhat scary

Signed-off-by: Martin J. Bligh <mbligh@google.com>

git-svn-id: svn://test.kernel.org/autotest/trunk@2640 592f7852-d20e-0410-864c-8624ca9c26a4
-rwxr-xr-x  tko/compose_query.cgi                 384
-rw-r--r--  tko/create_db-postgres                 96
-rwxr-xr-x  tko/draw_graphs                        92
-rwxr-xr-x  tko/machine_aggr.cgi                  188
-rwxr-xr-x  tko/machine_benchmark.cgi              89
-rwxr-xr-x  tko/machine_test_attribute_graph.cgi   36
-rw-r--r--  tko/perf_graph.cgi                    340
-rw-r--r--  tko/perf_graphs.cgi                    74
-rwxr-xr-x  tko/plotgraph                         114
-rwxr-xr-x  tko/query_history.cgi                  92
-rwxr-xr-x  tko/save_query.cgi                     94
-rwxr-xr-x  tko/test.cgi                           82
12 files changed, 840 insertions, 841 deletions
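The change itself is purely mechanical: tabs throughout the tko/ sources are expanded to spaces, which is why the insertion and deletion counts above match almost line for line. As a rough sketch of the kind of pass that produces a diff like this (the commit does not record the tool or tab stop actually used; the 4-column stop and the in-place rewrite below are assumptions):

#!/usr/bin/python
import sys

TAB_WIDTH = 4  # assumed tab stop; the commit does not say which one was used

def detab(path):
    # str.expandtabs() pads each tab out to the next TAB_WIDTH column,
    # so alignment inside lines is preserved, not just leading indent.
    with open(path) as src:
        lines = src.readlines()
    with open(path, 'w') as dst:
        for line in lines:
            dst.write(line.expandtabs(TAB_WIDTH))

if __name__ == '__main__':
    for path in sys.argv[1:]:  # e.g. python detab.py tko/*.cgi tko/plotgraph
        detab(path)

A quick sanity check for a rewrite this broad ("Somewhat scary") is git diff -w against the parent commit (cf825511): with all whitespace differences ignored, a purely cosmetic change should produce no output.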
diff --git a/tko/compose_query.cgi b/tko/compose_query.cgi
index 9bae9c71..0184c43b 100755
--- a/tko/compose_query.cgi
+++ b/tko/compose_query.cgi
@@ -87,18 +87,18 @@ next_field = {
def parse_field(form, form_field, field_default):
- if not form_field in form:
- return field_default
- field_input = form[form_field].value.lower()
- if field_input and field_input in frontend.test_view_field_dict:
- return field_input
- return field_default
+ if not form_field in form:
+ return field_default
+ field_input = form[form_field].value.lower()
+ if field_input and field_input in frontend.test_view_field_dict:
+ return field_input
+ return field_default
def parse_condition(form, form_field, field_default):
- if not form_field in form:
- return field_default
- return form[form_field].value
+ if not form_field in form:
+ return field_default
+ return form[form_field].value
form = cgi.FieldStorage()
@@ -109,7 +109,7 @@ column = parse_field(form, 'columns', 'machine_group')
condition_field = parse_condition(form, 'condition', '')
if 'brief' in form.keys() and form['brief'].value <> '0':
- display.set_brief_mode()
+ display.set_brief_mode()
## caller can specify rows and columns that shall be included into the report
## regardless of whether actual test data is available yet
@@ -118,10 +118,10 @@ force_column_field = parse_condition(form,'force_column','')
def split_forced_fields(force_field):
- if force_field:
- return force_field.split()
- else:
- return []
+ if force_field:
+ return force_field.split()
+ else:
+ return []
force_row = split_forced_fields(force_row_field)
force_column = split_forced_fields(force_column_field)
@@ -131,56 +131,56 @@ db_obj = db.db()
def construct_link(x, y):
- next_row = row
- next_column = column
- condition_list = []
- if condition_field != '':
- condition_list.append(condition_field)
- if y:
- next_row = next_field[row]
- condition_list.append("%s='%s'" % (row, y))
- if x:
- next_column = next_field[column]
- condition_list.append("%s='%s'" % (column, x))
- next_condition = '&'.join(condition_list)
- link = '/tko/compose_query.cgi?' + urllib.urlencode({'columns': next_column,
- 'rows': next_row, 'condition': next_condition,
- 'title': title_field})
- return link
+ next_row = row
+ next_column = column
+ condition_list = []
+ if condition_field != '':
+ condition_list.append(condition_field)
+ if y:
+ next_row = next_field[row]
+ condition_list.append("%s='%s'" % (row, y))
+ if x:
+ next_column = next_field[column]
+ condition_list.append("%s='%s'" % (column, x))
+ next_condition = '&'.join(condition_list)
+ link = '/tko/compose_query.cgi?' + urllib.urlencode({'columns': next_column,
+ 'rows': next_row, 'condition': next_condition,
+ 'title': title_field})
+ return link
def construct_logs_link(x, y, job_tag):
- job_path = frontend.html_root + job_tag + '/'
- test = ''
- if (row == 'test' and
- not y.split('.')[0] in ('boot', 'build', 'install')):
- test = y
- if (column == 'test' and
- not x.split('.')[0] in ('boot', 'build', 'install')):
- test = x
- return '/tko/retrieve_logs.cgi?' + urllib.urlencode({'job' : job_path,
- 'test' : test})
+ job_path = frontend.html_root + job_tag + '/'
+ test = ''
+ if (row == 'test' and
+ not y.split('.')[0] in ('boot', 'build', 'install')):
+ test = y
+ if (column == 'test' and
+ not x.split('.')[0] in ('boot', 'build', 'install')):
+ test = x
+ return '/tko/retrieve_logs.cgi?' + urllib.urlencode({'job' : job_path,
+ 'test' : test})
def create_select_options(selected_val):
- ret = ""
- for option in sorted(frontend.test_view_field_dict.keys()):
- if selected_val == option:
- selected = " SELECTED"
- else:
- selected = ""
+ ret = ""
+ for option in sorted(frontend.test_view_field_dict.keys()):
+ if selected_val == option:
+ selected = " SELECTED"
+ else:
+ selected = ""
- ret += '<OPTION VALUE="%s"%s>%s</OPTION>\n' % \
- (option, selected, option)
- return ret
+ ret += '<OPTION VALUE="%s"%s>%s</OPTION>\n' % \
+ (option, selected, option)
+ return ret
def map_kernel_base(kernel_name):
- ## insert <br> after each / in kernel name
- ## but spare consequtive //
- kernel_name = kernel_name.replace('/','/<br>')
- kernel_name = kernel_name.replace('/<br>/<br>','//')
- return kernel_name
+ ## insert <br> after each / in kernel name
+ ## but spare consequtive //
+ kernel_name = kernel_name.replace('/','/<br>')
+ kernel_name = kernel_name.replace('/<br>/<br>','//')
+ return kernel_name
def header_tuneup(field_name, header):
@@ -206,9 +206,9 @@ def header_tuneup(field_name, header):
# display. This contains the kernel base version plus the truncated
# names of all the patches,
#
-# 2.6.24-mm1 p112
-# +add-new-string-functions-
-# +x86-amd-thermal-interrupt
+# 2.6.24-mm1 p112
+# +add-new-string-functions-
+# +x86-amd-thermal-interrupt
#
# This mapping is produced when the first mapping is request, with
# a single query over the patches table; the result is then cached.
@@ -220,159 +220,159 @@ map_kernel_map = None
def map_kernel_init():
- fields = ['base', 'k.kernel_idx', 'name', 'url']
- map = {}
- for (base, idx, name, url) in db_obj.select(','.join(fields),
- 'kernels k,patches p', 'k.kernel_idx=p.kernel_idx'):
- match = re.match(r'.*(-mm[0-9]+|-git[0-9]+)\.(bz2|gz)$', url)
- if match:
- continue
+ fields = ['base', 'k.kernel_idx', 'name', 'url']
+ map = {}
+ for (base, idx, name, url) in db_obj.select(','.join(fields),
+ 'kernels k,patches p', 'k.kernel_idx=p.kernel_idx'):
+ match = re.match(r'.*(-mm[0-9]+|-git[0-9]+)\.(bz2|gz)$', url)
+ if match:
+ continue
- key = base + ' p%d' % (idx)
- if not map.has_key(key):
- map[key] = map_kernel_base(base) + ' p%d' % (idx)
- map[key] += ('<br>+<span title="' + name + '">' +
- name[0:25] + '</span>')
+ key = base + ' p%d' % (idx)
+ if not map.has_key(key):
+ map[key] = map_kernel_base(base) + ' p%d' % (idx)
+ map[key] += ('<br>+<span title="' + name + '">' +
+ name[0:25] + '</span>')
- return map
+ return map
def map_kernel(name):
- global map_kernel_map
- if map_kernel_map is None:
- map_kernel_map = map_kernel_init()
+ global map_kernel_map
+ if map_kernel_map is None:
+ map_kernel_map = map_kernel_init()
- if map_kernel_map.has_key(name):
- return map_kernel_map[name]
+ if map_kernel_map.has_key(name):
+ return map_kernel_map[name]
- return map_kernel_base(name.split(' ')[0])
+ return map_kernel_base(name.split(' ')[0])
field_map = {
- 'kernel':map_kernel
+ 'kernel':map_kernel
}
sql_wall_time = 0
def gen_matrix():
- where = None
- if condition_field.strip() != '':
- try:
- where = query_lib.parse_scrub_and_gen_condition(
- condition_field, frontend.test_view_field_dict)
- print "<!-- where clause: %s -->" % (where,)
- except:
- msg = "Unspecified error when parsing condition"
- return [[display.box(msg)]]
-
- wall_time_start = time.time()
- try:
- ## Unfortunately, we can not request reasons of failure always
- ## because it may result in an inflated size of data transfer
- ## (at the moment we fetch 500 bytes of reason descriptions into
- ## each cell )
- ## If 'status' in [row,column] then either width or height
- ## of the table <=7, hence table is not really 2D, and
- ## query_reason is relatively save.
- ## At the same time view when either rows or columns grouped
- ## by status is when users need reasons of failures the most.
-
- ## TO DO: implement [Show/Hide reasons] button or link in
- ## all views and make thorough performance testing
- test_data = frontend.get_matrix_data(db_obj, column, row, where,
- query_reasons = ('status' in [row,column])
- )
- global sql_wall_time
- sql_wall_time = time.time() - wall_time_start
-
- except db.MySQLTooManyRows, error:
- return [[display.box(str(error))]]
-
- for f_row in force_row:
- if not f_row in test_data.y_values:
- test_data.y_values.append(f_row)
- for f_column in force_column:
- if not f_column in test_data.x_values:
- test_data.x_values.append(f_column)
-
- if not test_data.y_values:
- msg = "There are no results for this query (yet?)."
- return [[display.box(msg)]]
-
- dict_url = {'columns': row,
- 'rows': column, 'condition': condition_field,
- 'title': title_field}
- link = '/tko/compose_query.cgi?' + urllib.urlencode(dict_url)
- header_row = [display.box("<center>(Flip Axis)</center>", link=link)]
-
- for x in test_data.x_values:
- dx = x
- if field_map.has_key(column):
- dx = field_map[column](x)
- x_header = header_tuneup(column, dx)
- link = construct_link(x, None)
- header_row.append(display.box(x_header,header=True,link=link))
-
- matrix = [header_row]
- # For each row, we are looping horizontally over the columns.
- for y in test_data.y_values:
- dy = y
- if field_map.has_key(row):
- dy = field_map[row](y)
- y_header = header_tuneup(row, dy)
- link = construct_link(None, y)
- cur_row = [display.box(y_header, header=True, link=link)]
- for x in test_data.x_values:
- ## next 2 lines: temporary, until non timestamped
- ## records are in the database
- if x==datetime.datetime(1970,1,1): x = None
- if y==datetime.datetime(1970,1,1): y = None
- try:
- box_data = test_data.data[x][y]
- except:
- cur_row.append(display.box(None, None,
- row_label=y, column_label=x))
- continue
- job_tag = test_data.data[x][y].job_tag
- if job_tag:
- link = construct_logs_link(x, y, job_tag)
- else:
- link = construct_link(x, y)
-
- apnd = display.status_precounted_box(db_obj, box_data,
- link, y, x)
- cur_row.append(apnd)
- matrix.append(cur_row)
- return matrix
+ where = None
+ if condition_field.strip() != '':
+ try:
+ where = query_lib.parse_scrub_and_gen_condition(
+ condition_field, frontend.test_view_field_dict)
+ print "<!-- where clause: %s -->" % (where,)
+ except:
+ msg = "Unspecified error when parsing condition"
+ return [[display.box(msg)]]
+
+ wall_time_start = time.time()
+ try:
+ ## Unfortunately, we can not request reasons of failure always
+ ## because it may result in an inflated size of data transfer
+ ## (at the moment we fetch 500 bytes of reason descriptions into
+ ## each cell )
+ ## If 'status' in [row,column] then either width or height
+ ## of the table <=7, hence table is not really 2D, and
+ ## query_reason is relatively save.
+ ## At the same time view when either rows or columns grouped
+ ## by status is when users need reasons of failures the most.
+
+ ## TO DO: implement [Show/Hide reasons] button or link in
+ ## all views and make thorough performance testing
+ test_data = frontend.get_matrix_data(db_obj, column, row, where,
+ query_reasons = ('status' in [row,column])
+ )
+ global sql_wall_time
+ sql_wall_time = time.time() - wall_time_start
+
+ except db.MySQLTooManyRows, error:
+ return [[display.box(str(error))]]
+
+ for f_row in force_row:
+ if not f_row in test_data.y_values:
+ test_data.y_values.append(f_row)
+ for f_column in force_column:
+ if not f_column in test_data.x_values:
+ test_data.x_values.append(f_column)
+
+ if not test_data.y_values:
+ msg = "There are no results for this query (yet?)."
+ return [[display.box(msg)]]
+
+ dict_url = {'columns': row,
+ 'rows': column, 'condition': condition_field,
+ 'title': title_field}
+ link = '/tko/compose_query.cgi?' + urllib.urlencode(dict_url)
+ header_row = [display.box("<center>(Flip Axis)</center>", link=link)]
+
+ for x in test_data.x_values:
+ dx = x
+ if field_map.has_key(column):
+ dx = field_map[column](x)
+ x_header = header_tuneup(column, dx)
+ link = construct_link(x, None)
+ header_row.append(display.box(x_header,header=True,link=link))
+
+ matrix = [header_row]
+ # For each row, we are looping horizontally over the columns.
+ for y in test_data.y_values:
+ dy = y
+ if field_map.has_key(row):
+ dy = field_map[row](y)
+ y_header = header_tuneup(row, dy)
+ link = construct_link(None, y)
+ cur_row = [display.box(y_header, header=True, link=link)]
+ for x in test_data.x_values:
+ ## next 2 lines: temporary, until non timestamped
+ ## records are in the database
+ if x==datetime.datetime(1970,1,1): x = None
+ if y==datetime.datetime(1970,1,1): y = None
+ try:
+ box_data = test_data.data[x][y]
+ except:
+ cur_row.append(display.box(None, None,
+ row_label=y, column_label=x))
+ continue
+ job_tag = test_data.data[x][y].job_tag
+ if job_tag:
+ link = construct_logs_link(x, y, job_tag)
+ else:
+ link = construct_link(x, y)
+
+ apnd = display.status_precounted_box(db_obj, box_data,
+ link, y, x)
+ cur_row.append(apnd)
+ matrix.append(cur_row)
+ return matrix
def main():
- if display.is_brief_mode():
- ## create main grid table only as provided by gen_matrix()
- display.print_table(gen_matrix())
- else:
- # create the actual page
- print '<html><head><title>'
- print 'Filtered Autotest Results'
- print '</title></head><body>'
- display.print_main_header()
- print html_header % (create_select_options(column),
- create_select_options(row),
- condition_field, title_field,
- ## history form
- column,row,condition_field)
- if title_field:
- print '<h1> %s </h1>' % (title_field)
- print display.color_keys_row()
- display.print_table(gen_matrix())
- print display.color_keys_row()
- total_wall_time = time.time() - total_wall_time_start
-
- perf_info = '<p style="font-size:x-small;">'
- perf_info += 'sql access wall time = %s secs,' % sql_wall_time
- perf_info += 'total wall time = %s secs</p>' % total_wall_time
- print perf_info
- print '</body></html>'
+ if display.is_brief_mode():
+ ## create main grid table only as provided by gen_matrix()
+ display.print_table(gen_matrix())
+ else:
+ # create the actual page
+ print '<html><head><title>'
+ print 'Filtered Autotest Results'
+ print '</title></head><body>'
+ display.print_main_header()
+ print html_header % (create_select_options(column),
+ create_select_options(row),
+ condition_field, title_field,
+ ## history form
+ column,row,condition_field)
+ if title_field:
+ print '<h1> %s </h1>' % (title_field)
+ print display.color_keys_row()
+ display.print_table(gen_matrix())
+ print display.color_keys_row()
+ total_wall_time = time.time() - total_wall_time_start
+
+ perf_info = '<p style="font-size:x-small;">'
+ perf_info += 'sql access wall time = %s secs,' % sql_wall_time
+ perf_info += 'total wall time = %s secs</p>' % total_wall_time
+ print perf_info
+ print '</body></html>'
main()
diff --git a/tko/create_db-postgres b/tko/create_db-postgres
index 3706b64f..0a34a1e6 100644
--- a/tko/create_db-postgres
+++ b/tko/create_db-postgres
@@ -4,116 +4,116 @@ DROP VIEW IF EXISTS test_view;
-- kernel versions
DROP SEQUENCE IF EXISTS kernel_idx_seq;
CREATE SEQUENCE kernel_idx_seq
- INCREMENT BY 1
- NO MAXVALUE
- NO MINVALUE
- CACHE 1;
+ INCREMENT BY 1
+ NO MAXVALUE
+ NO MINVALUE
+ CACHE 1;
DROP TABLE IF EXISTS kernels;
CREATE TABLE kernels (
kernel_idx int NOT NULL DEFAULT nextval('kernel_idx_seq'::TEXT) PRIMARY KEY,
-kernel_hash TEXT, -- Hash of base + all patches
-base TEXT, -- Base version without patches
-printable TEXT -- Full version with patches
+kernel_hash TEXT, -- Hash of base + all patches
+base TEXT, -- Base version without patches
+printable TEXT -- Full version with patches
);
-- main jobs table
DROP SEQUENCE IF EXISTS machine_idx_seq;
CREATE SEQUENCE machine_idx_seq
- INCREMENT BY 1
- NO MAXVALUE
- NO MINVALUE
- CACHE 1;
+ INCREMENT BY 1
+ NO MAXVALUE
+ NO MINVALUE
+ CACHE 1;
DROP TABLE IF EXISTS machines;
CREATE TABLE machines (
machine_idx int NOT NULL DEFAULT nextval('machine_idx_seq'::TEXT) PRIMARY KEY,
-hostname TEXT, -- hostname
-machine_group TEXT, -- group name
-owner TEXT -- owner name
+hostname TEXT, -- hostname
+machine_group TEXT, -- group name
+owner TEXT -- owner name
);
-- main jobs table
DROP SEQUENCE IF EXISTS job_idx_seq;
CREATE SEQUENCE job_idx_seq
- INCREMENT BY 1
- NO MAXVALUE
- NO MINVALUE
- CACHE 1;
+ INCREMENT BY 1
+ NO MAXVALUE
+ NO MINVALUE
+ CACHE 1;
DROP TABLE IF EXISTS jobs;
CREATE TABLE jobs (
job_idx int NOT NULL DEFAULT nextval('job_idx_seq'::TEXT) PRIMARY KEY,
-tag TEXT, -- job key
+tag TEXT, -- job key
label TEXT, -- job label assigned by user
-username TEXT, -- user name
-machine_idx int -- reference to machine table
+username TEXT, -- user name
+machine_idx int -- reference to machine table
);
-- One entry per patch used, anywhere
DROP TABLE IF EXISTS patches;
CREATE TABLE patches (
-kernel_idx INTEGER, -- index number
-name TEXT, -- short name
-url TEXT, -- full URL
+kernel_idx INTEGER, -- index number
+name TEXT, -- short name
+url TEXT, -- full URL
hash TEXT
);
-- test functional results
DROP SEQUENCE IF EXISTS test_idx_seq;
CREATE SEQUENCE test_idx_seq
- INCREMENT BY 1
- NO MAXVALUE
- NO MINVALUE
- CACHE 1;
+ INCREMENT BY 1
+ NO MAXVALUE
+ NO MINVALUE
+ CACHE 1;
DROP TABLE IF EXISTS tests;
CREATE TABLE tests (
test_idx int NOT NULL DEFAULT nextval('test_idx_seq'::TEXT) PRIMARY KEY,
-job_idx INTEGER, -- ref to job table
-test TEXT, -- name of test
-subdir TEXT, -- subdirectory name
-kernel_idx INTEGER, -- kernel test was AGAINST
-status INTEGER, -- test status
-reason TEXT, -- reason for test status
-machine_idx int -- reference to machine table
+job_idx INTEGER, -- ref to job table
+test TEXT, -- name of test
+subdir TEXT, -- subdirectory name
+kernel_idx INTEGER, -- kernel test was AGAINST
+status INTEGER, -- test status
+reason TEXT, -- reason for test status
+machine_idx int -- reference to machine table
);
-- test functional results
DROP TABLE IF EXISTS test_attributes;
CREATE TABLE test_attributes (
-test_idx INTEGER, -- ref to test table
-attribute TEXT, -- attribute name (e.g. 'throughput')
-value TEXT -- attribute value
+test_idx INTEGER, -- ref to test table
+attribute TEXT, -- attribute name (e.g. 'throughput')
+value TEXT -- attribute value
);
-- test functional results
DROP TABLE IF EXISTS iteration_result;
CREATE TABLE iteration_result(
-test_idx INTEGER, -- ref to test table
-iteration INTEGER, -- integer
-attribute TEXT, -- attribute name (e.g. 'throughput')
-value FLOAT -- attribute value (eg 700.1)
+test_idx INTEGER, -- ref to test table
+iteration INTEGER, -- integer
+attribute TEXT, -- attribute name (e.g. 'throughput')
+value FLOAT -- attribute value (eg 700.1)
);
-- status key
DROP SEQUENCE IF EXISTS status_idx_seq;
CREATE SEQUENCE status_idx_seq
- INCREMENT BY 1
- NO MAXVALUE
- NO MINVALUE
- CACHE 1;
+ INCREMENT BY 1
+ NO MAXVALUE
+ NO MINVALUE
+ CACHE 1;
DROP TABLE IF EXISTS status;
CREATE TABLE status (
status_idx int NOT NULL DEFAULT nextval('status_idx_seq'::TEXT) PRIMARY KEY,
-word TEXT -- status word
+word TEXT -- status word
);
-- BRRD syncronization
DROP TABLE IF EXISTS brrd_sync;
CREATE TABLE brrd_sync (
-test_idx INTEGER -- ref to test table
+test_idx INTEGER -- ref to test table
);
-- test_view (to make life easier for people trying to mine data)
diff --git a/tko/draw_graphs b/tko/draw_graphs
index 9ad4f89a..c9f36c74 100755
--- a/tko/draw_graphs
+++ b/tko/draw_graphs
@@ -15,34 +15,34 @@ my @data_files = grep /^plotdata\.[\w-]+\.[\w-]+$/, readdir PERFDIR;
closedir PERFDIR;
chdir ($perfdir);
-%axis_labels = ( 'kernbench' => 'Elapsed time (seconds)',
- 'dbench' => 'Throughput (MB/s)',
- 'tbench' => 'Throughput (MB/s)',
- 'reaim' => 'Max Jobs per Minute',
- );
+%axis_labels = ( 'kernbench' => 'Elapsed time (seconds)',
+ 'dbench' => 'Throughput (MB/s)',
+ 'tbench' => 'Throughput (MB/s)',
+ 'reaim' => 'Max Jobs per Minute',
+ );
-%plot_cols = ( 'kernbench' => '1:4:8',
- 'dbench' => '1:4:5',
- 'tbench' => '1:4:5',
- 'reaim' => '1:4:5',
- );
-
+%plot_cols = ( 'kernbench' => '1:4:8',
+ 'dbench' => '1:4:5',
+ 'tbench' => '1:4:5',
+ 'reaim' => '1:4:5',
+ );
+
foreach $data_file (@data_files) {
- $data_file =~ /^plotdata\.([\w-]+)\.([\w-]+)$/;
- ($test, $machine) = ($1, $2);
- print " === Analysing data file: $data_file $test $machine\n";
- push @machines, $machine;
- open DATAFILE, $data_file || die "Cannot open $data_file";
- while ($data = <DATAFILE>) {
- print "X " . $data;
- chomp $data;
- $data =~ s/^\d+\s+//; # get rid of count
- @data = split (/ /, $data);
- $version = $data[0];
- print "$test $version = $data\n";
- $results{$test}{$machine}{$version} = $data;
- push @versions, $version;
- }
+ $data_file =~ /^plotdata\.([\w-]+)\.([\w-]+)$/;
+ ($test, $machine) = ($1, $2);
+ print " === Analysing data file: $data_file $test $machine\n";
+ push @machines, $machine;
+ open DATAFILE, $data_file || die "Cannot open $data_file";
+ while ($data = <DATAFILE>) {
+ print "X " . $data;
+ chomp $data;
+ $data =~ s/^\d+\s+//; # get rid of count
+ @data = split (/ /, $data);
+ $version = $data[0];
+ print "$test $version = $data\n";
+ $results{$test}{$machine}{$version} = $data;
+ push @versions, $version;
+ }
}
@machines = list_uniq (@machines);
@@ -51,30 +51,30 @@ foreach $data_file (@data_files) {
@relevant = relevant_versions(@versions);
foreach $machine (@machines) {
- foreach $test (keys(%axis_labels)) {
- graph_plot($machine, "${test}.full.${machine}",
- $test, @versions);
- graph_plot($machine, "${test}.${machine}",
- $test, @relevant);
- }
+ foreach $test (keys(%axis_labels)) {
+ graph_plot($machine, "${test}.full.${machine}",
+ $test, @versions);
+ graph_plot($machine, "${test}.${machine}",
+ $test, @relevant);
+ }
}
sub graph_plot
{
- my ($machine, $filename, $test, @plot_versions) = @_;
- my $count = 0;
+ my ($machine, $filename, $test, @plot_versions) = @_;
+ my $count = 0;
- print " ----- test: $test machine: $machine $#plot_versions\n";
- open (DATA, "> $filename") || die "Cannot open data file $filename";
- foreach $version (@plot_versions) {
- my $results = $results{$test}{$machine}{$version};
- next unless ($results =~ /\S/);
- $count++;
- print "$count $version $results\n";
- print DATA "$count $results\n";
- }
- close (DATA);
- print " ----- \n";
- print `$plotgraph $filename '$axis_labels{$test}' '$plot_cols{$test}'`;
+ print " ----- test: $test machine: $machine $#plot_versions\n";
+ open (DATA, "> $filename") || die "Cannot open data file $filename";
+ foreach $version (@plot_versions) {
+ my $results = $results{$test}{$machine}{$version};
+ next unless ($results =~ /\S/);
+ $count++;
+ print "$count $version $results\n";
+ print DATA "$count $results\n";
+ }
+ close (DATA);
+ print " ----- \n";
+ print `$plotgraph $filename '$axis_labels{$test}' '$plot_cols{$test}'`;
}
diff --git a/tko/machine_aggr.cgi b/tko/machine_aggr.cgi
index 405ba156..4d3b97bb 100755
--- a/tko/machine_aggr.cgi
+++ b/tko/machine_aggr.cgi
@@ -15,102 +15,102 @@ rc_kernel = re.compile('2\.\d\.\d+(-smp-)[0-9]{3}\.[0-9]_rc[0-9]$')
db = db.db()
def main():
- form = cgi.FieldStorage()
-
- if form.has_key("benchmark_key"):
- benchmark_key = form["benchmark_key"].value
- # input is a list of benchmark:key values -- benchmark1:key1,...
- # this loop separates this out into two lists
- benchmark_idx = []
- key_idx = []
- for benchmark_key_pair in benchmark_key.split(','):
- (benchmark, key) = benchmark_key_pair.split(':')
- benchmark_idx.append(benchmark)
- key_idx.append(key)
- elif form.has_key("benchmark") and form.has_key("key"):
- benchmarks = form["benchmark"].value
- keys = form["key"].value
-
- benchmark_idx = benchmarks.split(',')
- key_idx = keys.split(',')
- else:
- # Ignore this for by setting benchmark_idx and key_idx to be
- # empty lists.
- benchmark_idx = []
- key_idx = []
-
- machine_idx = form["machine"].value
- kernel = form["kernel"].value
- if kernel == "released":
- kernel = released_kernel
- if kernel == "rc":
- kernel = rc_kernel
-
- machine = frontend.machine.select(db, {'hostname' : machine_idx})[0]
-
- #get the machine type from machinename
- for line in open('machines', 'r'):
- words = line.rstrip().split('\t')
- if words[0] == machine.hostname:
- title = '%s (%s)' % (words[-1], machine.hostname)
- else:
- title = '%s' % machine.hostname
-
- graph = plotgraph.gnuplot(title, 'Kernel', 'normalized throughput (%)', xsort = sort_kernels, size = "600,500")
- for benchmark, key in zip(benchmark_idx, key_idx):
- reference_value = None
- data = {}
- where = { 'subdir' : benchmark, 'machine_idx' : machine.idx , 'status' : 6}
-
- #select the corresponding kernels and sort by the release version
- kernels = set([])
- kernels_sort = set([])
- kernels_idx = set([])
- for test in frontend.test.select(db, where):
- if kernel == "all":
- kernels.add(test.kernel().printable)
- kernels_idx.add(str(test.kernel().idx))
-
- elif kernel == "experimental":
- if not re.match(released_kernel, test.kernel().printable)\
- and not re.match(rc_kernel, test.kernel().printable):
- kernels.add(test.kernel().printable)
- kernels_idx.add(str(test.kernel().idx))
- else:
- if re.match(kernel, test.kernel().printable):
- kernels.add(test.kernel().printable)
- kernels_idx.add(str(test.kernel().idx))
- kernels_sort = sort_kernels(list(kernels))
-
- #get the base value for each benchmark
- kernel_base = frontend.kernel.select(db, {'printable' : kernels_sort[0]})[0]
- for test in frontend.test.select(db, { 'subdir' : benchmark, 'machine_idx' : machine.idx, 'kernel_idx' : kernel_base.idx}):
- iterations = test.iterations()
- if iterations.has_key(key):
- reference_value = sum(iterations[key])/len(iterations[key])
- break
-
- wherein = { 'kernel_idx' : kernels_idx }
- for test in frontend.test.select(db, where, wherein):
- iterations = test.iterations()
- if iterations.has_key(key):
- # Maintain a list of every test result in data.
- # Initialize this list, if it does not exist.
- if not data.has_key(test.kernel().printable):
- data[test.kernel().printable] = list()
-
- if benchmark == "kernbench":
- results = [((reference_value / i - 1)*100) for i in iterations[key]]
- else:
- results = [((i / reference_value - 1)*100) for i in iterations[key]]
- data[test.kernel().printable].extend(results)
-
- graph.add_dataset(benchmark+' ( '+key+' ) ',data)
-
- graph.plot(cgi_header = True)
+ form = cgi.FieldStorage()
+
+ if form.has_key("benchmark_key"):
+ benchmark_key = form["benchmark_key"].value
+ # input is a list of benchmark:key values -- benchmark1:key1,...
+ # this loop separates this out into two lists
+ benchmark_idx = []
+ key_idx = []
+ for benchmark_key_pair in benchmark_key.split(','):
+ (benchmark, key) = benchmark_key_pair.split(':')
+ benchmark_idx.append(benchmark)
+ key_idx.append(key)
+ elif form.has_key("benchmark") and form.has_key("key"):
+ benchmarks = form["benchmark"].value
+ keys = form["key"].value
+
+ benchmark_idx = benchmarks.split(',')
+ key_idx = keys.split(',')
+ else:
+ # Ignore this for by setting benchmark_idx and key_idx to be
+ # empty lists.
+ benchmark_idx = []
+ key_idx = []
+
+ machine_idx = form["machine"].value
+ kernel = form["kernel"].value
+ if kernel == "released":
+ kernel = released_kernel
+ if kernel == "rc":
+ kernel = rc_kernel
+
+ machine = frontend.machine.select(db, {'hostname' : machine_idx})[0]
+
+ #get the machine type from machinename
+ for line in open('machines', 'r'):
+ words = line.rstrip().split('\t')
+ if words[0] == machine.hostname:
+ title = '%s (%s)' % (words[-1], machine.hostname)
+ else:
+ title = '%s' % machine.hostname
+
+ graph = plotgraph.gnuplot(title, 'Kernel', 'normalized throughput (%)', xsort = sort_kernels, size = "600,500")
+ for benchmark, key in zip(benchmark_idx, key_idx):
+ reference_value = None
+ data = {}
+ where = { 'subdir' : benchmark, 'machine_idx' : machine.idx , 'status' : 6}
+
+ #select the corresponding kernels and sort by the release version
+ kernels = set([])
+ kernels_sort = set([])
+ kernels_idx = set([])
+ for test in frontend.test.select(db, where):
+ if kernel == "all":
+ kernels.add(test.kernel().printable)
+ kernels_idx.add(str(test.kernel().idx))
+
+ elif kernel == "experimental":
+ if not re.match(released_kernel, test.kernel().printable)\
+ and not re.match(rc_kernel, test.kernel().printable):
+ kernels.add(test.kernel().printable)
+ kernels_idx.add(str(test.kernel().idx))
+ else:
+ if re.match(kernel, test.kernel().printable):
+ kernels.add(test.kernel().printable)
+ kernels_idx.add(str(test.kernel().idx))
+ kernels_sort = sort_kernels(list(kernels))
+
+ #get the base value for each benchmark
+ kernel_base = frontend.kernel.select(db, {'printable' : kernels_sort[0]})[0]
+ for test in frontend.test.select(db, { 'subdir' : benchmark, 'machine_idx' : machine.idx, 'kernel_idx' : kernel_base.idx}):
+ iterations = test.iterations()
+ if iterations.has_key(key):
+ reference_value = sum(iterations[key])/len(iterations[key])
+ break
+
+ wherein = { 'kernel_idx' : kernels_idx }
+ for test in frontend.test.select(db, where, wherein):
+ iterations = test.iterations()
+ if iterations.has_key(key):
+ # Maintain a list of every test result in data.
+ # Initialize this list, if it does not exist.
+ if not data.has_key(test.kernel().printable):
+ data[test.kernel().printable] = list()
+
+ if benchmark == "kernbench":
+ results = [((reference_value / i - 1)*100) for i in iterations[key]]
+ else:
+ results = [((i / reference_value - 1)*100) for i in iterations[key]]
+ data[test.kernel().printable].extend(results)
+
+ graph.add_dataset(benchmark+' ( '+key+' ) ',data)
+
+ graph.plot(cgi_header = True)
def sort_kernels(kernels):
- return sorted(kernels, key = kernel_versions.version_encode)
+ return sorted(kernels, key = kernel_versions.version_encode)
main()
diff --git a/tko/machine_benchmark.cgi b/tko/machine_benchmark.cgi
index 123211cf..45cf5098 100755
--- a/tko/machine_benchmark.cgi
+++ b/tko/machine_benchmark.cgi
@@ -16,56 +16,55 @@ benchmark_key = {
}
def main():
+ display.print_main_header()
+ ## it is table only; mouse hovering off
+ display.set_brief_mode()
- display.print_main_header()
- ## it is table only; mouse hovering off
- display.set_brief_mode()
+ rows = db.select('test', 'tests', {}, distinct = True)
+ benchmarks = []
+ for row in rows:
+ benchmark = row[0]
+ testname = re.sub(r'\..*', '', benchmark)
+ if not benchmark_key.has_key(testname):
+ continue
+ benchmarks.append(benchmark)
+ benchmarks = display.sort_tests(benchmarks)
- rows = db.select('test', 'tests', {}, distinct = True)
- benchmarks = []
- for row in rows:
- benchmark = row[0]
- testname = re.sub(r'\..*', '', benchmark)
- if not benchmark_key.has_key(testname):
- continue
- benchmarks.append(benchmark)
- benchmarks = display.sort_tests(benchmarks)
+ machine_idx = {}
+ benchmark_data = {}
+ for benchmark in benchmarks:
+ fields = 'machine_idx,machine_hostname,count(status_word)'
+ where = { 'subdir': benchmark, 'status_word' : 'GOOD' }
+ data = {}
+ for (idx, machine, count) in db.select(fields, 'test_view',
+ where, group_by = 'machine_hostname'):
+ data[machine] = count
+ machine_idx[machine] = idx
+ benchmark_data[benchmark] = data
- machine_idx = {}
- benchmark_data = {}
- for benchmark in benchmarks:
- fields = 'machine_idx,machine_hostname,count(status_word)'
- where = { 'subdir': benchmark, 'status_word' : 'GOOD' }
- data = {}
- for (idx, machine, count) in db.select(fields, 'test_view',
- where, group_by = 'machine_hostname'):
- data[machine] = count
- machine_idx[machine] = idx
- benchmark_data[benchmark] = data
+ print '<h1>Performance</h1>'
- print '<h1>Performance</h1>'
+ header_row = [ display.box('Benchmark', header=True) ]
+ header_row += [ display.box(re.sub(r'\.', '<br>', benchmark), header=True) for benchmark in benchmarks ]
- header_row = [ display.box('Benchmark', header=True) ]
- header_row += [ display.box(re.sub(r'\.', '<br>', benchmark), header=True) for benchmark in benchmarks ]
+ matrix = [header_row]
+ for machine in machine_idx:
+ row = [display.box(machine)]
+ for benchmark in benchmarks:
+ count = benchmark_data[benchmark].get(machine, None)
+ if not count:
+ row.append(display.box(None))
+ continue
+ key = benchmark_key[re.sub(r'\..*', '', benchmark)]
+ url = 'machine_test_attribute_graph.cgi'
+ url += '?machine=' + str(machine_idx[machine])
+ url += '&benchmark=' + benchmark
+ url += '&key=' + key
+ html = '<a href="%s">%d</a>' % (url, count)
+ row.append(display.box(html))
+ matrix.append(row)
+ matrix.append(header_row)
- matrix = [header_row]
- for machine in machine_idx:
- row = [display.box(machine)]
- for benchmark in benchmarks:
- count = benchmark_data[benchmark].get(machine, None)
- if not count:
- row.append(display.box(None))
- continue
- key = benchmark_key[re.sub(r'\..*', '', benchmark)]
- url = 'machine_test_attribute_graph.cgi'
- url += '?machine=' + str(machine_idx[machine])
- url += '&benchmark=' + benchmark
- url += '&key=' + key
- html = '<a href="%s">%d</a>' % (url, count)
- row.append(display.box(html))
- matrix.append(row)
- matrix.append(header_row)
-
- display.print_table(matrix)
+ display.print_table(matrix)
main()
diff --git a/tko/machine_test_attribute_graph.cgi b/tko/machine_test_attribute_graph.cgi
index 2d5a4779..e9b39fe0 100755
--- a/tko/machine_test_attribute_graph.cgi
+++ b/tko/machine_test_attribute_graph.cgi
@@ -13,29 +13,29 @@ from autotest_lib.client.bin import kernel_versions
db = db.db()
def main():
- form = cgi.FieldStorage()
- machine_idx = form["machine"].value
- benchmark = form["benchmark"].value
- key = form["key"].value
+ form = cgi.FieldStorage()
+ machine_idx = form["machine"].value
+ benchmark = form["benchmark"].value
+ key = form["key"].value
- machine = frontend.machine.select(db, {'machine_idx' : machine_idx})[0]
+ machine = frontend.machine.select(db, {'machine_idx' : machine_idx})[0]
- data = {}
- where = { 'subdir' : benchmark, 'machine_idx' : machine.idx }
- for test in frontend.test.select(db, where):
- iterations = test.iterations()
- if iterations.has_key(key):
- data[test.kernel().printable] = iterations[key]
+ data = {}
+ where = { 'subdir' : benchmark, 'machine_idx' : machine.idx }
+ for test in frontend.test.select(db, where):
+ iterations = test.iterations()
+ if iterations.has_key(key):
+ data[test.kernel().printable] = iterations[key]
- # for kernel in sort_kernels(data.keys()):
- # print "%s %s" % (kernel, str(data[kernel]))
- title = "%s on %s" % (benchmark, machine.hostname)
- graph = plotgraph.gnuplot(title, 'Kernel', key, xsort = sort_kernels)
- graph.add_dataset('all kernels', data)
- graph.plot(cgi_header = True)
+ # for kernel in sort_kernels(data.keys()):
+ # print "%s %s" % (kernel, str(data[kernel]))
+ title = "%s on %s" % (benchmark, machine.hostname)
+ graph = plotgraph.gnuplot(title, 'Kernel', key, xsort = sort_kernels)
+ graph.add_dataset('all kernels', data)
+ graph.plot(cgi_header = True)
def sort_kernels(kernels):
- return sorted(kernels, key = kernel_versions.version_encode)
+ return sorted(kernels, key = kernel_versions.version_encode)
main()
diff --git a/tko/perf_graph.cgi b/tko/perf_graph.cgi
index 6fcfc08f..9fddfa2a 100644
--- a/tko/perf_graph.cgi
+++ b/tko/perf_graph.cgi
@@ -17,12 +17,12 @@ selected_jobs = set()
use_all_jobs = True
benchmark_main_metrics = {
- 'dbench' : 'throughput',
- 'kernbench': '1000/elapsed',
- 'membench' : 'sweeps',
- 'tbench' : 'throughput',
- 'unixbench': 'score',
- } # keep sync'd with similar table in perf_graphs.cgi
+ 'dbench' : 'throughput',
+ 'kernbench': '1000/elapsed',
+ 'membench' : 'sweeps',
+ 'tbench' : 'throughput',
+ 'unixbench': 'score',
+ } # keep sync'd with similar table in perf_graphs.cgi
usual_platforms = ['Icarus', 'Argo', 'Ilium', 'Warp19', 'Warp18', 'Unicorn']
@@ -30,201 +30,201 @@ date_unknown = datetime.datetime(2999, 12, 31, 23, 59, 59)
def get_all_kernel_names():
- # lookup all kernel names once, and edit for graph axis
- nrows = db.cur.execute('select kernel_idx, printable from kernels')
- for idx, name in db.cur.fetchall():
- sortname = kernel_versions.version_encode(name)
- name = name.replace('-smp-', '-') # boring, in all names
- name = name.replace('2.6.18-2', '2') # reduce clutter
- kernel_names[idx] = name
- kernel_dates[name] = date_unknown
- kernel_sortkeys[name] = sortname
+ # lookup all kernel names once, and edit for graph axis
+ nrows = db.cur.execute('select kernel_idx, printable from kernels')
+ for idx, name in db.cur.fetchall():
+ sortname = kernel_versions.version_encode(name)
+ name = name.replace('-smp-', '-') # boring, in all names
+ name = name.replace('2.6.18-2', '2') # reduce clutter
+ kernel_names[idx] = name
+ kernel_dates[name] = date_unknown
+ kernel_sortkeys[name] = sortname
def kname_to_sortkey(kname):
- # cached version of version_encode(uneditted name)
- return kernel_sortkeys[kname]
+ # cached version of version_encode(uneditted name)
+ return kernel_sortkeys[kname]
def sort_kernels(kernels):
- return sorted(kernels, key=kname_to_sortkey)
+ return sorted(kernels, key=kname_to_sortkey)
def get_all_platform_info():
- # lookup all machine/platform info once
- global selected_machines
- machs = []
- cmd = 'select machine_idx, machine_group from machines'
- if selected_machine_names:
- # convert 'a,b,c' to '("a","b","c")'
- hnames = selected_machine_names.split(',')
- hnames = ['"%s"' % name for name in hnames]
- cmd += ' where hostname in (%s)' % ','.join(hnames)
- nrows = db.cur.execute(cmd)
- for idx, platform in db.cur.fetchall():
- # ignore machine variations in 'mobo_memsize_disks'
- machine_to_platform[idx] = platform.split('_', 2)[0]
- machs.append(str(idx))
- if selected_machine_names:
- selected_machines = ','.join(machs)
+ # lookup all machine/platform info once
+ global selected_machines
+ machs = []
+ cmd = 'select machine_idx, machine_group from machines'
+ if selected_machine_names:
+ # convert 'a,b,c' to '("a","b","c")'
+ hnames = selected_machine_names.split(',')
+ hnames = ['"%s"' % name for name in hnames]
+ cmd += ' where hostname in (%s)' % ','.join(hnames)
+ nrows = db.cur.execute(cmd)
+ for idx, platform in db.cur.fetchall():
+ # ignore machine variations in 'mobo_memsize_disks'
+ machine_to_platform[idx] = platform.split('_', 2)[0]
+ machs.append(str(idx))
+ if selected_machine_names:
+ selected_machines = ','.join(machs)
def get_selected_jobs():
- global use_all_jobs
- use_all_jobs = not( one_user or selected_machine_names )
- if use_all_jobs:
- return
- needs = []
- if selected_machine_names:
- needs.append('machine_idx in (%s)' % selected_machines)
- if one_user:
- needs.append('username = "%s"' % one_user)
- cmd = 'select job_idx from jobs where %s' % ' and '.join(needs)
- nrows = db.cur.execute(cmd)
- for row in db.cur.fetchall():
- job_idx = row[0]
- selected_jobs.add(job_idx)
+ global use_all_jobs
+ use_all_jobs = not( one_user or selected_machine_names )
+ if use_all_jobs:
+ return
+ needs = []
+ if selected_machine_names:
+ needs.append('machine_idx in (%s)' % selected_machines)
+ if one_user:
+ needs.append('username = "%s"' % one_user)
+ cmd = 'select job_idx from jobs where %s' % ' and '.join(needs)
+ nrows = db.cur.execute(cmd)
+ for row in db.cur.fetchall():
+ job_idx = row[0]
+ selected_jobs.add(job_idx)
def identify_relevent_tests(benchmark, platforms):
- # Collect idx's for all whole-machine test runs of benchmark
- # Also collect earliest test dates of kernels used
+ # Collect idx's for all whole-machine test runs of benchmark
+ # Also collect earliest test dates of kernels used
cmd = 'select status_idx from status where word = "GOOD"'
nrows = db.cur.execute(cmd)
good_status = db.cur.fetchall()[0][0]
- tests = {}
- cmd = ( 'select test_idx, test, kernel_idx, machine_idx,'
- ' finished_time, job_idx, status from tests'
- ' where test like "%s%%"' % benchmark )
- if selected_machine_names:
- cmd += ' and machine_idx in (%s)' % selected_machines
- nrows = db.cur.execute(cmd)
- for row in db.cur.fetchall():
- (test_idx, tname, kernel_idx,
- machine_idx, date, job_idx, status) = row
- kname = kernel_names[kernel_idx]
- if date:
- kernel_dates[kname] = min(kernel_dates[kname], date)
- # omit test runs from failed runs
- # and from unwanted platforms
- # and from partial-machine container tests
- # and from unselected machines or users
- platform = machine_to_platform[machine_idx]
- if ( status == good_status and
- platform in platforms and
- tname.find('.twoway') < 0 and
- (use_all_jobs or job_idx in selected_jobs) ):
- tests.setdefault(platform, {})
- tests[platform].setdefault(kname, [])
- tests[platform][kname].append(test_idx)
- return tests
+ tests = {}
+ cmd = ( 'select test_idx, test, kernel_idx, machine_idx,'
+ ' finished_time, job_idx, status from tests'
+ ' where test like "%s%%"' % benchmark )
+ if selected_machine_names:
+ cmd += ' and machine_idx in (%s)' % selected_machines
+ nrows = db.cur.execute(cmd)
+ for row in db.cur.fetchall():
+ (test_idx, tname, kernel_idx,
+ machine_idx, date, job_idx, status) = row
+ kname = kernel_names[kernel_idx]
+ if date:
+ kernel_dates[kname] = min(kernel_dates[kname], date)
+ # omit test runs from failed runs
+ # and from unwanted platforms
+ # and from partial-machine container tests
+ # and from unselected machines or users
+ platform = machine_to_platform[machine_idx]
+ if ( status == good_status and
+ platform in platforms and
+ tname.find('.twoway') < 0 and
+ (use_all_jobs or job_idx in selected_jobs) ):
+ tests.setdefault(platform, {})
+ tests[platform].setdefault(kname, [])
+ tests[platform][kname].append(test_idx)
+ return tests
def prune_old_kernels():
- # reduce clutter of graph and improve lookup times by pruning away
- # older experimental kernels and oldest release-candidate kernels
- today = datetime.datetime.today()
- exp_cutoff = today - datetime.timedelta(weeks=7)
- rc_cutoff = today - datetime.timedelta(weeks=18)
- kernels_forgotten = set()
- for kname in kernel_dates:
- date = kernel_dates[kname]
- if ( date == date_unknown or
- (date < exp_cutoff and not kernel_versions.is_release_candidate(kname)) or
- (date < rc_cutoff and not kernel_versions.is_released_kernel(kname) ) ):
- kernels_forgotten.add(kname)
- return kernels_forgotten
+ # reduce clutter of graph and improve lookup times by pruning away
+ # older experimental kernels and oldest release-candidate kernels
+ today = datetime.datetime.today()
+ exp_cutoff = today - datetime.timedelta(weeks=7)
+ rc_cutoff = today - datetime.timedelta(weeks=18)
+ kernels_forgotten = set()
+ for kname in kernel_dates:
+ date = kernel_dates[kname]
+ if ( date == date_unknown or
+ (date < exp_cutoff and not kernel_versions.is_release_candidate(kname)) or
+ (date < rc_cutoff and not kernel_versions.is_released_kernel(kname) ) ):
+ kernels_forgotten.add(kname)
+ return kernels_forgotten
def get_metric_at_point(tests, metric):
- nruns = len(tests)
- if metric == 'good_testrun_count':
- return [nruns]
-
- # take subsamples from largest sets of test runs
- min_sample_size = 100 # enough to approx mean & std dev
- decimator = int(nruns / min_sample_size)
- if decimator > 1:
- tests = [tests[i] for i in xrange(0, nruns, decimator)]
- # have min_sample_size <= len(tests) < min_sample_size*2
-
- invert_scale = None
- if metric.find('/') > 0:
- invert_scale, metric = metric.split('/', 1)
- invert_scale = float(invert_scale)
- # 1/ gives simple inversion of times to rates,
- # 1000/ scales Y axis labels to nice integers
-
- if not tests:
- return []
- tests = ','.join(str(idx) for idx in tests)
- cmd = ( 'select value from iteration_result'
- ' where test_idx in (%s) and attribute = "%s"'
- % ( tests, metric) )
- nrows = db.cur.execute(cmd)
- vals = [row[0] for row in db.cur.fetchall()]
- if invert_scale:
- vals = [invert_scale/v for v in vals]
- return vals
+ nruns = len(tests)
+ if metric == 'good_testrun_count':
+ return [nruns]
+
+ # take subsamples from largest sets of test runs
+ min_sample_size = 100 # enough to approx mean & std dev
+ decimator = int(nruns / min_sample_size)
+ if decimator > 1:
+ tests = [tests[i] for i in xrange(0, nruns, decimator)]
+ # have min_sample_size <= len(tests) < min_sample_size*2
+
+ invert_scale = None
+ if metric.find('/') > 0:
+ invert_scale, metric = metric.split('/', 1)
+ invert_scale = float(invert_scale)
+ # 1/ gives simple inversion of times to rates,
+ # 1000/ scales Y axis labels to nice integers
+
+ if not tests:
+ return []
+ tests = ','.join(str(idx) for idx in tests)
+ cmd = ( 'select value from iteration_result'
+ ' where test_idx in (%s) and attribute = "%s"'
+ % ( tests, metric) )
+ nrows = db.cur.execute(cmd)
+ vals = [row[0] for row in db.cur.fetchall()]
+ if invert_scale:
+ vals = [invert_scale/v for v in vals]
+ return vals
def collect_test_results(possible_tests, kernels_forgotten, metric):
- # collect selected metric of all test results for covered
- # combo's of platform and kernel
- data = {}
- for platform in possible_tests:
- for kname in possible_tests[platform]:
- if kname in kernels_forgotten:
- continue
- vals = get_metric_at_point(
- possible_tests[platform][kname], metric)
- if vals:
- data.setdefault(platform, {})
- data[platform].setdefault(kname, [])
- data[platform][kname] += vals
- return data
+ # collect selected metric of all test results for covered
+ # combo's of platform and kernel
+ data = {}
+ for platform in possible_tests:
+ for kname in possible_tests[platform]:
+ if kname in kernels_forgotten:
+ continue
+ vals = get_metric_at_point(
+ possible_tests[platform][kname], metric)
+ if vals:
+ data.setdefault(platform, {})
+ data[platform].setdefault(kname, [])
+ data[platform][kname] += vals
+ return data
def one_performance_graph(benchmark, metric=None, one_platform=None):
- # generate image of graph of one benchmark's performance over
- # most kernels (X axis) and all machines (one plotline per type)
- if one_platform:
- platforms = [one_platform]
- else:
- platforms = usual_platforms
- if not benchmark:
- benchmark = 'dbench'
- if not metric:
- metric = benchmark_main_metrics[benchmark]
-
- get_all_kernel_names()
- get_all_platform_info()
- get_selected_jobs()
- possible_tests = identify_relevent_tests(benchmark, platforms)
- kernels_forgotten = prune_old_kernels()
- data = collect_test_results(possible_tests, kernels_forgotten, metric)
-
- if data.keys():
- title = benchmark.capitalize()
- if one_user:
- title += " On %s's Runs" % one_user
- if selected_machine_names:
- title += " On Selected Machines " + selected_machine_names
- else:
- title += " Over All Machines"
- graph = plotgraph.gnuplot(title, 'Kernels',
- metric.capitalize(), xsort=sort_kernels,
- size='1000,600' )
- for platform in platforms:
- if platform in data:
- graph.add_dataset(platform, data[platform])
- graph.plot(cgi_header = True)
- else:
- # graph has no data; avoid plotgraph and Apache complaints
- print "Content-type: image/gif\n"
- print file("blank.gif", "rb").read()
+ # generate image of graph of one benchmark's performance over
+ # most kernels (X axis) and all machines (one plotline per type)
+ if one_platform:
+ platforms = [one_platform]
+ else:
+ platforms = usual_platforms
+ if not benchmark:
+ benchmark = 'dbench'
+ if not metric:
+ metric = benchmark_main_metrics[benchmark]
+
+ get_all_kernel_names()
+ get_all_platform_info()
+ get_selected_jobs()
+ possible_tests = identify_relevent_tests(benchmark, platforms)
+ kernels_forgotten = prune_old_kernels()
+ data = collect_test_results(possible_tests, kernels_forgotten, metric)
+
+ if data.keys():
+ title = benchmark.capitalize()
+ if one_user:
+ title += " On %s's Runs" % one_user
+ if selected_machine_names:
+ title += " On Selected Machines " + selected_machine_names
+ else:
+ title += " Over All Machines"
+ graph = plotgraph.gnuplot(title, 'Kernels',
+ metric.capitalize(), xsort=sort_kernels,
+ size='1000,600' )
+ for platform in platforms:
+ if platform in data:
+ graph.add_dataset(platform, data[platform])
+ graph.plot(cgi_header = True)
+ else:
+ # graph has no data; avoid plotgraph and Apache complaints
+ print "Content-type: image/gif\n"
+ print file("blank.gif", "rb").read()
cgitb.enable()
@@ -235,7 +235,7 @@ metric = form.getvalue('metric', None)
one_user = form.getvalue('user', '')
selected_machine_names = form.getvalue('machines', '')
if selected_machine_names == 'yinghans':
- selected_machine_names = 'ipbj8,prik6,bdcz12'
+ selected_machine_names = 'ipbj8,prik6,bdcz12'
db = db.db()
one_performance_graph(benchmark, metric, one_platform)
diff --git a/tko/perf_graphs.cgi b/tko/perf_graphs.cgi
index 45954eab..002a6032 100644
--- a/tko/perf_graphs.cgi
+++ b/tko/perf_graphs.cgi
@@ -4,46 +4,46 @@
import cgi, cgitb
benchmark_main_metrics = {
- 'dbench' : 'throughput',
- 'kernbench': '1000/elapsed',
- 'membench' : 'sweeps',
- 'tbench' : 'throughput',
- 'unixbench': 'score',
- } # keep sync'd with similar table in perf_graph.cgi
+ 'dbench' : 'throughput',
+ 'kernbench': '1000/elapsed',
+ 'membench' : 'sweeps',
+ 'tbench' : 'throughput',
+ 'unixbench': 'score',
+ } # keep sync'd with similar table in perf_graph.cgi
def multiple_graphs_page():
- # Generate html for web page showing graphs for all benchmarks
- # Each graph image is formed by an invocation of 2nd cgi file
- print "Content-Type: text/html\n"
- print "<html><body>"
- print "<h3> All kernel performance benchmark runs"
- if one_user:
- print "by user", one_user
- if machine_names:
- print ", on selected"
- else:
- print ", on all test"
- print "machines </h3>"
- if one_user != 'yinghan':
- print "Uncontrolled results!"
- print "Not using just the controlled benchmarking machines."
- print "All variants of a platform type (mem size, # disks, etc) are"
- print "lumped together."
- print "Non-default test args may have been applied in some cases."
- print "No-container cases and whole-machine single-container cases"
- print "are lumped together."
- for bench in benchmark_main_metrics:
- print "<h2>", bench.capitalize(), ": </h2>"
- args = ['benchmark=%s' % bench]
- if one_user:
- args.append('user=%s' % one_user)
- if one_platform:
- args.append('platform=%s' % one_platform)
- if machine_names:
- args.append('machines=%s' % machine_names)
- print "<img src='perf_graph.cgi?%s'>" % '&'.join(args)
- print "</body></html>"
+ # Generate html for web page showing graphs for all benchmarks
+ # Each graph image is formed by an invocation of 2nd cgi file
+ print "Content-Type: text/html\n"
+ print "<html><body>"
+ print "<h3> All kernel performance benchmark runs"
+ if one_user:
+ print "by user", one_user
+ if machine_names:
+ print ", on selected"
+ else:
+ print ", on all test"
+ print "machines </h3>"
+ if one_user != 'yinghan':
+ print "Uncontrolled results!"
+ print "Not using just the controlled benchmarking machines."
+ print "All variants of a platform type (mem size, # disks, etc) are"
+ print "lumped together."
+ print "Non-default test args may have been applied in some cases."
+ print "No-container cases and whole-machine single-container cases"
+ print "are lumped together."
+ for bench in benchmark_main_metrics:
+ print "<h2>", bench.capitalize(), ": </h2>"
+ args = ['benchmark=%s' % bench]
+ if one_user:
+ args.append('user=%s' % one_user)
+ if one_platform:
+ args.append('platform=%s' % one_platform)
+ if machine_names:
+ args.append('machines=%s' % machine_names)
+ print "<img src='perf_graph.cgi?%s'>" % '&'.join(args)
+ print "</body></html>"
cgitb.enable()
diff --git a/tko/plotgraph b/tko/plotgraph
index ecd33bdd..21de62bc 100755
--- a/tko/plotgraph
+++ b/tko/plotgraph
@@ -14,64 +14,64 @@ plotgraph($file, $y_label, $columns, $title);
# First column must be kernel count, second kernel version, third is job number
# $columns spec is 1:y-value:y-stddev
sub plotgraph {
- my ($file, $y_label, $columns, $title) = @_;
- my @xtics;
+ my ($file, $y_label, $columns, $title) = @_;
+ my @xtics;
- if (!$title) {
- $title = $file;
- $title =~ s#.*/##;
- }
- open (INDATA, $file);
- open (DATA_MAIN, "> ${file}.main");
- open (DATA_MM, "> ${file}.mm");
- open (DATA_OTHER, "> ${file}.other");
- my $count;
- while ($data = <INDATA>) {
- chomp $data;
- ($count, my $version, my $job) = split (/\s+/, $data);
- $short_ver = $version;
- $short_ver =~ s/\+.*/+p$job/;
- push @xtics, "\"$short_ver\" $count";
- if ($version =~ /^2\.\d+\.\d+(\.\d+|-rc\d+)?(-git\d+)?$/) {
- print DATA_MAIN "$data\n";
- $plot_main = "\"${file}.main\" using $columns title \"mainline\"";
- } elsif ($version =~ /^2\.\d+\.\d+(-rc\d+)?-mm\d+$/) {
- print DATA_MM "$data\n";
- $plot_mm = "\"${file}.mm\" using $columns title \"-mm\"";
- } else {
- print DATA_OTHER "$data\n";
- $plot_other = "\"${file}.other\" using $columns title \"other\"";
- }
- }
- close (INDATA);
- close (DATA_MAIN);
- close (DATA_MM);
- close (DATA_OTHER);
+ if (!$title) {
+ $title = $file;
+ $title =~ s#.*/##;
+ }
+ open (INDATA, $file);
+ open (DATA_MAIN, "> ${file}.main");
+ open (DATA_MM, "> ${file}.mm");
+ open (DATA_OTHER, "> ${file}.other");
+ my $count;
+ while ($data = <INDATA>) {
+ chomp $data;
+ ($count, my $version, my $job) = split (/\s+/, $data);
+ $short_ver = $version;
+ $short_ver =~ s/\+.*/+p$job/;
+ push @xtics, "\"$short_ver\" $count";
+ if ($version =~ /^2\.\d+\.\d+(\.\d+|-rc\d+)?(-git\d+)?$/) {
+ print DATA_MAIN "$data\n";
+ $plot_main = "\"${file}.main\" using $columns title \"mainline\"";
+ } elsif ($version =~ /^2\.\d+\.\d+(-rc\d+)?-mm\d+$/) {
+ print DATA_MM "$data\n";
+ $plot_mm = "\"${file}.mm\" using $columns title \"-mm\"";
+ } else {
+ print DATA_OTHER "$data\n";
+ $plot_other = "\"${file}.other\" using $columns title \"other\"";
+ }
+ }
+ close (INDATA);
+ close (DATA_MAIN);
+ close (DATA_MM);
+ close (DATA_OTHER);
- die unless ($count > 0);
- $x_res = $count * 12;
- $y_res = 900;
- push @plots, $plot_main if ($plot_main);
- push @plots, $plot_mm if ($plot_mm);
- push @plots, $plot_other if ($plot_other);
- $plots = join (',', @plots);
+ die unless ($count > 0);
+ $x_res = $count * 12;
+ $y_res = 900;
+ push @plots, $plot_main if ($plot_main);
+ push @plots, $plot_mm if ($plot_mm);
+ push @plots, $plot_other if ($plot_other);
+ $plots = join (',', @plots);
- open (GNUPLOT, "> ${file}.gnuplot");
- # print "MACHINE: $machine\n";
- print GNUPLOT "set terminal png size $x_res,$y_res\n";
- print GNUPLOT "set key below\n";
- print GNUPLOT "set title \"$title\"\n";
- print GNUPLOT "set xlabel \"Kernel\"\n";
- print GNUPLOT "set ylabel \"${y_label}\"\n";
- print GNUPLOT "set output \"${file}.png\"\n";
- print GNUPLOT "set style data yerrorlines\n";
- print GNUPLOT "set grid\n";
- $xtics = join ',', @xtics;
- print GNUPLOT "\nset xtics rotate (${xtics})\n\n";
- print GNUPLOT "plot $plots\n";
- print GNUPLOT "replot";
- close (GNUPLOT);
- `/usr/bin/gnuplot ${file}.gnuplot`;
- `chmod 644 ${file}.gnuplot`;
- `chmod 644 ${file}.png`;
+ open (GNUPLOT, "> ${file}.gnuplot");
+ # print "MACHINE: $machine\n";
+ print GNUPLOT "set terminal png size $x_res,$y_res\n";
+ print GNUPLOT "set key below\n";
+ print GNUPLOT "set title \"$title\"\n";
+ print GNUPLOT "set xlabel \"Kernel\"\n";
+ print GNUPLOT "set ylabel \"${y_label}\"\n";
+ print GNUPLOT "set output \"${file}.png\"\n";
+ print GNUPLOT "set style data yerrorlines\n";
+ print GNUPLOT "set grid\n";
+ $xtics = join ',', @xtics;
+ print GNUPLOT "\nset xtics rotate (${xtics})\n\n";
+ print GNUPLOT "plot $plots\n";
+ print GNUPLOT "replot";
+ close (GNUPLOT);
+ `/usr/bin/gnuplot ${file}.gnuplot`;
+ `chmod 644 ${file}.gnuplot`;
+ `chmod 644 ${file}.png`;
}
diff --git a/tko/query_history.cgi b/tko/query_history.cgi
index b947140e..85052937 100755
--- a/tko/query_history.cgi
+++ b/tko/query_history.cgi
@@ -8,56 +8,56 @@ uid = unique_cookie.unique_id('tko_history')
def body():
- db_obj = db.db()
- condition = "uid='%s'" % uid
- where = (condition,[])
- try:
- rows = db_obj.select("time_created,user_comment,url",
- "query_history", where)
- except MySQLdb.ProgrammingError, err:
- print err
- rows = ()
- print '<table border="1">'
- ## Display history starting with the most recent queries
- for row in reversed(rows):
- (time_created, user_comment, tko_url) = row
- print '<tr>'
- print '<td>&nbsp;%s&nbsp;</td>' % time_created
- print '<td>&nbsp;%s&nbsp;</td>' % user_comment
- dict_url = {'delete':time_created}
- link = 'save_query.cgi?' + urllib.urlencode(dict_url)
- print '<td>&nbsp;<a href="%s">Delete</a>&nbsp;</td>' % link
- print '<td><a href="%s">%s</a></td>' % (tko_url, tko_url)
- print '</tr>'
- print '</table>'
+ db_obj = db.db()
+ condition = "uid='%s'" % uid
+ where = (condition,[])
+ try:
+ rows = db_obj.select("time_created,user_comment,url",
+ "query_history", where)
+ except MySQLdb.ProgrammingError, err:
+ print err
+ rows = ()
+ print '<table border="1">'
+ ## Display history starting with the most recent queries
+ for row in reversed(rows):
+ (time_created, user_comment, tko_url) = row
+ print '<tr>'
+ print '<td>&nbsp;%s&nbsp;</td>' % time_created
+ print '<td>&nbsp;%s&nbsp;</td>' % user_comment
+ dict_url = {'delete':time_created}
+ link = 'save_query.cgi?' + urllib.urlencode(dict_url)
+ print '<td>&nbsp;<a href="%s">Delete</a>&nbsp;</td>' % link
+ print '<td><a href="%s">%s</a></td>' % (tko_url, tko_url)
+ print '</tr>'
+ print '</table>'
- last_recorded_query = ''
- if rows:
- (time_created, user_comment, last_recorded_query) = rows[-1]
- ## Link "Back to Autotest" on query history page
- back_link = os.environ.get('HTTP_REFERER')
- ## possible complications:
- ## a) HTTP_REFERER = None
- ## b) HTTP_REFERER is save_query page
- ## In both cases we still want to get to tko results.
- ## primary fall back: link to last_recorded_query
- ## secondary fall back: link to opening tko page
- if not "compose_query.cgi" in str(back_link):
- back_link = last_recorded_query
- if not back_link: ## e.g. history is empty and/or HTTP_REFERER unknown
- back_link = "compose_query.cgi"
- print '<br><a href="%s">Autotest Results</a><br>' % back_link
+ last_recorded_query = ''
+ if rows:
+ (time_created, user_comment, last_recorded_query) = rows[-1]
+ ## Link "Back to Autotest" on query history page
+ back_link = os.environ.get('HTTP_REFERER')
+ ## possible complications:
+ ## a) HTTP_REFERER = None
+ ## b) HTTP_REFERER is save_query page
+ ## In both cases we still want to get to tko results.
+ ## primary fall back: link to last_recorded_query
+ ## secondary fall back: link to opening tko page
+ if not "compose_query.cgi" in str(back_link):
+ back_link = last_recorded_query
+ if not back_link: ## e.g. history is empty and/or HTTP_REFERER unknown
+ back_link = "compose_query.cgi"
+ print '<br><a href="%s">Autotest Results</a><br>' % back_link
def main():
- print "Content-type: text/html\n"
- print
- # create the actual page
- print '<html><head><title>'
- print 'History of TKO usage'
- print '</title></head><body>'
- body()
- print '</body></html>'
+ print "Content-type: text/html\n"
+ print
+ # create the actual page
+ print '<html><head><title>'
+ print 'History of TKO usage'
+ print '</title></head><body>'
+ body()
+ print '</body></html>'
main()
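
body() in query_history.cgi picks its "Autotest Results" link by a fallback chain: HTTP_REFERER is used only if it already points at compose_query.cgi; otherwise the most recently saved query wins, and an empty history lands on the bare results page. A standalone sketch of that chain, with results_link() as a made-up name:

import os

def results_link(last_recorded_query):
    # The referer is usable only when it already points at the results
    # page (it may also be None, or the save_query page).
    back_link = os.environ.get('HTTP_REFERER')
    if 'compose_query.cgi' not in str(back_link):
        back_link = last_recorded_query   # newest saved query, may be ''
    # Empty history plus unusable referer: open the results page fresh.
    return back_link or 'compose_query.cgi'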
diff --git a/tko/save_query.cgi b/tko/save_query.cgi
index 78d65ea9..04db50bc 100755
--- a/tko/save_query.cgi
+++ b/tko/save_query.cgi
@@ -6,75 +6,75 @@ import db, unique_cookie
## setting script globals
form = cgi.FieldStorage()
if 'label' in form.keys():
- comment = form['label'].value
+ comment = form['label'].value
else:
- comment = ''
+ comment = ''
dict_url = {}
for key in form.keys():
- dict_url[key] = form[key].value
+ dict_url[key] = form[key].value
tm = time.asctime()
uid = unique_cookie.unique_id('tko_history')
HTTP_REFERER = os.environ.get('HTTP_REFERER')
if HTTP_REFERER is None:
- ## fall back strategy for proxy connection
- ## substitute relative url
- HTTP_REFERER = 'compose_query.cgi?' + urllib.urlencode(dict_url)
+ ## fall back strategy for proxy connection
+ ## substitute relative url
+ HTTP_REFERER = 'compose_query.cgi?' + urllib.urlencode(dict_url)
class QueryHistoryError(Exception):
- pass
+ pass
def log_query():
- db_obj = db.db()
- data_to_insert = {'uid':uid, 'time_created':tm,
- 'user_comment':comment, 'url':HTTP_REFERER }
- try:
- db_obj.insert('query_history', data_to_insert)
- except:
- raise QueryHistoryError("Could not save query")
+ db_obj = db.db()
+ data_to_insert = {'uid':uid, 'time_created':tm,
+ 'user_comment':comment, 'url':HTTP_REFERER }
+ try:
+ db_obj.insert('query_history', data_to_insert)
+ except:
+ raise QueryHistoryError("Could not save query")
def delete_query(time_stamp):
- ## query is marked for delete by time stamp
- db_obj = db.db()
- data_to_delete = {'time_created':time_stamp}
- try:
- db_obj.delete('query_history', data_to_delete)
- except Exception:
- raise QueryHistoryError("Could not delete query")
-
+ ## query is marked for delete by time stamp
+ db_obj = db.db()
+ data_to_delete = {'time_created':time_stamp}
+ try:
+ db_obj.delete('query_history', data_to_delete)
+ except Exception:
+ raise QueryHistoryError("Could not delete query")
+
def body():
- if not 'delete' in dict_url.keys():
- log_query()
- print '<b>%s</b><br><br>' % "Your query has been saved"
- print 'time: %s<br>' % tm
- print 'comments: %s<br><br>' % comment
- else:
- ## key 'delete' has arg value of time_stamp
- ## which identifies the query to be deleted
- time_stamp = dict_url['delete']
- delete_query(time_stamp)
- print '<b>%s</b><br><br>' % "Your query has been deleted"
-
- print '<a href="query_history.cgi">View saved queries</a>&nbsp;&nbsp;'
- print '<br><br>'
- if not 'delete' in dict_url.keys():
- print '<a href="%s">Back to Autotest</a><br>' % HTTP_REFERER
- else:
- print '<a href="compose_query.cgi">Autotest Results</a><br>'
+ if not 'delete' in dict_url.keys():
+ log_query()
+ print '<b>%s</b><br><br>' % "Your query has been saved"
+ print 'time: %s<br>' % tm
+ print 'comments: %s<br><br>' % comment
+ else:
+ ## key 'delete' has arg value of time_stamp
+ ## which identifies the query to be deleted
+ time_stamp = dict_url['delete']
+ delete_query(time_stamp)
+ print '<b>%s</b><br><br>' % "Your query has been deleted"
+
+ print '<a href="query_history.cgi">View saved queries</a>&nbsp;&nbsp;'
+ print '<br><br>'
+ if not 'delete' in dict_url.keys():
+ print '<a href="%s">Back to Autotest</a><br>' % HTTP_REFERER
+ else:
+ print '<a href="compose_query.cgi">Autotest Results</a><br>'
def main():
- print "Content-type: text/html\n"
- print '<html><head><title>'
- print '</title></head>'
- print '<body>'
- body()
- print '</body>'
- print '</html>'
+ print "Content-type: text/html\n"
+ print '<html><head><title>'
+ print '</title></head>'
+ print '<body>'
+ body()
+ print '</body>'
+ print '</html>'
main()
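
save_query.cgi drives both operations through the db wrapper calls visible above — insert() and delete() each take a table name plus a column-to-value dict — and a saved row is later addressed solely by its time_created stamp. An illustrative round-trip under those assumptions (db_obj, my_uid, and both helper names are stand-ins, not tko code):

import time

def save_query(db_obj, my_uid, comment, url):
    stamp = time.asctime()
    # Same payload shape as log_query() above.
    db_obj.insert('query_history', {'uid': my_uid,
                                    'time_created': stamp,
                                    'user_comment': comment,
                                    'url': url})
    return stamp   # the stamp is the only key drop_query() matches on

def drop_query(db_obj, stamp):
    # Deletion filters on time_created alone (no uid), so two queries
    # saved in the same second, by any user, would both be removed.
    db_obj.delete('query_history', {'time_created': stamp})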
diff --git a/tko/test.cgi b/tko/test.cgi
index a1597ae4..4b4f6688 100755
--- a/tko/test.cgi
+++ b/tko/test.cgi
@@ -15,53 +15,53 @@ from autotest_lib.tko import db, display, frontend
db = db.db()
def main():
- display.print_main_header()
-
- form = cgi.FieldStorage()
+ display.print_main_header()
+
+ form = cgi.FieldStorage()
- if form.has_key('sql'):
- sql = form['sql'].value
+ if form.has_key('sql'):
+ sql = form['sql'].value
- if form.has_key('values'):
- values = [val for val in form['values'].value.split(',')]
+ if form.has_key('values'):
+ values = [val for val in form['values'].value.split(',')]
- if not sql:
- return
- if not values:
- return
+ if not sql:
+ return
+ if not values:
+ return
- tests = frontend.test.select_sql(db, sql, values)
+ tests = frontend.test.select_sql(db, sql, values)
- # get the list of tests/machines to populate the row and column header.
- testname = [test.testname for test in tests]
- machine_idx = [test.machine_idx for test in tests]
+ # get the list of tests/machines to populate the row and column header.
+ testname = [test.testname for test in tests]
+ machine_idx = [test.machine_idx for test in tests]
- # We dont want repetitions in the table row/column headers,
- # so eliminate the dups.
- uniq_test = list(set(testname))
- uniq_machine_idx = list(set(machine_idx))
+ # We dont want repetitions in the table row/column headers,
+ # so eliminate the dups.
+ uniq_test = list(set(testname))
+ uniq_machine_idx = list(set(machine_idx))
- header_row = [ display.box('', header = True) ]
- for test_name in uniq_test:
- header_row.append(display.box(test_name, header=True))
- matrix = [header_row]
- for machine in uniq_machine_idx:
- mach_name = db.select_sql('hostname', 'machines',
- ' where machine_idx=%s', [str(machine)])
- row = [display.box(mach_name[0][0])]
- for test_name in uniq_test:
- testlist = [test for test in tests
- if test.machine_idx == machine
- and test.testname == test_name]
- # url link to the first test.
- # TODO: provide another level to show the different
- # test results.
- link = None
- if testlist and testlist[0]:
- link = testlist[0].url
- box = display.status_count_box(db, testlist, link=link)
- row.append(box)
- matrix.append(row)
- display.print_table(matrix)
+ header_row = [ display.box('', header = True) ]
+ for test_name in uniq_test:
+ header_row.append(display.box(test_name, header=True))
+ matrix = [header_row]
+ for machine in uniq_machine_idx:
+ mach_name = db.select_sql('hostname', 'machines',
+ ' where machine_idx=%s', [str(machine)])
+ row = [display.box(mach_name[0][0])]
+ for test_name in uniq_test:
+ testlist = [test for test in tests
+ if test.machine_idx == machine
+ and test.testname == test_name]
+ # url link to the first test.
+ # TODO: provide another level to show the different
+ # test results.
+ link = None
+ if testlist and testlist[0]:
+ link = testlist[0].url
+ box = display.status_count_box(db, testlist, link=link)
+ row.append(box)
+ matrix.append(row)
+ display.print_table(matrix)
main()
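
test.cgi reduces the selected tests to unique test names and machine indices with set(), then lays them out as a matrix: a header row of test names, and one row per machine whose cells aggregate that machine's results for each test. A pure-Python sketch of the layout, with made-up sample statuses standing in for the display.box()/status_count_box() cells:

def build_matrix(results):
    # results: (machine, test, status) triples; dedupe both axes the
    # way test.cgi does before building its header and per-machine rows.
    tests = sorted(set(t for (m, t, s) in results))
    machines = sorted(set(m for (m, t, s) in results))
    matrix = [[''] + tests]                      # header row
    for machine in machines:
        row = [machine]
        for test in tests:
            cells = [s for (m, t, s) in results
                     if m == machine and t == test]
            row.append(','.join(cells) or '-')   # real code: a status box
        matrix.append(row)
    return matrix

sample = [('host1', 'kernbench', 'GOOD'), ('host1', 'dbench', 'GOOD'),
          ('host2', 'kernbench', 'FAIL')]
for row in build_matrix(sample):
    print(row)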