author     Milosz Wasilewski <milosz.wasilewski@linaro.org>   2014-03-25 13:15:28 +0000
committer  Milosz Wasilewski <milosz.wasilewski@linaro.org>   2014-03-25 13:15:28 +0000
commit     c4b87999efcfc9d7de0bea0bf2355dac7a9349d6
tree       62841c42b51f11bce461db514a975159cd7ec655
parent     53491ed2c61161f99980b45d06ffc3f7b1cc17ac
Postprocessing: removed the list of possible metrics
All metrics collected in the database are now presented in the LAVA
results. This was done because the 3rd-party benchmarks produce
metrics with no standard names. As a bonus, temperature data is now
logged in LAVA.
Signed-off-by: Milosz Wasilewski <milosz.wasilewski@linaro.org>
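The approach the message describes, dropping the metric whitelist and aggregating whatever the database contains, can be sketched in isolation. This is a hypothetical minimal rework, not the patched script itself: the flat `results` table stands in for the real `metrics`/`workload_specs`/`runs` join, and the sample rows are made up. It runs under Python 2 or 3.

    import sqlite3

    # Stand-in for the workload-automation results database: one flat table
    # instead of the metrics/workload_specs/runs join the script queries.
    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE results (spec_id TEXT, metric TEXT, value REAL, units TEXT)")
    conn.executemany(
        "INSERT INTO results VALUES (?, ?, ?, ?)",
        [("bbench", "execution_time", 41.0, "seconds"),
         ("bbench", "execution_time", 43.0, "seconds"),
         ("bbench", "soc temperature", 52.0, "C")])  # not on any whitelist

    # No per-workload metric list: fetch every metric recorded for the spec
    # and bucket the values by metric name.
    metrics = {}
    for metric, value, units in conn.execute(
            "SELECT metric, value, units FROM results WHERE spec_id = ?",
            ("bbench",)):
        metrics.setdefault(metric, {"values": [], "units": units})["values"].append(value)

    # One averaged LAVA test-case line per metric, with spaces and commas
    # mangled to underscores as in the script.
    for name, data in sorted(metrics.items()):
        avg = sum(data["values"]) / len(data["values"])
        print("%s_%s Measurement:%s Units:%s Result:pass" % (
            "bbench", name.replace(" ", "_").replace(",", "_"),
            avg, data["units"]))

Because nothing filters on metric name, the unlisted "soc temperature" row comes out as its own test case, which is how the temperature logging mentioned above falls out for free.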
-rwxr-xr-x  postprocessing.py  46
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/postprocessing.py b/postprocessing.py
index 4995ea1..3930ed8 100755
--- a/postprocessing.py
+++ b/postprocessing.py
@@ -29,15 +29,13 @@ from optparse import OptionParser
 SQLITE_DB_GLOBAL_PATH = "/root/db_results/"
 
 modes_prepares_statement = "select distinct(id) from workload_specs;"
-metric_prepared_statement = """SELECT label as workload, metric, value, units, wsr.spec_id
+metric_prepared_statement = """
+SELECT label as workload, metric, value, units, wsr.spec_id
 FROM metrics AS m INNER JOIN (
     SELECT ws.OID as spec_oid, ws.id as spec_id, uuid, label
     FROM workload_specs AS ws INNER JOIN runs AS r ON ws.run_oid = r.OID
 ) AS wsr ON wsr.spec_oid = m.spec_oid
-WHERE metric = ? AND spec_id = ?;"""
-
-metrics_common = ['execution_time', 'arm,vexpress-energy A7 Jcore', 'arm,vexpress-energy A15 Jcore']
-metrics_bbench = ['Performance']
+WHERE spec_id = ?;"""
 
 
 if __name__ == '__main__':
@@ -52,23 +50,27 @@ if __name__ == '__main__':
 
     conn = sqlite3.connect(SQLITE_DB_GLOBAL_PATH + options.job_name + ".db")
     modes_cursor = conn.cursor()
+    metrics = {}
     for mode_row in modes_cursor.execute(modes_prepares_statement):
-        metrics_local = deepcopy(metrics_common)
-        if 'bbench' in mode_row[0]:
-            metrics_local = metrics_local + metrics_bbench
-        for metric in metrics_local:
-            params = (metric, mode_row[0])
-            metric_values = []
-            metric_cursor = conn.cursor()
-            units = ""
-            for metric_row in metric_cursor.execute(metric_prepared_statement, params):
-                metric_values.append(float(metric_row[2]))
-                units = metric_row[3]
-            print "%s_%s Measurement:%s Units:%s Result:pass" % (
-                mode_row[0],
-                metric.replace(" ", "_").replace(",", "_"),
-                mean(array(metric_values)),
-                units)
-            metric_cursor.close()
+        metric_cursor = conn.cursor()
+        metrics[mode_row[0]] = {}
+        for metric_row in metric_cursor.execute(
+                metric_prepared_statement,
+                (mode_row[0], )):
+            if metric_row[2] in metrics[mode_row[0]]:
+                metrics[mode_row[0]][metric_row[2]][VALUE].append(
+                    float(metric_row[3]))
+            else:
+                metrics[mode_row[0]][metric_row[2]] = {
+                    VALUE: [float(metric_row[3])],
+                    UNIT: metric_row[4]}
+        metric_cursor.close()
     modes_cursor.close()
     conn.close()
+    for key in metrics.iterkeys():
+        for metric in metrics[key].iterkeys():
+            print "%s_%s Measurement:%s Units:%s Result:pass" % (
+                key,
+                metric.replace(" ", "_").replace(",", "_"),
+                mean(array(metrics[key][metric][VALUE])),
+                metrics[key][metric][UNIT])
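The hunks use VALUE and UNIT keys without defining them, so they presumably exist elsewhere in the file. Assuming they are plain string constants, the accumulator the patched loop builds would have the following shape; the constants and the sample numbers here are illustrative guesses, not part of the commit.

    # Assumed definitions; the diff references VALUE and UNIT without showing them.
    VALUE = "values"
    UNIT = "units"

    # metrics maps spec id -> metric name -> collected samples plus their unit.
    metrics = {
        "bbench": {
            "execution_time": {VALUE: [41.0, 43.0], UNIT: "seconds"},
            "arm,vexpress-energy A7 Jcore": {VALUE: [12.1], UNIT: "J"},
        },
    }

The final pair of iterkeys() loops then flattens this structure into one averaged LAVA line per (spec, metric) pair.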