author    Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>  2022-06-15 18:25:06 +0530
committer Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>  2022-06-15 18:25:06 +0530
commit    ef48d60a611436eebab5cf781bc1f207638a1053 (patch)
tree      4db3a3b7830a55cb7f611bfa59c9fddef3c65619
parent    6a38f36d43338683409aa4554f0ddc4b31cf2e59 (diff)
Handle case where exe is not found.
Change-Id: I59dbcbc7916172fa376169877f18e5cf1b6de6c1
-rwxr-xr-x  md5sum-data-to-csv.py  4
-rw-r--r--  metric_utils.py        7
-rwxr-xr-x  nm-data-to-csv.py      8
3 files changed, 16 insertions, 3 deletions
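For context, the change below makes Benchmark.exe_path return None when no executable was recorded for a benchmark, and each consumer script checks for that before using the path. The following is a minimal sketch of that pattern, assuming a Benchmark class and results layout similar to metric_utils.py; the process() helper and its names are illustrative, not the exact upstream code.

import glob

class Benchmark:
    def __init__(self, name, node_path):
        self.name = name
        self._node_path = node_path

    @property
    def exe_path(self):
        # Return None instead of raising when no exe is recorded for this
        # benchmark, so callers can skip it (or emit a placeholder row).
        exe_paths = glob.glob(
            "{0}/perf.*.data/.debug/**/{1}/**/elf".format(self._node_path, self.name),
            recursive=True)
        if len(exe_paths) == 0:
            return None
        if len(exe_paths) > 1:
            raise AssertionError(
                "Multiple paths found for {0}: {1}".format(self.name, exe_paths))
        return exe_paths[0]

def process(benchmarks):
    # Caller side: skip benchmarks whose exe was not found.
    for bmk in benchmarks:
        if bmk.exe_path is None:
            continue
        # ... hash or size the executable, as the scripts below do ...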
diff --git a/md5sum-data-to-csv.py b/md5sum-data-to-csv.py
index bae835a..de4b207 100755
--- a/md5sum-data-to-csv.py
+++ b/md5sum-data-to-csv.py
@@ -40,6 +40,10 @@ def main():
benchmarks = metric_utils.get_benchmarks_from_results_dir(results_dir)
for bmk in benchmarks:
+ # Skip entries for bmk if exe is not found.
+ if bmk.exe_path is None:
+ continue
+
sym_to_md5sum = calculate_md5sum(bmk.exe_path)
for symbol in sym_to_md5sum.keys():
long_symbol = "[.] " + symbol
diff --git a/metric_utils.py b/metric_utils.py
index 4190ef2..a0975cb 100644
--- a/metric_utils.py
+++ b/metric_utils.py
@@ -19,7 +19,7 @@ class Benchmark:
def exe_path(self):
exe_paths = glob.glob("{0}/perf.*.data/.debug/**/{1}/**/elf".format(self._node_path, self.name), recursive=True)
if len(exe_paths) == 0:
- raise AssertionError("For {0}, no exe found".format(self.name))
+ return None
elif len(exe_paths) > 1:
raise AssertionError("Multiple paths found for {0}: {1}".format(self.name, exe_paths))
assert len(exe_paths) == 1
@@ -27,7 +27,10 @@ class Benchmark:
@property
def exe_name(self):
- return self.exe_path.split("/")[-3]
+ if self.exe_path is not None:
+ return self.exe_path.split("/")[-3]
+ # FIXME: How to obtain exe name without path ?
+ return "null"
@property
def libs(self):
diff --git a/nm-data-to-csv.py b/nm-data-to-csv.py
index 0cedb60..ad890d7 100755
--- a/nm-data-to-csv.py
+++ b/nm-data-to-csv.py
@@ -50,9 +50,15 @@ def main():
benchmarks = metric_utils.get_benchmarks_from_results_dir(results_dir)
for bmk in benchmarks:
+ exe_path = bmk.exe_path
+ # Skip calculating size, if exe is not found.
+ if exe_path is None:
+ csvwriter.writerow((bmk.name, bmk.exe_name, 0))
+ continue
+
# exes holds the main exe and libs used by benchmark.
exes = []
- exes.append(Executable(bmk.exe_name, bmk.exe_path))
+ exes.append(Executable(bmk.exe_name, exe_path))
libs = bmk.libs
for libname in libs.keys():