Diffstat (limited to 'tools/perf/tests/shell/lib/perf_metric_validation.py')
 tools/perf/tests/shell/lib/perf_metric_validation.py | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 81bd2bf38b67..3c3a9b4f8b82 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -12,7 +12,7 @@ class Validator:
         self.reportfname = reportfname
         self.rules = None
         self.collectlist=metrics
-        self.metrics = set()
+        self.metrics = set(metrics)
         self.tolerance = t

         self.workloads = [x for x in workload.split(",") if x]
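
The one-line change above seeds the tracked-metric set from the caller-supplied list instead of starting empty. A minimal sketch of the difference (metric names invented):

metrics = ["metric_a", "metric_b"]   # hypothetical user-requested metrics

old_set = set()          # before the fix: requested metrics were lost
new_set = set(metrics)   # after: they are tracked from construction

assert "metric_a" in new_set and not old_set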
@@ -148,6 +148,7 @@ class Validator:
                 self.errlist.append("Metric '%s' is not collected"%(name))
             elif val < 0:
                 negmetric.add("{0}(={1:.4f})".format(name, val))
+                self.collectlist[0].append(name)
             else:
                 pcnt += 1
             tcnt += 1
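
This hunk, like the matching one in the single-metric test below, queues each failing metric into slot 0 of collectlist so it can be collected again later with a longer workload. A minimal sketch of that retry-queue pattern, with invented names and values:

collectlist = {0: []}              # slot 0 holds metrics queued for rerun

def record(name, val):
    """Queue 'name' for a rerun when its value fails the check."""
    if val is None or val < 0:
        collectlist[0].append(name)
        return False
    return True

record("metric_a", -1.0)           # fails the positive-value check: queued
record("metric_b", 3.5)            # passes: not queued
print(collectlist[0])              # ['metric_a']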
@@ -266,6 +267,7 @@ class Validator:
                 passcnt += 1
             else:
                 faillist.append({'MetricName':m['Name'], 'CollectedValue':result})
+                self.collectlist[0].append(m['Name'])

         self.totalcnt += totalcnt
         self.passedcnt += passcnt
@@ -348,7 +350,7 @@ class Validator:
             if rule["TestType"] == "RelationshipTest":
                 metrics = [m["Name"] for m in rule["Metrics"]]
                 if not any(m not in collectlist[0] for m in metrics):
-                    collectlist[rule["RuleIndex"]] = set(metrics)
+                    collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]

         for idx, metrics in collectlist.items():
             if idx == 0: wl = "sleep 0.5".split()
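
The rule's metric set is now flattened into a single comma-separated string, so one 'perf stat -M' run can cover all of a rule's metrics. For example (rule metrics invented):

metrics = ["metric_a", "metric_b", "metric_a"]   # invented rule metrics
entry = [",".join(list(set(metrics)))]
print(entry)   # one element, e.g. ['metric_a,metric_b'] (set order varies)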
@@ -356,9 +358,12 @@ class Validator:
             for metric in metrics:
                 command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
                 command.extend(wl)
+                print(" ".join(command))
                 cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
                 data = [x+'}' for x in cmd.stderr.split('}\n') if x]
                 self.convert(data, idx)
+        self.collectlist = dict()
+        self.collectlist[0] = list()

     # End of Collector and Converter
     # Start of Rule Generator
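
The collect loop shown above drives 'perf stat' with JSON output on stderr, one object per line, and restores the closing brace that the split consumes. A standalone sketch of that step, assuming perf is installed and with a placeholder metric and workload:

import json
import subprocess

command = ["perf", "stat", "-j", "-M", "metric_a", "-a", "sleep", "0.5"]
print(" ".join(command))            # the same debug print the patch adds
cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding="utf-8")

# 'perf stat -j' emits one JSON object per line on stderr; splitting on
# '}\n' drops each closing brace, so it is appended back before parsing.
data = [x + "}" for x in cmd.stderr.split("}\n") if x]
events = [json.loads(x) for x in data]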
@@ -386,6 +391,20 @@ class Validator:
         return

+    def remove_unsupported_rules(self, rules, skiplist: set = None):
+        for m in skiplist:
+            self.metrics.discard(m)
+        new_rules = []
+        for rule in rules:
+            add_rule = True
+            for m in rule["Metrics"]:
+                if m["Name"] not in self.metrics:
+                    add_rule = False
+                    break
+            if add_rule:
+                new_rules.append(rule)
+        return new_rules
+
     def create_rules(self):
         """
         Create full rules which includes:
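
The new remove_unsupported_rules() first discards every skip-listed metric, then drops any rule that references a metric no longer in self.metrics. A self-contained sketch of the same filtering (rule and skip-list data invented):

metrics = {"metric_a", "metric_b", "metric_c"}
skiplist = {"metric_c"}                      # known-bad metrics

for m in skiplist:
    metrics.discard(m)

rules = [
    {"Metrics": [{"Name": "metric_a"}, {"Name": "metric_b"}]},  # kept
    {"Metrics": [{"Name": "metric_a"}, {"Name": "metric_c"}]},  # dropped
]
new_rules = [r for r in rules
             if all(m["Name"] in metrics for m in r["Metrics"])]
assert len(new_rules) == 1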
@@ -394,7 +413,10 @@ class Validator:
         Reindex all the rules to avoid repeated RuleIndex
         """
-        self.rules = self.read_json(self.rulefname)['RelationshipRules']
+        data = self.read_json(self.rulefname)
+        rules = data['RelationshipRules']
+        skiplist = set(data['SkipList'])
+        self.rules = self.remove_unsupported_rules(rules, skiplist)
         pctgrule = {'RuleIndex':0,
                     'TestType':'SingleMetricTest',
                     'RangeLower':'0',
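
create_rules() now expects the rule file to carry a SkipList alongside RelationshipRules. A minimal sketch of such a file and how it is consumed (values invented):

import json

rulefile = {
    "RelationshipRules": [
        {"RuleIndex": 1, "TestType": "RelationshipTest",
         "Metrics": [{"Name": "metric_a"}, {"Name": "metric_b"}]},
    ],
    "SkipList": ["metric_c"],
}
data = json.loads(json.dumps(rulefile))   # stands in for self.read_json()
rules = data['RelationshipRules']
skiplist = set(data['SkipList'])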
@@ -453,7 +475,8 @@ class Validator:
         The final report is written into a JSON file.
         '''
-        self.parse_perf_metrics()
+        if not self.collectlist:
+            self.parse_perf_metrics()
         self.create_rules()
         for i in range(0, len(self.workloads)):
             self._init_data()
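
With the guard above, a rerun skips the full perf-metric parse whenever collectlist already names the metrics to collect, whether queued by earlier failures or passed in by the user. A minimal runnable sketch of that control flow, using a hypothetical stand-in class:

class TinyValidator:                    # hypothetical stand-in for Validator
    def __init__(self, metrics=""):
        self.collectlist = metrics      # empty on a plain first run

    def parse_perf_metrics(self):
        print("parsing all metrics from perf")

    def test(self):
        if not self.collectlist:        # skipped when metrics are queued
            self.parse_perf_metrics()
        print("running rules")

TinyValidator().test()                       # first run: parses, then tests
TinyValidator(metrics="metric_a").test()     # rerun: skips the parse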