author     Chase Qi <chase.qi@linaro.org>  2016-11-18 15:25:02 +0000
committer  Linaro Code Review <review@review.linaro.org>  2016-11-18 15:25:02 +0000
commit     48fbb8bf1d72cb3a1fd3a92725c4a8020839cf5b (patch)
tree       ca127c86377c0c37975366141f58620ec6755faf /automated
parent     89e5357d822cb0ec9e1fd582a070ea54df757be1 (diff)
parent     2fea70e2a9f2937d9932ca7fee86162edbbe3beb (diff)
Merge "automated: test-runner: added option to record manual results"
Diffstat (limited to 'automated')
-rwxr-xr-x  automated/utils/test-runner.py  259
1 file changed, 213 insertions, 46 deletions
diff --git a/automated/utils/test-runner.py b/automated/utils/test-runner.py
index cfa032e..c79c932 100755
--- a/automated/utils/test-runner.py
+++ b/automated/utils/test-runner.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import argparse
import csv
+import cmd
import json
import logging
import os
@@ -34,7 +35,7 @@ class TestPlan(object):
self.skip_install = args.skip_install
self.logger = logging.getLogger('RUNNER.TestPlan')
- def test_list(self):
+ def test_list(self, kind="automated"):
if self.test_def:
if not os.path.exists(self.test_def):
self.logger.error(' %s NOT found, exiting...' % self.test_def)
@@ -52,7 +53,14 @@ class TestPlan(object):
with open(self.test_plan, 'r') as f:
test_plan = yaml.safe_load(f)
try:
- test_list = test_plan['requirements']['tests']['automated']
+ test_list = []
+ for requirement in test_plan['requirements']:
+ if 'tests' in requirement.keys():
+ if requirement['tests'] and \
+ kind in requirement['tests'].keys() and \
+ requirement['tests'][kind]:
+ for test in requirement['tests'][kind]:
+ test_list.append(test)
for test in test_list:
test['uuid'] = str(uuid4())
except KeyError as e:
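
The rewritten parser tolerates requirements that lack a 'tests' mapping or a given kind. A minimal sketch of the structure the loop above walks, with hypothetical paths, followed by an equivalent compact form:

    # Hypothetical parsed test plan; key names taken from the loop above.
    test_plan = {'requirements': [
        {'tests': {'automated': [{'path': 'automated/linux/smoke/smoke.yaml'}],
                   'manual': [{'path': 'manual/generic/booting.yaml'}]}},
        {'tests': None},        # tolerated: requirement with an empty 'tests'
        {'name': 'docs-only'},  # tolerated: requirement without 'tests' at all
    ]}
    kind = 'manual'
    test_list = []
    for requirement in test_plan['requirements']:
        tests = requirement.get('tests') or {}      # compact equivalent of the
        test_list.extend(tests.get(kind) or [])     # nested key checks above
    assert test_list == [{'path': 'manual/generic/booting.yaml'}]
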
@@ -73,10 +81,12 @@ class TestSetup(object):
def __init__(self, test, args):
self.output = os.path.realpath(args.output)
self.test_name = os.path.splitext(test['path'].split('/')[-1])[0]
+ self.repo_test_path = test['path']
self.uuid = test['uuid']
self.test_uuid = self.test_name + '_' + self.uuid
self.test_path = os.path.join(self.output, self.test_uuid)
self.logger = logging.getLogger('RUNNER.TestSetup')
+ self.test_kind = args.kind
def validate_env(self):
# Inspect if environment set properly.
@@ -95,8 +105,16 @@ class TestSetup(object):
def copy_test_repo(self):
self.validate_env()
shutil.rmtree(self.test_path, ignore_errors=True)
- shutil.copytree(self.repo_path, self.test_path, symlinks=True)
- self.logger.info('Test repo copied to: %s' % self.test_path)
+ if self.test_kind == 'manual':
+ test_dir_path = os.path.join(self.repo_path, self.repo_test_path.rsplit("/", 1)[0])
+ shutil.copytree(test_dir_path, self.test_path, symlinks=True)
+ self.logger.info('Test copied to: %s' % self.test_path)
+ else:
+ if self.repo_path in self.test_path:
+ self.logger.error("Cannot copy repository into itself. Please choose output directory outside repository path")
+ sys.exit(1)
+ shutil.copytree(self.repo_path, self.test_path, symlinks=True)
+ self.logger.info('Test repo copied to: %s' % self.test_path)
def create_uuid_file(self):
with open('%s/uuid' % self.test_path, 'w') as f:
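
For a manual test only the directory holding the definition is copied into the output tree, rather than the whole repository. A hedged sketch of the path arithmetic in copy_test_repo above, with a hypothetical definition path:

    repo_test_path = 'manual/generic/booting.yaml'    # hypothetical test['path']
    test_dir_path = repo_test_path.rsplit('/', 1)[0]  # -> 'manual/generic'
    # copied via shutil.copytree(os.path.join(repo_path, test_dir_path), ...)
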
@@ -109,6 +127,8 @@ class TestDefinition(object):
"""
def __init__(self, test, args):
+ self.test = test
+ self.args = args
self.output = os.path.realpath(args.output)
self.test_def = test['path']
self.test_name = os.path.splitext(self.test_def.split('/')[-1])[0]
@@ -116,6 +136,7 @@ class TestDefinition(object):
self.test_path = os.path.join(self.output, self.test_uuid)
self.logger = logging.getLogger('RUNNER.TestDef')
self.skip_install = args.skip_install
+ self.is_manual = False
if 'skip_install' in test:
self.skip_install = test['skip_install']
self.custom_params = None
@@ -123,8 +144,13 @@ class TestDefinition(object):
self.custom_params = test['parameters']
if 'params' in test:
self.custom_params = test['params']
- with open(self.test_def, 'r') as f:
- self.testdef = yaml.safe_load(f)
+ self.exists = False
+ if os.path.isfile(self.test_def):
+ self.exists = True
+ with open(self.test_def, 'r') as f:
+ self.testdef = yaml.safe_load(f)
+ if self.testdef['metadata']['format'].startswith("Manual Test Definition"):
+ self.is_manual = True
def definition(self):
with open('%s/testdef.yaml' % self.test_path, 'w') as f:
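
A definition is classified as manual purely on the strength of its metadata 'format' string. A small sketch, assuming a metadata block shaped like the manual tests in this repository:

    testdef = {'metadata': {'format': 'Manual Test Definition 1.0',  # assumed value
                            'name': 'booting'}}                      # hypothetical name
    is_manual = testdef['metadata']['format'].startswith('Manual Test Definition')
    # -> True; anything else is treated as an automated definition
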
@@ -135,28 +161,34 @@ class TestDefinition(object):
f.write(yaml.dump(self.testdef['metadata'], encoding='utf-8', allow_unicode=True))
def run(self):
- with open('%s/run.sh' % self.test_path, 'a') as f:
- f.write('#!/bin/sh\n')
-
- self.parameters = self.handle_parameters()
- if self.parameters:
- for line in self.parameters:
- f.write(line)
-
- f.write('set -e\n')
- f.write('export TESTRUN_ID=%s\n' % self.testdef['metadata']['name'])
- f.write('cd %s\n' % self.test_path)
- f.write('UUID=`cat uuid`\n')
- f.write('echo "<STARTRUN $TESTRUN_ID $UUID>"\n')
- steps = self.testdef['run'].get('steps', [])
- if steps:
- for cmd in steps:
- if '--cmd' in cmd or '--shell' in cmd:
- cmd = re.sub(r'\$(\d+)\b', r'\\$\1', cmd)
- f.write('%s\n' % cmd)
- f.write('echo "<ENDRUN $TESTRUN_ID $UUID>"\n')
-
- os.chmod('%s/run.sh' % self.test_path, 0755)
+ if not self.is_manual:
+ with open('%s/run.sh' % self.test_path, 'a') as f:
+ f.write('#!/bin/sh\n')
+
+ self.parameters = self.handle_parameters()
+ if self.parameters:
+ for line in self.parameters:
+ f.write(line)
+
+ f.write('set -e\n')
+ f.write('export TESTRUN_ID=%s\n' % self.testdef['metadata']['name'])
+ f.write('cd %s\n' % self.test_path)
+ f.write('UUID=`cat uuid`\n')
+ f.write('echo "<STARTRUN $TESTRUN_ID $UUID>"\n')
+ steps = self.testdef['run'].get('steps', [])
+ if steps:
+ for cmd in steps:
+ if '--cmd' in cmd or '--shell' in cmd:
+ cmd = re.sub(r'\$(\d+)\b', r'\\$\1', cmd)
+ f.write('%s\n' % cmd)
+ f.write('echo "<ENDRUN $TESTRUN_ID $UUID>"\n')
+
+ os.chmod('%s/run.sh' % self.test_path, 0755)
+
+ def get_test_run(self):
+ if self.is_manual:
+ return ManualTestRun(self.test, self.args)
+ return AutomatedTestRun(self.test, self.args)
def handle_parameters(self):
ret_val = ['###default parameters from test definition###\n']
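
The re.sub in run() above escapes positional-parameter references in '--cmd'/'--shell' steps, so the inner command sees a literal $1 when run.sh executes instead of the wrapper script's own (empty) positional arguments. Sketch with a hypothetical step:

    import re
    step = 'lava-test-case check --shell "test $1 -eq 0"'  # hypothetical step
    escaped = re.sub(r'\$(\d+)\b', r'\\$\1', step)
    # escaped now reads: lava-test-case check --shell "test \$1 -eq 0"
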
@@ -197,15 +229,25 @@ class TestRun(object):
self.test_name = os.path.splitext(test['path'].split('/')[-1])[0]
self.test_uuid = self.test_name + '_' + test['uuid']
self.test_path = os.path.join(self.output, self.test_uuid)
+ self.logger = logging.getLogger('RUNNER.TestRun')
self.test_timeout = args.timeout
if 'timeout' in test:
self.test_timeout = test['timeout']
- self.logger = logging.getLogger('RUNNER.TestRun')
+
+ def run(self):
+ raise NotImplementedError
+
+ def check_result(self):
+ raise NotImplementedError
+
+
+class AutomatedTestRun(TestRun):
+ def run(self):
self.logger.info('Executing %s/run.sh' % self.test_path)
shell_cmd = '%s/run.sh 2>&1 | tee %s/stdout.log' % (self.test_path, self.test_path)
self.child = pexpect.spawn('/bin/sh', ['-c', shell_cmd])
- def check_output(self):
+ def check_result(self):
if self.test_timeout:
self.logger.info('Test timeout: %s' % self.test_timeout)
test_end = time.time() + self.test_timeout
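
TestRun is now an abstract base: run() and check_result() raise NotImplementedError, and TestDefinition.get_test_run() selects the concrete subclass. A purely illustrative third subclass (not part of this patch) would follow the same shape:

    class DryTestRun(TestRun):
        """Hypothetical subclass: logs what would run, records nothing."""
        def run(self):
            self.logger.info('Would execute %s/run.sh' % self.test_path)
        def check_result(self):
            pass
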
@@ -225,6 +267,120 @@ class TestRun(object):
break
+class ManualTestShell(cmd.Cmd):
+ def __init__(self, test_dict, result_path):
+ cmd.Cmd.__init__(self)
+ self.test_dict = test_dict
+ self.result_path = result_path
+ self.current_step_index = 0
+ self.steps = self.test_dict['run']['steps']
+ self.expected = self.test_dict['run']['expected']
+ self.prompt = "%s > " % self.test_dict['metadata']['name']
+ self.result = None
+ self.intro = """
+        Welcome to the manual test executor. Type 'help' for available
+        commands. This shell is meant to be run on your computer, not on
+        the system under test. Please execute the steps from the test case,
+        compare to the expected result and record the test result as 'pass'
+        or 'fail'. If an issue prevents you from executing a step, please
+        record the result as 'skip'.
+ """
+
+ def do_quit(self, line):
+ """
+ Exit test execution
+ """
+ if self.result is not None:
+ return True
+ if line.find("-f") >= 0:
+ self._record_result("skip")
+ return True
+ print "Test result not recorded. Use -f to force. Forced quit records result as 'skip'"
+
+ do_EOF = do_quit
+
+ def do_description(self, line):
+ """
+ Prints current test overall description
+ """
+ print self.test_dict['metadata']['description']
+
+ def do_steps(self, line):
+ """
+ Prints all steps of the current test case
+ """
+ for index, step in enumerate(self.steps):
+ print "%s. %s" % (index, step)
+
+ def do_expected(self, line):
+ """
+ Prints all expected results of the current test case
+ """
+ for index, expected in enumerate(self.expected):
+ print "%s. %s" % (index, expected)
+
+ def do_current(self, line):
+ """
+ Prints current test step
+ """
+ self._print_step()
+
+ do_start = do_current
+
+ def do_next(self, line):
+ """
+ Prints next test step
+ """
+ if len(self.steps) > self.current_step_index + 1:
+ self.current_step_index += 1
+ self._print_step()
+
+ def _print_step(self):
+ print "%s. %s" % (self.current_step_index, self.steps[self.current_step_index])
+
+ def _record_result(self, result):
+ print "Recording %s in %s/stdout.log" % (result, self.result_path)
+ with open("%s/stdout.log" % self.result_path, "a") as f:
+ f.write("<LAVA_SIGNAL_TESTCASE TEST_CASE_ID=%s RESULT=%s>" %
+ (self.test_dict['metadata']['name'], result))
+
+ def do_pass(self, line):
+ """
+ Records PASS as test result
+ """
+ self.result = "pass"
+ self._record_result(self.result)
+ return True
+
+ def do_fail(self, line):
+ """
+ Records FAIL as test result
+ """
+ self.result = "fail"
+ self._record_result(self.result)
+ return True
+
+ def do_skip(self, line):
+ """
+ Records SKIP as test result
+ """
+ self.result = "skip"
+ self._record_result(self.result)
+ return True
+
+
+class ManualTestRun(TestRun, cmd.Cmd):
+ def run(self):
+ print self.test_name
+ with open('%s/testdef.yaml' % self.test_path, 'r') as f:
+ self.testdef = yaml.safe_load(f)
+
+ ManualTestShell(self.testdef, self.test_path).cmdloop()
+
+ def check_result(self):
+ pass
+
+
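
ManualTestShell drives the interactive session: 'steps', 'current' and 'next' walk the operator through the test case, and 'pass', 'fail' or 'skip' append a LAVA signal line (e.g. <LAVA_SIGNAL_TESTCASE TEST_CASE_ID=booting RESULT=pass> for a test named 'booting') to stdout.log and end the loop. A hedged sketch of driving it directly, with hypothetical paths:

    import yaml
    result_path = 'output/booting_1234'               # hypothetical result dir
    with open('%s/testdef.yaml' % result_path) as f:  # written by definition()
        testdef = yaml.safe_load(f)
    ManualTestShell(testdef, result_path).cmdloop()   # blocks until pass/fail/skip/quit
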
class ResultParser(object):
def __init__(self, test, args):
self.output = os.path.realpath(args.output)
@@ -332,6 +488,12 @@ def get_args():
path to the test definition to run.
Format example: "ubuntu/smoke-tests-basic.yaml"
''')
+ parser.add_argument('-k', '--kind', default="automated", dest='kind',
+ choices=['automated', 'manual'],
+                        help='''
+                        Select the type of tests to be executed from the test plan.
+                        Possible options: automated, manual
+ '''),
parser.add_argument('-t', '--timeout', type=int, default=None,
dest='timeout', help='Specify test timeout')
parser.add_argument('-s', '--skip_install', dest='skip_install',
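
Because choices is set, argparse itself rejects anything other than the two supported kinds. A quick standalone sketch of the new flag's behaviour:

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-k', '--kind', default='automated',
                        choices=['automated', 'manual'])
    parser.parse_args([]).kind                 # -> 'automated' (default)
    parser.parse_args(['-k', 'manual']).kind   # -> 'manual'
    # parser.parse_args(['-k', 'bogus'])       # exits: invalid choice 'bogus'
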
@@ -351,14 +513,15 @@ def main():
ch.setFormatter(formatter)
logger.addHandler(ch)
- if os.geteuid() != 0:
- logger.error("Sorry, you need to run this as root")
- sys.exit(1)
+ args = get_args()
+ if args.kind != "manual":
+ if os.geteuid() != 0:
+ logger.error("Sorry, you need to run this as root")
+ sys.exit(1)
# Generate test plan.
- args = get_args()
test_plan = TestPlan(args)
- test_list = test_plan.test_list()
+ test_list = test_plan.test_list(args.kind)
logger.info('Tests to run:')
for test in test_list:
print(test)
@@ -373,17 +536,21 @@ def main():
# Convert test definition.
test_def = TestDefinition(test, args)
- test_def.definition()
- test_def.metadata()
- test_def.run()
-
- # Run test.
- test_run = TestRun(test, args)
- test_run.check_output()
-
- # Parse test output, save results in json and csv format.
- result_parser = ResultParser(test, args)
- result_parser.run()
+ if test_def.exists:
+ test_def.definition()
+ test_def.metadata()
+ test_def.run()
+
+ # Run test.
+ test_run = test_def.get_test_run()
+ test_run.run()
+ test_run.check_result()
+
+ # Parse test output, save results in json and csv format.
+ result_parser = ResultParser(test, args)
+ result_parser.run()
+ else:
+ logger.warning("Requested test definition %s doesn't exist" % test['path'])
if __name__ == "__main__":
main()
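
End to end, a manual run no longer requires root. A hedged invocation sketch; the -p/--test_plan and -o/--output flag names are assumed from the args attributes used above (args.test_plan, args.output) rather than shown in this diff:

    import subprocess
    subprocess.check_call([
        'python', 'automated/utils/test-runner.py',
        '-p', 'plans/sample-test-plan.yaml',  # hypothetical plan file
        '-k', 'manual',
        '-o', '/tmp/test-output',             # must live outside the repo checkout
    ])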