author     Charles Oliveira <charles.oliveira@linaro.org>  2020-04-14 12:14:30 -0300
committer  Charles Oliveira <charles.oliveira@linaro.org>  2020-04-14 12:15:30 -0300
commit     944293c6c16cfbc4a35f50ce81f258551d946cb2 (patch)
tree       8106db3cd4cb29c6a235d387fcb77c244b1e1b7c /tests
parent     144b27ee09eddfc5fc74675cf872a4d3d9a9ac41 (diff)
tests: add tests for submit command
Diffstat (limited to 'tests')
-rw-r--r--  tests/submit_results/sample_log.log                 |   1
-rw-r--r--  tests/submit_results/sample_metadata.json           |   4
-rw-r--r--  tests/submit_results/sample_metrics.json            |   1
-rw-r--r--  tests/submit_results/sample_results.json            |   7
-rw-r--r--  tests/submit_results/sample_results.yaml            |   5
-rw-r--r--  tests/submit_results/sample_results_malformed.json  |   7
-rw-r--r--  tests/submit_results/sample_results_malformed.yaml  |   4
-rw-r--r--  tests/test_shortcuts.py                             |   7
-rw-r--r--  tests/test_submit.py                                | 122
9 files changed, 155 insertions, 3 deletions
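
Note: the new SubmitCommandTest further down exercises the submit command by running ./manage.py in a subprocess and asserting on its exit code and stderr. A minimal sketch of that pattern, using the same flags the tests pass (the run_submit helper, URL and token below are illustrative placeholders, not part of the commit):

    # Minimal sketch of how the new tests drive the CLI (helper name, URL and
    # token are placeholders; the real tests build argv in manage_submit()).
    import subprocess as sp

    def run_submit(extra_args):
        argv = ['./manage.py', '--squad-host', 'http://localhost:8000', '--squad-token', '<token>',
                'submit', '--group', 'my_group', '--project', 'my_project',
                '--build', 'my_build6', '--environment', 'test_submit_env'] + extra_args
        proc = sp.run(argv, stdout=sp.PIPE, stderr=sp.PIPE)
        return proc.returncode == 0, proc.stderr.decode('utf-8')

    # e.g.: ok, err = run_submit(['--results', 'tests/submit_results/sample_results.json'])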
diff --git a/tests/submit_results/sample_log.log b/tests/submit_results/sample_log.log
new file mode 100644
index 0000000..93cf9bc
--- /dev/null
+++ b/tests/submit_results/sample_log.log
@@ -0,0 +1 @@
+sample log
diff --git a/tests/submit_results/sample_metadata.json b/tests/submit_results/sample_metadata.json
new file mode 100644
index 0000000..2d3edd9
--- /dev/null
+++ b/tests/submit_results/sample_metadata.json
@@ -0,0 +1,4 @@
+{
+ "json-metadata-field-1": "json-metadata-field-1-value",
+ "job_id": "jsonmetadatajobid1"
+}
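
The job_id in this metadata fixture is what test_submit_everything() at the bottom of test_submit.py uses to look the resulting testrun back up. A rough sketch of that lookup (import paths, URL and token are assumptions mirroring the existing tests):

    # Rough sketch: fetch the testrun created with the fixture's job_id
    # (import paths, URL and token are assumptions based on the tests below).
    from squad_client.core.api import SquadApi
    from squad_client.core.models import Squad
    from squad_client.utils import first

    SquadApi.configure(url='http://localhost:8000', token='<token>')
    testrun = first(Squad().testruns(job_id='jsonmetadatajobid1'))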
diff --git a/tests/submit_results/sample_metrics.json b/tests/submit_results/sample_metrics.json
new file mode 100644
index 0000000..f2e8238
--- /dev/null
+++ b/tests/submit_results/sample_metrics.json
@@ -0,0 +1 @@
+{"json-metric-1": 42}
diff --git a/tests/submit_results/sample_results.json b/tests/submit_results/sample_results.json
new file mode 100644
index 0000000..dcebfee
--- /dev/null
+++ b/tests/submit_results/sample_results.json
@@ -0,0 +1,7 @@
+{
+ "json-test-1": "pass",
+ "json-test-2": {
+ "log": "json-test-2 log",
+ "result": "fail"
+ }
+}
diff --git a/tests/submit_results/sample_results.yaml b/tests/submit_results/sample_results.yaml
new file mode 100644
index 0000000..b4894e3
--- /dev/null
+++ b/tests/submit_results/sample_results.yaml
@@ -0,0 +1,5 @@
+---
+yaml-test-1: pass
+yaml-test-2:
+ log: yaml-test-2 log
+ result: fail
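
Both results fixtures share the same shape: a mapping from test name to either a bare status string or an object carrying "result" and an optional "log". A rough sketch of that interpretation (inferred from the assertions in test_submit.py below, not from the submit command's implementation):

    # Rough sketch of the results-file shape shared by the JSON and YAML
    # fixtures (interpretation inferred from the assertions further down).
    import json

    with open('tests/submit_results/sample_results.json') as f:
        results = json.load(f)

    for name, value in results.items():
        if isinstance(value, dict):
            status, log = value.get('result'), value.get('log', '')
        else:
            status, log = value, ''
        print(name, status, repr(log))
    # json-test-1 pass ''
    # json-test-2 fail 'json-test-2 log'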
diff --git a/tests/submit_results/sample_results_malformed.json b/tests/submit_results/sample_results_malformed.json
new file mode 100644
index 0000000..52d9aab
--- /dev/null
+++ b/tests/submit_results/sample_results_malformed.json
@@ -0,0 +1,7 @@
+{
+ "json-test-1": "pass",
+ "json-test-2": {
+ "log": "json-test-2 log":
+ "result": "fail"
+ }
+}
diff --git a/tests/submit_results/sample_results_malformed.yaml b/tests/submit_results/sample_results_malformed.yaml
new file mode 100644
index 0000000..e05e7b0
--- /dev/null
+++ b/tests/submit_results/sample_results_malformed.yaml
@@ -0,0 +1,4 @@
+---
+yaml-test-1: pass
+yaml-test-2: log: yaml-test-2 log
+ result: fail
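
These two fixtures are intentionally invalid (a ':' where a ',' belongs in the JSON, and a second mapping key jammed onto one line in the YAML); they feed the 'Failed parsing file' assertions further down. A quick sanity check for the JSON one (sketch; the CLI's own error message is what the tests assert on):

    # Sketch: confirm the malformed JSON fixture really does not parse.
    import json

    try:
        with open('tests/submit_results/sample_results_malformed.json') as f:
            json.load(f)
    except json.JSONDecodeError as exc:
        print('Failed parsing file:', exc)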
diff --git a/tests/test_shortcuts.py b/tests/test_shortcuts.py
index 6e69c7e..f240d5c 100644
--- a/tests/test_shortcuts.py
+++ b/tests/test_shortcuts.py
@@ -39,11 +39,12 @@ class SubmitResultsShortcutTest(TestCase):
         self.assertTrue(len(results) > 0)
 
     def test_malformed_data(self):
+        # job_id '12345' already exists from an earlier submission, so this one should be rejected
         metadata = {'job_id': '12345', 'a-metadata-field': 'value'}
-        tests = {'testa': 'pass', 'testb': {'result': 'pass', 'log': 'the log'}}
+        tests = {'test-malformed': 'pass', 'testb': {'result': 'pass', 'log': 'the log'}}
         metrics = {'metrica': 42}
         submit_results(group_project_slug='my_group/my_project', build_version='my_build',
                        env_slug='my_env', tests=tests, metrics=metrics, metadata=metadata)
-        results = self.squad.tests(name='testa')
-        self.assertTrue(len(results) > 0)
+        results = self.squad.tests(name='test-malformed')
+        self.assertTrue(len(results) == 0)
diff --git a/tests/test_submit.py b/tests/test_submit.py
index 398c1ec..77753a4 100644
--- a/tests/test_submit.py
+++ b/tests/test_submit.py
@@ -1,4 +1,5 @@
 import unittest
+import subprocess as sp
 
 from . import settings
 from squad_client.core.api import SquadApi
@@ -59,3 +60,124 @@ class SquadSubmitTest(unittest.TestCase):
         t = first(results)
         self.assertEqual(t.log, test.log)
         self.assertEqual(t.name, test.name)
+
+
+class SubmitCommandTest(unittest.TestCase):
+
+    testing_server = 'http://localhost:%s' % settings.DEFAULT_SQUAD_PORT
+    testing_token = '193cd8bb41ab9217714515954e8724f651ef8601'
+
+    def setUp(self):
+        self.squad = Squad()
+        SquadApi.configure(url=self.testing_server, token=self.testing_token)
+
+    def manage_submit(self, results=None, result_name=None, result_value=None, metrics=None,
+                      metadata=None, attachments=None, logs=None, environment=None):
+        argv = ['./manage.py', '--squad-host', self.testing_server, '--squad-token', self.testing_token,
+                'submit', '--group', 'my_group', '--project', 'my_project', '--build', 'my_build6', '--environment', 'test_submit_env']
+
+        if logs:
+            argv += ['--logs', logs]
+        if results:
+            argv += ['--results', results]
+        if metrics:
+            argv += ['--metrics', metrics]
+        if metadata:
+            argv += ['--metadata', metadata]
+        if attachments:
+            argv += ['--attachments', attachments]
+        if result_name:
+            argv += ['--result-name', result_name]
+        if result_value:
+            argv += ['--result-value', result_value]
+
+        proc = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.PIPE)
+        proc.ok = False
+
+        try:
+            out, err = proc.communicate()
+            proc.ok = (proc.returncode == 0)
+        except sp.TimeoutExpired:
+            print('Running "%s" timed out!' % ' '.join(argv))
+            proc.kill()
+            out, err = proc.communicate()
+
+        proc.out = out.decode('utf-8')
+        proc.err = err.decode('utf-8')
+        return proc
+
+    def test_submit_empty(self):
+        proc = self.manage_submit()
+        self.assertFalse(proc.ok)
+        self.assertIn('At least one of --result-name, --results, --metrics is required', proc.err)
+
+    def test_submit_single_test(self):
+        proc = self.manage_submit(result_name='single-test', result_value='pass')
+        self.assertTrue(proc.ok)
+        self.assertIn('1 tests', proc.err)
+
+        test = first(self.squad.tests(name='single-test'))
+        self.assertEqual('single-test', test.name)
+        self.assertEqual('pass', test.status)
+
+    def test_submit_invalid_result_value(self):
+        proc = self.manage_submit(result_name='single-invalid-test', result_value='not-valid')
+        self.assertFalse(proc.ok)
+        self.assertIn("result-value: invalid choice: 'not-valid'", proc.err)
+
+    def test_submit_results_json(self):
+        proc = self.manage_submit(results='tests/submit_results/sample_results.json')
+        self.assertTrue(proc.ok)
+        self.assertIn('2 tests', proc.err)
+
+        test = first(self.squad.tests(name='json-test-1'))
+        self.assertEqual('json-test-1', test.name)
+        self.assertEqual('pass', test.status)
+
+        test = first(self.squad.tests(name='json-test-2'))
+        self.assertEqual('json-test-2', test.name)
+        self.assertEqual('fail', test.status)
+        self.assertEqual('json-test-2 log', test.log)
+
+    def test_submit_results_malformed_json(self):
+        proc = self.manage_submit(results='tests/submit_results/sample_results_malformed.json')
+        self.assertFalse(proc.ok)
+        self.assertIn('Failed parsing file', proc.err)
+
+    def test_submit_results_yaml(self):
+        proc = self.manage_submit(results='tests/submit_results/sample_results.yaml')
+        self.assertTrue(proc.ok)
+        self.assertIn('2 tests', proc.err)
+
+        test = first(self.squad.tests(name='yaml-test-1'))
+        self.assertEqual('yaml-test-1', test.name)
+        self.assertEqual('pass', test.status)
+
+        test = first(self.squad.tests(name='yaml-test-2'))
+        self.assertEqual('yaml-test-2', test.name)
+        self.assertEqual('fail', test.status)
+        self.assertEqual('yaml-test-2 log', test.log)
+
+    def test_submit_results_malformed_yaml(self):
+        proc = self.manage_submit(results='tests/submit_results/sample_results_malformed.yaml')
+        self.assertFalse(proc.ok)
+        self.assertIn('Failed parsing file', proc.err)
+
+    def test_submit_single_metric(self):
+        proc = self.manage_submit(metrics='tests/submit_results/sample_metrics.json')
+        self.assertTrue(proc.ok)
+        self.assertIn('1 metrics', proc.err)
+
+    def test_submit_everything(self):
+        proc = self.manage_submit(results='tests/submit_results/sample_results.json',
+                                  metrics='tests/submit_results/sample_metrics.json',
+                                  metadata='tests/submit_results/sample_metadata.json',
+                                  logs='tests/submit_results/sample_log.log')
+        self.assertTrue(proc.ok)
+        self.assertIn('2 tests, 1 metrics', proc.err)
+
+        testrun = first(self.squad.testruns(job_id='jsonmetadatajobid1'))
+        self.assertEqual('jsonmetadatajobid1', testrun.job_id)
+
+        self.assertEqual(2, len(testrun.tests()))
+        self.assertEqual(1, len(testrun.metrics()))
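
To run only the new command tests against a local instance (sketch; assumes a SQUAD server is already listening on the port from tests/settings.py and that the token hard-coded above is valid for it):

    # Sketch: run just the new submit-command tests (assumes a local SQUAD
    # server matching tests/settings.py is up and the token above is valid).
    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromName('tests.test_submit.SubmitCommandTest')
    unittest.TextTestRunner(verbosity=2).run(suite)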