From c51011a5029be1afaadf265c49c28909a144c829de7a7ec91d09bd86a1789064 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20Schr=C3=B6ter?= Date: Fri, 3 May 2024 20:41:27 +0200 Subject: [PATCH] Sync from SUSE:SLFO:Main python-flaky revision 245a4f999fde6aa8682c79e0f49aed06 --- .gitattributes | 23 ++ _multibuild | 3 + flaky-3.7.0.tar.gz | 3 + python-flaky.changes | 84 ++++ python-flaky.spec | 94 +++++ remove_mock.patch | 14 + remove_nose.patch | 913 +++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 1134 insertions(+) create mode 100644 .gitattributes create mode 100644 _multibuild create mode 100644 flaky-3.7.0.tar.gz create mode 100644 python-flaky.changes create mode 100644 python-flaky.spec create mode 100644 remove_mock.patch create mode 100644 remove_nose.patch diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..9b03811 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,23 @@ +## Default LFS +*.7z filter=lfs diff=lfs merge=lfs -text +*.bsp filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.gem filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.jar filter=lfs diff=lfs merge=lfs -text +*.lz filter=lfs diff=lfs merge=lfs -text +*.lzma filter=lfs diff=lfs merge=lfs -text +*.obscpio filter=lfs diff=lfs merge=lfs -text +*.oxt filter=lfs diff=lfs merge=lfs -text +*.pdf filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.rpm filter=lfs diff=lfs merge=lfs -text +*.tbz filter=lfs diff=lfs merge=lfs -text +*.tbz2 filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.ttf filter=lfs diff=lfs merge=lfs -text +*.txz filter=lfs diff=lfs merge=lfs -text +*.whl filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text diff --git a/_multibuild b/_multibuild new file mode 100644 index 0000000..fcc7b97 --- /dev/null +++ b/_multibuild @@ -0,0 +1,3 @@ + + test + diff --git a/flaky-3.7.0.tar.gz b/flaky-3.7.0.tar.gz new file mode 100644 index 0000000..e691111 --- /dev/null +++ b/flaky-3.7.0.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ad100780721a1911f57a165809b7ea265a7863305acb66708220820caf8aa0d +size 29591 diff --git a/python-flaky.changes b/python-flaky.changes new file mode 100644 index 0000000..0fe6e3f --- /dev/null +++ b/python-flaky.changes @@ -0,0 +1,84 @@ +------------------------------------------------------------------- +Fri Apr 21 12:25:08 UTC 2023 - Dirk Müller + +- add sle15_python_module_pythons (jsc#PED-68) + +------------------------------------------------------------------- +Thu Apr 13 22:41:16 UTC 2023 - Matej Cepl + +- Make calling of %{sle15modernpython} optional. + +------------------------------------------------------------------- +Mon Dec 14 00:48:47 UTC 2020 - Benjamin Greiner + +- Fix condition around BuildRequirement + +------------------------------------------------------------------- +Sun Dec 13 20:27:15 UTC 2020 - Matej Cepl + +- We don't need to break Python 2.7 + +------------------------------------------------------------------- +Fri Dec 11 15:14:53 UTC 2020 - Matej Cepl + +- Add remove_mock.patch to remove dependency on the external mock package. + +------------------------------------------------------------------- +Fri Dec 11 14:24:39 UTC 2020 - Matej Cepl + +- Add remove_nose.patch to remove dependency on nose. 
+ +------------------------------------------------------------------- +Wed Jul 29 05:39:42 UTC 2020 - Steve Kowalik + +- Update to 3.7.0: + * Flaky now retries tests which fail during setup. + +------------------------------------------------------------------- +Fri Jan 31 01:54:06 UTC 2020 - Stefan Brüns + +- Add required suffix to package name for test package. Although + no binary package is created, the OBS blocks any dependent + packages until also the test package has been built (Giveaway: + main and test flavor create the same .src.rpm) + +------------------------------------------------------------------- +Thu Sep 12 12:33:03 UTC 2019 - Tomáš Chvátal + +- Update to 3.6.1: + * Reraise KeyboardInterrupt when running tests under pytest. + +------------------------------------------------------------------- +Fri Jul 26 10:48:12 UTC 2019 - pgajdos@suse.com + +- version update to 3.6.0 + * Do not print an empty report if no tests marked 'flaky' were run at all (#116). NOTE: This change could be breaking if you relied on the flaky report being printed. +- test via multibuild +- added sources + + _multibuild + +------------------------------------------------------------------- +Tue Mar 12 15:24:55 UTC 2019 - Tomáš Chvátal + +- Update to 3.5.3: + * Bugfixes - Flaky is now compatible with pytest >= 4.1. + * Officially support and test on Python 3.6 and 3.7. + * Adds a pytest marker that can be used instead of `@flaky. + * Replaced references to 'slaveoutput', where possible with 'workeroutput', following the convention chosen by pytest. + * Prints formatted tracebacks in the flaky report when using nose. + +------------------------------------------------------------------- +Thu Nov 9 06:10:34 UTC 2017 - arun@gmx.de + +- specfile: + * update copyright year + +- update to version 3.4.0: + * Bugfixes - Flaky for pytest will no longer silently swallow errors + that occur during test setup. + +------------------------------------------------------------------- +Tue Dec 20 17:37:08 UTC 2016 - jmatejek@suse.com + +- initial commit + diff --git a/python-flaky.spec b/python-flaky.spec new file mode 100644 index 0000000..10869a9 --- /dev/null +++ b/python-flaky.spec @@ -0,0 +1,94 @@ +# +# spec file +# +# Copyright (c) 2023 SUSE LLC +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. The license for this file, and modifications and additions to the +# file, is the same license as for the pristine package itself (unless the +# license for the pristine package is not an Open Source License, in which +# case the license is the MIT License). An "Open Source License" is a +# license that conforms to the Open Source Definition (Version 1.9) +# published by the Open Source Initiative. 
+ +# Please submit bugfixes or comments via https://bugs.opensuse.org/ +# + + +%{?!python_module:%define python_module() python-%{**} python3-%{**}} +%global flavor @BUILD_FLAVOR@%{nil} +%if "%{flavor}" == "test" +%define psuffix -test +%bcond_without test +%else +%bcond_with test +%endif +%{?sle15_python_module_pythons} +Name: python-flaky%{?psuffix} +Version: 3.7.0 +Release: 0 +Summary: Plugin for nose or py.test that automatically reruns flaky tests +License: Apache-2.0 +URL: https://github.com/box/flaky +Source: https://files.pythonhosted.org/packages/source/f/flaky/flaky-%{version}.tar.gz +# PATCH-FEATURE-UPSTREAM remove_nose.patch gh#box/flaky#171 mcepl@suse.com +# remove dependency on nose +Patch0: remove_nose.patch +# PATCH-FEATURE-UPSTREAM remove_mock.patch gh#box/flaky#171 mcepl@suse.com +# this patch makes things totally awesome +Patch1: remove_mock.patch +BuildRequires: %{python_module setuptools} +BuildRequires: fdupes +BuildRequires: python-rpm-macros +BuildArch: noarch +%if %{with test} +BuildRequires: %{python_module flaky >= %{version}} +BuildRequires: %{python_module genty} +BuildRequires: %{python_module pytest} +%if 0%{?suse_version} <= 1500 +BuildRequires: python-mock +%endif +%endif +%python_subpackages + +%description +Flaky is a plugin for py.test that automatically reruns flaky tests. + +Ideally, tests reliably pass or fail, but sometimes test fixtures must rely on components that aren't 100% +reliable. With flaky, instead of removing those tests or marking them to @skip, they can be automatically +retried. + +For more information about flaky, see `this presentation `_. + +%prep +%autosetup -p1 -n flaky-%{version} + +%if !%{with test} +%build +%python_build +%endif + +%if !%{with test} +%install +%python_install +%python_expand %fdupes %{buildroot}%{$python_sitelib} +%endif + +%if %{with test} +%check +%pytest -k 'example and not options' --doctest-modules test/test_pytest/ +%pytest -k 'example and not options' test/test_pytest/ +%pytest -p no:flaky test/test_pytest/test_flaky_pytest_plugin.py +export PYTEST_ADDOPTS="--force-flaky --max-runs 2" +%pytest test/test_pytest/test_pytest_options_example.py +%endif + +%if !%{with test} +%files %{python_files} +%doc README.rst +%license LICENSE +%{python_sitelib}/* +%endif + +%changelog diff --git a/remove_mock.patch b/remove_mock.patch new file mode 100644 index 0000000..115255a --- /dev/null +++ b/remove_mock.patch @@ -0,0 +1,14 @@ +--- a/test/test_pytest/test_flaky_pytest_plugin.py ++++ b/test/test_pytest/test_flaky_pytest_plugin.py +@@ -2,7 +2,10 @@ + + from __future__ import unicode_literals + from io import StringIO +-from mock import Mock, patch ++try: ++ from unittest.mock import Mock, patch ++except ImportError: ++ from mock import Mock, patch + # pylint:disable=import-error + import pytest + from _pytest.runner import CallInfo diff --git a/remove_nose.patch b/remove_nose.patch new file mode 100644 index 0000000..149d3ed --- /dev/null +++ b/remove_nose.patch @@ -0,0 +1,913 @@ +--- a/test/test_nose/__init__.py ++++ /dev/null +@@ -1,3 +0,0 @@ +-# coding: utf-8 +- +-from __future__ import unicode_literals, absolute_import +--- a/test/test_nose/test_flaky_nose_plugin.py ++++ /dev/null +@@ -1,446 +0,0 @@ +-# coding: utf-8 +- +-from __future__ import unicode_literals +- +-from unittest import TestCase +- +-from genty import genty, genty_dataset +-import mock +-from mock import MagicMock, Mock, patch +- +-from flaky import defaults, flaky_nose_plugin +-from flaky.flaky_decorator import flaky +-from flaky.names import 
FlakyNames +- +- +-@genty +-class TestFlakyNosePlugin(TestCase): +- def setUp(self): +- super(TestFlakyNosePlugin, self).setUp() +- +- self._mock_test_result = MagicMock() +- self._mock_stream = None +- self._flaky_plugin = flaky_nose_plugin.FlakyPlugin() +- self._mock_nose_result = Mock(flaky_nose_plugin.TextTestResult) +- self._flaky_plugin.prepareTestResult(self._mock_nose_result) +- self._mock_test = MagicMock(name='flaky_plugin_test') +- self._mock_test_case = MagicMock( +- name='flaky_plugin_test_case', +- spec=TestCase +- ) +- self._mock_test_case.address = MagicMock() +- self._mock_test_case.test = self._mock_test +- self._mock_test_module_name = 'test_module' +- self._mock_test_class_name = 'TestClass' +- self._mock_test_method_name = 'test_method' +- self._mock_test_names = '{}:{}.{}'.format( +- self._mock_test_module_name, +- self._mock_test_class_name, +- self._mock_test_method_name +- ) +- self._mock_exception = Exception('Error in {}'.format( +- self._mock_test_method_name) +- ) +- self._mock_stack_trace = '' +- self._mock_exception_type = Exception +- self._mock_error = ( +- self._mock_exception_type, +- self._mock_exception, +- None, +- ) +- self._mock_test_method = MagicMock( +- name=self._mock_test_method_name, +- spec=['__call__'] + list(FlakyNames().items()), +- ) +- setattr( +- self._mock_test, +- self._mock_test_method_name, +- self._mock_test_method, +- ) +- +- def _assert_flaky_plugin_configured(self): +- options = Mock() +- options.multiprocess_workers = 0 +- conf = Mock() +- self._flaky_plugin.enabled = True +- with patch.object(flaky_nose_plugin, 'TextTestResult') as flaky_result: +- flaky_result.return_value = self._mock_test_result +- from io import StringIO +- self._mock_stream = MagicMock(spec=StringIO) +- with patch.object(self._flaky_plugin, '_get_stream') as get_stream: +- get_stream.return_value = self._mock_stream +- self._flaky_plugin.configure(options, conf) +- +- def test_flaky_plugin_report(self): +- flaky_report = 'Flaky tests passed; others failed. ' \ +- 'No more tests; that ship has sailed.' 
+- self._test_flaky_plugin_report(flaky_report) +- +- def test_flaky_plugin_handles_success_for_test_method(self): +- self._test_flaky_plugin_handles_success() +- +- def test_flaky_plugin_handles_success_for_test_instance(self): +- self._test_flaky_plugin_handles_success(is_test_method=False) +- +- def test_flaky_plugin_handles_success_for_needs_rerun(self): +- self._test_flaky_plugin_handles_success(min_passes=2) +- +- def test_flaky_plugin_ignores_success_for_non_flaky_test(self): +- self._expect_test_not_flaky() +- self._flaky_plugin.addSuccess(self._mock_test_case) +- self._assert_test_ignored() +- +- def test_flaky_plugin_ignores_error_for_non_flaky_test(self): +- self._expect_test_not_flaky() +- self._flaky_plugin.handleError(self._mock_test_case, None) +- self._assert_test_ignored() +- +- def test_flaky_plugin_ignores_failure_for_non_flaky_test(self): +- self._expect_test_not_flaky() +- self._flaky_plugin.handleFailure(self._mock_test_case, None) +- self._assert_test_ignored() +- +- def test_flaky_plugin_ignores_error_for_nose_failure(self): +- self._mock_test_case.address.return_value = ( +- None, +- self._mock_test_module_name, +- None, +- ) +- self._flaky_plugin.handleError(self._mock_test_case, None) +- self._assert_test_ignored() +- +- def test_flaky_plugin_handles_error_for_test_method(self): +- self._test_flaky_plugin_handles_failure_or_error() +- +- def test_flaky_plugin_handles_error_for_test_instance(self): +- self._test_flaky_plugin_handles_failure_or_error(is_test_method=False) +- +- def test_flaky_plugin_handles_failure_for_test_method(self): +- self._test_flaky_plugin_handles_failure_or_error(is_failure=True) +- +- def test_flaky_plugin_handles_failure_for_test_instance(self): +- self._test_flaky_plugin_handles_failure_or_error( +- is_failure=True, +- is_test_method=False +- ) +- +- def test_flaky_plugin_handles_failure_for_no_more_retries(self): +- self._test_flaky_plugin_handles_failure_or_error( +- is_failure=True, +- max_runs=1 +- ) +- +- def test_flaky_plugin_handles_additional_errors(self): +- self._test_flaky_plugin_handles_failure_or_error( +- current_errors=[self._mock_error] +- ) +- +- def test_flaky_plugin_handles_bare_test(self): +- self._mock_test_names = self._mock_test_method_name +- self._mock_test.test = Mock() +- self._expect_call_test_address() +- attrib = defaults.default_flaky_attributes(2, 1) +- for name, value in attrib.items(): +- setattr( +- self._mock_test.test, +- name, +- value, +- ) +- delattr(self._mock_test, self._mock_test_method_name) +- self._flaky_plugin.prepareTestCase(self._mock_test_case) +- self.assertTrue(self._flaky_plugin.handleError( +- self._mock_test_case, +- self._mock_error, +- )) +- self.assertFalse(self._flaky_plugin.handleError( +- self._mock_test_case, +- self._mock_error, +- )) +- +- def _expect_call_test_address(self): +- self._mock_test_case.address.return_value = ( +- None, +- None, +- self._mock_test_names +- ) +- +- def _expect_test_flaky(self, is_test_method, max_runs, min_passes): +- self._expect_call_test_address() +- if is_test_method: +- mock_test_method = getattr( +- self._mock_test, +- self._mock_test_method_name +- ) +- for flaky_attr in FlakyNames(): +- setattr(self._mock_test, flaky_attr, None) +- setattr(mock_test_method, flaky_attr, None) +- flaky(max_runs, min_passes)(mock_test_method) +- else: +- flaky(max_runs, min_passes)(self._mock_test) +- mock_test_method = getattr( +- self._mock_test, +- self._mock_test_method_name +- ) +- for flaky_attr in FlakyNames(): +- setattr(mock_test_method, 
flaky_attr, None) +- +- def _expect_test_not_flaky(self): +- self._expect_call_test_address() +- for test_object in ( +- self._mock_test, +- getattr(self._mock_test, self._mock_test_method_name) +- ): +- for flaky_attr in FlakyNames(): +- setattr(test_object, flaky_attr, None) +- +- def _assert_test_ignored(self): +- self._mock_test_case.address.assert_called_with() +- self.assertEqual( +- self._mock_test_case.mock_calls, +- [mock.call.address()], +- ) +- self.assertEqual(self._mock_test.mock_calls, []) +- self.assertEqual(self._mock_nose_result.mock_calls, []) +- +- def _get_flaky_attributes(self): +- actual_flaky_attributes = { +- attr: getattr( +- self._mock_test_case, +- attr, +- None, +- ) for attr in FlakyNames() +- } +- for key, value in actual_flaky_attributes.items(): +- if isinstance(value, list): +- actual_flaky_attributes[key] = tuple(value) +- return actual_flaky_attributes +- +- def _set_flaky_attribute(self, attr, value): +- setattr(self._mock_test, attr, value) +- +- def _assert_flaky_attributes_contains( +- self, +- expected_flaky_attributes, +- ): +- actual_flaky_attributes = self._get_flaky_attributes() +- self.assertDictContainsSubset( +- expected_flaky_attributes, +- actual_flaky_attributes, +- 'Unexpected flaky attributes. Expected {} got {}'.format( +- expected_flaky_attributes, +- actual_flaky_attributes +- ) +- ) +- +- def _test_flaky_plugin_handles_failure_or_error( +- self, +- current_errors=None, +- current_passes=0, +- current_runs=0, +- is_failure=False, +- is_test_method=True, +- max_runs=2, +- min_passes=1, +- ): +- self._assert_flaky_plugin_configured() +- self._expect_test_flaky(is_test_method, max_runs, min_passes) +- if current_errors is None: +- current_errors = [self._mock_error] +- else: +- current_errors.append(self._mock_error) +- self._set_flaky_attribute( +- FlakyNames.CURRENT_ERRORS, +- current_errors, +- ) +- self._set_flaky_attribute( +- FlakyNames.CURRENT_PASSES, +- current_passes, +- ) +- self._set_flaky_attribute( +- FlakyNames.CURRENT_RUNS, +- current_runs, +- ) +- +- retries_remaining = current_runs + 1 < max_runs +- too_few_passes = current_passes < min_passes +- expected_plugin_handles_failure = too_few_passes and retries_remaining +- did_plugin_retry_test = max_runs > 1 +- +- self._flaky_plugin.prepareTestCase(self._mock_test_case) +- if is_failure: +- actual_plugin_handles_failure = self._flaky_plugin.handleFailure( +- self._mock_test_case, +- self._mock_error, +- ) +- else: +- actual_plugin_handles_failure = self._flaky_plugin.handleError( +- self._mock_test_case, +- self._mock_error, +- ) +- +- self.assertEqual( +- expected_plugin_handles_failure or None, +- actual_plugin_handles_failure, +- 'Expected plugin{} to handle the test run, but it did{}.'.format( +- ' to' if expected_plugin_handles_failure else '', +- '' if actual_plugin_handles_failure else ' not' +- ), +- ) +- self._assert_flaky_attributes_contains( +- { +- FlakyNames.CURRENT_RUNS: current_runs + 1, +- FlakyNames.CURRENT_ERRORS: tuple(current_errors), +- }, +- ) +- expected_test_case_calls = [mock.call.address(), mock.call.address()] +- expected_result_calls = [] +- if expected_plugin_handles_failure: +- expected_test_case_calls.append(('__hash__',)) +- expected_stream_calls = [mock.call.writelines([ +- self._mock_test_method_name, +- ' failed ({} runs remaining out of {}).'.format( +- max_runs - current_runs - 1, max_runs +- ), +- 'Exception: Error in test_method', +- '\n', +- ])] +- else: +- if did_plugin_retry_test: +- if is_failure: +- expected_result_calls.append( +- 
mock.call.addFailure( +- self._mock_test_case, +- self._mock_error, +- ), +- ) +- else: +- expected_result_calls.append(mock.call.addError( +- self._mock_test_case, +- self._mock_error, +- )) +- expected_stream_calls = [mock.call.writelines([ +- self._mock_test_method_name, +- ' failed; it passed {} out of the required {} times.'.format( +- current_passes, +- min_passes +- ), +- 'Exception: Error in test_method', +- '\n' +- ])] +- self.assertEqual( +- self._mock_nose_result.mock_calls, +- expected_result_calls, +- ) +- self.assertEqual( +- self._mock_test_case.mock_calls, +- expected_test_case_calls, +- 'Unexpected TestCase calls: {} vs {}'.format( +- self._mock_test_case.mock_calls, +- expected_test_case_calls +- ) +- ) +- self.assertEqual(self._mock_stream.mock_calls, expected_stream_calls) +- +- def _test_flaky_plugin_handles_success( +- self, +- current_passes=0, +- current_runs=0, +- is_test_method=True, +- max_runs=2, +- min_passes=1 +- ): +- self._assert_flaky_plugin_configured() +- self._expect_test_flaky(is_test_method, max_runs, min_passes) +- self._set_flaky_attribute( +- FlakyNames.CURRENT_PASSES, +- current_passes, +- ) +- self._set_flaky_attribute( +- FlakyNames.CURRENT_RUNS, +- current_runs, +- ) +- +- retries_remaining = current_runs + 1 < max_runs +- too_few_passes = current_passes + 1 < min_passes +- expected_plugin_handles_success = too_few_passes and retries_remaining +- +- self._flaky_plugin.prepareTestCase(self._mock_test_case) +- actual_plugin_handles_success = self._flaky_plugin.addSuccess( +- self._mock_test_case, +- ) +- +- self.assertEqual( +- expected_plugin_handles_success or None, +- actual_plugin_handles_success, +- 'Expected plugin{} to handle the test run, but it did{}.'.format( +- ' not' if expected_plugin_handles_success else '', +- '' if actual_plugin_handles_success else ' not' +- ), +- ) +- self._assert_flaky_attributes_contains( +- { +- FlakyNames.CURRENT_RUNS: current_runs + 1, +- FlakyNames.CURRENT_PASSES: current_passes + 1, +- }, +- ) +- expected_test_case_calls = [mock.call.address(), mock.call.address()] +- expected_stream_calls = [mock.call.writelines([ +- self._mock_test_method_name, +- " passed {} out of the required {} times. 
".format( +- current_passes + 1, +- min_passes, +- ), +- ])] +- if expected_plugin_handles_success: +- _rerun_text = 'Running test again until it passes {0} times.\n' +- expected_test_case_calls.append(('__hash__',)) +- expected_stream_calls.append( +- mock.call.write(_rerun_text.format(min_passes)), +- ) +- else: +- expected_stream_calls.append(mock.call.write('Success!\n')) +- self.assertEqual( +- self._mock_test_case.mock_calls, +- expected_test_case_calls, +- 'Unexpected TestCase calls = {} vs {}'.format( +- self._mock_test_case.mock_calls, +- expected_test_case_calls, +- ), +- ) +- self.assertEqual(self._mock_stream.mock_calls, expected_stream_calls) +- +- def _test_flaky_plugin_report(self, expected_stream_value): +- self._assert_flaky_plugin_configured() +- mock_stream = Mock() +- self._mock_stream.getvalue.return_value = expected_stream_value +- +- self._flaky_plugin.report(mock_stream) +- +- self.assertEqual( +- mock_stream.mock_calls, +- [ +- mock.call.write('===Flaky Test Report===\n\n'), +- mock.call.write(expected_stream_value), +- mock.call.write('\n===End Flaky Test Report===\n'), +- ], +- ) +- +- @genty_dataset( +- multiprocess_plugin_absent=(None, 'StringIO'), +- processes_argument_absent=(0, 'StringIO'), +- processes_equals_one=(1, 'MultiprocessingStringIO'), +- processes_equals_two=(2, 'MultiprocessingStringIO'), +- ) +- def test_flaky_plugin_get_stream(self, mp_workers, expected_class_name): +- options = Mock() +- conf = Mock() +- self._flaky_plugin.enabled = True +- options.multiprocess_workers = mp_workers +- if mp_workers is None: +- del options.multiprocess_workers +- self._flaky_plugin.configure(options, conf) +- # pylint:disable=protected-access +- self.assertEqual( +- self._flaky_plugin._stream.__class__.__name__, +- expected_class_name, +- ) +--- a/test/test_nose/test_nose_example.py ++++ /dev/null +@@ -1,98 +0,0 @@ +-# coding: utf-8 +- +-from __future__ import unicode_literals +- +-from unittest import TestCase, skip +- +-from genty import genty, genty_dataset +-from nose.tools import raises +- +-from flaky import flaky +- +- +-# This is an end-to-end example of the flaky package in action. Consider it +-# a live tutorial, showing the various features in action. +- +- +-class ExampleTests(TestCase): +- _threshold = -1 +- +- def test_non_flaky_thing(self): +- """Flaky will not interact with this test""" +- +- @raises(AssertionError) +- def test_non_flaky_failing_thing(self): +- """Flaky will also not interact with this test""" +- self.assertEqual(0, 1) +- +- @flaky(3, 2) +- def test_flaky_thing_that_fails_then_succeeds(self): +- """ +- Flaky will run this test 3 times. It will fail once and then succeed twice. +- """ +- self._threshold += 1 +- if self._threshold < 1: +- raise Exception("Threshold is not high enough: {} vs {}.".format( +- self._threshold, 1), +- ) +- +- @flaky(3, 2) +- def test_flaky_thing_that_succeeds_then_fails_then_succeeds(self): +- """ +- Flaky will run this test 3 times. It will succeed once, fail once, and then succeed one more time. +- """ +- self._threshold += 1 +- if self._threshold == 1: +- self.assertEqual(0, 1) +- +- @flaky(2, 2) +- def test_flaky_thing_that_always_passes(self): +- """Flaky will run this test twice. Both will succeed.""" +- +- @skip("This really fails! Remove this decorator to see the test failure.") +- @flaky() +- def test_flaky_thing_that_always_fails(self): +- """Flaky will run this test twice. 
Both will fail.""" +- self.assertEqual(0, 1) +- +- +-@flaky +-class ExampleFlakyTests(TestCase): +- _threshold = -1 +- +- def test_flaky_thing_that_fails_then_succeeds(self): +- """ +- Flaky will run this test twice. It will fail once and then succeed. +- """ +- self._threshold += 1 +- if self._threshold < 1: +- raise Exception("Threshold is not high enough: {} vs {}.".format( +- self._threshold, 1), +- ) +- +- +-def test_function(): +- """ +- Nose will import this function and wrap it in a :class:`FunctionTestCase`. +- It's included in the example to make sure flaky handles it correctly. +- """ +- +- +-@flaky +-def test_flaky_function(param=[]): +- # pylint:disable=dangerous-default-value +- param_length = len(param) +- param.append(None) +- assert param_length == 1 +- +- +-@genty +-class ExampleFlakyTestsWithUnicodeTestNames(ExampleFlakyTests): +- @genty_dataset('ascii name', 'ńőń ȁŝćȉȉ ŝƭȕƒƒ') +- def test_non_flaky_thing(self, message): +- self._threshold += 1 +- if self._threshold < 1: +- raise Exception( +- "Threshold is not high enough: {} vs {} for '{}'.".format( +- self._threshold, 1, message), +- ) +--- a/test/test_nose/test_nose_options_example.py ++++ /dev/null +@@ -1,54 +0,0 @@ +-# coding: utf-8 +- +-from __future__ import unicode_literals +- +-from unittest import TestCase +- +-from flaky import flaky +- +-# This is a series of tests that do not use the flaky decorator; the flaky +-# behavior is intended to be enabled with the --force-flaky option on the +-# command line. +- +- +-class ExampleTests(TestCase): +- _threshold = -2 +- +- def test_something_flaky(self): +- """ +- Flaky will run this test twice. +- It will fail once and then succeed once. +- This ensures that we mark tests as flaky even if they don't have a +- decorator when we use the command-line options. +- """ +- self._threshold += 1 +- if self._threshold < 0: +- raise Exception("Threshold is not high enough.") +- +- @flaky(3, 1) +- def test_flaky_thing_that_fails_then_succeeds(self): +- """ +- Flaky will run this test 3 times. +- It will fail twice and then succeed once. +- This ensures that the flaky decorator overrides any command-line +- options we specify. +- """ +- self._threshold += 1 +- if self._threshold < 1: +- raise Exception("Threshold is not high enough.") +- +- +-@flaky(3, 1) +-class ExampleFlakyTests(TestCase): +- _threshold = -1 +- +- def test_flaky_thing_that_fails_then_succeeds(self): +- """ +- Flaky will run this test 3 times. +- It will fail twice and then succeed once. +- This ensures that the flaky decorator on a test suite overrides any +- command-line options we specify. +- """ +- self._threshold += 1 +- if self._threshold < 1: +- raise Exception("Threshold is not high enough.") +--- a/setup.py ++++ b/setup.py +@@ -68,9 +68,6 @@ def main(): + cmdclass={'test': Tox}, + zip_safe=False, + entry_points={ +- 'nose.plugins.0.10': [ +- 'flaky = flaky.flaky_nose_plugin:FlakyPlugin' +- ], + 'pytest11': [ + 'flaky = flaky.flaky_pytest_plugin' + ] +--- a/flaky/flaky_nose_plugin.py ++++ /dev/null +@@ -1,285 +0,0 @@ +-# coding: utf-8 +- +-from __future__ import unicode_literals +- +-import logging +-from optparse import OptionGroup +-import os +- +-from nose.failure import Failure +-from nose.plugins import Plugin +-from nose.result import TextTestResult +- +-from flaky._flaky_plugin import _FlakyPlugin +- +- +-class FlakyPlugin(_FlakyPlugin, Plugin): +- """ +- Plugin for nosetests that allows retrying flaky tests. 
+- """ +- name = 'flaky' +- +- def __init__(self): +- super(FlakyPlugin, self).__init__() +- self._logger = logging.getLogger('nose.plugins.flaky') +- self._flaky_result = None +- self._nose_result = None +- self._flaky_report = True +- self._force_flaky = False +- self._max_runs = None +- self._min_passes = None +- self._test_status = {} +- self._tests_that_reran = set() +- self._tests_that_have_been_reported = set() +- +- def options(self, parser, env=os.environ): +- """ +- Base class override. +- Add options to the nose argument parser. +- """ +- # pylint:disable=dangerous-default-value +- super(FlakyPlugin, self).options(parser, env=env) +- self.add_report_option(parser.add_option) +- group = OptionGroup( +- parser, "Force flaky", "Force all tests to be flaky.") +- self.add_force_flaky_options(group.add_option) +- parser.add_option_group(group) +- +- def _get_stream(self, multiprocess=False): +- """ +- Get the stream used to store the flaky report. +- If this nose run is going to use the multiprocess plugin, then use +- a multiprocess-list backed StringIO proxy; otherwise, use the default +- stream. +- +- :param multiprocess: +- Whether or not this test run is configured for multiprocessing. +- :type multiprocess: +- `bool` +- :return: +- The stream to use for storing the flaky report. +- :rtype: +- :class:`StringIO` or :class:`MultiprocessingStringIO` +- """ +- if multiprocess: +- from flaky.multiprocess_string_io import MultiprocessingStringIO +- return MultiprocessingStringIO() +- return self._stream +- +- def configure(self, options, conf): +- """Base class override.""" +- super(FlakyPlugin, self).configure(options, conf) +- if not self.enabled: +- return +- is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0 +- self._stream = self._get_stream(is_multiprocess) +- self._flaky_result = TextTestResult(self._stream, [], 0) +- self._flaky_report = options.flaky_report +- self._flaky_success_report = options.flaky_success_report +- self._force_flaky = options.force_flaky +- self._max_runs = options.max_runs +- self._min_passes = options.min_passes +- +- def startTest(self, test): +- """ +- Base class override. Called before a test is run. +- +- Add the test to the test status tracker, so it can potentially +- be rerun during afterTest. +- +- :param test: +- The test that is going to be run. +- :type test: +- :class:`nose.case.Test` +- """ +- # pylint:disable=invalid-name +- self._test_status[test] = None +- +- def afterTest(self, test): +- """ +- Base class override. Called after a test is run. +- +- If the test was marked for rerun, rerun the test. +- +- :param test: +- The test that has been run. +- :type test: +- :class:`nose.case.Test` +- """ +- # pylint:disable=invalid-name +- if self._test_status[test]: +- self._tests_that_reran.add(id(test)) +- test.run(self._flaky_result) +- self._test_status.pop(test, None) +- +- def _mark_test_for_rerun(self, test): +- """ +- Base class override. Rerun a flaky test. +- +- In this case, don't actually rerun the test, but mark it for +- rerun during afterTest. +- +- :param test: +- The test that is going to be rerun. +- :type test: +- :class:`nose.case.Test` +- """ +- self._test_status[test] = True +- +- def handleError(self, test, err): +- """ +- Baseclass override. Called when a test raises an exception. +- +- If the test isn't going to be rerun again, then report the error +- to the nose test result. 
+- +- :param test: +- The test that has raised an error +- :type test: +- :class:`nose.case.Test` +- :param err: +- Information about the test failure (from sys.exc_info()) +- :type err: +- `tuple` of `class`, :class:`Exception`, `traceback` +- :return: +- True, if the test will be rerun; False, if nose should handle it. +- :rtype: +- `bool` +- """ +- # pylint:disable=invalid-name +- want_error = self._handle_test_error_or_failure(test, err) +- if not want_error and id(test) in self._tests_that_reran: +- self._nose_result.addError(test, err) +- return want_error or None +- +- def handleFailure(self, test, err): +- """ +- Baseclass override. Called when a test fails. +- +- If the test isn't going to be rerun again, then report the failure +- to the nose test result. +- +- :param test: +- The test that has raised an error +- :type test: +- :class:`nose.case.Test` +- :param err: +- Information about the test failure (from sys.exc_info()) +- :type err: +- `tuple` of `class`, :class:`Exception`, `traceback` +- :return: +- True, if the test will be rerun; False, if nose should handle it. +- :rtype: +- `bool` +- """ +- # pylint:disable=invalid-name +- want_failure = self._handle_test_error_or_failure(test, err) +- if not want_failure and id(test) in self._tests_that_reran: +- self._nose_result.addFailure(test, err) +- return want_failure or None +- +- def addSuccess(self, test): +- """ +- Baseclass override. Called when a test succeeds. +- +- Count remaining retries and compare with number of required successes +- that have not yet been achieved; retry if necessary. +- +- Returning True from this method keeps the test runner from reporting +- the test as a success; this way we can retry and only report as a +- success if we have achieved the required number of successes. +- +- :param test: +- The test that has succeeded +- :type test: +- :class:`nose.case.Test` +- :return: +- True, if the test will be rerun; False, if nose should handle it. +- :rtype: +- `bool` +- """ +- # pylint:disable=invalid-name +- will_handle = self._handle_test_success(test) +- test_id = id(test) +- # If this isn't a rerun, the builtin reporter is going to report it as a success +- if will_handle and test_id not in self._tests_that_reran: +- self._tests_that_have_been_reported.add(test_id) +- # If this test hasn't already been reported as successful, then do it now +- if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported: +- self._nose_result.addSuccess(test) +- return will_handle or None +- +- def report(self, stream): +- """ +- Baseclass override. Write details about flaky tests to the test report. +- +- :param stream: +- The test stream to which the report can be written. +- :type stream: +- `file` +- """ +- if self._flaky_report: +- self._add_flaky_report(stream) +- +- def prepareTestResult(self, result): +- """ +- Baseclass override. Called right before the first test is run. +- +- Stores the test result so that errors and failures can be reported +- to the nose test result. +- +- :param result: +- The nose test result that needs to be informed of test failures. +- :type result: +- :class:`nose.result.TextTestResult` +- """ +- # pylint:disable=invalid-name +- self._nose_result = result +- +- def prepareTestCase(self, test): +- """ +- Baseclass override. Called right before a test case is run. +- +- If the test class is marked flaky and the test callable is not, copy +- the flaky attributes from the test class to the test callable. 
+- +- :param test: +- The test that is being prepared to run +- :type test: +- :class:`nose.case.Test` +- """ +- # pylint:disable=invalid-name +- if not isinstance(test.test, Failure): +- test_class = test.test +- self._copy_flaky_attributes(test, test_class) +- if self._force_flaky and not self._has_flaky_attributes(test): +- self._make_test_flaky( +- test, self._max_runs, self._min_passes) +- +- @staticmethod +- def _get_test_callable_name(test): +- """ +- Base class override. +- """ +- _, _, class_and_callable_name = test.address() +- first_dot_index = class_and_callable_name.find('.') +- test_callable_name = class_and_callable_name[first_dot_index + 1:] +- return test_callable_name +- +- @classmethod +- def _get_test_callable(cls, test): +- """ +- Base class override. +- +- :param test: +- The test that has raised an error or succeeded +- :type test: +- :class:`nose.case.Test` +- """ +- callable_name = cls._get_test_callable_name(test) +- test_callable = getattr( +- test.test, +- callable_name, +- getattr(test.test, 'test', test.test), +- ) +- return test_callable
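
Below is a minimal, illustrative sketch (not part of the patch) of the usage pattern that the spec's %description and the bundled example tests rely on: a test decorated with @flaky is rerun by the pytest plugin instead of being reported as a failure on its first attempt. The test name and the _attempts counter are hypothetical, introduced only for illustration; the decorator arguments mirror the @flaky(3, 2) style used in the package's own example tests.

    # coding: utf-8
    from flaky import flaky

    _attempts = {"count": 0}

    @flaky(max_runs=3, min_passes=1)
    def test_unreliable_service():
        # Fails on the first run, then passes on a retry; flaky allows up to
        # 3 runs and requires at least 1 pass before reporting success.
        _attempts["count"] += 1
        assert _attempts["count"] >= 2

Run under pytest (with the plugin installed, as in the %check section of the spec), the first failure is retried rather than reported, which is the behaviour the package description summarizes.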