Remove pytest<6 support logic
parent 1706700ce6
commit 18c05bf92b
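In short: pytest < 6 spelled the error-count keyword of RunResult.assert_outcomes as the singular `error`, so the suite routed every outcome assertion through a version-gated wrapper in tests.utils and gated the --import-mode parametrization on a PYTEST_6 flag. With pytest >= 6 now required, the wrapper and the flag are deleted and call sites use `result.assert_outcomes(...)` directly. A minimal sketch of the shim pattern being removed, condensed from the tests.utils hunk at the end of this diff (the canonical `packaging.version` import is used here, and the `**counts` signature is a simplification of the original's explicit keyword list):

    import pytest
    from packaging.version import Version

    PYTEST_6 = Version(pytest.__version__) >= Version("6")


    def assert_outcomes(result, errors: int = 0, **counts) -> None:
        """Forward to RunResult.assert_outcomes; pytest < 6 used the singular `error`."""
        if PYTEST_6:
            result.assert_outcomes(errors=errors, **counts)
        else:
            result.assert_outcomes(error=errors, **counts)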
@@ -1,22 +1,14 @@
 import pytest
 
-from tests.utils import PYTEST_6
-
 pytest_plugins = "pytester"
 
 
 def pytest_generate_tests(metafunc):
     if "pytest_params" in metafunc.fixturenames:
-        if PYTEST_6:
-            parametrizations = [
-                pytest.param([], id="no-import-mode"),
-                pytest.param(["--import-mode=prepend"], id="--import-mode=prepend"),
-                pytest.param(["--import-mode=append"], id="--import-mode=append"),
-                pytest.param(["--import-mode=importlib"], id="--import-mode=importlib"),
-            ]
-        else:
-            parametrizations = [[]]
-        metafunc.parametrize(
-            "pytest_params",
-            parametrizations,
-        )
+        parametrizations = [
+            pytest.param([], id="no-import-mode"),
+            pytest.param(["--import-mode=prepend"], id="--import-mode=prepend"),
+            pytest.param(["--import-mode=append"], id="--import-mode=append"),
+            pytest.param(["--import-mode=importlib"], id="--import-mode=importlib"),
+        ]
+        metafunc.parametrize("pytest_params", parametrizations)

@@ -2,7 +2,6 @@
 import textwrap
 
 from pytest_bdd.utils import collect_dumped_objects
-from tests.utils import assert_outcomes
 
 STEPS = """\
 from pytest_bdd import parsers, given, when, then

@@ -114,7 +113,7 @@ def test_unused_params(testdir):
         )
     )
     result = testdir.runpytest()
-    assert_outcomes(result, passed=1)
+    result.assert_outcomes(passed=1)
 
 
 def test_outlined_with_other_fixtures(testdir):

@@ -2,8 +2,6 @@
 
 import textwrap
 
-from tests.utils import assert_outcomes
-
 
 def test_scenario_not_found(testdir, pytest_params):
     """Test the situation when scenario is not found."""

@@ -32,7 +30,7 @@ def test_scenario_not_found(testdir, pytest_params):
     )
     result = testdir.runpytest_subprocess(*pytest_params)
 
-    assert_outcomes(result, errors=1)
+    result.assert_outcomes(errors=1)
     result.stdout.fnmatch_lines('*Scenario "NOT FOUND" in feature "Scenario is not found" in*')
 
 

@@ -1,8 +1,6 @@
 """Test scenarios shortcut."""
 import textwrap
 
-from tests.utils import assert_outcomes
-
 
 def test_scenarios(testdir, pytest_params):
     """Test scenarios shortcut (used together with @scenario for individual test override)."""

@@ -66,7 +64,7 @@ def test_scenarios(testdir, pytest_params):
         """
     )
     result = testdir.runpytest_subprocess("-v", "-s", *pytest_params)
-    assert_outcomes(result, passed=4, failed=1)
+    result.assert_outcomes(passed=4, failed=1)
     result.stdout.fnmatch_lines(["*collected 5 items"])
     result.stdout.fnmatch_lines(["*test_test_subfolder_scenario *bar!", "PASSED"])
     result.stdout.fnmatch_lines(["*test_test_scenario *bar!", "PASSED"])

@@ -86,5 +84,5 @@ def test_scenarios_none_found(testdir, pytest_params):
         """
     )
     result = testdir.runpytest_subprocess(testpath, *pytest_params)
-    assert_outcomes(result, errors=1)
+    result.assert_outcomes(errors=1)
     result.stdout.fnmatch_lines(["*NoScenariosFound*"])

@@ -2,8 +2,6 @@
 
 import textwrap
 
-from tests.utils import assert_outcomes
-
 
 def test_multiple_features_single_file(testdir):
     """Test validation error when multiple features are placed in a single file."""

@@ -51,5 +49,5 @@ def test_multiple_features_single_file(testdir):
         )
     )
     result = testdir.runpytest()
-    assert_outcomes(result, errors=1)
+    result.assert_outcomes(errors=1)
     result.stdout.fnmatch_lines("*FeatureError: Multiple features are not allowed in a single feature file.*")

@@ -3,7 +3,6 @@ import itertools
 import textwrap
 
 from pytest_bdd.scenario import get_python_name_generator
-from tests.utils import assert_outcomes
 
 
 def test_python_name_generator():

@@ -65,7 +64,7 @@ def test_generate_missing(testdir):
     )
 
     result = testdir.runpytest("--generate-missing", "--feature", "generation.feature")
-    assert_outcomes(result, passed=0, failed=0, errors=0)
+    result.assert_outcomes(passed=0, failed=0, errors=0)
     assert not result.stderr.str()
     assert result.ret == 0
 

@@ -133,7 +132,7 @@ def test_generate_missing_with_step_parsers(testdir):
     )
 
     result = testdir.runpytest("--generate-missing", "--feature", "generation.feature")
-    assert_outcomes(result, passed=0, failed=0, errors=0)
+    result.assert_outcomes(passed=0, failed=0, errors=0)
     assert not result.stderr.str()
     assert result.ret == 0
 

|
@ -1,50 +1,7 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
|
||||
import pytest
|
||||
from packaging.utils import Version
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from _pytest.pytester import RunResult
|
||||
|
||||
# We leave this here for the future as an easy way to do feature-based testing.
|
||||
PYTEST_VERSION = Version(pytest.__version__)
|
||||
PYTEST_6 = PYTEST_VERSION >= Version("6")
|
||||
|
||||
|
||||
if PYTEST_6:
|
||||
|
||||
def assert_outcomes(
|
||||
result: RunResult,
|
||||
passed: int = 0,
|
||||
skipped: int = 0,
|
||||
failed: int = 0,
|
||||
errors: int = 0,
|
||||
xpassed: int = 0,
|
||||
xfailed: int = 0,
|
||||
) -> None:
|
||||
"""Compatibility function for result.assert_outcomes"""
|
||||
result.assert_outcomes(
|
||||
errors=errors, passed=passed, skipped=skipped, failed=failed, xpassed=xpassed, xfailed=xfailed
|
||||
)
|
||||
|
||||
else:
|
||||
|
||||
def assert_outcomes(
|
||||
result: RunResult,
|
||||
passed: int = 0,
|
||||
skipped: int = 0,
|
||||
failed: int = 0,
|
||||
errors: int = 0,
|
||||
xpassed: int = 0,
|
||||
xfailed: int = 0,
|
||||
) -> None:
|
||||
"""Compatibility function for result.assert_outcomes"""
|
||||
result.assert_outcomes(
|
||||
error=errors, # Pytest < 6 uses the singular form
|
||||
passed=passed,
|
||||
skipped=skipped,
|
||||
failed=failed,
|
||||
xpassed=xpassed,
|
||||
xfailed=xfailed,
|
||||
)
|
||||
|
|
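PYTEST_VERSION is kept, per the inline comment, as a hook for feature-based testing. A hypothetical example of how it could gate a test on a newer pytest capability (the version bound and test body are illustrative, not part of this commit):

    import pytest
    from packaging.version import Version

    PYTEST_VERSION = Version(pytest.__version__)

    pytest_plugins = "pytester"  # enables the pytester fixture, as in the conftest above


    @pytest.mark.skipif(PYTEST_VERSION < Version("6.2"), reason="pytester fixture needs pytest >= 6.2")
    def test_feature_gated(pytester):
        pytester.makepyfile("def test_ok(): pass")
        result = pytester.runpytest()
        result.assert_outcomes(passed=1)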