Merge pull request #383 from pytest-dev/update-pytests

Update pytests
Alessio Bogon 2020-09-06 08:53:42 +02:00 committed by GitHub
commit 882291524b
13 changed files with 81 additions and 20 deletions


@@ -2,11 +2,11 @@
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/psf/black
rev: 19.3b0
rev: 20.8b1
hooks:
- id: black
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.3.0
rev: v3.2.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
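
The two rev bumps above move the pinned hooks to black 20.8b1 and pre-commit-hooks v3.2.0. Assuming pre-commit is installed locally, the refreshed hooks can be applied to the whole tree with pre-commit run --all-files, and later pin updates can be picked up with pre-commit autoupdate; the formatting-only changes in the hunks below (reflowed signatures, dropped trailing commas) follow from the new black release.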


@@ -51,11 +51,6 @@ class GherkinTerminalReporter(TerminalReporter):
def __init__(self, config):
TerminalReporter.__init__(self, config)
def pytest_runtest_logstart(self, nodeid, location):
# Prevent locationline from being printed since we already
# show the module_name & in verbose mode the test name.
pass
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)


@@ -198,7 +198,12 @@ def _get_scenario_decorator(feature, feature_name, scenario, scenario_name, enco
def scenario(
feature_name, scenario_name, encoding="utf-8", example_converters=None, caller_module=None, features_base_dir=None,
feature_name,
scenario_name,
encoding="utf-8",
example_converters=None,
caller_module=None,
features_base_dir=None,
):
"""Scenario decorator.


@@ -1 +1 @@
mock
packaging


@@ -155,7 +155,7 @@ def test_step_trace(testdir):
"line": 12,
"match": {"location": ""},
"name": "a failing step",
"result": {"error_message": OfType(string), "status": "failed", "duration": OfType(int),},
"result": {"error_message": OfType(string), "status": "failed", "duration": OfType(int)},
},
],
"tags": [{"name": "scenario-failing-tag", "line": 9}],


@@ -1,6 +1,7 @@
"""Scenario Outline tests."""
import textwrap
from tests.utils import assert_outcomes
STEPS = """\
from pytest_bdd import given, when, then
@@ -111,11 +112,11 @@ def test_wrongly_outlined(testdir):
)
)
result = testdir.runpytest()
result.assert_outcomes(error=1)
assert_outcomes(result, errors=1)
result.stdout.fnmatch_lines(
'*ScenarioExamplesNotValidError: Scenario "Outlined with wrong examples"*has not valid examples*',
)
result.stdout.fnmatch_lines("*should match set of example values [[]'eat', 'left', 'start', 'unknown_param'[]].*",)
result.stdout.fnmatch_lines("*should match set of example values [[]'eat', 'left', 'start', 'unknown_param'[]].*")
def test_wrong_vertical_examples_scenario(testdir):
@@ -151,7 +152,7 @@ def test_wrong_vertical_examples_scenario(testdir):
)
)
result = testdir.runpytest()
result.assert_outcomes(error=1)
assert_outcomes(result, errors=1)
result.stdout.fnmatch_lines(
"*Scenario has not valid examples. Example rows should contain unique parameters. "
'"start" appeared more than once.*'
@@ -192,7 +193,7 @@ def test_wrong_vertical_examples_feature(testdir):
)
)
result = testdir.runpytest()
result.assert_outcomes(error=1)
assert_outcomes(result, errors=1)
result.stdout.fnmatch_lines(
"*Feature has not valid examples. Example rows should contain unique parameters. "
'"start" appeared more than once.*'


@@ -2,6 +2,8 @@
import textwrap
from tests.utils import assert_outcomes
def test_scenario_not_found(testdir):
"""Test the situation when scenario is not found."""
@@ -30,7 +32,7 @@ def test_scenario_not_found(testdir):
)
result = testdir.runpytest()
result.assert_outcomes(error=1)
assert_outcomes(result, errors=1)
result.stdout.fnmatch_lines('*Scenario "NOT FOUND" in feature "Scenario is not found" in*')


@@ -2,6 +2,8 @@
import textwrap
from tests.utils import assert_outcomes
def test_multiple_features_single_file(testdir):
"""Test validation error when multiple features are placed in a single file."""
@@ -49,5 +51,5 @@ def test_multiple_features_single_file(testdir):
)
)
result = testdir.runpytest()
result.assert_outcomes(error=1)
assert_outcomes(result, errors=1)
result.stdout.fnmatch_lines("*FeatureError: Multiple features are not allowed in a single feature file.*")


@@ -3,6 +3,7 @@ import itertools
import textwrap
from pytest_bdd.scenario import get_python_name_generator
from tests.utils import assert_outcomes
def test_python_name_generator():
@@ -60,7 +61,7 @@ def test_generate_missing(testdir):
)
result = testdir.runpytest("--generate-missing", "--feature", "generation.feature")
result.assert_outcomes(passed=0, failed=0, error=0)
assert_outcomes(result, passed=0, failed=0, errors=0)
assert not result.stderr.str()
assert result.ret == 0


@@ -42,7 +42,8 @@ def test_when_then(testdir):
@pytest.mark.parametrize(
("step", "keyword"), [("given", "Given"), ("when", "When"), ("then", "Then")],
("step", "keyword"),
[("given", "Given"), ("when", "When"), ("then", "Then")],
)
def test_preserve_decorator(testdir, step, keyword):
"""Check that we preserve original function attributes after decorating it."""


@@ -52,7 +52,12 @@ def test_item_collection_does_not_break_on_non_function_items(testdir):
@pytest.mark.tryfirst
def pytest_collection_modifyitems(session, config, items):
items[:] = [CustomItem(name=item.name, parent=item.parent) for item in items]
try:
item_creator = CustomItem.from_parent # Only available in pytest >= 5.4.0
except AttributeError:
item_creator = CustomItem
items[:] = [item_creator(name=item.name, parent=item.parent) for item in items]
class CustomItem(pytest.Item):
def runtest(self):
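
The try/except added above chooses a node constructor at runtime: pytest 5.4 introduced Node.from_parent and deprecated constructing collection items directly, while earlier releases only support the plain constructor. The same fallback, factored into a hypothetical standalone helper, could look like this:

def make_item(item_cls, name, parent):
    """Create a pytest Item compatibly across pytest versions (illustrative only).

    pytest >= 5.4 provides the from_parent() classmethod and deprecates direct
    construction; earlier releases have no from_parent at all.
    """
    if hasattr(item_cls, "from_parent"):
        return item_cls.from_parent(parent, name=name)
    return item_cls(name=name, parent=parent)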

tests/utils.py (new file)

@@ -0,0 +1,46 @@
from __future__ import absolute_import, unicode_literals
import pytest
from packaging.version import Version
PYTEST_VERSION = Version(pytest.__version__)
_errors_key = "error" if PYTEST_VERSION < Version("6") else "errors"
if PYTEST_VERSION < Version("6"):
def assert_outcomes(
result,
passed=0,
skipped=0,
failed=0,
errors=0,
xpassed=0,
xfailed=0,
):
"""Compatibility function for result.assert_outcomes"""
return result.assert_outcomes(
error=errors, # Pytest < 6 uses the singular form
passed=passed,
skipped=skipped,
failed=failed,
xpassed=xpassed,
xfailed=xfailed,
)
else:
def assert_outcomes(
result,
passed=0,
skipped=0,
failed=0,
errors=0,
xpassed=0,
xfailed=0,
):
"""Compatibility function for result.assert_outcomes"""
return result.assert_outcomes(
errors=errors, passed=passed, skipped=skipped, failed=failed, xpassed=xpassed, xfailed=xfailed
)
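
This helper exists because pytest 6.0 renamed the error keyword of RunResult.assert_outcomes to errors; funnelling every check through assert_outcomes(result, ...) keeps the call sites updated in the hunks above identical no matter which pytest version is installed.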


@@ -2,7 +2,7 @@
distshare = {homedir}/.tox/distshare
envlist = py38-pytestlatest-linters,
py27-pytest{43,44,45,46}-coverage,
py38-pytest{43,44,45,46,50,51,52, latest}-coverage,
py38-pytest{43,44,45,46,50,51,52,53,54,60, latest}-coverage,
py{35,36,38}-pytestlatest-coverage,
py27-pytestlatest-xdist-coverage
skip_missing_interpreters = true
@@ -13,6 +13,9 @@ setenv =
xdist: _PYTEST_MORE_ARGS=-n3 -rfsxX
deps =
pytestlatest: pytest
pytest60: pytest~=6.0.0
pytest54: pytest~=5.4.0
pytest53: pytest~=5.3.0
pytest52: pytest~=5.2.0
pytest51: pytest~=5.1.0
pytest50: pytest~=5.0.0
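
With the extra factors above, any of the new environments can be run on its own, for example tox -e py38-pytest60-coverage: tox expands the braces in envlist into individual environments and installs the matching pin from deps (pytest~=6.0.0 for the pytest60 factor).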