Apply `black` formatter to the codebase

Alessio Bogon 2019-08-23 20:08:09 +02:00 committed by Alessio Bogon
parent c7002a4862
commit 6047a35ef1
58 changed files with 1370 additions and 1274 deletions
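A commit like this is produced by running black across the whole repository. The exact invocation is not recorded in the commit, so the sketch below is an assumption: the ~120-character line length is inferred from the wrapped lines in the diff, and the subprocess wrapper is simply one way to drive the black command line from Python.

    # Assumed invocation, not part of the commit: reformat the source tree in place.
    # Requires black to be installed (pip install black); the --line-length value is inferred.
    import subprocess

    subprocess.run(["black", "--line-length", "120", "."], check=True)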


@ -17,34 +17,34 @@
import sys, os
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath(".."))
import pytest_bdd
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = '.rst'
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = u'Pytest-BDD'
copyright = u'2013, Oleg Pidsadnyi'
project = u"Pytest-BDD"
copyright = u"2013, Oleg Pidsadnyi"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@ -57,171 +57,163 @@ release = pytest_bdd.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pytest-BDDdoc'
htmlhelp_basename = "Pytest-BDDdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Pytest-BDD.tex', u'Pytest-BDD Documentation',
u'Oleg Pidsadnyi', 'manual'),
]
latex_documents = [("index", "Pytest-BDD.tex", u"Pytest-BDD Documentation", u"Oleg Pidsadnyi", "manual")]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pytest-bdd', u'Pytest-BDD Documentation',
[u'Oleg Pidsadnyi'], 1)
]
man_pages = [("index", "pytest-bdd", u"Pytest-BDD Documentation", [u"Oleg Pidsadnyi"], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
@ -230,16 +222,22 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pytest-BDD', u'Pytest-BDD Documentation',
u'Oleg Pidsadnyi', 'Pytest-BDD', 'One line description of project.',
'Miscellaneous'),
(
"index",
"Pytest-BDD",
u"Pytest-BDD Documentation",
u"Oleg Pidsadnyi",
"Pytest-BDD",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'


@ -3,6 +3,6 @@
from pytest_bdd.steps import given, when, then
from pytest_bdd.scenario import scenario, scenarios
__version__ = '3.2.1'
__version__ = "3.2.1"
__all__ = [given.__name__, when.__name__, then.__name__, scenario.__name__, scenarios.__name__]


@ -77,13 +77,10 @@ class LogBDDCucumberJSON(object):
if report.passed or not step["failed"]: # ignore setup/teardown
result = {"status": "passed"}
elif report.failed and step["failed"]:
result = {
"status": "failed",
"error_message": force_unicode(report.longrepr) if error_message else "",
}
result = {"status": "failed", "error_message": force_unicode(report.longrepr) if error_message else ""}
elif report.skipped:
result = {"status": "skipped"}
result['duration'] = long(math.floor((10 ** 9) * step["duration"])) # nanosec
result["duration"] = long(math.floor((10 ** 9) * step["duration"])) # nanosec
return result
def _serialize_tags(self, item):
@ -98,17 +95,11 @@ class LogBDDCucumberJSON(object):
}
]
"""
return [
{
"name": tag,
"line": item["line_number"] - 1
}
for tag in item["tags"]
]
return [{"name": tag, "line": item["line_number"] - 1} for tag in item["tags"]]
def _format_name(self, name, keys, values):
for param, value in zip(keys, values):
name = name.replace('<{}>'.format(param), value)
name = name.replace("<{}>".format(param), value)
return name
def _format_step_name(self, report, step):
@ -136,8 +127,8 @@ class LogBDDCucumberJSON(object):
def stepmap(step):
error_message = False
if step['failed'] and not scenario.setdefault('failed', False):
scenario['failed'] = True
if step["failed"] and not scenario.setdefault("failed", False):
scenario["failed"] = True
error_message = True
if self.expand:
@ -149,12 +140,10 @@ class LogBDDCucumberJSON(object):
step_name = step["name"]
return {
"keyword": step['keyword'],
"keyword": step["keyword"],
"name": step_name,
"line": step['line_number'],
"match": {
"location": "",
},
"line": step["line_number"],
"match": {"location": ""},
"result": self._get_result(step, report, error_message),
}
@ -164,22 +153,24 @@ class LogBDDCucumberJSON(object):
"uri": scenario["feature"]["rel_filename"],
"name": scenario["feature"]["name"] or scenario["feature"]["rel_filename"],
"id": scenario["feature"]["rel_filename"].lower().replace(" ", "-"),
"line": scenario['feature']["line_number"],
"line": scenario["feature"]["line_number"],
"description": scenario["feature"]["description"],
"tags": self._serialize_tags(scenario["feature"]),
"elements": [],
}
self.features[scenario["feature"]["filename"]]["elements"].append({
"keyword": "Scenario",
"id": report.item["name"],
"name": scenario["name"],
"line": scenario["line_number"],
"description": "",
"tags": self._serialize_tags(scenario),
"type": "scenario",
"steps": [stepmap(step) for step in scenario["steps"]],
})
self.features[scenario["feature"]["filename"]]["elements"].append(
{
"keyword": "Scenario",
"id": report.item["name"],
"name": scenario["name"],
"line": scenario["line_number"],
"description": "",
"tags": self._serialize_tags(scenario),
"type": "scenario",
"steps": [stepmap(step) for step in scenario["steps"]],
}
)
def pytest_sessionstart(self):
self.suite_start_time = time.time()


@ -58,7 +58,7 @@ STEP_PREFIXES = [
]
STEP_PARAM_RE = re.compile(r"\<(.+?)\>")
COMMENT_RE = re.compile(r'(^|(?<=\s))#')
COMMENT_RE = re.compile(r"(^|(?<=\s))#")
def get_step_type(line):
@ -82,7 +82,7 @@ def strip_comments(line):
"""
res = COMMENT_RE.search(line)
if res:
line = line[:res.start()]
line = line[: res.start()]
return line.strip()
@ -95,7 +95,7 @@ def parse_line(line):
"""
for prefix, _ in STEP_PREFIXES:
if line.startswith(prefix):
return prefix.strip(), line[len(prefix):].strip()
return prefix.strip(), line[len(prefix) :].strip()
return "", line
@ -136,11 +136,9 @@ def get_tags(line):
:return: List of tags.
"""
if not line or not line.strip().startswith('@'):
if not line or not line.strip().startswith("@"):
return set()
return (
set((tag.lstrip('@') for tag in line.strip().split(' @') if len(tag) > 1))
)
return set((tag.lstrip("@") for tag in line.strip().split(" @") if len(tag) > 1))
def get_features(paths, **kwargs):
@ -156,12 +154,7 @@ def get_features(paths, **kwargs):
if path not in seen_names:
seen_names.add(path)
if op.isdir(path):
features.extend(
get_features(
glob2.iglob(op.join(path, "**", "*.feature")),
**kwargs
)
)
features.extend(get_features(glob2.iglob(op.join(path, "**", "*.feature")), **kwargs))
else:
base, name = op.split(path)
feature = Feature.get_feature(base, name, **kwargs)
@ -204,9 +197,7 @@ class Examples(object):
"""
if param in self.example_params:
raise exceptions.ExamplesNotValidError(
"""Example rows should contain unique parameters. "{0}" appeared more than once""".format(
param,
)
"""Example rows should contain unique parameters. "{0}" appeared more than once""".format(param)
)
self.example_params.append(param)
self.vertical_examples.append(values)
@ -232,7 +223,7 @@ class Examples(object):
raw_value = example[index]
if converters and param in converters:
value = converters[param](raw_value)
if not builtin or value.__class__.__module__ in {'__builtin__', 'builtins'}:
if not builtin or value.__class__.__module__ in {"__builtin__", "builtins"}:
example[index] = value
params.append(example)
return [self.example_params, params]
@ -297,31 +288,47 @@ class Feature(object):
allowed_prev_mode = (types.BACKGROUND, types.GIVEN)
if not strict_gherkin:
allowed_prev_mode += (types.WHEN, )
allowed_prev_mode += (types.WHEN,)
if not scenario and prev_mode not in allowed_prev_mode and mode in types.STEP_TYPES:
raise exceptions.FeatureError(
"Step definition outside of a Scenario or a Background", line_number, clean_line, filename)
"Step definition outside of a Scenario or a Background", line_number, clean_line, filename
)
if strict_gherkin:
if (self.background and not scenario and mode not in (
types.SCENARIO, types.SCENARIO_OUTLINE, types.GIVEN, types.TAG)):
if (
self.background
and not scenario
and mode not in (types.SCENARIO, types.SCENARIO_OUTLINE, types.GIVEN, types.TAG)
):
raise exceptions.FeatureError(
"Background section can only contain Given steps", line_number, clean_line, filename)
"Background section can only contain Given steps", line_number, clean_line, filename
)
if mode == types.GIVEN and prev_mode not in (
types.GIVEN, types.SCENARIO, types.SCENARIO_OUTLINE, types.BACKGROUND):
types.GIVEN,
types.SCENARIO,
types.SCENARIO_OUTLINE,
types.BACKGROUND,
):
raise exceptions.FeatureError(
"Given steps must be the first within the Scenario", line_number, clean_line, filename)
"Given steps must be the first within the Scenario", line_number, clean_line, filename
)
if mode == types.WHEN and prev_mode not in (
types.SCENARIO, types.SCENARIO_OUTLINE, types.GIVEN, types.WHEN):
types.SCENARIO,
types.SCENARIO_OUTLINE,
types.GIVEN,
types.WHEN,
):
raise exceptions.FeatureError(
"When steps must be the first or follow Given steps", line_number, clean_line, filename)
"When steps must be the first or follow Given steps", line_number, clean_line, filename
)
if not self.background and mode == types.THEN and prev_mode not in types.STEP_TYPES:
raise exceptions.FeatureError(
"Then steps must follow Given or When steps", line_number, clean_line, filename)
"Then steps must follow Given or When steps", line_number, clean_line, filename
)
if mode == types.FEATURE:
if prev_mode is None or prev_mode == types.TAG:
@ -333,7 +340,10 @@ class Feature(object):
else:
raise exceptions.FeatureError(
"Multiple features are not allowed in a single feature file",
line_number, clean_line, filename)
line_number,
clean_line,
filename,
)
prev_mode = mode
@ -343,10 +353,7 @@ class Feature(object):
tags = get_tags(prev_line)
self.scenarios[parsed_line] = scenario = Scenario(self, parsed_line, line_number, tags=tags)
elif mode == types.BACKGROUND:
self.background = Background(
feature=self,
line_number=line_number,
)
self.background = Background(feature=self, line_number=line_number)
elif mode == types.EXAMPLES:
mode = types.EXAMPLES_HEADERS
(scenario or self).examples.line_number = line_number
@ -355,7 +362,8 @@ class Feature(object):
(scenario or self).examples.line_number = line_number
elif mode == types.EXAMPLES_HEADERS:
(scenario or self).examples.set_param_names(
[l.strip() for l in parsed_line.split("|")[1:-1] if l.strip()])
[l.strip() for l in parsed_line.split("|")[1:-1] if l.strip()]
)
mode = types.EXAMPLE_LINE
elif mode == types.EXAMPLE_LINE:
(scenario or self).examples.add_example([l.strip() for l in stripped_line.split("|")[1:-1]])
@ -366,19 +374,21 @@ class Feature(object):
except exceptions.ExamplesNotValidError as exc:
if scenario:
raise exceptions.FeatureError(
"""Scenario has not valid examples. {0}""".format(
exc.args[0]), line_number, clean_line, filename)
"""Scenario has not valid examples. {0}""".format(exc.args[0]),
line_number,
clean_line,
filename,
)
else:
raise exceptions.FeatureError(
"""Feature has not valid examples. {0}""".format(
exc.args[0]), line_number, clean_line, filename)
"""Feature has not valid examples. {0}""".format(exc.args[0]),
line_number,
clean_line,
filename,
)
elif mode and mode not in (types.FEATURE, types.TAG):
step = Step(
name=parsed_line,
type=mode,
indent=line_indent,
line_number=line_number,
keyword=keyword,
name=parsed_line, type=mode, indent=line_indent, line_number=line_number, keyword=keyword
)
if self.background and (mode == types.GIVEN or not strict_gherkin) and not scenario:
target = self.background
@ -485,7 +495,7 @@ class Scenario(object):
raise exceptions.ScenarioExamplesNotValidError(
"""Scenario "{0}" in the feature "{1}" has not valid examples. """
"""Set of step parameters {2} should match set of example values {3}.""".format(
self.name, self.feature.filename, sorted(params), sorted(example_params),
self.name, self.feature.filename, sorted(params), sorted(example_params)
)
)


@ -6,10 +6,7 @@ import os.path
from mako.lookup import TemplateLookup
import py
from .scenario import (
find_argumented_step_fixture_name,
make_python_name,
)
from .scenario import find_argumented_step_fixture_name, make_python_name
from .steps import get_step_fixture_name
from .feature import get_features
from .types import STEP_TYPES
@ -50,12 +47,14 @@ def generate_code(features, scenarios, steps):
grouped_steps = group_steps(steps)
template = template_lookup.get_template("test.py.mak")
return template.render(
features=features, scenarios=scenarios, steps=grouped_steps, make_python_name=make_python_name)
features=features, scenarios=scenarios, steps=grouped_steps, make_python_name=make_python_name
)
def show_missing_code(config):
"""Wrap pytest session to show missing code."""
from _pytest.main import wrap_session
return wrap_session(config, _show_missing_code_main)
@ -99,8 +98,7 @@ def print_missing_code(scenarios, steps):
tw.line()
features = sorted(
set(scenario.feature for scenario in scenarios),
key=lambda feature: feature.name or feature.filename
set(scenario.feature for scenario in scenarios), key=lambda feature: feature.name or feature.filename
)
code = generate_code(features, scenarios, steps)
tw.write(code)
@ -134,11 +132,10 @@ def parse_feature_files(paths):
features = get_features(paths)
scenarios = sorted(
itertools.chain.from_iterable(feature.scenarios.values() for feature in features),
key=lambda scenario: (
scenario.feature.name or scenario.feature.filename, scenario.name))
key=lambda scenario: (scenario.feature.name or scenario.feature.filename, scenario.name),
)
steps = sorted(
set(itertools.chain.from_iterable(scenario.steps for scenario in scenarios)),
key=lambda step: step.name,
set(itertools.chain.from_iterable(scenario.steps for scenario in scenarios)), key=lambda step: step.name
)
return features, scenarios, steps
@ -148,9 +145,9 @@ def group_steps(steps):
steps = sorted(steps, key=lambda step: step.type)
seen_steps = set()
grouped_steps = []
for step in (itertools.chain.from_iterable(
sorted(group, key=lambda step: step.name)
for _, group in itertools.groupby(steps, lambda step: step.type))):
for step in itertools.chain.from_iterable(
sorted(group, key=lambda step: step.name) for _, group in itertools.groupby(steps, lambda step: step.type)
):
if step.name not in seen_steps:
grouped_steps.append(step)
seen_steps.add(step.name)


@ -12,13 +12,11 @@ from .feature import STEP_PARAM_RE
def add_options(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
'--gherkin-terminal-reporter',
"--gherkin-terminal-reporter",
action="store_true",
dest="gherkin_terminal_reporter",
default=False,
help=(
"enable gherkin output"
)
help=("enable gherkin output"),
)
group._addoption(
"--gherkin-terminal-reporter-expanded",
@ -32,22 +30,24 @@ def add_options(parser):
def configure(config):
if config.option.gherkin_terminal_reporter:
# Get the standard terminal reporter plugin and replace it with our
current_reporter = config.pluginmanager.getplugin('terminalreporter')
current_reporter = config.pluginmanager.getplugin("terminalreporter")
if current_reporter.__class__ != TerminalReporter:
raise Exception("gherkin-terminal-reporter is not compatible with any other terminal reporter."
"You can use only one terminal reporter."
"Currently '{0}' is used."
"Please decide to use one by deactivating {0} or gherkin-terminal-reporter."
.format(current_reporter.__class__))
raise Exception(
"gherkin-terminal-reporter is not compatible with any other terminal reporter."
"You can use only one terminal reporter."
"Currently '{0}' is used."
"Please decide to use one by deactivating {0} or gherkin-terminal-reporter.".format(
current_reporter.__class__
)
)
gherkin_reporter = GherkinTerminalReporter(config)
config.pluginmanager.unregister(current_reporter)
config.pluginmanager.register(gherkin_reporter, 'terminalreporter')
config.pluginmanager.register(gherkin_reporter, "terminalreporter")
if config.pluginmanager.getplugin("dsession"):
raise Exception("gherkin-terminal-reporter is not compatible with 'xdist' plugin.")
class GherkinTerminalReporter(TerminalReporter):
def __init__(self, config):
TerminalReporter.__init__(self, config)
@ -69,47 +69,46 @@ class GherkinTerminalReporter(TerminalReporter):
word, word_markup = word
else:
if rep.passed:
word_markup = {'green': True}
word_markup = {"green": True}
elif rep.failed:
word_markup = {'red': True}
word_markup = {"red": True}
elif rep.skipped:
word_markup = {'yellow': True}
feature_markup = {'blue': True}
word_markup = {"yellow": True}
feature_markup = {"blue": True}
scenario_markup = word_markup
if self.verbosity <= 0:
return TerminalReporter.pytest_runtest_logreport(self, rep)
elif self.verbosity == 1:
if hasattr(report, 'scenario'):
if hasattr(report, "scenario"):
self.ensure_newline()
self._tw.write('Feature: ', **feature_markup)
self._tw.write(report.scenario['feature']['name'], **feature_markup)
self._tw.write('\n')
self._tw.write(' Scenario: ', **scenario_markup)
self._tw.write(report.scenario['name'], **scenario_markup)
self._tw.write(' ')
self._tw.write("Feature: ", **feature_markup)
self._tw.write(report.scenario["feature"]["name"], **feature_markup)
self._tw.write("\n")
self._tw.write(" Scenario: ", **scenario_markup)
self._tw.write(report.scenario["name"], **scenario_markup)
self._tw.write(" ")
self._tw.write(word, **word_markup)
self._tw.write('\n')
self._tw.write("\n")
else:
return TerminalReporter.pytest_runtest_logreport(self, rep)
elif self.verbosity > 1:
if hasattr(report, 'scenario'):
if hasattr(report, "scenario"):
self.ensure_newline()
self._tw.write('Feature: ', **feature_markup)
self._tw.write(report.scenario['feature']['name'], **feature_markup)
self._tw.write('\n')
self._tw.write(' Scenario: ', **scenario_markup)
self._tw.write(report.scenario['name'], **scenario_markup)
self._tw.write('\n')
for step in report.scenario['steps']:
self._tw.write("Feature: ", **feature_markup)
self._tw.write(report.scenario["feature"]["name"], **feature_markup)
self._tw.write("\n")
self._tw.write(" Scenario: ", **scenario_markup)
self._tw.write(report.scenario["name"], **scenario_markup)
self._tw.write("\n")
for step in report.scenario["steps"]:
if self.config.option.expand:
step_name = self._format_step_name(step['name'], **report.scenario['example_kwargs'])
step_name = self._format_step_name(step["name"], **report.scenario["example_kwargs"])
else:
step_name = step['name']
self._tw.write(' {} {}\n'.format(step['keyword'],
step_name), **scenario_markup)
self._tw.write(' ' + word, **word_markup)
self._tw.write('\n\n')
step_name = step["name"]
self._tw.write(" {} {}\n".format(step["keyword"], step_name), **scenario_markup)
self._tw.write(" " + word, **word_markup)
self._tw.write("\n\n")
else:
return TerminalReporter.pytest_runtest_logreport(self, rep)
self.stats.setdefault(cat, []).append(rep)


@ -106,9 +106,9 @@ def get_parser(step_name):
"""
if isinstance(step_name, six.string_types):
if isinstance(step_name, six.binary_type): # Python 2 compatibility
step_name = step_name.decode('utf-8')
step_name = step_name.decode("utf-8")
return string(step_name)
elif not hasattr(step_name, 'is_matching') or not hasattr(step_name, 'parse_arguments'):
elif not hasattr(step_name, "is_matching") or not hasattr(step_name, "parse_arguments"):
raise InvalidStepParserError(step_name)
else:
return step_name


@ -13,12 +13,13 @@ from .utils import CONFIG_STACK
def pytest_addhooks(pluginmanager):
"""Register plugin hooks."""
from pytest_bdd import hooks
pluginmanager.add_hookspecs(hooks)
@given('trace')
@when('trace')
@then('trace')
@given("trace")
@when("trace")
@then("trace")
def trace():
"""Enter pytest's pdb trace."""
pytest.set_trace()
@ -33,11 +34,8 @@ def pytest_addoption(parser):
def add_bdd_ini(parser):
parser.addini('bdd_features_base_dir',
'Base features directory.')
parser.addini('bdd_strict_gherkin',
'Parse features to be strict gherkin.',
type='bool', default=True)
parser.addini("bdd_features_base_dir", "Base features directory.")
parser.addini("bdd_strict_gherkin", "Parse features to be strict gherkin.", type="bool", default=True)
@pytest.mark.trylast
@ -102,8 +100,9 @@ def pytest_collection_modifyitems(session, config, items):
# since there may be other hooks that are executed before this and that want to reorder item as well
def item_key(item):
if isinstance(item, pytest.Function):
declaration_order = getattr(item.function, '__pytest_bdd_counter__', 0)
declaration_order = getattr(item.function, "__pytest_bdd_counter__", 0)
else:
declaration_order = 0
return (item.reportinfo()[:2], declaration_order)
items.sort(key=item_key)


@ -76,8 +76,9 @@ class ScenarioReport(object):
self.param_index = None
parametrize_args = get_parametrize_markers_args(node)
if parametrize_args and scenario.examples:
param_names = parametrize_args[0] if isinstance(parametrize_args[0], (tuple, list)) else [
parametrize_args[0]]
param_names = (
parametrize_args[0] if isinstance(parametrize_args[0], (tuple, list)) else [parametrize_args[0]]
)
param_values = parametrize_args[1]
node_param_values = [node.funcargs[param_name] for param_name in param_names]
if node_param_values in param_values:
@ -136,14 +137,16 @@ class ScenarioReport(object):
"rows": params,
"row_index": self.param_index,
}
] if scenario.examples else [],
]
if scenario.examples
else [],
"example_kwargs": self.example_kwargs,
}
def fail(self):
"""Stop collecting information and finalize the report as failed."""
self.current_step_report.finalize(failed=True)
remaining_steps = self.scenario.steps[len(self.step_reports):]
remaining_steps = self.scenario.steps[len(self.step_reports) :]
# Fail the rest of the steps and make reports.
for step in remaining_steps:


@ -16,22 +16,15 @@ import os
import re
import pytest
try:
from _pytest import fixtures as pytest_fixtures
except ImportError:
from _pytest import python as pytest_fixtures
from . import exceptions
from .feature import (
Feature,
force_unicode,
get_features,
)
from .steps import (
get_caller_module,
get_step_fixture_name,
inject_fixture,
)
from .feature import Feature, force_unicode, get_features
from .steps import get_caller_module, get_step_fixture_name, inject_fixture
from .types import GIVEN
from .utils import CONFIG_STACK, get_args
@ -93,9 +86,7 @@ def _find_step_function(request, step, scenario, encoding):
raise exceptions.StepDefinitionNotFoundError(
u"""Step definition is not found: {step}."""
""" Line {step.line_number} in scenario "{scenario.name}" in the feature "{feature.filename}""".format(
step=step,
scenario=scenario,
feature=scenario.feature,
step=step, scenario=scenario, feature=scenario.feature
)
)
@ -109,13 +100,7 @@ def _execute_step_function(request, scenario, step, step_func):
:param function step_func: Step function.
:param example: Example table.
"""
kw = dict(
request=request,
feature=scenario.feature,
scenario=scenario,
step=step,
step_func=step_func,
)
kw = dict(request=request, feature=scenario.feature, scenario=scenario, step=step, step_func=step_func)
request.config.hook.pytest_bdd_before_step(**kw)
@ -142,11 +127,7 @@ def _execute_scenario(feature, scenario, request, encoding):
:param request: request.
:param encoding: Encoding.
"""
request.config.hook.pytest_bdd_before_scenario(
request=request,
feature=feature,
scenario=scenario,
)
request.config.hook.pytest_bdd_before_scenario(request=request, feature=feature, scenario=scenario)
try:
givens = set()
@ -156,11 +137,7 @@ def _execute_scenario(feature, scenario, request, encoding):
step_func = _find_step_function(request, step, scenario, encoding=encoding)
except exceptions.StepDefinitionNotFoundError as exception:
request.config.hook.pytest_bdd_step_func_lookup_error(
request=request,
feature=feature,
scenario=scenario,
step=step,
exception=exception,
request=request, feature=feature, scenario=scenario, step=step, exception=exception
)
raise
@ -170,7 +147,7 @@ def _execute_scenario(feature, scenario, request, encoding):
if step_func.fixture in givens:
raise exceptions.GivenAlreadyUsed(
u'Fixture "{0}" that implements this "{1}" given step has been already used.'.format(
step_func.fixture, step.name,
step_func.fixture, step.name
)
)
givens.add(step_func.fixture)
@ -188,11 +165,7 @@ def _execute_scenario(feature, scenario, request, encoding):
_execute_step_function(request, scenario, step, step_func)
finally:
request.config.hook.pytest_bdd_after_scenario(
request=request,
feature=feature,
scenario=scenario,
)
request.config.hook.pytest_bdd_after_scenario(request=request, feature=feature, scenario=scenario)
FakeRequest = collections.namedtuple("FakeRequest", ["module"])
@ -213,7 +186,7 @@ def _get_scenario_decorator(feature, feature_name, scenario, scenario_name, enco
def decorator(*args):
if not args:
raise exceptions.ScenarioIsDecoratorOnly(
"scenario function can only be used as a decorator. Refer to the documentation.",
"scenario function can only be used as a decorator. Refer to the documentation."
)
[fn] = args
args = get_args(fn)
@ -235,16 +208,25 @@ def _get_scenario_decorator(feature, feature_name, scenario, scenario_name, enco
config.hook.pytest_bdd_apply_tag(tag=tag, function=scenario_wrapper)
scenario_wrapper.__doc__ = u"{feature_name}: {scenario_name}".format(
feature_name=feature_name, scenario_name=scenario_name)
feature_name=feature_name, scenario_name=scenario_name
)
scenario_wrapper.__scenario__ = scenario
scenario_wrapper.__pytest_bdd_counter__ = counter
scenario.test_function = scenario_wrapper
return scenario_wrapper
return decorator
def scenario(feature_name, scenario_name, encoding="utf-8", example_converters=None,
caller_module=None, features_base_dir=None, strict_gherkin=None):
def scenario(
feature_name,
scenario_name,
encoding="utf-8",
example_converters=None,
caller_module=None,
features_base_dir=None,
strict_gherkin=None,
):
"""Scenario decorator.
:param str feature_name: Feature file name. Absolute or relative to the configured feature base path.
@ -270,9 +252,7 @@ def scenario(feature_name, scenario_name, encoding="utf-8", example_converters=N
except KeyError:
raise exceptions.ScenarioNotFound(
u'Scenario "{scenario_name}" in feature "{feature_name}" in {feature_filename} is not found.'.format(
scenario_name=scenario_name,
feature_name=feature.name or "[Empty]",
feature_filename=feature.filename,
scenario_name=scenario_name, feature_name=feature.name or "[Empty]", feature_filename=feature.filename
)
)
@ -282,17 +262,13 @@ def scenario(feature_name, scenario_name, encoding="utf-8", example_converters=N
scenario.validate()
return _get_scenario_decorator(
feature=feature,
feature_name=feature_name,
scenario=scenario,
scenario_name=scenario_name,
encoding=encoding,
feature=feature, feature_name=feature_name, scenario=scenario, scenario_name=scenario_name, encoding=encoding
)
def get_features_base_dir(caller_module):
default_base_dir = os.path.dirname(caller_module.__file__)
return get_from_ini('bdd_features_base_dir', default_base_dir)
return get_from_ini("bdd_features_base_dir", default_base_dir)
def get_from_ini(key, default):
@ -302,12 +278,12 @@ def get_from_ini(key, default):
"""
config = CONFIG_STACK[-1]
value = config.getini(key)
return value if value != '' else default
return value if value != "" else default
def get_strict_gherkin():
config = CONFIG_STACK[-1]
return config.getini('bdd_strict_gherkin')
return config.getini("bdd_strict_gherkin")
def make_python_name(string):
@ -319,15 +295,16 @@ def make_python_name(string):
def get_python_name_generator(name):
"""Generate a sequence of suitable python names out of given arbitrary string name."""
python_name = make_python_name(name)
suffix = ''
suffix = ""
index = 0
def get_name():
return 'test_{0}{1}'.format(python_name, suffix)
return "test_{0}{1}".format(python_name, suffix)
while True:
yield get_name()
index += 1
suffix = '_{0}'.format(index)
suffix = "_{0}".format(index)
def scenarios(*feature_paths, **kwargs):
@ -338,11 +315,11 @@ def scenarios(*feature_paths, **kwargs):
frame = inspect.stack()[1]
module = inspect.getmodule(frame[0])
features_base_dir = kwargs.get('features_base_dir')
features_base_dir = kwargs.get("features_base_dir")
if features_base_dir is None:
features_base_dir = get_features_base_dir(module)
strict_gherkin = kwargs.get('strict_gherkin')
strict_gherkin = kwargs.get("strict_gherkin")
if strict_gherkin is None:
strict_gherkin = get_strict_gherkin()
@ -355,15 +332,19 @@ def scenarios(*feature_paths, **kwargs):
module_scenarios = frozenset(
(attr.__scenario__.feature.filename, attr.__scenario__.name)
for name, attr in module.__dict__.items() if hasattr(attr, '__scenario__'))
for name, attr in module.__dict__.items()
if hasattr(attr, "__scenario__")
)
for feature in get_features(abs_feature_paths, strict_gherkin=strict_gherkin):
for scenario_name, scenario_object in feature.scenarios.items():
# skip already bound scenarios
if (scenario_object.feature.filename, scenario_name) not in module_scenarios:
@scenario(feature.filename, scenario_name, **kwargs)
def _scenario():
pass # pragma: no cover
for test_name in get_python_name_generator(scenario_name):
if test_name not in module.__dict__:
# found an unique test name


@ -6,10 +6,7 @@ import re
import glob2
from .generation import (
generate_code,
parse_feature_files,
)
from .generation import generate_code, parse_feature_files
MIGRATE_REGEX = re.compile(r"\s?(\w+)\s\=\sscenario\((.+)\)", flags=re.MULTILINE)
@ -30,12 +27,12 @@ def migrate_tests_in_file(file_path):
if new_content != content:
# the regex above potentially causes the end of the file to
# have an extra newline
new_content = new_content.rstrip('\n') + '\n'
new_content = new_content.rstrip("\n") + "\n"
fd.seek(0)
fd.write(new_content)
print("migrated: {0}".format(file_path))
print ("migrated: {0}".format(file_path))
else:
print("skipped: {0}".format(file_path))
print ("skipped: {0}".format(file_path))
except IOError:
pass
@ -51,13 +48,13 @@ def print_generated_code(args):
"""Print generated test code for the given filenames."""
features, scenarios, steps = parse_feature_files(args.files)
code = generate_code(features, scenarios, steps)
print(code)
print (code)
def main():
"""Main entry point."""
parser = argparse.ArgumentParser(prog="pytest-bdd")
subparsers = parser.add_subparsers(help="sub-command help", dest='command')
subparsers = parser.add_subparsers(help="sub-command help", dest="command")
subparsers.required = True
parser_generate = subparsers.add_parser("generate", help="generate help")
parser_generate.add_argument(
@ -70,13 +67,9 @@ def main():
parser_generate.set_defaults(func=print_generated_code)
parser_migrate = subparsers.add_parser("migrate", help="migrate help")
parser_migrate.add_argument(
"path",
metavar="PATH",
help="Migrate outdated tests to the most recent form",
)
parser_migrate.add_argument("path", metavar="PATH", help="Migrate outdated tests to the most recent form")
parser_migrate.set_defaults(func=migrate_tests)
args = parser.parse_args()
if hasattr(args, 'func'):
if hasattr(args, "func"):
args.func(args)


@ -36,6 +36,7 @@ import inspect
import sys
import pytest
try:
from _pytest import fixtures as pytest_fixtures
except ImportError:
@ -43,9 +44,7 @@ except ImportError:
from .feature import parse_line, force_encode
from .types import GIVEN, WHEN, THEN
from .exceptions import (
StepError,
)
from .exceptions import StepError
from .parsers import get_parser
from .utils import get_args
@ -60,10 +59,11 @@ def get_step_fixture_name(name, type_, encoding=None):
:rtype: string
"""
return "pytestbdd_{type}_{name}".format(
type=type_, name=force_encode(name, **(dict(encoding=encoding) if encoding else {})))
type=type_, name=force_encode(name, **(dict(encoding=encoding) if encoding else {}))
)
def given(name, fixture=None, converters=None, scope='function', target_fixture=None):
def given(name, fixture=None, converters=None, scope="function", target_fixture=None):
"""Given step decorator.
:param name: Given step name.
@ -83,7 +83,7 @@ def given(name, fixture=None, converters=None, scope='function', target_fixture=
step_func.step_type = GIVEN
step_func.converters = converters
step_func.__name__ = force_encode(name, 'ascii')
step_func.__name__ = force_encode(name, "ascii")
step_func.fixture = fixture
func = pytest.fixture(scope=scope)(lambda: step_func)
func.__doc__ = 'Alias for the "{0}" fixture.'.format(fixture)
@ -129,10 +129,10 @@ def _not_a_fixture_decorator(func):
:raises: `StepError` if was used as a decorator.
"""
raise StepError('Cannot be used as a decorator when the fixture is specified')
raise StepError("Cannot be used as a decorator when the fixture is specified")
def _step_decorator(step_type, step_name, converters=None, scope='function', target_fixture=None):
def _step_decorator(step_type, step_name, converters=None, scope="function", target_fixture=None):
"""Step decorator for the type and the name.
:param str step_type: Step type (GIVEN, WHEN or THEN).
@ -148,6 +148,7 @@ def _step_decorator(step_type, step_name, converters=None, scope='function', tar
:note: If the step type is GIVEN it will automatically apply the pytest
fixture decorator to the step function.
"""
def decorator(func):
step_func = func
parser_instance = get_parser(step_name)
@ -206,16 +207,16 @@ def inject_fixture(request, arg, value):
:param value: argument value
"""
fd_kwargs = {
'fixturemanager': request._fixturemanager,
'baseid': None,
'argname': arg,
'func': lambda: value,
'scope': "function",
'params': None,
"fixturemanager": request._fixturemanager,
"baseid": None,
"argname": arg,
"func": lambda: value,
"scope": "function",
"params": None,
}
if 'yieldctx' in get_args(pytest_fixtures.FixtureDef.__init__):
fd_kwargs['yieldctx'] = False
if "yieldctx" in get_args(pytest_fixtures.FixtureDef.__init__):
fd_kwargs["yieldctx"] = False
fd = pytest_fixtures.FixtureDef(**fd_kwargs)
fd.cached_result = (value, 0, None)


@ -17,10 +17,9 @@ def get_args(func):
:return: A list of argument names.
:rtype: list
"""
if hasattr(inspect, 'signature'):
if hasattr(inspect, "signature"):
params = inspect.signature(func).parameters.values()
return [param.name for param in params
if param.kind == param.POSITIONAL_OR_KEYWORD]
return [param.name for param in params if param.kind == param.POSITIONAL_OR_KEYWORD]
else:
return inspect.getargspec(func).args
@ -31,7 +30,7 @@ def get_parametrize_markers_args(node):
This function uses that API if it is available otherwise it uses MarkInfo objects.
"""
mark_name = 'parametrize'
mark_name = "parametrize"
try:
return get_markers_args_using_iter_markers(node, mark_name)
except AttributeError:
@ -48,4 +47,4 @@ def get_markers_args_using_iter_markers(node, mark_name):
def get_markers_args_using_get_marker(node, mark_name):
"""Deprecated on pytest>=3.6"""
return getattr(node.get_marker(mark_name), 'args', ())
return getattr(node.get_marker(mark_name), "args", ())


@ -14,12 +14,12 @@ class ToxTestCommand(TestCommand):
"""Test command which runs tox under the hood."""
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
user_options = [("tox-args=", "a", "Arguments to pass to tox")]
def initialize_options(self):
"""Initialize options and set their defaults."""
TestCommand.initialize_options(self)
self.tox_args = '--recreate'
self.tox_args = "--recreate"
def finalize_options(self):
"""Add options to the test runner (tox)."""
@ -32,6 +32,7 @@ class ToxTestCommand(TestCommand):
# import here, cause outside the eggs aren't loaded
import tox
import shlex
errno = tox.cmdline(args=shlex.split(self.tox_args))
sys.exit(errno)
@ -39,13 +40,15 @@ class ToxTestCommand(TestCommand):
dirname = os.path.dirname(__file__)
long_description = (
codecs.open(os.path.join(dirname, "README.rst"), encoding="utf-8").read() + "\n" +
codecs.open(os.path.join(dirname, "AUTHORS.rst"), encoding="utf-8").read() + "\n" +
codecs.open(os.path.join(dirname, "CHANGES.rst"), encoding="utf-8").read()
codecs.open(os.path.join(dirname, "README.rst"), encoding="utf-8").read()
+ "\n"
+ codecs.open(os.path.join(dirname, "AUTHORS.rst"), encoding="utf-8").read()
+ "\n"
+ codecs.open(os.path.join(dirname, "CHANGES.rst"), encoding="utf-8").read()
)
with codecs.open(os.path.join(dirname, 'pytest_bdd', '__init__.py'), encoding='utf-8') as fd:
VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(fd.read()).group(1)
with codecs.open(os.path.join(dirname, "pytest_bdd", "__init__.py"), encoding="utf-8") as fd:
VERSION = re.compile(r".*__version__ = ['\"](.*?)['\"]", re.S).match(fd.read()).group(1)
setup(
name="pytest-bdd",
@ -67,26 +70,15 @@ setup(
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3"
] + [("Programming Language :: Python :: %s" % x) for x in "2.7 3.4 3.5 3.6 3.7".split()],
"Programming Language :: Python :: 3",
]
+ [("Programming Language :: Python :: %s" % x) for x in "2.7 3.4 3.5 3.6 3.7".split()],
cmdclass={"test": ToxTestCommand},
install_requires=[
"glob2",
"Mako",
"parse",
"parse_type",
"py",
"pytest>=3.0.0",
"six>=1.9.0",
],
install_requires=["glob2", "Mako", "parse", "parse_type", "py", "pytest>=3.0.0", "six>=1.9.0"],
# the following makes a plugin available to py.test
entry_points={
"pytest11": [
"pytest-bdd = pytest_bdd.plugin",
],
"console_scripts": [
"pytest-bdd = pytest_bdd.scripts:main",
]
"pytest11": ["pytest-bdd = pytest_bdd.plugin"],
"console_scripts": ["pytest-bdd = pytest_bdd.scripts:main"],
},
tests_require=["tox"],
packages=["pytest_bdd"],


@ -1,75 +1,71 @@
"""Step arguments tests."""
import functools
from pytest_bdd import (
given,
parsers,
scenario,
then,
when,
)
from pytest_bdd import given, parsers, scenario, then, when
import pytest
from pytest_bdd import exceptions
scenario_when = functools.partial(scenario, '../when_arguments.feature')
scenario_when = functools.partial(scenario, "../when_arguments.feature")
scenario_args = functools.partial(scenario, '../args_steps.feature')
scenario_args = functools.partial(scenario, "../args_steps.feature")
@scenario_args('Every step takes a parameter with the same name')
@scenario_args("Every step takes a parameter with the same name")
def test_steps():
pass
@scenario_when('Argument in when, step 1')
@scenario_when("Argument in when, step 1")
def test_argument_in_when_step_1():
pass
@scenario_when('Argument in when, step 2')
@scenario_when("Argument in when, step 2")
def test_argument_in_when_step_2():
pass
def test_multiple_given(request):
"""Using the same given fixture raises an error."""
@scenario_args('Using the same given fixture raises an error')
@scenario_args("Using the same given fixture raises an error")
def test():
pass
with pytest.raises(exceptions.GivenAlreadyUsed):
test(request)
@given(parsers.cfparse('I have {euro:d} Euro'))
@given(parsers.cfparse("I have {euro:d} Euro"))
def i_have(euro, values):
assert euro == values.pop(0)
@when(parsers.cfparse('I pay {euro:d} Euro'))
@when(parsers.cfparse("I pay {euro:d} Euro"))
def i_pay(euro, values, request):
assert euro == values.pop(0)
@then(parsers.cfparse('I should have {euro:d} Euro'))
@then(parsers.cfparse("I should have {euro:d} Euro"))
def i_should_have(euro, values):
assert euro == values.pop(0)
@given(parsers.cfparse('I have an argument {arg:Number}', extra_types=dict(Number=int)))
@given(parsers.cfparse("I have an argument {arg:Number}", extra_types=dict(Number=int)))
def argument(arg):
"""I have an argument."""
return dict(arg=arg)
@when(parsers.cfparse('I get argument {arg:d}'))
@when(parsers.cfparse("I get argument {arg:d}"))
def get_argument(argument, arg):
"""Getting argument."""
argument['arg'] = arg
argument["arg"] = arg
@then(parsers.cfparse('My argument should be {arg:d}'))
@then(parsers.cfparse("My argument should be {arg:d}"))
def assert_that_my_argument_is_arg(argument, arg):
"""Assert that arg from when equals arg."""
assert argument['arg'] == arg
assert argument["arg"] == arg


@ -1,75 +1,71 @@
"""Step arguments tests."""
import functools
from pytest_bdd import (
given,
parsers,
scenario,
then,
when,
)
from pytest_bdd import given, parsers, scenario, then, when
import pytest
from pytest_bdd import exceptions
scenario_when = functools.partial(scenario, '../when_arguments.feature')
scenario_when = functools.partial(scenario, "../when_arguments.feature")
scenario_args = functools.partial(scenario, '../args_steps.feature')
scenario_args = functools.partial(scenario, "../args_steps.feature")
@scenario_args('Every step takes a parameter with the same name')
@scenario_args("Every step takes a parameter with the same name")
def test_steps():
pass
@scenario_when('Argument in when, step 1')
@scenario_when("Argument in when, step 1")
def test_argument_in_when_step_1():
pass
@scenario_when('Argument in when, step 2')
@scenario_when("Argument in when, step 2")
def test_argument_in_when_step_2():
pass
def test_multiple_given(request):
"""Using the same given fixture raises an error."""
@scenario_args('Using the same given fixture raises an error')
@scenario_args("Using the same given fixture raises an error")
def test():
pass
with pytest.raises(exceptions.GivenAlreadyUsed):
test(request)
@given(parsers.parse('I have {euro:d} Euro'))
@given(parsers.parse("I have {euro:d} Euro"))
def i_have(euro, values):
assert euro == values.pop(0)
@when(parsers.parse('I pay {euro:d} Euro'))
@when(parsers.parse("I pay {euro:d} Euro"))
def i_pay(euro, values, request):
assert euro == values.pop(0)
@then(parsers.parse('I should have {euro:d} Euro'))
@then(parsers.parse("I should have {euro:d} Euro"))
def i_should_have(euro, values):
assert euro == values.pop(0)
@given(parsers.parse('I have an argument {arg:Number}', extra_types=dict(Number=int)))
@given(parsers.parse("I have an argument {arg:Number}", extra_types=dict(Number=int)))
def argument(arg):
"""I have an argument."""
return dict(arg=arg)
@when(parsers.parse('I get argument {arg:d}'))
@when(parsers.parse("I get argument {arg:d}"))
def get_argument(argument, arg):
"""Getting argument."""
argument['arg'] = arg
argument["arg"] = arg
@then(parsers.parse('My argument should be {arg:d}'))
@then(parsers.parse("My argument should be {arg:d}"))
def assert_that_my_argument_is_arg(argument, arg):
"""Assert that arg from when equals arg."""
assert argument['arg'] == arg
assert argument["arg"] == arg


@ -2,75 +2,71 @@
import functools
import re
from pytest_bdd import (
given,
parsers,
scenario,
then,
when,
)
from pytest_bdd import given, parsers, scenario, then, when
import pytest
from pytest_bdd import exceptions
scenario_when = functools.partial(scenario, '../when_arguments.feature')
scenario_when = functools.partial(scenario, "../when_arguments.feature")
scenario_args = functools.partial(scenario, '../args_steps.feature')
scenario_args = functools.partial(scenario, "../args_steps.feature")
@scenario_args('Every step takes a parameter with the same name')
@scenario_args("Every step takes a parameter with the same name")
def test_steps():
pass
@scenario_when('Argument in when, step 1')
@scenario_when("Argument in when, step 1")
def test_argument_in_when_step_1():
pass
@scenario_when('Argument in when, step 2')
@scenario_when("Argument in when, step 2")
def test_argument_in_when_step_2():
pass
def test_multiple_given(request):
"""Using the same given fixture raises an error."""
@scenario_args('Using the same given fixture raises an error')
@scenario_args("Using the same given fixture raises an error")
def test():
pass
with pytest.raises(exceptions.GivenAlreadyUsed):
test(request)
@given(parsers.re(r'I have (?P<euro>\d+) Euro'), converters=dict(euro=int))
@given(parsers.re(r"I have (?P<euro>\d+) Euro"), converters=dict(euro=int))
def i_have(euro, values):
assert euro == values.pop(0)
@when(parsers.re(r'I pay (?P<euro>\d+) Euro'), converters=dict(euro=int))
@when(parsers.re(r"I pay (?P<euro>\d+) Euro"), converters=dict(euro=int))
def i_pay(euro, values, request):
assert euro == values.pop(0)
@then(parsers.re(r'I should have (?P<euro>\d+) Euro'), converters=dict(euro=int))
@then(parsers.re(r"I should have (?P<euro>\d+) Euro"), converters=dict(euro=int))
def i_should_have(euro, values):
assert euro == values.pop(0)
@given(parsers.re(r'I have an argument (?P<arg>\d+)'))
@given(parsers.re(r"I have an argument (?P<arg>\d+)"))
def argument(arg):
"""I have an argument."""
return dict(arg=arg)
@when(parsers.re(r'I get argument (?P<arg>\d+)'))
@when(parsers.re(r"I get argument (?P<arg>\d+)"))
def get_argument(argument, arg):
"""Getting argument."""
argument['arg'] = arg
argument["arg"] = arg
@then(parsers.re(r'My argument should be (?P<arg>\d+)'))
@then(parsers.re(r"My argument should be (?P<arg>\d+)"))
def assert_that_my_argument_is_arg(argument, arg):
"""Assert that arg from when equals arg."""
assert argument['arg'] == arg
assert argument["arg"] == arg


@ -5,4 +5,4 @@ Scenario: Executed with steps matching step definitons with arguments
And I append 2 to the list
And I append 3 to the list
Then foo should have value "foo"
And the list should be [1, 2, 3]
And the list should be [1, 2, 3]


@ -1,42 +1,33 @@
"""Test step arguments with complex folder structure."""
from pytest_bdd import (
given,
parsers,
scenario,
then,
when,
)
from pytest_bdd import given, parsers, scenario, then, when
@scenario(
'args.feature',
'Executed with steps matching step definitons with arguments',
)
@scenario("args.feature", "Executed with steps matching step definitons with arguments")
def test_steps():
pass
@given('I have a foo fixture with value "foo"')
def foo():
return 'foo'
return "foo"
@given('there is a list')
@given("there is a list")
def results():
return []
@when(parsers.parse('I append {n:d} to the list'))
@when(parsers.parse("I append {n:d} to the list"))
def append_to_list(results, n):
results.append(n)
@then('foo should have value "foo"')
def foo_is_foo(foo):
assert foo == 'foo'
assert foo == "foo"
@then('the list should be [1, 2, 3]')
@then("the list should be [1, 2, 3]")
def check_results(results):
assert results == [1, 2, 3]


@ -4,7 +4,9 @@ import textwrap
def test_arg_fixture_mix(testdir):
subdir = testdir.mkpydir("arg_fixture_mix")
subdir.join("test_a.py").write(textwrap.dedent("""
subdir.join("test_a.py").write(
textwrap.dedent(
"""
import re
import pytest
from pytest_bdd import scenario, given, then, parsers
@ -45,9 +47,13 @@ def test_arg_fixture_mix(testdir):
@then('foo should be fine')
def foo_should_be_fine(foo):
assert foo == "fine"
"""))
"""
)
)
subdir.join("test_b.py").write(textwrap.dedent("""
subdir.join("test_b.py").write(
textwrap.dedent(
"""
import re
import pytest
from pytest_bdd import scenario, given, then
@ -76,9 +82,12 @@ def test_arg_fixture_mix(testdir):
def test_bar(foo):
assert foo == 'fine'
"""))
"""
)
)
subdir.join("arg_and_fixture_mix.feature").write("""
subdir.join("arg_and_fixture_mix.feature").write(
"""
Scenario: Use the step argument with the same name as fixture of another test
Given foo is "Hello"
Then foo should be "Hello"
@ -87,7 +96,8 @@ def test_arg_fixture_mix(testdir):
Scenario: Everything is fine
Given it is all fine
Then foo should be fine
""")
"""
)
result = testdir.runpytest("-k arg_fixture_mix")
assert result.ret == 0


@ -1,9 +1,9 @@
Scenario: Multiple given alias is not evaluated multiple times
Given I have an empty list
# Alias of the "I have foo (which is 1) in my list"
And I have bar (alias of foo) in my list
When I do crash (which is 2)
And I do boom (alias of crash)
Then my list should be [1, 2, 2]


@ -1,11 +1,11 @@
from pytest_bdd import given, then
@given('I have a bar')
@given("I have a bar")
def bar():
return 'bar'
return "bar"
@then('bar should have value "bar"')
def bar_is_bar(bar):
assert bar == 'bar'
assert bar == "bar"


@ -1,4 +1,4 @@
Scenario: Some scenario
Given 1
When 2
Then 3
Then 3


@ -1,8 +1,8 @@
Scenario: Given and when using the same fixture should not evaluate it twice
Given I have an empty list
# Alias of the "I have a fixture (appends 1 to a list)"
And I have a fixture (appends 1 to a list) in reuse syntax
When I use this fixture
Then my list should be [1]


@ -3,29 +3,29 @@
from pytest_bdd import scenario, given, when, then
@scenario('alias.feature', 'Multiple given alias is not evaluated multiple times')
@scenario("alias.feature", "Multiple given alias is not evaluated multiple times")
def test_steps():
pass
@given('I have an empty list')
@given("I have an empty list")
def results():
return []
@given('I have foo (which is 1) in my list')
@given('I have bar (alias of foo) in my list')
@given("I have foo (which is 1) in my list")
@given("I have bar (alias of foo) in my list")
def foo(results):
results.append(1)
@when('I do crash (which is 2)')
@when('I do boom (alias of crash)')
@when("I do crash (which is 2)")
@when("I do boom (alias of crash)")
def crash(results):
results.append(2)
@then('my list should be [1, 2, 2]')
@then("my list should be [1, 2, 2]")
def check_results(results):
"""Fixtures are not evaluated multiple times, so the list will be [1, 2, 2]"""
assert results == [1, 2, 2]


@ -3,20 +3,13 @@ import re
import pytest
from pytest_bdd import (
given,
parsers,
scenario,
then,
)
from pytest_bdd import given, parsers, scenario, then
def test_background_basic(request):
"""Test feature background."""
@scenario(
"background.feature",
"Basic usage",
)
@scenario("background.feature", "Basic usage")
def test():
pass
@ -25,10 +18,8 @@ def test_background_basic(request):
def test_background_check_order(request):
"""Test feature background to ensure that backound steps are executed first."""
@scenario(
"background.feature",
"Background steps are executed first",
)
@scenario("background.feature", "Background steps are executed first")
def test():
pass
@ -40,7 +31,7 @@ def foo():
return {}
@given(parsers.re(r'a background step with multiple lines:\n(?P<data>.+)', flags=re.DOTALL))
@given(parsers.re(r"a background step with multiple lines:\n(?P<data>.+)", flags=re.DOTALL))
def multi_line(foo, data):
assert data == "one\ntwo"
@ -70,7 +61,7 @@ def foo_has_bar(foo):
@then('foo should have value "dummy"')
def foo_has_dummy(foo):
assert foo['dummy'] == "dummy"
assert foo["dummy"] == "dummy"
@then('foo should not have value "bar"')

View File

@ -7,7 +7,7 @@ import textwrap
def runandparse(testdir, *args):
"""Run tests in testdir and parse json output."""
resultpath = testdir.tmpdir.join("cucumber.json")
result = testdir.runpytest('--cucumberjson={0}'.format(resultpath), '-s', *args)
result = testdir.runpytest("--cucumberjson={0}".format(resultpath), "-s", *args)
jsonobject = json.load(resultpath.open())
return result, jsonobject
@ -26,20 +26,28 @@ class equals_any(object):
return 0 if (isinstance(other, self.type) if self.type else False) else -1
string = type(u'')
string = type(u"")
def test_step_trace(testdir):
"""Test step trace."""
testdir.makefile(".ini", pytest=textwrap.dedent("""
testdir.makefile(
".ini",
pytest=textwrap.dedent(
"""
[pytest]
markers =
scenario-passing-tag
scenario-failing-tag
scenario-outline-passing-tag
feature-tag
"""))
testdir.makefile('.feature', test=textwrap.dedent("""
"""
),
)
testdir.makefile(
".feature",
test=textwrap.dedent(
"""
@feature-tag
Feature: One passing scenario, one failing scenario
@ -62,8 +70,12 @@ def test_step_trace(testdir):
| str | hello |
| int | 42 |
| float | 1.0 |
"""))
testdir.makepyfile(textwrap.dedent("""
"""
),
)
testdir.makepyfile(
textwrap.dedent(
"""
import pytest
from pytest_bdd import given, when, scenario
@ -94,7 +106,9 @@ def test_step_trace(testdir):
@scenario('test.feature', 'Passing outline')
def test_passing_outline():
pass
"""))
"""
)
)
result, jsonobject = runandparse(testdir)
assert result.ret
expected = [
@ -111,36 +125,20 @@ def test_step_trace(testdir):
{
"keyword": "Given",
"line": 6,
"match": {
"location": ""
},
"match": {"location": ""},
"name": "a passing step",
"result": {
"status": "passed",
"duration": equals_any(int)
}
"result": {"status": "passed", "duration": equals_any(int)},
},
{
"keyword": "And",
"line": 7,
"match": {
"location": ""
},
"match": {"location": ""},
"name": "some other passing step",
"result": {
"status": "passed",
"duration": equals_any(int)
}
}
"result": {"status": "passed", "duration": equals_any(int)},
},
],
"tags": [
{
'name': 'scenario-passing-tag',
'line': 4,
}
],
"type": "scenario"
"tags": [{"name": "scenario-passing-tag", "line": 4}],
"type": "scenario",
},
{
"description": "",
@ -152,127 +150,86 @@ def test_step_trace(testdir):
{
"keyword": "Given",
"line": 11,
"match": {
"location": ""
},
"match": {"location": ""},
"name": "a passing step",
"result": {
"status": "passed",
"duration": equals_any(int)
}
"result": {"status": "passed", "duration": equals_any(int)},
},
{
"keyword": "And",
"line": 12,
"match": {
"location": ""
},
"match": {"location": ""},
"name": "a failing step",
"result": {
"error_message": equals_any(string),
"status": "failed",
"duration": equals_any(int)
}
}
"duration": equals_any(int),
},
},
],
"tags": [
{
'name': 'scenario-failing-tag',
'line': 9,
}
],
"type": "scenario"
"tags": [{"name": "scenario-failing-tag", "line": 9}],
"type": "scenario",
},
{
"description": "",
"keyword": "Scenario",
"tags": [
{
"line": 14,
"name": "scenario-outline-passing-tag"
}
],
"tags": [{"line": 14, "name": "scenario-outline-passing-tag"}],
"steps": [
{
"line": 16,
"match": {"location": ""},
"result": {
"status": "passed",
"duration": equals_any(int)
},
"result": {"status": "passed", "duration": equals_any(int)},
"keyword": "Given",
"name": "type <type> and value <value>"
"name": "type <type> and value <value>",
}
],
"line": 15,
"type": "scenario",
"id": "test_passing_outline[str-hello]",
"name": "Passing outline"
"name": "Passing outline",
},
{
"description": "",
"keyword": "Scenario",
"tags": [
{
"line": 14,
"name": "scenario-outline-passing-tag"
}
],
"tags": [{"line": 14, "name": "scenario-outline-passing-tag"}],
"steps": [
{
"line": 16,
"match": {"location": ""},
"result": {
"status": "passed",
"duration": equals_any(int)
},
"result": {"status": "passed", "duration": equals_any(int)},
"keyword": "Given",
"name": "type <type> and value <value>"
"name": "type <type> and value <value>",
}
],
"line": 15,
"type": "scenario",
"id": "test_passing_outline[int-42]",
"name": "Passing outline"
"name": "Passing outline",
},
{
"description": "",
"keyword": "Scenario",
"tags": [
{
"line": 14,
"name": "scenario-outline-passing-tag"
}
],
"tags": [{"line": 14, "name": "scenario-outline-passing-tag"}],
"steps": [
{
"line": 16,
"match": {"location": ""},
"result": {
"status": "passed",
"duration": equals_any(int)
},
"result": {"status": "passed", "duration": equals_any(int)},
"keyword": "Given",
"name": "type <type> and value <value>"
"name": "type <type> and value <value>",
}
],
"line": 15,
"type": "scenario",
"id": "test_passing_outline[float-1.0]",
"name": "Passing outline"
}
"name": "Passing outline",
},
],
"id": os.path.join("test_step_trace0", "test.feature"),
"keyword": "Feature",
"line": 2,
"name": "One passing scenario, one failing scenario",
"tags": [
{
'name': 'feature-tag',
'line': 1,
}
],
"uri": os.path.join(testdir.tmpdir.basename, 'test.feature'),
"tags": [{"name": "feature-tag", "line": 1}],
"uri": os.path.join(testdir.tmpdir.basename, "test.feature"),
}
]
@ -281,13 +238,21 @@ def test_step_trace(testdir):
def test_step_trace_with_expand_option(testdir):
"""Test step trace."""
testdir.makefile(".ini", pytest=textwrap.dedent("""
testdir.makefile(
".ini",
pytest=textwrap.dedent(
"""
[pytest]
markers =
feature-tag
scenario-outline-passing-tag
"""))
testdir.makefile('.feature', test=textwrap.dedent("""
"""
),
)
testdir.makefile(
".feature",
test=textwrap.dedent(
"""
@feature-tag
Feature: One scenario outline, expanded to multiple scenarios
@ -300,8 +265,12 @@ def test_step_trace_with_expand_option(testdir):
| str | hello |
| int | 42 |
| float | 1.0 |
"""))
testdir.makepyfile(textwrap.dedent("""
"""
),
)
testdir.makepyfile(
textwrap.dedent(
"""
import pytest
from pytest_bdd import given, scenario
@ -312,8 +281,10 @@ def test_step_trace_with_expand_option(testdir):
@scenario('test.feature', 'Passing outline')
def test_passing_outline():
pass
"""))
result, jsonobject = runandparse(testdir, '--cucumber-json-expanded')
"""
)
)
result, jsonobject = runandparse(testdir, "--cucumber-json-expanded")
assert result.ret == 0
assert jsonobject[0]["elements"][0]["steps"][0]["name"] == "type str and value hello"
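
Most of the churn in the expected Cucumber JSON above comes from one rule: black joins short bracketed constructs onto a single line when they fit within the configured line length, and splits the ones that do not fit into one element per line, adding a trailing comma. A hedged sketch with invented report data, not from this commit:

# Illustrative sketch, hypothetical data, not part of this commit.
# A dict that fits within the configured line length is collapsed onto one line:
step = {"status": "passed", "duration": 3}

# One that would overflow is exploded, one item per line, with a trailing comma added:
report = {
    "keyword": "Given",
    "name": "a passing step with a rather long human-readable description attached to it",
    "result": {"status": "passed", "duration": 3},
}
print(step, report)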

View File

@ -4,18 +4,19 @@ from pytest_bdd import scenario
def test_description(request):
"""Test description for the feature."""
@scenario(
'description.feature',
'Description'
)
@scenario("description.feature", "Description")
def test():
pass
assert test.__scenario__.feature.description == """In order to achieve something
assert (
test.__scenario__.feature.description
== """In order to achieve something
I want something
Because it will be cool
Some description goes here."""
)
test(request)
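
The description assertion above shows how black handles a single logical line that no longer fits: it wraps the expression in parentheses and breaks before the == operator instead of keeping a hand-wrapped line. The resulting shape, sketched with an invented value rather than the real feature description:

# Illustrative sketch only; the value is invented and far shorter than what
# would really force a split (shown purely for the shape black produces).
expected_description = "In order to achieve something I want something"
assert (
    expected_description
    == "In order to achieve something I want something"
)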

View File

@ -1,66 +1,66 @@
"""Test feature base dir."""
import pytest
NOT_EXISTING_FEATURE_PATHS = [
'.',
'/does/not/exist/',
]
NOT_EXISTING_FEATURE_PATHS = [".", "/does/not/exist/"]
@pytest.mark.parametrize(
'base_dir', NOT_EXISTING_FEATURE_PATHS
)
@pytest.mark.parametrize("base_dir", NOT_EXISTING_FEATURE_PATHS)
def test_feature_path_not_found(testdir, base_dir):
"""Test feature base dir."""
prepare_testdir(testdir, base_dir)
result = testdir.runpytest('-k', 'test_not_found_by_ini')
result = testdir.runpytest("-k", "test_not_found_by_ini")
result.assert_outcomes(passed=2)
def test_feature_path_ok(testdir):
base_dir = 'features'
base_dir = "features"
prepare_testdir(testdir, base_dir)
result = testdir.runpytest('-k', 'test_ok_by_ini')
result = testdir.runpytest("-k", "test_ok_by_ini")
result.assert_outcomes(passed=2)
def test_feature_path_by_param_not_found(testdir):
"""As param takes precendence even if ini config is correct it should fail
if passed param is incorrect"""
base_dir = 'features'
base_dir = "features"
prepare_testdir(testdir, base_dir)
result = testdir.runpytest('-k', 'test_not_found_by_param')
result = testdir.runpytest("-k", "test_not_found_by_param")
result.assert_outcomes(passed=4)
@pytest.mark.parametrize(
'base_dir', NOT_EXISTING_FEATURE_PATHS
)
@pytest.mark.parametrize("base_dir", NOT_EXISTING_FEATURE_PATHS)
def test_feature_path_by_param_ok(testdir, base_dir):
"""If ini config is incorrect but param path is fine it should be able
to find features"""
prepare_testdir(testdir, base_dir)
result = testdir.runpytest('-k', 'test_ok_by_param')
result = testdir.runpytest("-k", "test_ok_by_param")
result.assert_outcomes(passed=2)
def prepare_testdir(testdir, ini_base_dir):
testdir.makeini("""
testdir.makeini(
"""
[pytest]
bdd_features_base_dir={}
""".format(ini_base_dir))
""".format(
ini_base_dir
)
)
feature_file = testdir.mkdir('features').join('steps.feature')
feature_file.write("""
feature_file = testdir.mkdir("features").join("steps.feature")
feature_file.write(
"""
Scenario: When scenario found
Given found
""")
"""
)
testdir.makepyfile("""
testdir.makepyfile(
"""
import os.path
import pytest
@ -128,4 +128,7 @@ def prepare_testdir(testdir, ini_base_dir):
else:
scenario(FEATURE, scenario_name, features_base_dir='features')
""".format(ini_base_dir))
""".format(
ini_base_dir
)
)
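
When a .format(...) call trails a triple-quoted template, as in the ini and conftest snippets above, the black release used here keeps the call attached to the string but pushes its arguments onto their own lines; later releases may render this differently, so treat the exact shape as version-dependent. A small runnable sketch with a hypothetical ini body:

# Illustrative sketch, hypothetical ini content, not part of this commit.
base_dir = "features"
ini_text = """
[pytest]
bdd_features_base_dir={}
""".format(
    base_dir
)
print(ini_text)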

View File

@ -7,64 +7,62 @@ from pytest_bdd import scenario, given, when, then
from tests.utils import get_test_filepath, prepare_feature_and_py_files
@scenario('gherkin_terminal_reporter.feature',
'Should default output be the same as regular terminal reporter')
@scenario("gherkin_terminal_reporter.feature", "Should default output be the same as regular terminal reporter")
def test_Should_default_output_be_the_same_as_regular_terminal_reporter():
pass
@scenario('gherkin_terminal_reporter.feature',
'Should verbose mode enable displaying feature and scenario names rather than test names in a single line')
@scenario(
"gherkin_terminal_reporter.feature",
"Should verbose mode enable displaying feature and scenario names rather than test names in a single line",
)
def test_Should_verbose_mode_enable_displaying_feature_and_scenario_names_rather_than_test_names_in_a_single_line():
pass
@scenario('gherkin_terminal_reporter.feature',
'Should verbose mode preserve displaying of regular tests as usual')
@scenario("gherkin_terminal_reporter.feature", "Should verbose mode preserve displaying of regular tests as usual")
def test_Should_verbose_mode_preserve_displaying_of_regular_tests_as_usual():
pass
@scenario('gherkin_terminal_reporter.feature',
'Should double verbose mode enable displaying of full gherkin scenario description')
@scenario(
"gherkin_terminal_reporter.feature",
"Should double verbose mode enable displaying of full gherkin scenario description",
)
def test_Should_double_verbose_mode_enable_displaying_of_full_gherkin_scenario_description():
pass
@scenario('gherkin_terminal_reporter.feature',
'Should error message be displayed when no scenario is found')
@scenario("gherkin_terminal_reporter.feature", "Should error message be displayed when no scenario is found")
def test_Should_error_message_be_displayed_when_no_scenario_is_found(verbosity_mode):
pass
@scenario('gherkin_terminal_reporter.feature',
'Should error message be displayed when no step is found')
@scenario("gherkin_terminal_reporter.feature", "Should error message be displayed when no step is found")
def test_Should_error_message_be_displayed_when_no_step_is_found(verbosity_mode):
pass
@scenario('gherkin_terminal_reporter.feature',
'Should error message be displayed when error occurs during test execution')
@scenario(
"gherkin_terminal_reporter.feature", "Should error message be displayed when error occurs during test execution"
)
def test_Should_error_message_be_displayed_when_error_occurs_during_test_execution(verbosity_mode):
pass
@scenario('gherkin_terminal_reporter.feature',
'Should local variables be displayed when --showlocals option is used')
@scenario("gherkin_terminal_reporter.feature", "Should local variables be displayed when --showlocals option is used")
def test_Should_local_variables_be_displayed_when___showlocals_option_is_used():
pass
@scenario('gherkin_terminal_reporter.feature',
'Should step parameters be replaced by their values')
@scenario("gherkin_terminal_reporter.feature", "Should step parameters be replaced by their values")
def test_Should_step_parameters_be_replaced_by_their_values():
pass
@pytest.fixture(params=[0, 1, 2],
ids=['compact mode', 'line per test', 'verbose'])
@pytest.fixture(params=[0, 1, 2], ids=["compact mode", "line per test", "verbose"])
def verbosity_mode(request):
return request.param, '-' + 'v' * request.param if request.param else ''
return request.param, "-" + "v" * request.param if request.param else ""
@pytest.fixture
@ -74,22 +72,28 @@ def test_execution():
@given("there is non-gherkin scenario implemented")
def non_gherkin_test(testdir):
testdir.makepyfile(test_regular="""
testdir.makepyfile(
test_regular="""
def test_1():
pass
""")
"""
)
@given("there is gherkin scenario implemented")
def gherkin_scenario(testdir):
testdir.makefile('.feature', test="""
testdir.makefile(
".feature",
test="""
Feature: Gherkin terminal output feature
Scenario: Scenario example 1
Given there is a bar
When the bar is accessed
Then world explodes
""")
testdir.makepyfile(test_gherkin="""
""",
)
testdir.makepyfile(
test_gherkin="""
import pytest
from pytest_bdd import given, when, scenario, then
@ -108,17 +112,16 @@ def gherkin_scenario(testdir):
@scenario('test.feature', 'Scenario example 1')
def test_scenario_1():
pass
""")
"""
)
@given("there is gherkin scenario outline implemented")
def gherkin_scenario_outline(testdir):
example = {
'start': 12,
'eat': 5,
'left': 7,
}
testdir.makefile('.feature', test="""
example = {"start": 12, "eat": 5, "left": 7}
testdir.makefile(
".feature",
test="""
Feature: Gherkin terminal output feature
Scenario Outline: Scenario example 2
Given there are <start> cucumbers
@ -128,8 +131,12 @@ def gherkin_scenario_outline(testdir):
Examples:
| start | eat | left |
|{start}|{eat}|{left}|
""".format(**example))
testdir.makepyfile(test_gherkin="""
""".format(
**example
),
)
testdir.makepyfile(
test_gherkin="""
import pytest
from pytest_bdd import given, when, scenario, then
@ -148,128 +155,134 @@ def gherkin_scenario_outline(testdir):
@scenario('test.feature', 'Scenario example 2')
def test_scenario_2():
pass
""")
"""
)
return example
@when("I run tests")
def run_tests(testdir, test_execution):
test_execution['regular'] = testdir.runpytest()
test_execution['gherkin'] = testdir.runpytest('--gherkin-terminal-reporter')
test_execution["regular"] = testdir.runpytest()
test_execution["gherkin"] = testdir.runpytest("--gherkin-terminal-reporter")
@then("output must be formatted the same way as regular one")
def output_must_be_the_same_as_regular_reporter(test_execution):
reg = test_execution['regular']
ghe = test_execution['gherkin']
reg = test_execution["regular"]
ghe = test_execution["gherkin"]
assert reg.ret == 0
assert ghe.ret == 0
# the last line can differ because the test execution time is printed
reg_lines = reg.stdout.lines if reg.stdout.lines[-1] else reg.stdout.lines[:-2]
reg_lines[-1] = re.sub(r' \d+\.\d+ ', ' X ', reg_lines[-1])
reg_lines[-1] = re.sub(r" \d+\.\d+ ", " X ", reg_lines[-1])
ghe_lines = ghe.stdout.lines if ghe.stdout.lines[-1] else ghe.stdout.lines[:-2]
ghe_lines[-1] = re.sub(r' \d+\.\d+ ', ' X ', ghe_lines[-1])
ghe_lines[-1] = re.sub(r" \d+\.\d+ ", " X ", ghe_lines[-1])
for l1, l2 in zip(reg_lines, ghe_lines):
assert l1 == l2
@when("I run tests with verbose mode")
def run_tests_with_verbose_mode(testdir, test_execution):
test_execution['regular'] = testdir.runpytest('-v')
test_execution['gherkin'] = testdir.runpytest('--gherkin-terminal-reporter', '-v')
test_execution["regular"] = testdir.runpytest("-v")
test_execution["gherkin"] = testdir.runpytest("--gherkin-terminal-reporter", "-v")
@when("I run tests with very verbose mode")
def run_tests_with_very_verbose_mode(testdir, test_execution):
test_execution['regular'] = testdir.runpytest('-vv')
test_execution['gherkin'] = testdir.runpytest('--gherkin-terminal-reporter', '-vv')
test_execution["regular"] = testdir.runpytest("-vv")
test_execution["gherkin"] = testdir.runpytest("--gherkin-terminal-reporter", "-vv")
@when("I run tests with step expanded mode")
def run_tests_with_step_expanded_mode(testdir, test_execution):
test_execution['regular'] = testdir.runpytest('-vv')
test_execution['gherkin'] = testdir.runpytest(
'--gherkin-terminal-reporter',
'--gherkin-terminal-reporter-expanded',
'-vv',
test_execution["regular"] = testdir.runpytest("-vv")
test_execution["gherkin"] = testdir.runpytest(
"--gherkin-terminal-reporter", "--gherkin-terminal-reporter-expanded", "-vv"
)
@then("output should contain single line feature description")
def output_should_contain_single_line_feature_description(test_execution):
ghe = test_execution['gherkin']
ghe = test_execution["gherkin"]
assert ghe.ret == 0
ghe.stdout.fnmatch_lines('Feature: Gherkin terminal output feature')
ghe.stdout.fnmatch_lines("Feature: Gherkin terminal output feature")
@then("output should contain single line scenario description")
def output_should_contain_single_line_scenario_description(test_execution):
ghe = test_execution['gherkin']
ghe = test_execution["gherkin"]
assert ghe.ret == 0
ghe.stdout.fnmatch_lines('*Scenario: Scenario example 1 PASSED')
ghe.stdout.fnmatch_lines("*Scenario: Scenario example 1 PASSED")
@then("output must contain full gherkin scenario description")
def output_should_contain_full_gherkin_scenario_description(test_execution):
ghe = test_execution['gherkin']
ghe = test_execution["gherkin"]
assert ghe.ret == 0
ghe.stdout.fnmatch_lines('*Scenario: Scenario example 1')
ghe.stdout.fnmatch_lines('*Given there is a bar')
ghe.stdout.fnmatch_lines('*When the bar is accessed')
ghe.stdout.fnmatch_lines('*Then world explodes')
ghe.stdout.fnmatch_lines('*PASSED')
ghe.stdout.fnmatch_lines("*Scenario: Scenario example 1")
ghe.stdout.fnmatch_lines("*Given there is a bar")
ghe.stdout.fnmatch_lines("*When the bar is accessed")
ghe.stdout.fnmatch_lines("*Then world explodes")
ghe.stdout.fnmatch_lines("*PASSED")
@given('there is gherkin scenario without implementation')
@given("there is gherkin scenario without implementation")
def gherkin_scenario_without_implementation(testdir):
testdir.makefile('.feature', test="""
testdir.makefile(
".feature",
test="""
Feature: Gherkin terminal output feature
Scenario: Scenario example 1
Given there is a bar
When the bar is accessed
Then world explodes
""")
testdir.makepyfile(test_gherkin="""
""",
)
testdir.makepyfile(
test_gherkin="""
import pytest
from pytest_bdd import scenarios
scenarios('.')
""")
"""
)
@when('I run tests with any verbosity mode')
def run_tests_with_any_verbosity_mode(
test_execution, verbosity_mode, testdir,
gherkin_scenario_without_implementation):
@when("I run tests with any verbosity mode")
def run_tests_with_any_verbosity_mode(test_execution, verbosity_mode, testdir, gherkin_scenario_without_implementation):
# test_execution['gherkin'] = testdir.runpytest(
# '--gherkin-terminal-reporter', '-vv')
if verbosity_mode[1]:
test_execution['gherkin'] = testdir.runpytest(
'--gherkin-terminal-reporter', verbosity_mode[1])
test_execution["gherkin"] = testdir.runpytest("--gherkin-terminal-reporter", verbosity_mode[1])
else:
test_execution['gherkin'] = testdir.runpytest(
'--gherkin-terminal-reporter')
test_execution["gherkin"] = testdir.runpytest("--gherkin-terminal-reporter")
@then('output contains error about missing scenario implementation')
@then("output contains error about missing scenario implementation")
def output_contains_error_about_missing_scenario_implementation(test_execution):
ghe = test_execution['gherkin']
ghe = test_execution["gherkin"]
assert ghe.ret
ghe.stdout.fnmatch_lines('''*StepDefinitionNotFoundError: Step definition is not found: Given "there is a bar". '''
'''Line 3 in scenario "Scenario example 1"*''')
ghe.stdout.fnmatch_lines(
"""*StepDefinitionNotFoundError: Step definition is not found: Given "there is a bar". """
"""Line 3 in scenario "Scenario example 1"*"""
)
@given('there is gherkin scenario partially implemented')
@given("there is gherkin scenario partially implemented")
def partially_implemented_gherkin_scenario(testdir):
testdir.makefile('.feature', test="""
testdir.makefile(
".feature",
test="""
Feature: Gherkin terminal output feature
Scenario: Scenario example 1
Given there is a bar
When the bar is accessed
Then world explodes
""")
testdir.makepyfile(test_gherkin="""
""",
)
testdir.makepyfile(
test_gherkin="""
import pytest
from pytest_bdd import given, when, scenario, then
@ -284,27 +297,34 @@ def partially_implemented_gherkin_scenario(testdir):
@scenario('test.feature', 'Scenario example 1')
def test_scenario_1():
pass
""")
"""
)
@then('output contains error about missing step implementation')
@then("output contains error about missing step implementation")
def output_contains_error_about_missing_step_implementation(test_execution):
ghe = test_execution['gherkin']
ghe = test_execution["gherkin"]
assert ghe.ret
ghe.stdout.fnmatch_lines('''*StepDefinitionNotFoundError: Step definition is not found: Given "there is a bar". '''
'''Line 3 in scenario "Scenario example 1"*''')
ghe.stdout.fnmatch_lines(
"""*StepDefinitionNotFoundError: Step definition is not found: Given "there is a bar". """
"""Line 3 in scenario "Scenario example 1"*"""
)
@given('there is gherkin scenario with broken implementation')
@given("there is gherkin scenario with broken implementation")
def there_is_gherkin_scenario_with_broken_implementation(testdir):
testdir.makefile('.feature', test="""
testdir.makefile(
".feature",
test="""
Feature: Gherkin terminal output feature
Scenario: Scenario example 1
Given there is a bar
When the bar is accessed
Then world explodes
""")
testdir.makepyfile(test_gherkin="""
""",
)
testdir.makepyfile(
test_gherkin="""
import pytest
from pytest_bdd import given, when, scenario, then
@ -320,47 +340,44 @@ def there_is_gherkin_scenario_with_broken_implementation(testdir):
@scenario('test.feature', 'Scenario example 1')
def test_scenario_1():
pass
""")
"""
)
@when('I run tests with --showlocals')
@when("I run tests with --showlocals")
def run_tests_with___showlocals(test_execution, testdir):
test_execution['gherkin'] = testdir.runpytest('--gherkin-terminal-reporter', '--showlocals')
test_execution["gherkin"] = testdir.runpytest("--gherkin-terminal-reporter", "--showlocals")
@then('error traceback contains local variable descriptions')
@then("error traceback contains local variable descriptions")
def error_traceback_contains_local_variable_descriptions(test_execution):
ghe = test_execution['gherkin']
ghe = test_execution["gherkin"]
assert ghe.ret
ghe.stdout.fnmatch_lines('''request*=*<FixtureRequest for *''')
ghe.stdout.fnmatch_lines('''local_var*=*''')
ghe.stdout.fnmatch_lines("""request*=*<FixtureRequest for *""")
ghe.stdout.fnmatch_lines("""local_var*=*""")
@then("output must contain parameters values")
def output_output_must_contain_parameters_values(test_execution, gherkin_scenario_outline):
ghe = test_execution['gherkin']
ghe = test_execution["gherkin"]
assert ghe.ret == 0
ghe.stdout.fnmatch_lines('*Scenario: Scenario example 2')
ghe.stdout.fnmatch_lines('*Given there are {start} cucumbers'.format(**gherkin_scenario_outline))
ghe.stdout.fnmatch_lines('*When I eat {eat} cucumbers'.format(**gherkin_scenario_outline))
ghe.stdout.fnmatch_lines('*Then I should have {left} cucumbers'.format(**gherkin_scenario_outline))
ghe.stdout.fnmatch_lines('*PASSED')
ghe.stdout.fnmatch_lines("*Scenario: Scenario example 2")
ghe.stdout.fnmatch_lines("*Given there are {start} cucumbers".format(**gherkin_scenario_outline))
ghe.stdout.fnmatch_lines("*When I eat {eat} cucumbers".format(**gherkin_scenario_outline))
ghe.stdout.fnmatch_lines("*Then I should have {left} cucumbers".format(**gherkin_scenario_outline))
ghe.stdout.fnmatch_lines("*PASSED")
@pytest.mark.parametrize(
'feature_file, py_file, name', [
('./steps/unicode.feature', './steps/test_unicode.py', 'test_steps_in_feature_file_have_unicode')
]
"feature_file, py_file, name",
[("./steps/unicode.feature", "./steps/test_unicode.py", "test_steps_in_feature_file_have_unicode")],
)
def test_scenario_in_expanded_mode(testdir, test_execution, feature_file, py_file, name):
prepare_feature_and_py_files(testdir, feature_file, py_file)
test_execution['gherkin'] = testdir.runpytest(
'-k %s' % name,
'--gherkin-terminal-reporter',
'--gherkin-terminal-reporter-expanded',
'-vv',
test_execution["gherkin"] = testdir.runpytest(
"-k %s" % name, "--gherkin-terminal-reporter", "--gherkin-terminal-reporter-expanded", "-vv"
)
ghe = test_execution['gherkin']
ghe = test_execution["gherkin"]
ghe.assert_outcomes(passed=1)
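
The @scenario(...) decorators above also show the length threshold at work: a call that fits after joining stays on one line, while one that still overflows keeps each argument on its own line plus a trailing comma. Several of the joined lines run well past 88 characters, which suggests the project configured black with a longer limit (for example black --line-length 120 .); the exact value is an assumption, not something stated in this diff. A sketch with a stand-in decorator and invented scenario names:

# Illustrative sketch; `scenario` here is a stand-in, not pytest_bdd.scenario,
# and the feature/scenario names are invented.
def scenario(feature_name, scenario_name):
    def decorator(fn):
        return fn
    return decorator

@scenario("reporter.feature", "Short scenario name that fits on a single joined line")
def test_short():
    pass

@scenario(
    "reporter.feature",
    "A very long scenario name that still overflows the configured line length and therefore stays exploded",
)
def test_long():
    pass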

View File

@ -3,18 +3,15 @@ import textwrap
import pytest
from pytest_bdd import (
exceptions,
given,
parsers,
scenario,
then,
)
from pytest_bdd import exceptions, given, parsers, scenario, then
@pytest.mark.parametrize(["feature_text", "expected_text"], [
(
textwrap.dedent("""
@pytest.mark.parametrize(
["feature_text", "expected_text"],
[
(
textwrap.dedent(
"""
Scenario: Multiline step using sub indentation
Given I have a step with:
Some
@ -22,16 +19,20 @@ from pytest_bdd import (
Extra
Lines
Then the text should be parsed with correct indentation
"""),
textwrap.dedent("""
"""
),
textwrap.dedent(
"""
Some
Extra
Lines
""")[1: -1]
),
(
textwrap.dedent("""
"""
)[1:-1],
),
(
textwrap.dedent(
"""
Scenario: Multiline step using sub indentation
Given I have a step with:
Some
@ -40,16 +41,20 @@ from pytest_bdd import (
Lines
Then the text should be parsed with correct indentation
"""),
textwrap.dedent("""
"""
),
textwrap.dedent(
"""
Some
Extra
Lines
""")[1:-1]
),
(
textwrap.dedent("""
"""
)[1:-1],
),
(
textwrap.dedent(
"""
Feature:
Scenario: Multiline step using sub indentation
Given I have a step with:
@ -57,42 +62,46 @@ from pytest_bdd import (
Extra
Lines
"""),
textwrap.dedent("""
"""
),
textwrap.dedent(
"""
Some
Extra
Lines
""")[1:-1]
),
])
"""
)[1:-1],
),
],
)
def test_multiline(request, tmpdir, feature_text, expected_text):
file_name = tmpdir.join('test.feature')
with file_name.open('w') as fd:
file_name = tmpdir.join("test.feature")
with file_name.open("w") as fd:
fd.write(feature_text)
@scenario(file_name.strpath, 'Multiline step using sub indentation')
@scenario(file_name.strpath, "Multiline step using sub indentation")
def test_multiline(request):
assert request.getfixturevalue('i_have_text') == expected_text
assert request.getfixturevalue("i_have_text") == expected_text
test_multiline(request)
@given(parsers.parse('I have a step with:\n{text}'))
@given(parsers.parse("I have a step with:\n{text}"))
def i_have_text(text):
return text
@then('the text should be parsed with correct indentation')
@then("the text should be parsed with correct indentation")
def text_should_be_correct(i_have_text, text, expected_text):
assert i_have_text == text == expected_text
def test_multiline_wrong_indent(request):
"""Multiline step using sub indentation wrong indent."""
@scenario(
'multiline.feature',
'Multiline step using sub indentation wrong indent',
)
@scenario("multiline.feature", "Multiline step using sub indentation wrong indent")
def test_multiline():
pass
with pytest.raises(exceptions.StepDefinitionNotFoundError):
test_multiline(request)
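
One smaller normalization is visible in the parametrize block earlier in this file: the hand-spaced slice [1: -1] becomes [1:-1], since black applies PEP 8 spacing around slice colons with simple operands. A one-line sketch with a hypothetical string:

# Illustrative sketch, hypothetical text, not part of this commit.
text = "\nSome\nExtra\nLines\n"
# Hand-formatted input:  trimmed = text[1: -1]
trimmed = text[1:-1]  # black output: no space around the colon in a simple slice
print(trimmed)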

View File

@ -5,43 +5,55 @@ import textwrap
def test_no_scenarios(testdir):
"""Test no scenarios defined in the feature file."""
features = testdir.mkdir('features')
features.join('test.feature').write_text(textwrap.dedent(u"""
features = testdir.mkdir("features")
features.join("test.feature").write_text(
textwrap.dedent(
u"""
Given foo
When bar
Then baz
"""), 'utf-8', ensure=True)
testdir.makepyfile(textwrap.dedent("""
"""
),
"utf-8",
ensure=True,
)
testdir.makepyfile(
textwrap.dedent(
"""
from pytest_bdd import scenarios
scenarios('features')
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
'*FeatureError: Step definition outside of a Scenario or a Background.*',
],
"""
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*FeatureError: Step definition outside of a Scenario or a Background.*"])
def test_only_background_strict_mode(testdir):
"""Test only wrong background defined in the feature file."""
features = testdir.mkdir('features')
features.join('test.feature').write_text(textwrap.dedent(u"""
features = testdir.mkdir("features")
features.join("test.feature").write_text(
textwrap.dedent(
u"""
Background:
Given foo
When bar
"""), 'utf-8', ensure=True)
testdir.makepyfile(textwrap.dedent("""
"""
),
"utf-8",
ensure=True,
)
testdir.makepyfile(
textwrap.dedent(
"""
from pytest_bdd import scenarios
scenarios('features')
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
'*FeatureError: Background section can only contain Given steps.*',
],
"""
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*FeatureError: Background section can only contain Given steps.*"])

View File

@ -5,7 +5,9 @@ def test_background_no_strict_gherkin(testdir):
"""Test background no strict gherkin."""
prepare_test_dir(testdir)
testdir.makefile('.feature', no_strict_gherkin_background="""
testdir.makefile(
".feature",
no_strict_gherkin_background="""
Feature: No strict Gherkin Background support
Background:
@ -15,9 +17,10 @@ def test_background_no_strict_gherkin(testdir):
Scenario: Test background
""")
""",
)
result = testdir.runpytest('-k', 'test_background_ok')
result = testdir.runpytest("-k", "test_background_ok")
result.assert_outcomes(passed=1)
@ -25,7 +28,9 @@ def test_scenario_no_strict_gherkin(testdir):
"""Test scenario no strict gherkin."""
prepare_test_dir(testdir)
testdir.makefile('.feature', no_strict_gherkin_scenario="""
testdir.makefile(
".feature",
no_strict_gherkin_scenario="""
Feature: No strict Gherkin Scenario support
Scenario: Test scenario
@ -33,21 +38,24 @@ def test_scenario_no_strict_gherkin(testdir):
And foo is not boolean
And foo has not a value "baz"
""")
""",
)
result = testdir.runpytest('-k', 'test_scenario_ok')
result = testdir.runpytest("-k", "test_scenario_ok")
result.assert_outcomes(passed=1)
def prepare_test_dir(testdir):
"""Test scenario no strict gherkin."""
testdir.makeini("""
testdir.makeini(
"""
[pytest]
bdd_strict_gherkin=false
"""
)
)
testdir.makepyfile(test_gherkin="""
testdir.makepyfile(
test_gherkin="""
import pytest
from pytest_bdd import (
@ -93,4 +101,5 @@ def prepare_test_dir(testdir):
@when('foo has not a value "baz"')
def has_not_baz(foo):
assert "baz" not in foo
""")
"""
)

View File

@ -9,43 +9,36 @@ from pytest_bdd import exceptions
from pytest_bdd.utils import get_parametrize_markers_args
@scenario(
'outline.feature',
'Outlined given, when, thens',
example_converters=dict(start=int, eat=float, left=str)
)
@scenario("outline.feature", "Outlined given, when, thens", example_converters=dict(start=int, eat=float, left=str))
def test_outlined(request):
assert get_parametrize_markers_args(request.node) == (
[u'start', u'eat', u'left'], [[12, 5.0, '7'], [5, 4.0, '1']])
assert get_parametrize_markers_args(request.node) == ([u"start", u"eat", u"left"], [[12, 5.0, "7"], [5, 4.0, "1"]])
@given('there are <start> cucumbers')
@given("there are <start> cucumbers")
def start_cucumbers(start):
assert isinstance(start, int)
return dict(start=start)
@when('I eat <eat> cucumbers')
@when("I eat <eat> cucumbers")
def eat_cucumbers(start_cucumbers, eat):
assert isinstance(eat, float)
start_cucumbers['eat'] = eat
start_cucumbers["eat"] = eat
@then('I should have <left> cucumbers')
@then("I should have <left> cucumbers")
def should_have_left_cucumbers(start_cucumbers, start, eat, left):
assert isinstance(left, str)
assert start - eat == int(left)
assert start_cucumbers['start'] == start
assert start_cucumbers['eat'] == eat
assert start_cucumbers["start"] == start
assert start_cucumbers["eat"] == eat
def test_wrongly_outlined(request):
"""Test parametrized scenario when the test function lacks parameters."""
with pytest.raises(exceptions.ScenarioExamplesNotValidError) as exc:
@scenario(
'outline.feature',
'Outlined with wrong examples',
)
@scenario("outline.feature", "Outlined with wrong examples")
def wrongly_outlined():
pass
@ -53,15 +46,17 @@ def test_wrongly_outlined(request):
r"""Scenario \"Outlined with wrong examples\" in the feature \"(.+)\" has not valid examples\. """
r"""Set of step parameters (.+) should match set of example values """
r"""(.+)\.""",
exc.value.args[0]
exc.value.args[0],
)
def test_wrong_vertical_examples_scenario(testdir):
"""Test parametrized scenario vertical example table has wrong format."""
features = testdir.mkdir('features')
feature = features.join('test.feature')
feature.write_text(textwrap.dedent(u"""
features = testdir.mkdir("features")
feature = features.join("test.feature")
feature.write_text(
textwrap.dedent(
u"""
Scenario Outline: Outlined with wrong vertical example table
Given there are <start> cucumbers
When I eat <eat> cucumbers
@ -71,25 +66,30 @@ def test_wrong_vertical_examples_scenario(testdir):
| start | 12 | 2 |
| start | 10 | 1 |
| left | 7 | 1 |
"""), 'utf-8', ensure=True)
"""
),
"utf-8",
ensure=True,
)
with pytest.raises(exceptions.FeatureError) as exc:
@scenario(
feature.strpath,
'Outlined with wrong vertical example table',
)
@scenario(feature.strpath, "Outlined with wrong vertical example table")
def wrongly_outlined():
pass
assert exc.value.args[0] == (
'Scenario has not valid examples. Example rows should contain unique parameters.'
' "start" appeared more than once')
"Scenario has not valid examples. Example rows should contain unique parameters."
' "start" appeared more than once'
)
def test_wrong_vertical_examples_feature(testdir):
"""Test parametrized feature vertical example table has wrong format."""
features = testdir.mkdir('features')
feature = features.join('test.feature')
feature.write_text(textwrap.dedent(u"""
features = testdir.mkdir("features")
feature = features.join("test.feature")
feature.write_text(
textwrap.dedent(
u"""
Feature: Outlines
Examples: Vertical
@ -101,18 +101,21 @@ def test_wrong_vertical_examples_feature(testdir):
Given there are <start> cucumbers
When I eat <eat> cucumbers
Then I should have <left> cucumbers
"""), 'utf-8', ensure=True)
"""
),
"utf-8",
ensure=True,
)
with pytest.raises(exceptions.FeatureError) as exc:
@scenario(
feature.strpath,
'Outlined with wrong vertical example table',
)
@scenario(feature.strpath, "Outlined with wrong vertical example table")
def wrongly_outlined():
pass
assert exc.value.args[0] == (
'Feature has not valid examples. Example rows should contain unique parameters.'
' "start" appeared more than once')
"Feature has not valid examples. Example rows should contain unique parameters."
' "start" appeared more than once'
)
@pytest.fixture(params=[1, 2, 3])
@ -120,55 +123,46 @@ def other_fixture(request):
return request.param
@scenario(
'outline.feature',
'Outlined given, when, thens',
example_converters=dict(start=int, eat=float, left=str)
)
@scenario("outline.feature", "Outlined given, when, thens", example_converters=dict(start=int, eat=float, left=str))
def test_outlined_with_other_fixtures(other_fixture):
"""Test outlined scenario also using other parametrized fixture."""
@scenario(
'outline.feature',
'Outlined with vertical example table',
example_converters=dict(start=int, eat=float, left=str)
"outline.feature", "Outlined with vertical example table", example_converters=dict(start=int, eat=float, left=str)
)
def test_vertical_example(request):
"""Test outlined scenario with vertical examples table."""
assert get_parametrize_markers_args(request.node) == (
[u'start', u'eat', u'left'], [[12, 5.0, '7'], [2, 1.0, '1']])
assert get_parametrize_markers_args(request.node) == ([u"start", u"eat", u"left"], [[12, 5.0, "7"], [2, 1.0, "1"]])
@given('there are <start> <fruits>')
@given("there are <start> <fruits>")
def start_fruits(start, fruits):
assert isinstance(start, int)
return {fruits: dict(start=start)}
@when('I eat <eat> <fruits>')
@when("I eat <eat> <fruits>")
def eat_fruits(start_fruits, eat, fruits):
assert isinstance(eat, float)
start_fruits[fruits]['eat'] = eat
start_fruits[fruits]["eat"] = eat
@then('I should have <left> <fruits>')
@then("I should have <left> <fruits>")
def should_have_left_fruits(start_fruits, start, eat, left, fruits):
assert isinstance(left, str)
assert start - eat == int(left)
assert start_fruits[fruits]['start'] == start
assert start_fruits[fruits]['eat'] == eat
assert start_fruits[fruits]["start"] == start
assert start_fruits[fruits]["eat"] == eat
@scenario(
'outline_feature.feature',
'Outlined given, when, thens',
example_converters=dict(start=int, eat=float, left=str)
"outline_feature.feature", "Outlined given, when, thens", example_converters=dict(start=int, eat=float, left=str)
)
def test_outlined_feature(request):
assert get_parametrize_markers_args(request.node) == (
['start', 'eat', 'left'],
[[12, 5.0, '7'], [5, 4.0, '1']],
['fruits'],
[[u'oranges'], [u'apples']]
["start", "eat", "left"],
[[12, 5.0, "7"], [5, 4.0, "1"]],
["fruits"],
[[u"oranges"], [u"apples"]],
)

View File

@ -3,34 +3,26 @@ from pytest_bdd import given, scenario, then, when
from pytest_bdd.utils import get_parametrize_markers_args
@given('there are <start> cucumbers')
@given("there are <start> cucumbers")
def start_cucumbers(start):
pass
@when('I eat <eat> cucumbers')
@when("I eat <eat> cucumbers")
def eat_cucumbers(eat):
pass
@then('I should have <left> cucumbers')
@then("I should have <left> cucumbers")
def should_have_left_cucumbers(left):
pass
@scenario(
'outline.feature',
'Outlined with empty example values',
)
@scenario("outline.feature", "Outlined with empty example values")
def test_scenario_with_empty_example_values(request):
assert get_parametrize_markers_args(request.node) == (
[u'start', u'eat', u'left'], [['#', '', '']])
assert get_parametrize_markers_args(request.node) == ([u"start", u"eat", u"left"], [["#", "", ""]])
@scenario(
'outline.feature',
'Outlined with empty example values vertical',
)
@scenario("outline.feature", "Outlined with empty example values vertical")
def test_scenario_with_empty_example_values_vertical(request):
assert get_parametrize_markers_args(request.node) == (
[u'start', u'eat', u'left'], [['#', '', '']])
assert get_parametrize_markers_args(request.node) == ([u"start", u"eat", u"left"], [["#", "", ""]])

View File

@ -3,45 +3,35 @@ import pytest
from pytest_bdd import given, when, then, scenario
@pytest.mark.parametrize(
['start', 'eat', 'left'],
[(12, 5, 7)])
@scenario(
'parametrized.feature',
'Parametrized given, when, thens',
)
@pytest.mark.parametrize(["start", "eat", "left"], [(12, 5, 7)])
@scenario("parametrized.feature", "Parametrized given, when, thens")
def test_parametrized(request, start, eat, left):
"""Test parametrized scenario."""
@pytest.fixture(params=[1, 2])
def foo_bar(request):
return 'bar' * request.param
return "bar" * request.param
@pytest.mark.parametrize(
['start', 'eat', 'left'],
[(12, 5, 7)])
@scenario(
'parametrized.feature',
'Parametrized given, when, thens',
)
@pytest.mark.parametrize(["start", "eat", "left"], [(12, 5, 7)])
@scenario("parametrized.feature", "Parametrized given, when, thens")
def test_parametrized_with_other_fixtures(request, start, eat, left, foo_bar):
"""Test parametrized scenario, but also with other parametrized fixtures."""
@given('there are <start> cucumbers')
@given("there are <start> cucumbers")
def start_cucumbers(start):
return dict(start=start)
@when('I eat <eat> cucumbers')
@when("I eat <eat> cucumbers")
def eat_cucumbers(start_cucumbers, start, eat):
start_cucumbers['eat'] = eat
start_cucumbers["eat"] = eat
@then('I should have <left> cucumbers')
@then("I should have <left> cucumbers")
def should_have_left_cucumbers(start_cucumbers, start, eat, left):
assert start - eat == left
assert start_cucumbers['start'] == start
assert start_cucumbers['eat'] == eat
assert start_cucumbers["start"] == start
assert start_cucumbers["eat"] == eat

View File

@ -18,19 +18,27 @@ class equals_any(object):
return 0 if (isinstance(other, self.type) if self.type else False) else -1
string = type(u'')
string = type(u"")
def test_step_trace(testdir):
"""Test step trace."""
testdir.makefile(".ini", pytest=textwrap.dedent("""
testdir.makefile(
".ini",
pytest=textwrap.dedent(
"""
[pytest]
markers =
feature-tag
scenario-passing-tag
scenario-failing-tag
"""))
feature = testdir.makefile('.feature', test=textwrap.dedent("""
"""
),
)
feature = testdir.makefile(
".feature",
test=textwrap.dedent(
"""
@feature-tag
Feature: One passing scenario, one failing scenario
@ -53,9 +61,13 @@ def test_step_trace(testdir):
| start | eat | left |
| 12 | 5 | 7 |
| 5 | 4 | 1 |
"""))
"""
),
)
relpath = feature.relto(testdir.tmpdir.dirname)
testdir.makepyfile(textwrap.dedent("""
testdir.makepyfile(
textwrap.dedent(
"""
import pytest
from pytest_bdd import given, when, then, scenarios
@ -91,140 +103,193 @@ def test_step_trace(testdir):
assert start_cucumbers['eat'] == eat
scenarios('test.feature', example_converters=dict(start=int, eat=float, left=str))
"""))
result = testdir.inline_run('-vvl')
"""
)
)
result = testdir.inline_run("-vvl")
assert result.ret
report = result.matchreport('test_passing', when='call').scenario
expected = {'feature': {'description': u'',
'filename': feature.strpath,
'line_number': 2,
'name': u'One passing scenario, one failing scenario',
'rel_filename': relpath,
'tags': [u'feature-tag']},
'line_number': 5,
'name': u'Passing',
'steps': [{'duration': equals_any(float),
'failed': False,
'keyword': 'Given',
'line_number': 6,
'name': u'a passing step',
'type': 'given'},
{'duration': equals_any(float),
'failed': False,
'keyword': 'And',
'line_number': 7,
'name': u'some other passing step',
'type': 'given'}],
'tags': [u'scenario-passing-tag'],
'examples': [],
'example_kwargs': {}}
report = result.matchreport("test_passing", when="call").scenario
expected = {
"feature": {
"description": u"",
"filename": feature.strpath,
"line_number": 2,
"name": u"One passing scenario, one failing scenario",
"rel_filename": relpath,
"tags": [u"feature-tag"],
},
"line_number": 5,
"name": u"Passing",
"steps": [
{
"duration": equals_any(float),
"failed": False,
"keyword": "Given",
"line_number": 6,
"name": u"a passing step",
"type": "given",
},
{
"duration": equals_any(float),
"failed": False,
"keyword": "And",
"line_number": 7,
"name": u"some other passing step",
"type": "given",
},
],
"tags": [u"scenario-passing-tag"],
"examples": [],
"example_kwargs": {},
}
assert report == expected
report = result.matchreport('test_failing', when='call').scenario
expected = {'feature': {'description': u'',
'filename': feature.strpath,
'line_number': 2,
'name': u'One passing scenario, one failing scenario',
'rel_filename': relpath,
'tags': [u'feature-tag']},
'line_number': 10,
'name': u'Failing',
'steps': [{'duration': equals_any(float),
'failed': False,
'keyword': 'Given',
'line_number': 11,
'name': u'a passing step',
'type': 'given'},
{'duration': equals_any(float),
'failed': True,
'keyword': 'And',
'line_number': 12,
'name': u'a failing step',
'type': 'given'}],
'tags': [u'scenario-failing-tag'],
'examples': [],
'example_kwargs': {}}
report = result.matchreport("test_failing", when="call").scenario
expected = {
"feature": {
"description": u"",
"filename": feature.strpath,
"line_number": 2,
"name": u"One passing scenario, one failing scenario",
"rel_filename": relpath,
"tags": [u"feature-tag"],
},
"line_number": 10,
"name": u"Failing",
"steps": [
{
"duration": equals_any(float),
"failed": False,
"keyword": "Given",
"line_number": 11,
"name": u"a passing step",
"type": "given",
},
{
"duration": equals_any(float),
"failed": True,
"keyword": "And",
"line_number": 12,
"name": u"a failing step",
"type": "given",
},
],
"tags": [u"scenario-failing-tag"],
"examples": [],
"example_kwargs": {},
}
assert report == expected
report = result.matchreport('test_outlined[12-5.0-7]', when='call').scenario
expected = {'feature': {'description': u'',
'filename': feature.strpath,
'line_number': 2,
'name': u'One passing scenario, one failing scenario',
'rel_filename': relpath,
'tags': [u'feature-tag']},
'line_number': 14,
'name': u'Outlined',
'steps': [{'duration': equals_any(float),
'failed': False,
'keyword': 'Given',
'line_number': 15,
'name': u'there are <start> cucumbers',
'type': 'given'},
{'duration': equals_any(float),
'failed': False,
'keyword': 'When',
'line_number': 16,
'name': u'I eat <eat> cucumbers',
'type': 'when'},
{'duration': equals_any(float),
'failed': False,
'keyword': 'Then',
'line_number': 17,
'name': u'I should have <left> cucumbers',
'type': 'then'}],
'tags': [],
'examples': [{'line_number': 19,
'name': None,
'row_index': 0,
'rows': [['start', 'eat', 'left'],
[[12, 5.0, '7'], [5, 4.0, '1']]]}],
'example_kwargs': {'eat': '5.0', 'left': '7', 'start': '12'},
}
report = result.matchreport("test_outlined[12-5.0-7]", when="call").scenario
expected = {
"feature": {
"description": u"",
"filename": feature.strpath,
"line_number": 2,
"name": u"One passing scenario, one failing scenario",
"rel_filename": relpath,
"tags": [u"feature-tag"],
},
"line_number": 14,
"name": u"Outlined",
"steps": [
{
"duration": equals_any(float),
"failed": False,
"keyword": "Given",
"line_number": 15,
"name": u"there are <start> cucumbers",
"type": "given",
},
{
"duration": equals_any(float),
"failed": False,
"keyword": "When",
"line_number": 16,
"name": u"I eat <eat> cucumbers",
"type": "when",
},
{
"duration": equals_any(float),
"failed": False,
"keyword": "Then",
"line_number": 17,
"name": u"I should have <left> cucumbers",
"type": "then",
},
],
"tags": [],
"examples": [
{
"line_number": 19,
"name": None,
"row_index": 0,
"rows": [["start", "eat", "left"], [[12, 5.0, "7"], [5, 4.0, "1"]]],
}
],
"example_kwargs": {"eat": "5.0", "left": "7", "start": "12"},
}
assert report == expected
report = result.matchreport('test_outlined[5-4.0-1]', when='call').scenario
expected = {'feature': {'description': u'',
'filename': feature.strpath,
'line_number': 2,
'name': u'One passing scenario, one failing scenario',
'rel_filename': relpath,
'tags': [u'feature-tag']},
'line_number': 14,
'name': u'Outlined',
'steps': [{'duration': equals_any(float),
'failed': False,
'keyword': 'Given',
'line_number': 15,
'name': u'there are <start> cucumbers',
'type': 'given'},
{'duration': equals_any(float),
'failed': False,
'keyword': 'When',
'line_number': 16,
'name': u'I eat <eat> cucumbers',
'type': 'when'},
{'duration': equals_any(float),
'failed': False,
'keyword': 'Then',
'line_number': 17,
'name': u'I should have <left> cucumbers',
'type': 'then'}],
'tags': [],
'examples': [{'line_number': 19,
'name': None,
'row_index': 1,
'rows': [['start', 'eat', 'left'],
[[12, 5.0, '7'], [5, 4.0, '1']]]}],
'example_kwargs': {'eat': '4.0', 'left': '1', 'start': '5'},
}
report = result.matchreport("test_outlined[5-4.0-1]", when="call").scenario
expected = {
"feature": {
"description": u"",
"filename": feature.strpath,
"line_number": 2,
"name": u"One passing scenario, one failing scenario",
"rel_filename": relpath,
"tags": [u"feature-tag"],
},
"line_number": 14,
"name": u"Outlined",
"steps": [
{
"duration": equals_any(float),
"failed": False,
"keyword": "Given",
"line_number": 15,
"name": u"there are <start> cucumbers",
"type": "given",
},
{
"duration": equals_any(float),
"failed": False,
"keyword": "When",
"line_number": 16,
"name": u"I eat <eat> cucumbers",
"type": "when",
},
{
"duration": equals_any(float),
"failed": False,
"keyword": "Then",
"line_number": 17,
"name": u"I should have <left> cucumbers",
"type": "then",
},
],
"tags": [],
"examples": [
{
"line_number": 19,
"name": None,
"row_index": 1,
"rows": [["start", "eat", "left"], [[12, 5.0, "7"], [5, 4.0, "1"]]],
}
],
"example_kwargs": {"eat": "4.0", "left": "1", "start": "5"},
}
assert report == expected
def test_complex_types(testdir):
"""Test serialization of the complex types."""
testdir.makefile('.feature', test=textwrap.dedent("""
testdir.makefile(
".feature",
test=textwrap.dedent(
"""
Feature: Report serialization containing parameters of complex types
Scenario: Complex
@ -233,8 +298,12 @@ def test_complex_types(testdir):
Examples:
| point |
| 10,20 |
"""))
testdir.makepyfile(textwrap.dedent("""
"""
),
)
testdir.makepyfile(
textwrap.dedent(
"""
import pytest
from pytest_bdd import given, when, then, scenario
@ -262,8 +331,10 @@ def test_complex_types(testdir):
def test_complex(alien):
pass
"""))
result = testdir.inline_run('-vvl')
report = result.matchreport('test_complex[point0-alien0]', when='call')
"""
)
)
result = testdir.inline_run("-vvl")
report = result.matchreport("test_complex[point0-alien0]", when="call")
assert execnet.gateway_base.dumps(report.item)
assert execnet.gateway_base.dumps(report.scenario)
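
The large expected structures above were previously hand-aligned under the opening brace; black replaces that visual alignment with a uniform four-space indent and one element per line wherever the collapsed form would not fit. A compressed sketch of the two shapes, with invented report fields:

# Illustrative sketch, hypothetical report data, not part of this commit.
# Hand-aligned input:
#   expected = {'feature': {'description': u'',
#                           'filename': 'test.feature'},
#               'line_number': 5}
# Black's shape (shown for illustration; black only splits a level whose
# collapsed form would overflow the configured line length):
expected = {
    "feature": {
        "description": "",
        "filename": "test.feature",
    },
    "line_number": 5,
}
print(expected)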

View File

@ -3,33 +3,30 @@ from pytest_bdd.steps import when
from pytest_bdd import given, then, scenario
@scenario(
'reuse.feature',
'Given and when using the same fixture should not evaluate it twice',
)
@scenario("reuse.feature", "Given and when using the same fixture should not evaluate it twice")
def test_reuse():
pass
@given('I have an empty list')
@given("I have an empty list")
def empty_list():
return []
@given('I have a fixture (appends 1 to a list)')
@given("I have a fixture (appends 1 to a list)")
def appends_1(empty_list):
empty_list.append(1)
return empty_list
given('I have a fixture (appends 1 to a list) in reuse syntax', fixture='appends_1')
given("I have a fixture (appends 1 to a list) in reuse syntax", fixture="appends_1")
@when('I use this fixture')
@when("I use this fixture")
def use_fixture(appends_1):
pass
@then('my list should be [1]')
@then("my list should be [1]")
def list_should_be_1(appends_1):
assert appends_1 == [1]

View File

@ -1,16 +1,13 @@
"""Function name same as step name."""
from pytest_bdd import (
scenario,
when,
)
from pytest_bdd import scenario, when
@scenario('same_function_name.feature', 'When function name same as step name')
@scenario("same_function_name.feature", "When function name same as step name")
def test_when_function_name_same_as_step_name():
pass
@when('something')
@when("something")
def something():
return 'something'
return "something"

View File

@ -4,52 +4,40 @@ import re
import six
from pytest_bdd import (
scenario,
given,
then,
parsers,
exceptions,
)
from pytest_bdd import scenario, given, then, parsers, exceptions
def test_scenario_not_found(request):
"""Test the situation when scenario is not found."""
with pytest.raises(exceptions.ScenarioNotFound) as exc_info:
scenario(
'not_found.feature',
'NOT FOUND'
)
scenario("not_found.feature", "NOT FOUND")
assert six.text_type(exc_info.value).startswith(
'Scenario "NOT FOUND" in feature "[Empty]" in {feature_path}'
.format(feature_path=request.fspath.join('..', 'not_found.feature')))
'Scenario "NOT FOUND" in feature "[Empty]" in {feature_path}'.format(
feature_path=request.fspath.join("..", "not_found.feature")
)
)
@given('comments should be at the start of words')
@given("comments should be at the start of words")
def comments():
"""Comments."""
pass
@then(parsers.parse('this is not {acomment}'))
@then(parsers.parse("this is not {acomment}"))
def a_comment(acomment):
"""A comment."""
assert re.search('a.*comment', acomment)
assert re.search("a.*comment", acomment)
def test_scenario_comments(request):
"""Test comments inside scenario."""
@scenario(
'comments.feature',
'Comments'
)
@scenario("comments.feature", "Comments")
def test():
pass
@scenario(
'comments.feature',
'Strings that are not comments'
)
@scenario("comments.feature", "Strings that are not comments")
def test2():
pass
@ -59,15 +47,20 @@ def test_scenario_comments(request):
def test_scenario_not_decorator(testdir):
"""Test scenario function is used not as decorator."""
testdir.makefile('.feature', foo="""
testdir.makefile(
".feature",
foo="""
Scenario: Foo
Given I have a bar
""")
testdir.makepyfile("""
""",
)
testdir.makepyfile(
"""
from pytest_bdd import scenario
test_foo = scenario('foo.feature', 'Foo')
""")
"""
)
result = testdir.runpytest()

View File

@ -4,11 +4,14 @@ import textwrap
def test_scenarios(testdir):
"""Test scenarios shortcut."""
testdir.makeini("""
testdir.makeini(
"""
[pytest]
console_output_style=classic
""")
testdir.makeconftest("""
"""
)
testdir.makeconftest(
"""
import pytest
from pytest_bdd import given
@ -16,13 +19,22 @@ def test_scenarios(testdir):
def i_have_bar():
print('bar!')
return 'bar'
""")
features = testdir.mkdir('features')
features.join('test.feature').write_text(textwrap.dedent(u"""
"""
)
features = testdir.mkdir("features")
features.join("test.feature").write_text(
textwrap.dedent(
u"""
Scenario: Test scenario
Given I have a bar
"""), 'utf-8', ensure=True)
features.join('subfolder', 'test.feature').write_text(textwrap.dedent(u"""
"""
),
"utf-8",
ensure=True,
)
features.join("subfolder", "test.feature").write_text(
textwrap.dedent(
u"""
Scenario: Test subfolder scenario
Given I have a bar
@ -34,8 +46,13 @@ def test_scenarios(testdir):
Scenario: Test scenario
Given I have a bar
"""), 'utf-8', ensure=True)
testdir.makepyfile("""
"""
),
"utf-8",
ensure=True,
)
testdir.makepyfile(
"""
import pytest
from pytest_bdd import scenarios, scenario
@ -44,24 +61,27 @@ def test_scenarios(testdir):
pass
scenarios('features')
""")
result = testdir.runpytest('-v', '-s')
result.stdout.fnmatch_lines(['*collected 5 items'])
result.stdout.fnmatch_lines(['*test_test_subfolder_scenario *bar!', 'PASSED'])
result.stdout.fnmatch_lines(['*test_test_scenario *bar!', 'PASSED'])
result.stdout.fnmatch_lines(['*test_test_failing_subfolder_scenario *FAILED'])
result.stdout.fnmatch_lines(['*test_already_bound *bar!', 'PASSED'])
result.stdout.fnmatch_lines(['*test_test_scenario_1 *bar!', 'PASSED'])
"""
)
result = testdir.runpytest("-v", "-s")
result.stdout.fnmatch_lines(["*collected 5 items"])
result.stdout.fnmatch_lines(["*test_test_subfolder_scenario *bar!", "PASSED"])
result.stdout.fnmatch_lines(["*test_test_scenario *bar!", "PASSED"])
result.stdout.fnmatch_lines(["*test_test_failing_subfolder_scenario *FAILED"])
result.stdout.fnmatch_lines(["*test_already_bound *bar!", "PASSED"])
result.stdout.fnmatch_lines(["*test_test_scenario_1 *bar!", "PASSED"])
def test_scenarios_none_found(testdir):
"""Test scenarios shortcut when no scenarios found."""
testpath = testdir.makepyfile("""
testpath = testdir.makepyfile(
"""
import pytest
from pytest_bdd import scenarios
scenarios('.')
""")
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(failed=1)
assert 'NoScenariosFound' in str(reprec.getreports()[1].longrepr)
assert "NoScenariosFound" in str(reprec.getreports()[1].longrepr)

View File

@ -4,83 +4,81 @@ from pytest_bdd import scenario, given, when, then
from pytest_bdd import exceptions
@scenario('steps.feature', 'Executed step by step')
@scenario("steps.feature", "Executed step by step")
def test_steps():
pass
@given('I have a foo fixture with value "foo"')
def foo():
return 'foo'
return "foo"
@given('there is a list')
@given("there is a list")
def results():
return []
@when('I append 1 to the list')
@when("I append 1 to the list")
def append_1(results):
results.append(1)
@when('I append 2 to the list')
@when("I append 2 to the list")
def append_2(results):
results.append(2)
@when('I append 3 to the list')
@when("I append 3 to the list")
def append_3(results):
results.append(3)
@then('foo should have value "foo"')
def foo_is_foo(foo):
assert foo == 'foo'
assert foo == "foo"
@then('the list should be [1, 2, 3]')
@then("the list should be [1, 2, 3]")
def check_results(results):
assert results == [1, 2, 3]
@scenario('steps.feature', 'When step can be the first')
@scenario("steps.feature", "When step can be the first")
def test_when_first():
pass
@when('I do nothing')
@when("I do nothing")
def do_nothing():
pass
@then('I make no mistakes')
@then("I make no mistakes")
def no_errors():
assert True
@scenario('steps.feature', 'Then step can follow Given step')
@scenario("steps.feature", "Then step can follow Given step")
def test_then_after_given():
pass
@given('xyz')
@given("xyz")
def xyz():
"""Used in the test_same_step_name."""
return
@scenario('steps.feature', 'All steps are declared in the conftest')
@scenario("steps.feature", "All steps are declared in the conftest")
def test_conftest():
pass
def test_multiple_given(request):
"""Using the same given fixture raises an error."""
@scenario(
'steps.feature',
'Using the same given fixture raises an error',
)
@scenario("steps.feature", "Using the same given fixture raises an error")
def test():
pass
@ -90,7 +88,9 @@ def test_multiple_given(request):
def test_step_hooks(testdir):
"""When step fails."""
testdir.makefile(".feature", test="""
testdir.makefile(
".feature",
test="""
Scenario: When step has hook on failure
Given I have a bar
When it fails
@@ -105,8 +105,10 @@ def test_step_hooks(testdir):
Scenario: When step validation error happens
Given foo
And foo
""")
testdir.makepyfile("""
""",
)
testdir.makepyfile(
"""
import pytest
from pytest_bdd import given, when, scenario
@@ -149,7 +151,8 @@ def test_step_hooks(testdir):
@scenario('test.feature', 'When step validation error happens')
def test_when_step_validation_error():
pass
""")
"""
)
reprec = testdir.inline_run("-k test_when_fails")
assert reprec.ret == 1
@@ -180,7 +183,7 @@ def test_step_hooks(testdir):
reprec = testdir.inline_run("-k test_when_step_validation_error")
assert reprec.ret == 1
reprec = testdir.inline_run("-k test_when_dependency_fails", '-vv')
reprec = testdir.inline_run("-k test_when_dependency_fails", "-vv")
assert reprec.ret == 1
calls = reprec.getcalls("pytest_bdd_before_step")
@@ -195,12 +198,16 @@ def test_step_hooks(testdir):
def test_step_trace(testdir):
"""Test step trace."""
testdir.makeini("""
testdir.makeini(
"""
[pytest]
console_output_style=classic
""")
"""
)
testdir.makefile('.feature', test="""
testdir.makefile(
".feature",
test="""
Scenario: When step has failure
Given I have a bar
When it fails
@@ -211,8 +218,10 @@ def test_step_trace(testdir):
Scenario: When step validation error happens
Given foo
And foo
""")
testdir.makepyfile("""
""",
)
testdir.makepyfile(
"""
import pytest
from pytest_bdd import given, when, scenario
@@ -243,23 +252,24 @@ def test_step_trace(testdir):
@scenario('test.feature', 'When step validation error happens')
def test_when_step_validation_error():
pass
""")
result = testdir.runpytest('-k test_when_fails_inline', '-vv')
"""
)
result = testdir.runpytest("-k test_when_fails_inline", "-vv")
assert result.ret == 1
result.stdout.fnmatch_lines(['*test_when_fails_inline*FAILED'])
assert 'INTERNALERROR' not in result.stdout.str()
result.stdout.fnmatch_lines(["*test_when_fails_inline*FAILED"])
assert "INTERNALERROR" not in result.stdout.str()
result = testdir.runpytest('-k test_when_fails_decorated', '-vv')
result = testdir.runpytest("-k test_when_fails_decorated", "-vv")
assert result.ret == 1
result.stdout.fnmatch_lines(['*test_when_fails_decorated*FAILED'])
assert 'INTERNALERROR' not in result.stdout.str()
result.stdout.fnmatch_lines(["*test_when_fails_decorated*FAILED"])
assert "INTERNALERROR" not in result.stdout.str()
result = testdir.runpytest('-k test_when_not_found', '-vv')
result = testdir.runpytest("-k test_when_not_found", "-vv")
assert result.ret == 1
result.stdout.fnmatch_lines(['*test_when_not_found*FAILED'])
assert 'INTERNALERROR' not in result.stdout.str()
result.stdout.fnmatch_lines(["*test_when_not_found*FAILED"])
assert "INTERNALERROR" not in result.stdout.str()
result = testdir.runpytest('-k test_when_step_validation_error', '-vv')
result = testdir.runpytest("-k test_when_step_validation_error", "-vv")
assert result.ret == 1
result.stdout.fnmatch_lines(['*test_when_step_validation_error*FAILED'])
assert 'INTERNALERROR' not in result.stdout.str()
result.stdout.fnmatch_lines(["*test_when_step_validation_error*FAILED"])
assert "INTERNALERROR" not in result.stdout.str()

View File

@@ -8,7 +8,10 @@ from pytest_bdd import feature
def test_tags_selector(testdir):
"""Test tests selection by tags."""
testdir.makefile(".ini", pytest=textwrap.dedent("""
testdir.makefile(
".ini",
pytest=textwrap.dedent(
"""
[pytest]
markers =
feature_tag_1
@@ -17,8 +20,12 @@ def test_tags_selector(testdir):
scenario_tag_02
scenario_tag_10
scenario_tag_20
"""))
testdir.makefile('.feature', test="""
"""
),
)
testdir.makefile(
".feature",
test="""
@feature_tag_1 @feature_tag_2
Feature: Tags
@@ -30,8 +37,10 @@ def test_tags_selector(testdir):
Scenario: Tags 2
Given I have a bar
""")
testdir.makepyfile("""
""",
)
testdir.makepyfile(
"""
import pytest
from pytest_bdd import given, scenarios
@@ -40,30 +49,38 @@ def test_tags_selector(testdir):
return 'bar'
scenarios('test.feature')
""")
result = testdir.runpytest('-m', 'scenario_tag_10 and not scenario_tag_01', '-vv')
"""
)
result = testdir.runpytest("-m", "scenario_tag_10 and not scenario_tag_01", "-vv")
outcomes = result.parseoutcomes()
assert outcomes['passed'] == 1
assert outcomes['deselected'] == 1
assert outcomes["passed"] == 1
assert outcomes["deselected"] == 1
result = testdir.runpytest('-m', 'scenario_tag_01 and not scenario_tag_10', '-vv').parseoutcomes()
assert result['passed'] == 1
assert result['deselected'] == 1
result = testdir.runpytest("-m", "scenario_tag_01 and not scenario_tag_10", "-vv").parseoutcomes()
assert result["passed"] == 1
assert result["deselected"] == 1
result = testdir.runpytest('-m', 'feature_tag_1', '-vv').parseoutcomes()
assert result['passed'] == 2
result = testdir.runpytest("-m", "feature_tag_1", "-vv").parseoutcomes()
assert result["passed"] == 2
result = testdir.runpytest('-m', 'feature_tag_10', '-vv').parseoutcomes()
assert result['deselected'] == 2
result = testdir.runpytest("-m", "feature_tag_10", "-vv").parseoutcomes()
assert result["deselected"] == 2
def test_tags_after_background_issue_160(testdir):
"""Make sure using a tag after background works."""
testdir.makefile(".ini", pytest=textwrap.dedent("""
testdir.makefile(
".ini",
pytest=textwrap.dedent(
"""
[pytest]
markers = tag
"""))
testdir.makefile('.feature', test="""
"""
),
)
testdir.makefile(
".feature",
test="""
Feature: Tags after background
Background:
@@ -75,8 +92,10 @@ def test_tags_after_background_issue_160(testdir):
Scenario: Tags 2
Given I have a baz
""")
testdir.makepyfile("""
""",
)
testdir.makepyfile(
"""
import pytest
from pytest_bdd import given, scenarios
@@ -89,14 +108,16 @@ def test_tags_after_background_issue_160(testdir):
return 'baz'
scenarios('test.feature')
""")
result = testdir.runpytest('-m', 'tag', '-vv').parseoutcomes()
assert result['passed'] == 1
assert result['deselected'] == 1
"""
)
result = testdir.runpytest("-m", "tag", "-vv").parseoutcomes()
assert result["passed"] == 1
assert result["deselected"] == 1
def test_apply_tag_hook(testdir):
testdir.makeconftest("""
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(tryfirst=True)
@@ -108,8 +129,11 @@ def test_apply_tag_hook(testdir):
else:
# Fall back to pytest-bdd's default behavior
return None
""")
testdir.makefile('.feature', test="""
"""
)
testdir.makefile(
".feature",
test="""
Feature: Customizing tag handling
@todo
@@ -119,8 +143,10 @@ def test_apply_tag_hook(testdir):
@xfail
Scenario: Tags 2
Given I have a bar
""")
testdir.makepyfile("""
""",
)
testdir.makepyfile(
"""
from pytest_bdd import given, scenarios
@given('I have a bar')
@@ -128,33 +154,45 @@ def test_apply_tag_hook(testdir):
return 'bar'
scenarios('test.feature')
""")
result = testdir.runpytest('-rsx')
"""
)
result = testdir.runpytest("-rsx")
result.stdout.fnmatch_lines(["SKIP*: Not implemented yet"])
result.stdout.fnmatch_lines(["*= 1 skipped, 1 xpassed * =*"])
def test_tag_with_spaces(testdir):
testdir.makefile(".ini", pytest=textwrap.dedent("""
testdir.makefile(
".ini",
pytest=textwrap.dedent(
"""
[pytest]
markers =
test with spaces
"""))
testdir.makeconftest("""
"""
),
)
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(tryfirst=True)
def pytest_bdd_apply_tag(tag, function):
assert tag == 'test with spaces'
""")
testdir.makefile('.feature', test="""
"""
)
testdir.makefile(
".feature",
test="""
Feature: Tag with spaces
@test with spaces
Scenario: Tags
Given I have a bar
""")
testdir.makepyfile("""
""",
)
testdir.makepyfile(
"""
from pytest_bdd import given, scenarios
@given('I have a bar')
@@ -162,17 +200,16 @@ def test_tag_with_spaces(testdir):
return 'bar'
scenarios('test.feature')
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"*= 1 passed * =*",
],
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed * =*"])
def test_at_in_scenario(testdir):
testdir.makefile('.feature', test="""
testdir.makefile(
".feature",
test="""
Feature: At sign in a scenario
Scenario: Tags
@@ -180,8 +217,10 @@ def test_at_in_scenario(testdir):
Scenario: Second
Given I have a baz
""")
testdir.makepyfile("""
""",
)
testdir.makepyfile(
"""
from pytest_bdd import given, scenarios
@given('I have a foo@bar')
@@ -193,23 +232,23 @@ def test_at_in_scenario(testdir):
return 'baz'
scenarios('test.feature')
""")
result = testdir.runpytest_subprocess('--strict')
result.stdout.fnmatch_lines(
[
"*= 2 passed * =*",
],
"""
)
result = testdir.runpytest_subprocess("--strict")
result.stdout.fnmatch_lines(["*= 2 passed * =*"])
@pytest.mark.parametrize('line, expected', [
('@foo @bar', {'foo', 'bar'}),
('@with spaces @bar', {'with spaces', 'bar'}),
('@double @double', {'double'}),
(' @indented', {'indented'}),
(None, set()),
('foobar', set()),
('', set()),
])
@pytest.mark.parametrize(
"line, expected",
[
("@foo @bar", {"foo", "bar"}),
("@with spaces @bar", {"with spaces", "bar"}),
("@double @double", {"double"}),
(" @indented", {"indented"}),
(None, set()),
("foobar", set()),
("", set()),
],
)
def test_get_tags(line, expected):
assert feature.get_tags(line) == expected

View File

@@ -12,39 +12,41 @@ from pytest_bdd.feature import features
from pytest_bdd import exceptions
@given('something')
@given("something")
def given_something():
pass
@when('something else')
@when("something else")
def when_something_else():
pass
@then('nevermind')
@then("nevermind")
def then_nevermind():
pass
@pytest.mark.parametrize(
('feature', 'scenario_name'),
("feature", "scenario_name"),
[
('when_in_background.feature', 'When in background'),
('when_after_then.feature', 'When after then'),
('then_first.feature', 'Then first'),
('given_after_when.feature', 'Given after When'),
('given_after_then.feature', 'Given after Then'),
]
("when_in_background.feature", "When in background"),
("when_after_then.feature", "When after then"),
("then_first.feature", "Then first"),
("given_after_when.feature", "Given after When"),
("given_after_then.feature", "Given after Then"),
],
)
@pytest.mark.parametrize('strict_gherkin', [True, False])
@pytest.mark.parametrize('multiple', [True, False])
@pytest.mark.parametrize("strict_gherkin", [True, False])
@pytest.mark.parametrize("multiple", [True, False])
def test_wrong(request, feature, scenario_name, strict_gherkin, multiple):
"""Test wrong feature scenarios."""
def declare_scenario():
if multiple:
scenarios(feature, strict_gherkin=strict_gherkin)
else:
@scenario(feature, scenario_name, strict_gherkin=strict_gherkin)
def test_scenario():
pass
@@ -58,46 +60,41 @@ def test_wrong(request, feature, scenario_name, strict_gherkin, multiple):
def clean_cache():
features.clear()
request.addfinalizer(clean_cache)
@pytest.mark.parametrize(
'scenario_name',
[
'When in Given',
'When in Then',
'Then in Given',
'Given in When',
'Given in Then',
'Then in When',
]
"scenario_name",
["When in Given", "When in Then", "Then in Given", "Given in When", "Given in Then", "Then in When"],
)
def test_wrong_type_order(request, scenario_name):
"""Test wrong step type order."""
@scenario('wrong_type_order.feature', scenario_name)
@scenario("wrong_type_order.feature", scenario_name)
def test_wrong_type_order(request):
pass
with pytest.raises(exceptions.StepDefinitionNotFoundError) as excinfo:
test_wrong_type_order(request)
assert re.match(r'Step definition is not found: (.+)', excinfo.value.args[0])
assert re.match(r"Step definition is not found: (.+)", excinfo.value.args[0])
def test_verbose_output():
"""Test verbose output of failed feature scenario."""
with pytest.raises(exceptions.FeatureError) as excinfo:
scenario('when_after_then.feature', 'When after then')
scenario("when_after_then.feature", "When after then")
msg, line_number, line, file = excinfo.value.args
assert line_number == 5
assert line == 'When I do it again'
assert file == os.path.join(os.path.dirname(__file__), 'when_after_then.feature')
assert line == "When I do it again"
assert file == os.path.join(os.path.dirname(__file__), "when_after_then.feature")
assert line in str(excinfo.value)
def test_multiple_features_single_file():
"""Test validation error when multiple features are placed in a single file."""
with pytest.raises(exceptions.FeatureError) as excinfo:
scenarios('wrong_multiple_features.feature')
assert excinfo.value.args[0] == 'Multiple features are not allowed in a single feature file'
scenarios("wrong_multiple_features.feature")
assert excinfo.value.args[0] == "Multiple features are not allowed in a single feature file"

View File

@@ -8,7 +8,7 @@ from pytest_bdd.scenario import get_python_name_generator
def test_python_name_generator():
"""Test python name generator function."""
itertools.islice(get_python_name_generator('Some name'), 2) == ['some_name', 'some_name_2']
itertools.islice(get_python_name_generator("Some name"), 2) == ["some_name", "some_name_2"]
def test_generate_missing(testdir):
@@ -16,9 +16,11 @@ def test_generate_missing(testdir):
dirname = "test_generate_missing"
tests = testdir.mkpydir(dirname)
with open(os.path.join(os.path.dirname(__file__), "generation.feature")) as fd:
tests.join('generation.feature').write(fd.read())
tests.join("generation.feature").write(fd.read())
tests.join("test_foo.py").write(textwrap.dedent("""
tests.join("test_foo.py").write(
textwrap.dedent(
"""
import functools
from pytest_bdd import scenario, given
@@ -36,18 +38,20 @@ def test_generate_missing(testdir):
@scenario("Code is generated for scenario steps which are not yet defined(implemented)")
def test_missing_steps():
pass
"""))
"""
)
)
result = testdir.runpytest(dirname, "--generate-missing", "--feature", tests.join('generation.feature').strpath)
result = testdir.runpytest(dirname, "--generate-missing", "--feature", tests.join("generation.feature").strpath)
result.stdout.fnmatch_lines([
'Scenario "Code is generated for scenarios which are not bound to any tests" is not bound to any test *']
result.stdout.fnmatch_lines(
['Scenario "Code is generated for scenarios which are not bound to any tests" is not bound to any test *']
)
result.stdout.fnmatch_lines(
[
'Step Given "I have a custom bar" is not defined in the scenario '
'"Code is generated for scenario steps which are not yet defined(implemented)" *',
'"Code is generated for scenario steps which are not yet defined(implemented)" *'
]
)

View File

@@ -1,6 +1,6 @@
from pytest_bdd import given
@given('I have the overriden fixture')
@given("I have the overriden fixture")
def overridable():
return 'child'
return "child"

View File

@@ -7,30 +7,30 @@ from pytest_bdd import given
from pytest_bdd.steps import get_step_fixture_name, GIVEN
@given('I have locally overriden fixture')
@given("I have locally overriden fixture")
def overridable():
return 'local'
return "local"
@given('I have locally overriden parent fixture')
@given("I have locally overriden parent fixture")
def parent():
return 'local'
return "local"
def test_override(request, overridable):
"""Test locally overriden fixture."""
# Test the fixture is also collected by the text name
fixture = request.getfixturevalue(get_step_fixture_name('I have locally overriden fixture', GIVEN))
assert fixture(request) == 'local'
fixture = request.getfixturevalue(get_step_fixture_name("I have locally overriden fixture", GIVEN))
assert fixture(request) == "local"
# 'I have the overriden fixture' stands for overridable and is overriden locally
fixture = request.getfixturevalue(get_step_fixture_name('I have the overriden fixture', GIVEN))
assert fixture(request) == 'local'
fixture = request.getfixturevalue(get_step_fixture_name("I have the overriden fixture", GIVEN))
assert fixture(request) == "local"
assert overridable == 'local'
assert overridable == "local"
def test_parent(parent):
"""Test locally overriden parent fixture."""
assert parent == 'local'
assert parent == "local"

View File

@@ -6,9 +6,9 @@ Check the parent givens are collected and overriden in the local conftest.
def test_parent(parent):
"""Test parent given is collected."""
assert parent == 'parent'
assert parent == "parent"
def test_override(overridable):
"""Test the child conftest overriding the fixture."""
assert overridable == 'child'
assert overridable == "child"

View File

@@ -1,11 +1,11 @@
from pytest_bdd import given
@given('I have parent fixture')
@given("I have parent fixture")
def parent():
return 'parent'
return "parent"
@given('I have overridable parent fixture')
@given("I have overridable parent fixture")
def overridable():
return 'parent'
return "parent"

View File

@@ -10,10 +10,10 @@ def test_parent(parent, overridable):
Both fixtures come from the parent conftest.
"""
assert parent == 'parent'
assert overridable == 'parent'
assert parent == "parent"
assert overridable == "parent"
def test_global_when_step(request):
"""Test when step defined in the parent conftest."""
request.getfixturevalue(get_step_fixture_name('I use a when step from the parent conftest', WHEN))
request.getfixturevalue(get_step_fixture_name("I use a when step from the parent conftest", WHEN))

View File

@@ -10,10 +10,11 @@ PATH = os.path.dirname(__file__)
def test_generate(monkeypatch, capsys):
"""Test if the code is generated by a given feature."""
monkeypatch.setattr(sys, 'argv', ['', 'generate', os.path.join(PATH, 'generate.feature')])
monkeypatch.setattr(sys, "argv", ["", "generate", os.path.join(PATH, "generate.feature")])
main()
out, err = capsys.readouterr()
assert out == textwrap.dedent('''
assert out == textwrap.dedent(
'''
# coding=utf-8
"""Code generation feature tests."""
@@ -53,4 +54,9 @@ def test_generate(monkeypatch, capsys):
"""my list should be [1]."""
raise NotImplementedError
'''[1:].replace(u"'", u"'"))
'''[
1:
].replace(
u"'", u"'"
)
)

View File

@@ -9,9 +9,9 @@ PATH = os.path.dirname(__file__)
def test_main(monkeypatch, capsys):
"""Test if main commmand shows help when called without the subcommand."""
monkeypatch.setattr(sys, 'argv', ['pytest-bdd'])
monkeypatch.setattr(sys, 'exit', lambda x: x)
monkeypatch.setattr(sys, "argv", ["pytest-bdd"])
monkeypatch.setattr(sys, "exit", lambda x: x)
main()
out, err = capsys.readouterr()
assert 'usage: pytest-bdd [-h]' in err
assert 'pytest-bdd: error:' in err
assert "usage: pytest-bdd [-h]" in err
assert "pytest-bdd: error:" in err

View File

@@ -10,28 +10,40 @@ def test_migrate(monkeypatch, capsys, testdir):
def test_migrate(monkeypatch, capsys, testdir):
"""Test if the code is migrated by a given file mask."""
tests = testdir.mkpydir('tests')
tests = testdir.mkpydir("tests")
tests.join("test_foo.py").write(textwrap.dedent('''
tests.join("test_foo.py").write(
textwrap.dedent(
'''
"""Foo bar tests."""
from pytest_bdd import scenario
test_foo = scenario('foo_bar.feature', 'Foo bar')
'''))
'''
)
)
monkeypatch.setattr(sys, 'argv', ['', 'migrate', tests.strpath])
monkeypatch.setattr(sys, "argv", ["", "migrate", tests.strpath])
main()
out, err = capsys.readouterr()
out = '\n'.join(sorted(out.splitlines()))
expected = textwrap.dedent('''
out = "\n".join(sorted(out.splitlines()))
expected = textwrap.dedent(
"""
migrated: {0}/test_foo.py
skipped: {0}/__init__.py'''.format(tests.strpath)[1:])
skipped: {0}/__init__.py""".format(
tests.strpath
)[
1:
]
)
assert out == expected
assert tests.join("test_foo.py").read() == textwrap.dedent('''
assert tests.join("test_foo.py").read() == textwrap.dedent(
'''
"""Foo bar tests."""
from pytest_bdd import scenario
@scenario('foo_bar.feature', 'Foo bar')
def test_foo():
pass
''')
'''
)

View File

@@ -14,59 +14,60 @@ given("I have alias for foo", fixture="foo")
given("I have an alias to the root fixture", fixture="root")
@given("I have session foo", scope='session')
@given("I have session foo", scope="session")
def session_foo():
return "session foo"
@scenario('given.feature', 'Test reusing local fixture')
@scenario("given.feature", "Test reusing local fixture")
def test_given_with_fixture():
pass
@scenario('given.feature', 'Test reusing root fixture')
@scenario("given.feature", "Test reusing root fixture")
def test_root_alias():
pass
@scenario('given.feature', 'Test session given')
@scenario("given.feature", "Test session given")
def test_session_given():
pass
@scenario('given.feature', 'Test given fixture injection')
@scenario("given.feature", "Test given fixture injection")
def test_given_injection():
pass
@given("I have injecting given", target_fixture='foo')
@given("I have injecting given", target_fixture="foo")
def injecting_given():
return "injected foo"
@then('foo should be "injected foo"')
def foo_is_foo(foo):
assert foo == 'injected foo'
assert foo == "injected foo"
@then('foo should be "foo"')
def foo_is_foo(foo):
assert foo == 'foo'
assert foo == "foo"
@then('session foo should be "session foo"')
def session_foo_is_foo(session_foo):
assert session_foo == 'session foo'
assert session_foo == "session foo"
@then('root should be "root"')
def root_is_root(root):
assert root == 'root'
assert root == "root"
def test_decorate_with_fixture():
"""Test given can't be used as decorator when the fixture is specified."""
with pytest.raises(StepError):
@given('Foo', fixture='foo')
@given("Foo", fixture="foo")
def bla():
pass

View File

@@ -5,12 +5,12 @@ from pytest_bdd import given, when, then
from pytest_bdd.steps import get_step_fixture_name, WHEN, THEN
@when('I do stuff')
@when("I do stuff")
def do_stuff():
pass
@then('I check stuff')
@then("I check stuff")
def check_stuff():
pass
@@ -21,22 +21,19 @@ def test_when_then(request):
This test checks that when and then are not evaluated
during fixture collection that might break the scenario.
"""
do_stuff_ = request.getfixturevalue(get_step_fixture_name('I do stuff', WHEN))
do_stuff_ = request.getfixturevalue(get_step_fixture_name("I do stuff", WHEN))
assert callable(do_stuff_)
check_stuff_ = request.getfixturevalue(get_step_fixture_name('I check stuff', THEN))
check_stuff_ = request.getfixturevalue(get_step_fixture_name("I check stuff", THEN))
assert callable(check_stuff_)
@pytest.mark.parametrize(
('step', 'keyword'), [
(given, 'Given'),
(when, 'When'),
(then, 'Then')])
@pytest.mark.parametrize(("step", "keyword"), [(given, "Given"), (when, "When"), (then, "Then")])
def test_preserve_decorator(step, keyword):
"""Check that we preserve original function attributes after decorating it."""
@step(keyword)
def func():
"""Doc string."""
assert globals()[get_step_fixture_name(keyword, step.__name__)].__doc__ == 'Doc string.'
assert globals()[get_step_fixture_name(keyword, step.__name__)].__doc__ == "Doc string."

View File

@@ -4,22 +4,17 @@
import sys
import pytest
import functools
from pytest_bdd import (
given,
parsers,
scenario,
then,
)
from pytest_bdd import given, parsers, scenario, then
scenario = functools.partial(scenario, 'unicode.feature')
scenario = functools.partial(scenario, "unicode.feature")
@scenario('Кроки в .feature файлі містять юнікод')
@scenario("Кроки в .feature файлі містять юнікод")
def test_steps_in_feature_file_have_unicode():
pass
@scenario(u'Steps in .py file have unicode')
@scenario(u"Steps in .py file have unicode")
def test_steps_in_py_file_have_unicode():
pass
@@ -30,7 +25,7 @@ pattern = r"(?P<content>'\w+')"
@pytest.fixture
def string():
"""String fixture."""
return {'content': ''}
return {"content": ""}
given(u"I have an alias with a unicode type for foo", fixture="foo")
@@ -39,24 +34,24 @@ given(u"I have an alias with a unicode type for foo", fixture="foo")
@given(parsers.parse(u"у мене є рядок який містить '{content}'"))
def there_is_a_string_with_content(content, string):
"""Create string with unicode content."""
string['content'] = content
string["content"] = content
@given("there is an other string with content 'якийсь контент'")
def there_is_an_other_string_with_content(string):
"""Create other string with unicode content."""
string['content'] = u"с каким-то контентом"
string["content"] = u"с каким-то контентом"
@then("I should see that the other string equals to content 'якийсь контент'")
def assert_that_the_other_string_equals_to_content(string):
"""Assert that the other string equals to content."""
assert string['content'] == u"с каким-то контентом"
assert string["content"] == u"с каким-то контентом"
@then(parsers.parse("I should see that the string equals to content '{content}'"))
def assert_that_the_string_equals_to_content(content, string):
"""Assert that the string equals to content."""
assert string['content'] == content
assert string["content"] == content
if sys.version_info < (3, 0):
assert isinstance(content, unicode)

View File

@@ -5,36 +5,49 @@ def test_hooks(testdir):
testdir.makeconftest("")
subdir = testdir.mkpydir("subdir")
subdir.join("conftest.py").write(textwrap.dedent(r"""
subdir.join("conftest.py").write(
textwrap.dedent(
r"""
def pytest_pyfunc_call(pyfuncitem):
print('\npytest_pyfunc_call hook')
def pytest_generate_tests(metafunc):
print('\npytest_generate_tests hook')
"""))
"""
)
)
subdir.join("test_foo.py").write(textwrap.dedent(r"""
subdir.join("test_foo.py").write(
textwrap.dedent(
r"""
from pytest_bdd import scenario
@scenario('foo.feature', 'Some scenario')
def test_foo():
pass
"""))
"""
)
)
subdir.join("foo.feature").write(textwrap.dedent(r"""
subdir.join("foo.feature").write(
textwrap.dedent(
r"""
Feature: The feature
Scenario: Some scenario
"""))
"""
)
)
result = testdir.runpytest("-s")
assert result.stdout.lines.count('pytest_pyfunc_call hook') == 1
assert result.stdout.lines.count('pytest_generate_tests hook') == 1
assert result.stdout.lines.count("pytest_pyfunc_call hook") == 1
assert result.stdout.lines.count("pytest_generate_tests hook") == 1
def test_item_collection_does_not_break_on_non_function_items(testdir):
"""Regression test for https://github.com/pytest-dev/pytest-bdd/issues/317"""
testdir.makeconftest("""
testdir.makeconftest(
"""
import pytest
@pytest.mark.tryfirst
@@ -44,12 +57,15 @@ def test_item_collection_does_not_break_on_non_function_items(testdir):
class CustomItem(pytest.Item):
def runtest(self):
assert True
""")
"""
)
testdir.makepyfile("""
testdir.makepyfile(
"""
def test_convert_me_to_custom_item_and_assert_true():
assert False
""")
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)

View File

@@ -10,7 +10,7 @@ def prepare_feature_and_py_files(testdir, feature_file, py_file):
feature_filepath = get_test_filepath(feature_file)
with open(feature_filepath) as feature_file:
feature_content = feature_file.read()
testdir.makefile('.feature', unicode=feature_content)
testdir.makefile(".feature", unicode=feature_content)
py_filepath = get_test_filepath(py_file)
with open(py_filepath) as py_file: