Coverage for /home/ionel/open-source/pytest-cov/examples/adhoc-layout/.tox/py36/lib/python3.6/site-packages/_pytest/skipping.py : 17%

""" support for skip/xfail functions and markers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function
from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail

def pytest_addoption(parser):
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="run tests even if they are marked xfail",
    )
    parser.addini(
        "xfail_strict",
        "default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )

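# For illustration only (not part of this module): the "xfail_strict" ini option
# registered above is typically set in a project's configuration file, e.g.
#
#   # pytest.ini (hypothetical project file)
#   [pytest]
#   xfail_strict = true
#
# With this setting, an xfail-marked test that unexpectedly passes is reported
# as a failure unless the marker overrides strict explicitly.
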
def pytest_configure(config):
    if config.option.runxfail:
        # yay a hack
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception
        setattr(pytest, "xfail", nop)

"markers", "skip(reason=None): skip the given test function with an optional reason. " 'Example: skip(reason="no way of currently testing this") skips the ' "test.", ) "markers", "skipif(condition): skip the given test function if eval(condition) " "results in a True value. Evaluation happens within the " "module global context. Example: skipif('sys.platform == \"win32\"') " "skips the test if we are on the win32 platform. see " "https://docs.pytest.org/en/latest/skipping.html", ) "markers", "xfail(condition, reason=None, run=True, raises=None, strict=False): " "mark the test function as an expected failure if eval(condition) " "has a True value. Optionally specify a reason for better reporting " "and run=False if you don't even want to execute the test function. " "If only specific exception(s) are expected, you can list them in " "raises, and if the test fails in other ways, it will be reported as " "a true failure. See https://docs.pytest.org/en/latest/skipping.html", )
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    # Check if skip or skipif are specified as pytest marks
    item._skipped_by_mark = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._skipped_by_mark = True
        skip(eval_skipif.getexplanation())

    for skip_info in item.iter_markers(name="skip"):
        item._skipped_by_mark = True
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")

    item._evalxfail = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)

@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
    check_xfail_no_run(pyfuncitem)
    outcome = yield
    passed = outcome.excinfo is None
    if passed:
        check_strict_xfail(pyfuncitem)

def check_xfail_no_run(item):
    """check xfail(run=False)"""
    if not item.config.option.runxfail:
        evalxfail = item._evalxfail
        if evalxfail.istrue():
            if not evalxfail.get("run", True):
                xfail("[NOTRUN] " + evalxfail.getexplanation())

def check_strict_xfail(pyfuncitem):
    """check xfail(strict=True) for the given PASSING test"""
    evalxfail = pyfuncitem._evalxfail
    if evalxfail.istrue():
        strict_default = pyfuncitem.config.getini("xfail_strict")
        is_strict_xfail = evalxfail.get("strict", strict_default)
        if is_strict_xfail:
            del pyfuncitem._evalxfail
            explanation = evalxfail.getexplanation()
            fail("[XPASS(strict)] " + explanation, pytrace=False)

@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, "_evalxfail", None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
        from _pytest.compat import _is_unittest_unexpected_success_a_failure

        if item._unexpectedsuccess:
            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
        else:
            rep.longrepr = "Unexpected success"
        if _is_unittest_unexpected_success_a_failure():
            rep.outcome = "failed"
        else:
            rep.outcome = "passed"
            rep.wasxfail = rep.longrepr
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
        if call.excinfo:
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            strict_default = item.config.getini("xfail_strict")
            is_strict_xfail = evalxfail.get("strict", strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif (
        getattr(item, "_skipped_by_mark", False)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        filename, line, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line, reason

# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            return "xpassed", "X", "XPASS"

# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
    tr = terminalreporter
    if not tr.reportchars:
        return
    lines = []
    for char in tr.reportchars:
        action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None)
        action(terminalreporter, lines)
    if lines:
        tr._tw.sep("=", "short test summary info")
        for line in lines:
            tr._tw.line(line)

def show_simple(terminalreporter, lines, stat):
    failed = terminalreporter.stats.get(stat)
    if failed:
        for rep in failed:
            verbose_word = _get_report_str(terminalreporter, rep)
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            lines.append("%s %s" % (verbose_word, pos))

def show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        for rep in xfailed:
            verbose_word = _get_report_str(terminalreporter, rep)
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            lines.append("%s %s" % (verbose_word, pos))
            if reason:
                lines.append("  " + str(reason))

def show_xpassed(terminalreporter, lines):
    xpassed = terminalreporter.stats.get("xpassed")
    if xpassed:
        for rep in xpassed:
            verbose_word = _get_report_str(terminalreporter, rep)
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            lines.append("%s %s %s" % (verbose_word, pos, reason))

def folded_skips(skipped):
    d = {}
    for event in skipped:
        key = event.longrepr
        assert len(key) == 3, (event, key)
        keywords = getattr(event, "keywords", {})
        # folding reports with a global pytestmark variable
        # this is a workaround, because for now we cannot identify the scope of a skip marker
        # TODO: revisit once mark scopes are fixed
        if (
            event.when == "setup"
            and "skip" in keywords
            and "pytestmark" not in keywords
        ):
            key = (key[0], None, key[2])
        d.setdefault(key, []).append(event)
    values = []
    for key, events in d.items():
        values.append((len(events),) + key)
    return values

def show_skipped(terminalreporter, lines):
    tr = terminalreporter
    skipped = tr.stats.get("skipped", [])
    if skipped:
        verbose_word = _get_report_str(terminalreporter, report=skipped[0])
        fskips = folded_skips(skipped)
        if fskips:
            for num, fspath, lineno, reason in fskips:
                if reason.startswith("Skipped: "):
                    reason = reason[9:]
                if lineno is not None:
                    lines.append(
                        "%s [%d] %s:%d: %s"
                        % (verbose_word, num, fspath, lineno + 1, reason)
                    )
                else:
                    lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))

def shower(stat):
    def show_(terminalreporter, lines):
        return show_simple(terminalreporter, lines, stat)

    return show_

def _get_report_str(terminalreporter, report):
    _category, _short, verbose = terminalreporter.config.hook.pytest_report_teststatus(
        report=report, config=terminalreporter.config
    )
    return verbose

REPORTCHAR_ACTIONS = {
    "x": show_xfailed,
    "X": show_xpassed,
    "f": shower("failed"),
    "F": shower("failed"),
    "s": show_skipped,
    "S": show_skipped,
    "p": shower("passed"),
    "E": shower("error"),
}
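
# For reference: the keys above correspond to the characters accepted by the
# "-r" command line option; for example, "pytest -rxXs" selects show_xfailed,
# show_xpassed and show_skipped for the short test summary produced by
# pytest_terminal_summary().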