
""" support for skip/xfail functions and markers. """ 

import os 

import sys 

import traceback 

 

import py 

import pytest 

from _pytest.mark import MarkInfo, MarkDecorator 

 

 

def pytest_addoption(parser): 

    group = parser.getgroup("general") 

    group.addoption('--runxfail', 

           action="store_true", dest="runxfail", default=False, 

           help="run tests even if they are marked xfail") 

 

    parser.addini("xfail_strict", "default for the strict parameter of xfail " 

                                  "markers when not given explicitly (default: " 

                                  "False)", 

                                  default=False, 

                                  type="bool") 

 

 

def pytest_configure(config): 

    if config.option.runxfail:

        old = pytest.xfail 

        config._cleanup.append(lambda: setattr(pytest, "xfail", old)) 

        def nop(*args, **kwargs): 

            pass 

        nop.Exception = XFailed 

        setattr(pytest, "xfail", nop) 

 

    config.addinivalue_line("markers", 

        "skipif(condition): skip the given test function if eval(condition) " 

        "results in a True value.  Evaluation happens within the " 

        "module global context. Example: skipif('sys.platform == \"win32\"') " 

        "skips the test if we are on the win32 platform. see " 

        "http://pytest.org/latest/skipping.html" 

    ) 

    config.addinivalue_line("markers", 

        "xfail(condition, reason=None, run=True, raises=None): mark the the test function " 

        "as an expected failure if eval(condition) has a True value. " 

        "Optionally specify a reason for better reporting and run=False if " 

        "you don't even want to execute the test function. If only specific " 

        "exception(s) are expected, you can list them in raises, and if the test fails " 

        "in other ways, it will be reported as a true failure. " 

        "See http://pytest.org/latest/skipping.html" 

    ) 
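    # Illustrative marker usage in test code, mirroring the descriptions
    # registered above (the example test names are hypothetical):
    #
    #   @pytest.mark.skipif('sys.platform == "win32"', reason="POSIX only")
    #   def test_posix_only(): ...
    #
    #   @pytest.mark.xfail(run=False, reason="crashes the interpreter")
    #   def test_not_run(): ...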

 

 

def pytest_namespace(): 

    return dict(xfail=xfail) 

 

 

class XFailed(pytest.fail.Exception): 

    """ raised from an explicit call to pytest.xfail() """ 

 

 

def xfail(reason=""): 

    """ xfail an executing test or setup functions with the given reason.""" 

    __tracebackhide__ = True 

    raise XFailed(reason) 

xfail.Exception = XFailed 
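# Illustrative imperative use inside a test body (a sketch; the ``backend``
# name below is hypothetical):
#
#   def test_optional_backend():
#       if backend is None:
#           pytest.xfail("optional backend not installed")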

 

 

class MarkEvaluator: 
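    """Lazy evaluator for a skip/xfail mark attached to ``item`` under ``name``.

    Condition strings are eval()ed against the namespace returned by
    ``_getglobals``; the outcome and the matching expression/reason are
    cached on the instance.
    """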

    def __init__(self, item, name): 

        self.item = item 

        self.name = name 

 

    @property 

    def holder(self): 

        return self.item.keywords.get(self.name) 

 

    def __bool__(self): 

        return bool(self.holder) 

    __nonzero__ = __bool__ 

 

    def wasvalid(self): 

        return not hasattr(self, 'exc') 

 

    def invalidraise(self, exc): 

        raises = self.get('raises') 

        if not raises: 

            return 

        return not isinstance(exc, raises) 

 

    def istrue(self): 

        try: 

            return self._istrue() 

        except Exception: 

            self.exc = sys.exc_info() 

            if isinstance(self.exc[1], SyntaxError): 

                msg = [" " * (self.exc[1].offset + 4) + "^",] 

                msg.append("SyntaxError: invalid syntax") 

            else: 

                msg = traceback.format_exception_only(*self.exc[:2]) 

            pytest.fail("Error evaluating %r expression\n" 

                        "    %s\n" 

                        "%s" 

                        %(self.name, self.expr, "\n".join(msg)), 

                        pytrace=False) 

 

    def _getglobals(self): 

        d = {'os': os, 'sys': sys, 'config': self.item.config} 

        func = self.item.obj 

        try: 

            d.update(func.__globals__) 

        except AttributeError: 

            d.update(func.func_globals) 

        return d 

 

    def _istrue(self): 

        if hasattr(self, 'result'):

            return self.result 

        if self.holder:

            d = self._getglobals() 

            if self.holder.args: 

                self.result = False 

                # "holder" might be a MarkInfo or a MarkDecorator; only 

                # MarkInfo keeps track of all parameters it received in an 

                # _arglist attribute 

                if hasattr(self.holder, '_arglist'): 

                    arglist = self.holder._arglist 

                else: 

                    arglist = [(self.holder.args, self.holder.kwargs)] 

                for args, kwargs in arglist: 

                    for expr in args: 

                        self.expr = expr 

                        if isinstance(expr, py.builtin._basestring): 

                            result = cached_eval(self.item.config, expr, d) 

                        else: 

                            if "reason" not in kwargs: 

                                # XXX better be checked at collection time 

                                msg = "you need to specify reason=STRING " \ 

                                      "when using booleans as conditions." 

                                pytest.fail(msg) 

                            result = bool(expr) 

                        if result: 

                            self.result = True 

                            self.reason = kwargs.get('reason', None) 

                            self.expr = expr 

                            return self.result 

            else: 

                self.result = True 

        return getattr(self, 'result', False) 

 

    def get(self, attr, default=None): 

        return self.holder.kwargs.get(attr, default) 

 

    def getexplanation(self): 

        expl = getattr(self, 'reason', None) or self.get('reason', None) 

        if not expl: 

            if not hasattr(self, 'expr'): 

                return "" 

            else: 

                return "condition: " + str(self.expr) 

        return expl 

 

 

@pytest.hookimpl(tryfirst=True) 

def pytest_runtest_setup(item): 

    # Check if skip or skipif are specified as pytest marks 

 

    skipif_info = item.keywords.get('skipif') 

    if isinstance(skipif_info, (MarkInfo, MarkDecorator)):

        eval_skipif = MarkEvaluator(item, 'skipif') 

        if eval_skipif.istrue(): 

            item._evalskip = eval_skipif 

            pytest.skip(eval_skipif.getexplanation()) 

 

    skip_info = item.keywords.get('skip') 

    if isinstance(skip_info, (MarkInfo, MarkDecorator)):

        item._evalskip = True 

        if 'reason' in skip_info.kwargs: 

            pytest.skip(skip_info.kwargs['reason']) 

        elif skip_info.args: 

            pytest.skip(skip_info.args[0]) 

        else: 

            pytest.skip("unconditional skip") 

 

    item._evalxfail = MarkEvaluator(item, 'xfail') 

    check_xfail_no_run(item) 

 

 

@pytest.mark.hookwrapper 

def pytest_pyfunc_call(pyfuncitem): 

    check_xfail_no_run(pyfuncitem) 

    outcome = yield 

    passed = outcome.excinfo is None 

    if passed: 

        check_strict_xfail(pyfuncitem) 

 

 

def check_xfail_no_run(item): 

    """check xfail(run=False)""" 

    if not item.config.option.runxfail:

        evalxfail = item._evalxfail 

        if evalxfail.istrue():

            if not evalxfail.get('run', True): 

                pytest.xfail("[NOTRUN] " + evalxfail.getexplanation()) 

 

 

def check_strict_xfail(pyfuncitem): 

    """check xfail(strict=True) for the given PASSING test""" 

    evalxfail = pyfuncitem._evalxfail 

    if evalxfail.istrue(): 

        strict_default = pyfuncitem.config.getini('xfail_strict') 

        is_strict_xfail = evalxfail.get('strict', strict_default) 

        if is_strict_xfail: 

            del pyfuncitem._evalxfail 

            explanation = evalxfail.getexplanation() 

            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False) 
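# Illustrative strict xfail (a sketch): with strict=True an unexpected pass is
# turned into a failure by the check above.
#
#   @pytest.mark.xfail(strict=True, reason="known regression")
#   def test_known_regression(): ...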

 

 

@pytest.hookimpl(hookwrapper=True) 

def pytest_runtest_makereport(item, call): 
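    # Translate the skip/xfail evaluation recorded on the item into the final
    # report: expected failures become "skipped" reports carrying a
    # ``wasxfail`` reason, while unexpected passes and exceptions not listed
    # in ``raises`` are marked "failed".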

    outcome = yield 

    rep = outcome.get_result() 

    evalxfail = getattr(item, '_evalxfail', None) 

    evalskip = getattr(item, '_evalskip', None) 

    # unittest special case, see setting of _unexpectedsuccess

    if hasattr(item, '_unexpectedsuccess') and rep.when == "call":

        # we need to translate into how pytest encodes xpass 

        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess) 

        rep.outcome = "failed" 

    elif item.config.option.runxfail:

        pass   # don't interfere

    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):

        rep.wasxfail = "reason: " + call.excinfo.value.msg 

        rep.outcome = "skipped" 

    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \

        evalxfail.istrue(): 

        if call.excinfo: 

            if evalxfail.invalidraise(call.excinfo.value): 

                rep.outcome = "failed" 

            else: 

                rep.outcome = "skipped" 

                rep.wasxfail = evalxfail.getexplanation() 

        elif call.when == "call": 

            rep.outcome = "failed"  # xpass outcome 

            rep.wasxfail = evalxfail.getexplanation() 

    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:

        # skipped by mark.skipif; change the location of the failure 

        # to point to the item definition, otherwise it will display 

        # the location of where the skip exception was raised within pytest 

        filename, line, reason = rep.longrepr 

        filename, line = item.location[:2] 

        rep.longrepr = filename, line, reason 

 

# called by terminalreporter progress reporting 

def pytest_report_teststatus(report): 

    if hasattr(report, "wasxfail"):

        if report.skipped: 

            return "xfailed", "x", "xfail" 

        elif report.failed: 

            return "xpassed", "X", ("XPASS", {'yellow': True}) 

 

# called by the terminalreporter instance/plugin 

def pytest_terminal_summary(terminalreporter): 

    tr = terminalreporter 
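    # tr.reportchars is derived from the '-r' command line option; each
    # character handled below selects one summary category (e.g. '-rxXs'
    # shows xfailed, xpassed and skipped tests).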

    if not tr.reportchars:

        #for name in "xfailed skipped failed xpassed": 

        #    if not tr.stats.get(name, 0): 

        #        tr.write_line("HINT: use '-r' option to see extra " 

        #              "summary info about tests") 

        #        break 

        return 

 

    lines = [] 

    for char in tr.reportchars: 

        if char == "x": 

            show_xfailed(terminalreporter, lines) 

        elif char == "X": 

            show_xpassed(terminalreporter, lines) 

        elif char in "fF": 

            show_simple(terminalreporter, lines, 'failed', "FAIL %s") 

        elif char in "sS": 

            show_skipped(terminalreporter, lines) 

        elif char == "E": 

            show_simple(terminalreporter, lines, 'error', "ERROR %s") 

        elif char == 'p': 

            show_simple(terminalreporter, lines, 'passed', "PASSED %s") 

 

    if lines: 

        tr._tw.sep("=", "short test summary info") 

        for line in lines: 

            tr._tw.line(line) 

 

def show_simple(terminalreporter, lines, stat, format): 

    failed = terminalreporter.stats.get(stat) 

    if failed: 

        for rep in failed: 

            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) 

            lines.append(format %(pos,)) 

 

def show_xfailed(terminalreporter, lines): 

    xfailed = terminalreporter.stats.get("xfailed") 

    if xfailed: 

        for rep in xfailed: 

            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) 

            reason = rep.wasxfail 

            lines.append("XFAIL %s" % (pos,)) 

            if reason: 

                lines.append("  " + str(reason)) 

 

def show_xpassed(terminalreporter, lines): 

    xpassed = terminalreporter.stats.get("xpassed") 

    if xpassed: 

        for rep in xpassed: 

            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) 

            reason = rep.wasxfail 

            lines.append("XPASS %s %s" %(pos, reason)) 

 

def cached_eval(config, expr, d): 
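    """Evaluate the condition string ``expr`` in the namespace ``d``.

    Results are cached on ``config`` keyed by the expression text alone, so a
    given string is compiled and evaluated only once per test run.
    """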

    if not hasattr(config, '_evalcache'): 

        config._evalcache = {} 

    try: 

        return config._evalcache[expr] 

    except KeyError: 

        import _pytest._code 

        exprcode = _pytest._code.compile(expr, mode="eval") 

        config._evalcache[expr] = x = eval(exprcode, d) 

        return x 

 

 

def folded_skips(skipped): 
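    """Group skip reports sharing the same (fspath, lineno, reason) longrepr
    and return a list of (count, fspath, lineno, reason) tuples."""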

    d = {} 

    for event in skipped: 

        key = event.longrepr 

        assert len(key) == 3, (event, key) 

        d.setdefault(key, []).append(event) 

    l = [] 

    for key, events in d.items(): 

        l.append((len(events),) + key) 

    return l 

 

def show_skipped(terminalreporter, lines): 

    tr = terminalreporter 

    skipped = tr.stats.get('skipped', []) 

    if skipped: 

        #if not tr.hasopt('skipped'): 

        #    tr.write_line( 

        #        "%d skipped tests, specify -rs for more info" % 

        #        len(skipped)) 

        #    return 

        fskips = folded_skips(skipped) 

        if fskips: 

            #tr.write_sep("_", "skipped test summary") 

            for num, fspath, lineno, reason in fskips: 

                if reason.startswith("Skipped: "): 

                    reason = reason[9:] 

                lines.append("SKIP [%d] %s:%d: %s" % 

                    (num, fspath, lineno, reason))