forked from bton/matekasse
tests attempt 2
parent fdf385fe06
commit c88f7df83a
2363 changed files with 408191 additions and 0 deletions
9  venv/lib/python3.11/site-packages/_pytest/__init__.py  Normal file
@@ -0,0 +1,9 @@
__all__ = ["__version__", "version_tuple"]

try:
    from ._version import version as __version__, version_tuple
except ImportError:  # pragma: no cover
    # broken installation, we don't even try
    # unknown only works because we do poor mans version compare
    __version__ = "unknown"
    version_tuple = (0, 0, "unknown")  # type:ignore[assignment]
46 binary files not shown.
116  venv/lib/python3.11/site-packages/_pytest/_argcomplete.py  Normal file
@@ -0,0 +1,116 @@
"""Allow bash-completion for argparse with argcomplete if installed.

Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
to find the magic string, so _ARGCOMPLETE env. var is never set, and
this does not need special code).

Function try_argcomplete(parser) should be called directly before
the call to ArgumentParser.parse_args().

The filescompleter is what you normally would use on the positional
arguments specification, in order to get "dirname/" after "dirn<TAB>"
instead of the default "dirname ":

   optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter

Other, application specific, completers should go in the file
doing the add_argument calls as they need to be specified as .completer
attributes as well. (If argcomplete is not installed, the function the
attribute points to will not be used).

SPEEDUP
=======

The generic argcomplete script for bash-completion
(/etc/bash_completion.d/python-argcomplete.sh)
uses a python program to determine startup script generated by pip.
You can speed up completion somewhat by changing this script to include
  # PYTHON_ARGCOMPLETE_OK
so the python-argcomplete-check-easy-install-script does not
need to be called to find the entry point of the code and see if that is
marked with PYTHON_ARGCOMPLETE_OK.

INSTALL/DEBUGGING
=================

To include this support in another application that has setup.py generated
scripts:

- Add the line:
    # PYTHON_ARGCOMPLETE_OK
  near the top of the main python entry point.

- Include in the file calling parse_args():
    from _argcomplete import try_argcomplete, filescompleter
  Call try_argcomplete just before parse_args(), and optionally add
  filescompleter to the positional arguments' add_argument().

If things do not work right away:

- Switch on argcomplete debugging with (also helpful when doing custom
  completers):
    export _ARC_DEBUG=1

- Run:
    python-argcomplete-check-easy-install-script $(which appname)
    echo $?
  will echo 0 if the magic line has been found, 1 if not.

- Sometimes it helps to find early on errors using:
    _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
  which should throw a KeyError: 'COMPLINE' (which is properly set by the
  global argcomplete script).
"""
import argparse
import os
import sys
from glob import glob
from typing import Any
from typing import List
from typing import Optional


class FastFilesCompleter:
    """Fast file completer class."""

    def __init__(self, directories: bool = True) -> None:
        self.directories = directories

    def __call__(self, prefix: str, **kwargs: Any) -> List[str]:
        # Only called on non option completions.
        if os.sep in prefix[1:]:
            prefix_dir = len(os.path.dirname(prefix) + os.sep)
        else:
            prefix_dir = 0
        completion = []
        globbed = []
        if "*" not in prefix and "?" not in prefix:
            # We are on unix, otherwise no bash.
            if not prefix or prefix[-1] == os.sep:
                globbed.extend(glob(prefix + ".*"))
            prefix += "*"
        globbed.extend(glob(prefix))
        for x in sorted(globbed):
            if os.path.isdir(x):
                x += "/"
            # Append stripping the prefix (like bash, not like compgen).
            completion.append(x[prefix_dir:])
        return completion


if os.environ.get("_ARGCOMPLETE"):
    try:
        import argcomplete.completers
    except ImportError:
        sys.exit(-1)
    filescompleter: Optional[FastFilesCompleter] = FastFilesCompleter()

    def try_argcomplete(parser: argparse.ArgumentParser) -> None:
        argcomplete.autocomplete(parser, always_complete_options=False)

else:

    def try_argcomplete(parser: argparse.ArgumentParser) -> None:
        pass

    filescompleter = None
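The INSTALL/DEBUGGING notes above describe how an application wires this module in. A minimal sketch of that wiring (not part of this commit; the "appname" entry point and its parser are hypothetical, inferred from the docstring):

# entry point marked near its top with: # PYTHON_ARGCOMPLETE_OK
import argparse
from _pytest._argcomplete import try_argcomplete, filescompleter

parser = argparse.ArgumentParser(prog="appname")
positional = parser.add_argument("file_or_dir", nargs="*")
if filescompleter is not None:
    positional.completer = filescompleter  # only used when argcomplete is installed
try_argcomplete(parser)  # must run directly before parse_args()
args = parser.parse_args()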
22  venv/lib/python3.11/site-packages/_pytest/_code/__init__.py  Normal file
@@ -0,0 +1,22 @@
"""Python inspection/code generation API."""
from .code import Code
from .code import ExceptionInfo
from .code import filter_traceback
from .code import Frame
from .code import getfslineno
from .code import Traceback
from .code import TracebackEntry
from .source import getrawcode
from .source import Source

__all__ = [
    "Code",
    "ExceptionInfo",
    "filter_traceback",
    "Frame",
    "getfslineno",
    "getrawcode",
    "Traceback",
    "TracebackEntry",
    "Source",
]
3 binary files not shown.
1337  venv/lib/python3.11/site-packages/_pytest/_code/code.py  Normal file
File diff suppressed because it is too large
217  venv/lib/python3.11/site-packages/_pytest/_code/source.py  Normal file
@@ -0,0 +1,217 @@
import ast
import inspect
import textwrap
import tokenize
import types
import warnings
from bisect import bisect_right
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import overload
from typing import Tuple
from typing import Union


class Source:
    """An immutable object holding a source code fragment.

    When using Source(...), the source lines are deindented.
    """

    def __init__(self, obj: object = None) -> None:
        if not obj:
            self.lines: List[str] = []
        elif isinstance(obj, Source):
            self.lines = obj.lines
        elif isinstance(obj, (tuple, list)):
            self.lines = deindent(x.rstrip("\n") for x in obj)
        elif isinstance(obj, str):
            self.lines = deindent(obj.split("\n"))
        else:
            try:
                rawcode = getrawcode(obj)
                src = inspect.getsource(rawcode)
            except TypeError:
                src = inspect.getsource(obj)  # type: ignore[arg-type]
            self.lines = deindent(src.split("\n"))

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Source):
            return NotImplemented
        return self.lines == other.lines

    # Ignore type because of https://github.com/python/mypy/issues/4266.
    __hash__ = None  # type: ignore

    @overload
    def __getitem__(self, key: int) -> str:
        ...

    @overload
    def __getitem__(self, key: slice) -> "Source":
        ...

    def __getitem__(self, key: Union[int, slice]) -> Union[str, "Source"]:
        if isinstance(key, int):
            return self.lines[key]
        else:
            if key.step not in (None, 1):
                raise IndexError("cannot slice a Source with a step")
            newsource = Source()
            newsource.lines = self.lines[key.start : key.stop]
            return newsource

    def __iter__(self) -> Iterator[str]:
        return iter(self.lines)

    def __len__(self) -> int:
        return len(self.lines)

    def strip(self) -> "Source":
        """Return new Source object with trailing and leading blank lines removed."""
        start, end = 0, len(self)
        while start < end and not self.lines[start].strip():
            start += 1
        while end > start and not self.lines[end - 1].strip():
            end -= 1
        source = Source()
        source.lines[:] = self.lines[start:end]
        return source

    def indent(self, indent: str = " " * 4) -> "Source":
        """Return a copy of the source object with all lines indented by the
        given indent-string."""
        newsource = Source()
        newsource.lines = [(indent + line) for line in self.lines]
        return newsource

    def getstatement(self, lineno: int) -> "Source":
        """Return Source statement which contains the given linenumber
        (counted from 0)."""
        start, end = self.getstatementrange(lineno)
        return self[start:end]

    def getstatementrange(self, lineno: int) -> Tuple[int, int]:
        """Return (start, end) tuple which spans the minimal statement region
        which containing the given lineno."""
        if not (0 <= lineno < len(self)):
            raise IndexError("lineno out of range")
        ast, start, end = getstatementrange_ast(lineno, self)
        return start, end

    def deindent(self) -> "Source":
        """Return a new Source object deindented."""
        newsource = Source()
        newsource.lines[:] = deindent(self.lines)
        return newsource

    def __str__(self) -> str:
        return "\n".join(self.lines)


#
# helper functions
#


def findsource(obj) -> Tuple[Optional[Source], int]:
    try:
        sourcelines, lineno = inspect.findsource(obj)
    except Exception:
        return None, -1
    source = Source()
    source.lines = [line.rstrip() for line in sourcelines]
    return source, lineno


def getrawcode(obj: object, trycall: bool = True) -> types.CodeType:
    """Return code object for given function."""
    try:
        return obj.__code__  # type: ignore[attr-defined,no-any-return]
    except AttributeError:
        pass
    if trycall:
        call = getattr(obj, "__call__", None)
        if call and not isinstance(obj, type):
            return getrawcode(call, trycall=False)
    raise TypeError(f"could not get code object for {obj!r}")


def deindent(lines: Iterable[str]) -> List[str]:
    return textwrap.dedent("\n".join(lines)).splitlines()


def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[int]]:
    # Flatten all statements and except handlers into one lineno-list.
    # AST's line numbers start indexing at 1.
    values: List[int] = []
    for x in ast.walk(node):
        if isinstance(x, (ast.stmt, ast.ExceptHandler)):
            # Before Python 3.8, the lineno of a decorated class or function pointed at the decorator.
            # Since Python 3.8, the lineno points to the class/def, so need to include the decorators.
            if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
                for d in x.decorator_list:
                    values.append(d.lineno - 1)
            values.append(x.lineno - 1)
            for name in ("finalbody", "orelse"):
                val: Optional[List[ast.stmt]] = getattr(x, name, None)
                if val:
                    # Treat the finally/orelse part as its own statement.
                    values.append(val[0].lineno - 1 - 1)
    values.sort()
    insert_index = bisect_right(values, lineno)
    start = values[insert_index - 1]
    if insert_index >= len(values):
        end = None
    else:
        end = values[insert_index]
    return start, end


def getstatementrange_ast(
    lineno: int,
    source: Source,
    assertion: bool = False,
    astnode: Optional[ast.AST] = None,
) -> Tuple[ast.AST, int, int]:
    if astnode is None:
        content = str(source)
        # See #4260:
        # Don't produce duplicate warnings when compiling source to find AST.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            astnode = ast.parse(content, "source", "exec")

    start, end = get_statement_startend2(lineno, astnode)
    # We need to correct the end:
    # - ast-parsing strips comments
    # - there might be empty lines
    # - we might have lesser indented code blocks at the end
    if end is None:
        end = len(source.lines)

    if end > start + 1:
        # Make sure we don't span differently indented code blocks
        # by using the BlockFinder helper used which inspect.getsource() uses itself.
        block_finder = inspect.BlockFinder()
        # If we start with an indented line, put blockfinder to "started" mode.
        block_finder.started = source.lines[start][0].isspace()
        it = ((x + "\n") for x in source.lines[start:end])
        try:
            for tok in tokenize.generate_tokens(lambda: next(it)):
                block_finder.tokeneater(*tok)
        except (inspect.EndOfBlock, IndentationError):
            end = block_finder.last + start
        except Exception:
            pass

    # The end might still point to a comment or empty line, correct it.
    while end:
        line = source.lines[end - 1].lstrip()
        if line.startswith("#") or not line:
            end -= 1
        else:
            break
    return astnode, start, end
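A short usage sketch of the Source helper defined above (not part of the diff; behavior inferred from the code itself):

from _pytest._code.source import Source

src = Source("def f():\n    x = 1\n    return x")
print(len(src))                  # 3 lines, deindented on construction
print(str(src.getstatement(2)))  # statement containing line index 2 -> "    return x"
print(str(src.indent()))         # copy with every line indented by four spaces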
@@ -0,0 +1,8 @@
from .terminalwriter import get_terminal_width
from .terminalwriter import TerminalWriter


__all__ = [
    "TerminalWriter",
    "get_terminal_width",
]
4 binary files not shown.
180  venv/lib/python3.11/site-packages/_pytest/_io/saferepr.py  Normal file
@@ -0,0 +1,180 @@
import pprint
import reprlib
from typing import Any
from typing import Dict
from typing import IO
from typing import Optional


def _try_repr_or_str(obj: object) -> str:
    try:
        return repr(obj)
    except (KeyboardInterrupt, SystemExit):
        raise
    except BaseException:
        return f'{type(obj).__name__}("{obj}")'


def _format_repr_exception(exc: BaseException, obj: object) -> str:
    try:
        exc_info = _try_repr_or_str(exc)
    except (KeyboardInterrupt, SystemExit):
        raise
    except BaseException as exc:
        exc_info = f"unpresentable exception ({_try_repr_or_str(exc)})"
    return "<[{} raised in repr()] {} object at 0x{:x}>".format(
        exc_info, type(obj).__name__, id(obj)
    )


def _ellipsize(s: str, maxsize: int) -> str:
    if len(s) > maxsize:
        i = max(0, (maxsize - 3) // 2)
        j = max(0, maxsize - 3 - i)
        return s[:i] + "..." + s[len(s) - j :]
    return s


class SafeRepr(reprlib.Repr):
    """
    repr.Repr that limits the resulting size of repr() and includes
    information on exceptions raised during the call.
    """

    def __init__(self, maxsize: Optional[int], use_ascii: bool = False) -> None:
        """
        :param maxsize:
            If not None, will truncate the resulting repr to that specific size, using ellipsis
            somewhere in the middle to hide the extra text.
            If None, will not impose any size limits on the returning repr.
        """
        super().__init__()
        # ``maxstring`` is used by the superclass, and needs to be an int; using a
        # very large number in case maxsize is None, meaning we want to disable
        # truncation.
        self.maxstring = maxsize if maxsize is not None else 1_000_000_000
        self.maxsize = maxsize
        self.use_ascii = use_ascii

    def repr(self, x: object) -> str:
        try:
            if self.use_ascii:
                s = ascii(x)
            else:
                s = super().repr(x)

        except (KeyboardInterrupt, SystemExit):
            raise
        except BaseException as exc:
            s = _format_repr_exception(exc, x)
        if self.maxsize is not None:
            s = _ellipsize(s, self.maxsize)
        return s

    def repr_instance(self, x: object, level: int) -> str:
        try:
            s = repr(x)
        except (KeyboardInterrupt, SystemExit):
            raise
        except BaseException as exc:
            s = _format_repr_exception(exc, x)
        if self.maxsize is not None:
            s = _ellipsize(s, self.maxsize)
        return s


def safeformat(obj: object) -> str:
    """Return a pretty printed string for the given object.

    Failing __repr__ functions of user instances will be represented
    with a short exception info.
    """
    try:
        return pprint.pformat(obj)
    except Exception as exc:
        return _format_repr_exception(exc, obj)


# Maximum size of overall repr of objects to display during assertion errors.
DEFAULT_REPR_MAX_SIZE = 240


def saferepr(
    obj: object, maxsize: Optional[int] = DEFAULT_REPR_MAX_SIZE, use_ascii: bool = False
) -> str:
    """Return a size-limited safe repr-string for the given object.

    Failing __repr__ functions of user instances will be represented
    with a short exception info and 'saferepr' generally takes
    care to never raise exceptions itself.

    This function is a wrapper around the Repr/reprlib functionality of the
    stdlib.
    """

    return SafeRepr(maxsize, use_ascii).repr(obj)


def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str:
    """Return an unlimited-size safe repr-string for the given object.

    As with saferepr, failing __repr__ functions of user instances
    will be represented with a short exception info.

    This function is a wrapper around simple repr.

    Note: a cleaner solution would be to alter ``saferepr``this way
    when maxsize=None, but that might affect some other code.
    """
    try:
        if use_ascii:
            return ascii(obj)
        return repr(obj)
    except Exception as exc:
        return _format_repr_exception(exc, obj)


class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter):
    """PrettyPrinter that always dispatches (regardless of width)."""

    def _format(
        self,
        object: object,
        stream: IO[str],
        indent: int,
        allowance: int,
        context: Dict[int, Any],
        level: int,
    ) -> None:
        # Type ignored because _dispatch is private.
        p = self._dispatch.get(type(object).__repr__, None)  # type: ignore[attr-defined]

        objid = id(object)
        if objid in context or p is None:
            # Type ignored because _format is private.
            super()._format(  # type: ignore[misc]
                object,
                stream,
                indent,
                allowance,
                context,
                level,
            )
            return

        context[objid] = 1
        p(self, object, stream, indent, allowance, context, level + 1)
        del context[objid]


def _pformat_dispatch(
    object: object,
    indent: int = 1,
    width: int = 80,
    depth: Optional[int] = None,
    *,
    compact: bool = False,
) -> str:
    return AlwaysDispatchingPrettyPrinter(
        indent=indent, width=width, depth=depth, compact=compact
    ).pformat(object)
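A quick sketch (not in this commit) of what SafeRepr/saferepr above guard against, using a deliberately broken __repr__:

from _pytest._io.saferepr import saferepr, DEFAULT_REPR_MAX_SIZE

class Broken:
    def __repr__(self):
        raise ValueError("boom")

print(saferepr(Broken()))           # "<[ValueError('boom') raised in repr()] Broken object at 0x...>"
print(len(saferepr("x" * 10_000)))  # never longer than DEFAULT_REPR_MAX_SIZE (240), ellipsized in the middle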
233  venv/lib/python3.11/site-packages/_pytest/_io/terminalwriter.py  Normal file
@@ -0,0 +1,233 @@
"""Helper functions for writing to terminals and files."""
import os
import shutil
import sys
from typing import Optional
from typing import Sequence
from typing import TextIO

from .wcwidth import wcswidth
from _pytest.compat import final


# This code was initially copied from py 1.8.1, file _io/terminalwriter.py.


def get_terminal_width() -> int:
    width, _ = shutil.get_terminal_size(fallback=(80, 24))

    # The Windows get_terminal_size may be bogus, let's sanify a bit.
    if width < 40:
        width = 80

    return width


def should_do_markup(file: TextIO) -> bool:
    if os.environ.get("PY_COLORS") == "1":
        return True
    if os.environ.get("PY_COLORS") == "0":
        return False
    if "NO_COLOR" in os.environ:
        return False
    if "FORCE_COLOR" in os.environ:
        return True
    return (
        hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb"
    )


@final
class TerminalWriter:
    _esctable = dict(
        black=30,
        red=31,
        green=32,
        yellow=33,
        blue=34,
        purple=35,
        cyan=36,
        white=37,
        Black=40,
        Red=41,
        Green=42,
        Yellow=43,
        Blue=44,
        Purple=45,
        Cyan=46,
        White=47,
        bold=1,
        light=2,
        blink=5,
        invert=7,
    )

    def __init__(self, file: Optional[TextIO] = None) -> None:
        if file is None:
            file = sys.stdout
        if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32":
            try:
                import colorama
            except ImportError:
                pass
            else:
                file = colorama.AnsiToWin32(file).stream
                assert file is not None
        self._file = file
        self.hasmarkup = should_do_markup(file)
        self._current_line = ""
        self._terminal_width: Optional[int] = None
        self.code_highlight = True

    @property
    def fullwidth(self) -> int:
        if self._terminal_width is not None:
            return self._terminal_width
        return get_terminal_width()

    @fullwidth.setter
    def fullwidth(self, value: int) -> None:
        self._terminal_width = value

    @property
    def width_of_current_line(self) -> int:
        """Return an estimate of the width so far in the current line."""
        return wcswidth(self._current_line)

    def markup(self, text: str, **markup: bool) -> str:
        for name in markup:
            if name not in self._esctable:
                raise ValueError(f"unknown markup: {name!r}")
        if self.hasmarkup:
            esc = [self._esctable[name] for name, on in markup.items() if on]
            if esc:
                text = "".join("\x1b[%sm" % cod for cod in esc) + text + "\x1b[0m"
        return text

    def sep(
        self,
        sepchar: str,
        title: Optional[str] = None,
        fullwidth: Optional[int] = None,
        **markup: bool,
    ) -> None:
        if fullwidth is None:
            fullwidth = self.fullwidth
        # The goal is to have the line be as long as possible
        # under the condition that len(line) <= fullwidth.
        if sys.platform == "win32":
            # If we print in the last column on windows we are on a
            # new line but there is no way to verify/neutralize this
            # (we may not know the exact line width).
            # So let's be defensive to avoid empty lines in the output.
            fullwidth -= 1
        if title is not None:
            # we want 2 + 2*len(fill) + len(title) <= fullwidth
            # i.e.    2 + 2*len(sepchar)*N + len(title) <= fullwidth
            #         2*len(sepchar)*N <= fullwidth - len(title) - 2
            #         N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
            N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1)
            fill = sepchar * N
            line = f"{fill} {title} {fill}"
        else:
            # we want len(sepchar)*N <= fullwidth
            # i.e. N <= fullwidth // len(sepchar)
            line = sepchar * (fullwidth // len(sepchar))
        # In some situations there is room for an extra sepchar at the right,
        # in particular if we consider that with a sepchar like "_ " the
        # trailing space is not important at the end of the line.
        if len(line) + len(sepchar.rstrip()) <= fullwidth:
            line += sepchar.rstrip()

        self.line(line, **markup)

    def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None:
        if msg:
            current_line = msg.rsplit("\n", 1)[-1]
            if "\n" in msg:
                self._current_line = current_line
            else:
                self._current_line += current_line

            msg = self.markup(msg, **markup)

            try:
                self._file.write(msg)
            except UnicodeEncodeError:
                # Some environments don't support printing general Unicode
                # strings, due to misconfiguration or otherwise; in that case,
                # print the string escaped to ASCII.
                # When the Unicode situation improves we should consider
                # letting the error propagate instead of masking it (see #7475
                # for one brief attempt).
                msg = msg.encode("unicode-escape").decode("ascii")
                self._file.write(msg)

            if flush:
                self.flush()

    def line(self, s: str = "", **markup: bool) -> None:
        self.write(s, **markup)
        self.write("\n")

    def flush(self) -> None:
        self._file.flush()

    def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None:
        """Write lines of source code possibly highlighted.

        Keeping this private for now because the API is clunky. We should discuss how
        to evolve the terminal writer so we can have more precise color support, for example
        being able to write part of a line in one color and the rest in another, and so on.
        """
        if indents and len(indents) != len(lines):
            raise ValueError(
                "indents size ({}) should have same size as lines ({})".format(
                    len(indents), len(lines)
                )
            )
        if not indents:
            indents = [""] * len(lines)
        source = "\n".join(lines)
        new_lines = self._highlight(source).splitlines()
        for indent, new_line in zip(indents, new_lines):
            self.line(indent + new_line)

    def _highlight(self, source: str) -> str:
        """Highlight the given source code if we have markup support."""
        from _pytest.config.exceptions import UsageError

        if not self.hasmarkup or not self.code_highlight:
            return source
        try:
            from pygments.formatters.terminal import TerminalFormatter
            from pygments.lexers.python import PythonLexer
            from pygments import highlight
            import pygments.util
        except ImportError:
            return source
        else:
            try:
                highlighted: str = highlight(
                    source,
                    PythonLexer(),
                    TerminalFormatter(
                        bg=os.getenv("PYTEST_THEME_MODE", "dark"),
                        style=os.getenv("PYTEST_THEME"),
                    ),
                )
                return highlighted
            except pygments.util.ClassNotFound:
                raise UsageError(
                    "PYTEST_THEME environment variable had an invalid value: '{}'. "
                    "Only valid pygment styles are allowed.".format(
                        os.getenv("PYTEST_THEME")
                    )
                )
            except pygments.util.OptionError:
                raise UsageError(
                    "PYTEST_THEME_MODE environment variable had an invalid value: '{}'. "
                    "The only allowed values are 'dark' and 'light'.".format(
                        os.getenv("PYTEST_THEME_MODE")
                    )
                )
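An illustrative sketch (not part of this commit) of the TerminalWriter API defined above:

import sys
from _pytest._io.terminalwriter import TerminalWriter

tw = TerminalWriter(sys.stdout)
tw.sep("=", "test session starts", bold=True)  # full-width "==== title ====" separator
tw.line("collected 3 items", green=True)       # colored only when markup is enabled
tw.write("done", flush=True)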
55  venv/lib/python3.11/site-packages/_pytest/_io/wcwidth.py  Normal file
@@ -0,0 +1,55 @@
import unicodedata
from functools import lru_cache


@lru_cache(100)
def wcwidth(c: str) -> int:
    """Determine how many columns are needed to display a character in a terminal.

    Returns -1 if the character is not printable.
    Returns 0, 1 or 2 for other characters.
    """
    o = ord(c)

    # ASCII fast path.
    if 0x20 <= o < 0x07F:
        return 1

    # Some Cf/Zp/Zl characters which should be zero-width.
    if (
        o == 0x0000
        or 0x200B <= o <= 0x200F
        or 0x2028 <= o <= 0x202E
        or 0x2060 <= o <= 0x2063
    ):
        return 0

    category = unicodedata.category(c)

    # Control characters.
    if category == "Cc":
        return -1

    # Combining characters with zero width.
    if category in ("Me", "Mn"):
        return 0

    # Full/Wide east asian characters.
    if unicodedata.east_asian_width(c) in ("F", "W"):
        return 2

    return 1


def wcswidth(s: str) -> int:
    """Determine how many columns are needed to display a string in a terminal.

    Returns -1 if the string contains non-printable characters.
    """
    width = 0
    for c in unicodedata.normalize("NFC", s):
        wc = wcwidth(c)
        if wc < 0:
            return -1
        width += wc
    return width
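A quick sketch (not in the diff) of the column-width helpers above:

from _pytest._io.wcwidth import wcswidth, wcwidth

print(wcswidth("hello"))   # 5  -> one column per ASCII character
print(wcswidth("你好"))     # 4  -> east-asian wide characters take two columns each
print(wcswidth("a\tb"))    # -1 -> control characters mark the string as non-printable
print(wcwidth("\u0301"))   # 0  -> combining marks are zero-width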
3 binary files not shown.
109  venv/lib/python3.11/site-packages/_pytest/_py/error.py  Normal file
@@ -0,0 +1,109 @@
"""create errno-specific classes for IO or os calls."""
from __future__ import annotations

import errno
import os
import sys
from typing import Callable
from typing import TYPE_CHECKING
from typing import TypeVar

if TYPE_CHECKING:
    from typing_extensions import ParamSpec

    P = ParamSpec("P")

R = TypeVar("R")


class Error(EnvironmentError):
    def __repr__(self) -> str:
        return "{}.{} {!r}: {} ".format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.__class__.__doc__,
            " ".join(map(str, self.args)),
            # repr(self.args)
        )

    def __str__(self) -> str:
        s = "[{}]: {}".format(
            self.__class__.__doc__,
            " ".join(map(str, self.args)),
        )
        return s


_winerrnomap = {
    2: errno.ENOENT,
    3: errno.ENOENT,
    17: errno.EEXIST,
    18: errno.EXDEV,
    13: errno.EBUSY,  # empty cd drive, but ENOMEDIUM seems unavailiable
    22: errno.ENOTDIR,
    20: errno.ENOTDIR,
    267: errno.ENOTDIR,
    5: errno.EACCES,  # anything better?
}


class ErrorMaker:
    """lazily provides Exception classes for each possible POSIX errno
    (as defined per the 'errno' module). All such instances
    subclass EnvironmentError.
    """

    _errno2class: dict[int, type[Error]] = {}

    def __getattr__(self, name: str) -> type[Error]:
        if name[0] == "_":
            raise AttributeError(name)
        eno = getattr(errno, name)
        cls = self._geterrnoclass(eno)
        setattr(self, name, cls)
        return cls

    def _geterrnoclass(self, eno: int) -> type[Error]:
        try:
            return self._errno2class[eno]
        except KeyError:
            clsname = errno.errorcode.get(eno, "UnknownErrno%d" % (eno,))
            errorcls = type(
                clsname,
                (Error,),
                {"__module__": "py.error", "__doc__": os.strerror(eno)},
            )
            self._errno2class[eno] = errorcls
            return errorcls

    def checked_call(
        self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
    ) -> R:
        """Call a function and raise an errno-exception if applicable."""
        __tracebackhide__ = True
        try:
            return func(*args, **kwargs)
        except Error:
            raise
        except OSError as value:
            if not hasattr(value, "errno"):
                raise
            errno = value.errno
            if sys.platform == "win32":
                try:
                    cls = self._geterrnoclass(_winerrnomap[errno])
                except KeyError:
                    raise value
            else:
                # we are not on Windows, or we got a proper OSError
                cls = self._geterrnoclass(errno)

            raise cls(f"{func.__name__}{args!r}")


_error_maker = ErrorMaker()
checked_call = _error_maker.checked_call


def __getattr__(attr: str) -> type[Error]:
    return getattr(_error_maker, attr)  # type: ignore[no-any-return]
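A brief sketch (not part of this commit) of how the ErrorMaker above is typically used; the missing path is just an example:

import os
from _pytest._py import error

try:
    error.checked_call(os.stat, "/no/such/path")
except error.ENOENT as exc:
    # checked_call re-raises the OSError as the errno-specific Error subclass
    print(type(exc).__name__, exc)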
1475  venv/lib/python3.11/site-packages/_pytest/_py/path.py  Normal file
File diff suppressed because it is too large
4  venv/lib/python3.11/site-packages/_pytest/_version.py  Normal file
@@ -0,0 +1,4 @@
# file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '7.4.0'
__version_tuple__ = version_tuple = (7, 4, 0)
181  venv/lib/python3.11/site-packages/_pytest/assertion/__init__.py  Normal file
@@ -0,0 +1,181 @@
"""Support for presenting detailed information in failing assertions."""
import sys
from typing import Any
from typing import Generator
from typing import List
from typing import Optional
from typing import TYPE_CHECKING

from _pytest.assertion import rewrite
from _pytest.assertion import truncate
from _pytest.assertion import util
from _pytest.assertion.rewrite import assertstate_key
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item

if TYPE_CHECKING:
    from _pytest.main import Session


def pytest_addoption(parser: Parser) -> None:
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--assert",
        action="store",
        dest="assertmode",
        choices=("rewrite", "plain"),
        default="rewrite",
        metavar="MODE",
        help=(
            "Control assertion debugging tools.\n"
            "'plain' performs no assertion debugging.\n"
            "'rewrite' (the default) rewrites assert statements in test modules"
            " on import to provide assert expression information."
        ),
    )
    parser.addini(
        "enable_assertion_pass_hook",
        type="bool",
        default=False,
        help="Enables the pytest_assertion_pass hook. "
        "Make sure to delete any previously generated pyc cache files.",
    )


def register_assert_rewrite(*names: str) -> None:
    """Register one or more module names to be rewritten on import.

    This function will make sure that this module or all modules inside
    the package will get their assert statements rewritten.
    Thus you should make sure to call this before the module is
    actually imported, usually in your __init__.py if you are a plugin
    using a package.

    :param names: The module names to register.
    """
    for name in names:
        if not isinstance(name, str):
            msg = "expected module names as *args, got {0} instead"  # type: ignore[unreachable]
            raise TypeError(msg.format(repr(names)))
    for hook in sys.meta_path:
        if isinstance(hook, rewrite.AssertionRewritingHook):
            importhook = hook
            break
    else:
        # TODO(typing): Add a protocol for mark_rewrite() and use it
        # for importhook and for PytestPluginManager.rewrite_hook.
        importhook = DummyRewriteHook()  # type: ignore
    importhook.mark_rewrite(*names)


class DummyRewriteHook:
    """A no-op import hook for when rewriting is disabled."""

    def mark_rewrite(self, *names: str) -> None:
        pass


class AssertionState:
    """State for the assertion plugin."""

    def __init__(self, config: Config, mode) -> None:
        self.mode = mode
        self.trace = config.trace.root.get("assertion")
        self.hook: Optional[rewrite.AssertionRewritingHook] = None


def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
    """Try to install the rewrite hook, raise SystemError if it fails."""
    config.stash[assertstate_key] = AssertionState(config, "rewrite")
    config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config)
    sys.meta_path.insert(0, hook)
    config.stash[assertstate_key].trace("installed rewrite import hook")

    def undo() -> None:
        hook = config.stash[assertstate_key].hook
        if hook is not None and hook in sys.meta_path:
            sys.meta_path.remove(hook)

    config.add_cleanup(undo)
    return hook


def pytest_collection(session: "Session") -> None:
    # This hook is only called when test modules are collected
    # so for example not in the managing process of pytest-xdist
    # (which does not collect test modules).
    assertstate = session.config.stash.get(assertstate_key, None)
    if assertstate:
        if assertstate.hook is not None:
            assertstate.hook.set_session(session)


@hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
    """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks.

    The rewrite module will use util._reprcompare if it exists to use custom
    reporting via the pytest_assertrepr_compare hook. This sets up this custom
    comparison for the test.
    """

    ihook = item.ihook

    def callbinrepr(op, left: object, right: object) -> Optional[str]:
        """Call the pytest_assertrepr_compare hook and prepare the result.

        This uses the first result from the hook and then ensures the
        following:
        * Overly verbose explanations are truncated unless configured otherwise
          (eg. if running in verbose mode).
        * Embedded newlines are escaped to help util.format_explanation()
          later.
        * If the rewrite mode is used embedded %-characters are replaced
          to protect later % formatting.

        The result can be formatted by util.format_explanation() for
        pretty printing.
        """
        hook_result = ihook.pytest_assertrepr_compare(
            config=item.config, op=op, left=left, right=right
        )
        for new_expl in hook_result:
            if new_expl:
                new_expl = truncate.truncate_if_required(new_expl, item)
                new_expl = [line.replace("\n", "\\n") for line in new_expl]
                res = "\n~".join(new_expl)
                if item.config.getvalue("assertmode") == "rewrite":
                    res = res.replace("%", "%%")
                return res
        return None

    saved_assert_hooks = util._reprcompare, util._assertion_pass
    util._reprcompare = callbinrepr
    util._config = item.config

    if ihook.pytest_assertion_pass.get_hookimpls():

        def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None:
            ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl)

        util._assertion_pass = call_assertion_pass_hook

    yield

    util._reprcompare, util._assertion_pass = saved_assert_hooks
    util._config = None


def pytest_sessionfinish(session: "Session") -> None:
    assertstate = session.config.stash.get(assertstate_key, None)
    if assertstate:
        if assertstate.hook is not None:
            assertstate.hook.set_session(None)


def pytest_assertrepr_compare(
    config: Config, op: str, left: Any, right: Any
) -> Optional[List[str]]:
    return util.assertrepr_compare(config=config, op=op, left=left, right=right)
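For orientation, a hedged sketch (not in this commit) of the public entry point documented in register_assert_rewrite above; "myplugin" is a hypothetical plugin package:

# myplugin/__init__.py
import pytest

# Register before the submodule is imported so its assert statements
# get rewritten and produce detailed failure explanations.
pytest.register_assert_rewrite("myplugin.helpers")

from myplugin import helpers  # noqa: E402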
4 binary files not shown.
1185  venv/lib/python3.11/site-packages/_pytest/assertion/rewrite.py  Normal file
File diff suppressed because it is too large
115  venv/lib/python3.11/site-packages/_pytest/assertion/truncate.py  Normal file
@@ -0,0 +1,115 @@
"""Utilities for truncating assertion output.

Current default behaviour is to truncate assertion explanations at
~8 terminal lines, unless running in "-vv" mode or running on CI.
"""
from typing import List
from typing import Optional

from _pytest.assertion import util
from _pytest.nodes import Item


DEFAULT_MAX_LINES = 8
DEFAULT_MAX_CHARS = 8 * 80
USAGE_MSG = "use '-vv' to show"


def truncate_if_required(
    explanation: List[str], item: Item, max_length: Optional[int] = None
) -> List[str]:
    """Truncate this assertion explanation if the given test item is eligible."""
    if _should_truncate_item(item):
        return _truncate_explanation(explanation)
    return explanation


def _should_truncate_item(item: Item) -> bool:
    """Whether or not this test item is eligible for truncation."""
    verbose = item.config.option.verbose
    return verbose < 2 and not util.running_on_ci()


def _truncate_explanation(
    input_lines: List[str],
    max_lines: Optional[int] = None,
    max_chars: Optional[int] = None,
) -> List[str]:
    """Truncate given list of strings that makes up the assertion explanation.

    Truncates to either 8 lines, or 640 characters - whichever the input reaches
    first, taking the truncation explanation into account. The remaining lines
    will be replaced by a usage message.
    """
    if max_lines is None:
        max_lines = DEFAULT_MAX_LINES
    if max_chars is None:
        max_chars = DEFAULT_MAX_CHARS

    # Check if truncation required
    input_char_count = len("".join(input_lines))
    # The length of the truncation explanation depends on the number of lines
    # removed but is at least 68 characters:
    # The real value is
    # 64 (for the base message:
    # '...\n...Full output truncated (1 line hidden), use '-vv' to show")'
    # )
    # + 1 (for plural)
    # + int(math.log10(len(input_lines) - max_lines)) (number of hidden line, at least 1)
    # + 3 for the '...' added to the truncated line
    # But if there's more than 100 lines it's very likely that we're going to
    # truncate, so we don't need the exact value using log10.
    tolerable_max_chars = (
        max_chars + 70  # 64 + 1 (for plural) + 2 (for '99') + 3 for '...'
    )
    # The truncation explanation add two lines to the output
    tolerable_max_lines = max_lines + 2
    if (
        len(input_lines) <= tolerable_max_lines
        and input_char_count <= tolerable_max_chars
    ):
        return input_lines
    # Truncate first to max_lines, and then truncate to max_chars if necessary
    truncated_explanation = input_lines[:max_lines]
    truncated_char = True
    # We reevaluate the need to truncate chars following removal of some lines
    if len("".join(truncated_explanation)) > tolerable_max_chars:
        truncated_explanation = _truncate_by_char_count(
            truncated_explanation, max_chars
        )
    else:
        truncated_char = False

    truncated_line_count = len(input_lines) - len(truncated_explanation)
    if truncated_explanation[-1]:
        # Add ellipsis and take into account part-truncated final line
        truncated_explanation[-1] = truncated_explanation[-1] + "..."
        if truncated_char:
            # It's possible that we did not remove any char from this line
            truncated_line_count += 1
    else:
        # Add proper ellipsis when we were able to fit a full line exactly
        truncated_explanation[-1] = "..."
    return truncated_explanation + [
        "",
        f"...Full output truncated ({truncated_line_count} line"
        f"{'' if truncated_line_count == 1 else 's'} hidden), {USAGE_MSG}",
    ]


def _truncate_by_char_count(input_lines: List[str], max_chars: int) -> List[str]:
    # Find point at which input length exceeds total allowed length
    iterated_char_count = 0
    for iterated_index, input_line in enumerate(input_lines):
        if iterated_char_count + len(input_line) > max_chars:
            break
        iterated_char_count += len(input_line)

    # Create truncated explanation with modified final line
    truncated_result = input_lines[:iterated_index]
    final_line = input_lines[iterated_index]
    if final_line:
        final_line_truncate_point = max_chars - iterated_char_count
        final_line = final_line[:final_line_truncate_point]
    truncated_result.append(final_line)
    return truncated_result
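A small sketch (not part of the diff) of the truncation policy implemented above, calling the private helper directly:

from _pytest.assertion.truncate import _truncate_explanation

lines = [f"line {i}" for i in range(20)]
out = _truncate_explanation(lines, max_lines=8, max_chars=640)
# Keeps the first 8 lines (the last kept line gets "..." appended), then adds:
#   ""
#   "...Full output truncated (12 lines hidden), use '-vv' to show"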
522
venv/lib/python3.11/site-packages/_pytest/assertion/util.py
Normal file
522
venv/lib/python3.11/site-packages/_pytest/assertion/util.py
Normal file
|
@ -0,0 +1,522 @@
|
|||
"""Utilities for assertion debugging."""
|
||||
import collections.abc
|
||||
import os
|
||||
import pprint
|
||||
from typing import AbstractSet
|
||||
from typing import Any
|
||||
from typing import Callable
|
||||
from typing import Iterable
|
||||
from typing import List
|
||||
from typing import Mapping
|
||||
from typing import Optional
|
||||
from typing import Sequence
|
||||
from unicodedata import normalize
|
||||
|
||||
import _pytest._code
|
||||
from _pytest import outcomes
|
||||
from _pytest._io.saferepr import _pformat_dispatch
|
||||
from _pytest._io.saferepr import saferepr
|
||||
from _pytest._io.saferepr import saferepr_unlimited
|
||||
from _pytest.config import Config
|
||||
|
||||
# The _reprcompare attribute on the util module is used by the new assertion
|
||||
# interpretation code and assertion rewriter to detect this plugin was
|
||||
# loaded and in turn call the hooks defined here as part of the
|
||||
# DebugInterpreter.
|
||||
_reprcompare: Optional[Callable[[str, object, object], Optional[str]]] = None
|
||||
|
||||
# Works similarly as _reprcompare attribute. Is populated with the hook call
|
||||
# when pytest_runtest_setup is called.
|
||||
_assertion_pass: Optional[Callable[[int, str, str], None]] = None
|
||||
|
||||
# Config object which is assigned during pytest_runtest_protocol.
|
||||
_config: Optional[Config] = None
|
||||
|
||||
|
||||
def format_explanation(explanation: str) -> str:
|
||||
r"""Format an explanation.
|
||||
|
||||
Normally all embedded newlines are escaped, however there are
|
||||
three exceptions: \n{, \n} and \n~. The first two are intended
|
||||
cover nested explanations, see function and attribute explanations
|
||||
for examples (.visit_Call(), visit_Attribute()). The last one is
|
||||
for when one explanation needs to span multiple lines, e.g. when
|
||||
displaying diffs.
|
||||
"""
|
||||
lines = _split_explanation(explanation)
|
||||
result = _format_lines(lines)
|
||||
return "\n".join(result)
|
||||
|
||||
|
||||
def _split_explanation(explanation: str) -> List[str]:
|
||||
r"""Return a list of individual lines in the explanation.
|
||||
|
||||
This will return a list of lines split on '\n{', '\n}' and '\n~'.
|
||||
Any other newlines will be escaped and appear in the line as the
|
||||
literal '\n' characters.
|
||||
"""
|
||||
raw_lines = (explanation or "").split("\n")
|
||||
lines = [raw_lines[0]]
|
||||
for values in raw_lines[1:]:
|
||||
if values and values[0] in ["{", "}", "~", ">"]:
|
||||
lines.append(values)
|
||||
else:
|
||||
lines[-1] += "\\n" + values
|
||||
return lines
|
||||
|
||||
|
||||
def _format_lines(lines: Sequence[str]) -> List[str]:
|
||||
"""Format the individual lines.
|
||||
|
||||
This will replace the '{', '}' and '~' characters of our mini formatting
|
||||
language with the proper 'where ...', 'and ...' and ' + ...' text, taking
|
||||
care of indentation along the way.
|
||||
|
||||
Return a list of formatted lines.
|
||||
"""
|
||||
result = list(lines[:1])
|
||||
stack = [0]
|
||||
stackcnt = [0]
|
||||
for line in lines[1:]:
|
||||
if line.startswith("{"):
|
||||
if stackcnt[-1]:
|
||||
s = "and "
|
||||
else:
|
||||
s = "where "
|
||||
stack.append(len(result))
|
||||
stackcnt[-1] += 1
|
||||
stackcnt.append(0)
|
||||
result.append(" +" + " " * (len(stack) - 1) + s + line[1:])
|
||||
elif line.startswith("}"):
|
||||
stack.pop()
|
||||
stackcnt.pop()
|
||||
result[stack[-1]] += line[1:]
|
||||
else:
|
||||
assert line[0] in ["~", ">"]
|
||||
stack[-1] += 1
|
||||
indent = len(stack) if line.startswith("~") else len(stack) - 1
|
||||
result.append(" " * indent + line[1:])
|
||||
assert len(stack) == 1
|
||||
return result
|
||||
|
||||
|
||||
def issequence(x: Any) -> bool:
|
||||
return isinstance(x, collections.abc.Sequence) and not isinstance(x, str)
|
||||
|
||||
|
||||
def istext(x: Any) -> bool:
|
||||
return isinstance(x, str)
|
||||
|
||||
|
||||
def isdict(x: Any) -> bool:
|
||||
return isinstance(x, dict)
|
||||
|
||||
|
||||
def isset(x: Any) -> bool:
|
||||
return isinstance(x, (set, frozenset))
|
||||
|
||||
|
||||
def isnamedtuple(obj: Any) -> bool:
|
||||
return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None
|
||||
|
||||
|
||||
def isdatacls(obj: Any) -> bool:
|
||||
return getattr(obj, "__dataclass_fields__", None) is not None
|
||||
|
||||
|
||||
def isattrs(obj: Any) -> bool:
|
||||
return getattr(obj, "__attrs_attrs__", None) is not None
|
||||
|
||||
|
||||
def isiterable(obj: Any) -> bool:
|
||||
try:
|
||||
iter(obj)
|
||||
return not istext(obj)
|
||||
except TypeError:
|
||||
return False
|
||||
|
||||
|
||||
def has_default_eq(
|
||||
obj: object,
|
||||
) -> bool:
|
||||
"""Check if an instance of an object contains the default eq
|
||||
|
||||
First, we check if the object's __eq__ attribute has __code__,
|
||||
if so, we check the equally of the method code filename (__code__.co_filename)
|
||||
to the default one generated by the dataclass and attr module
|
||||
for dataclasses the default co_filename is <string>, for attrs class, the __eq__ should contain "attrs eq generated"
|
||||
"""
|
||||
# inspired from https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68
|
||||
if hasattr(obj.__eq__, "__code__") and hasattr(obj.__eq__.__code__, "co_filename"):
|
||||
code_filename = obj.__eq__.__code__.co_filename
|
||||
|
||||
if isattrs(obj):
|
||||
return "attrs generated eq" in code_filename
|
||||
|
||||
return code_filename == "<string>" # data class
|
||||
return True
|
||||
|
||||
|
||||
def assertrepr_compare(
|
||||
config, op: str, left: Any, right: Any, use_ascii: bool = False
|
||||
) -> Optional[List[str]]:
|
||||
"""Return specialised explanations for some operators/operands."""
|
||||
verbose = config.getoption("verbose")
|
||||
|
||||
# Strings which normalize equal are often hard to distinguish when printed; use ascii() to make this easier.
|
||||
# See issue #3246.
|
||||
use_ascii = (
|
||||
isinstance(left, str)
|
||||
and isinstance(right, str)
|
||||
and normalize("NFD", left) == normalize("NFD", right)
|
||||
)
|
||||
|
||||
if verbose > 1:
|
||||
left_repr = saferepr_unlimited(left, use_ascii=use_ascii)
|
||||
right_repr = saferepr_unlimited(right, use_ascii=use_ascii)
|
||||
else:
|
||||
# XXX: "15 chars indentation" is wrong
|
||||
# ("E AssertionError: assert "); should use term width.
|
||||
maxsize = (
|
||||
80 - 15 - len(op) - 2
|
||||
) // 2 # 15 chars indentation, 1 space around op
|
||||
|
||||
left_repr = saferepr(left, maxsize=maxsize, use_ascii=use_ascii)
|
||||
right_repr = saferepr(right, maxsize=maxsize, use_ascii=use_ascii)
|
||||
|
||||
summary = f"{left_repr} {op} {right_repr}"
|
||||
|
||||
explanation = None
|
||||
try:
|
||||
if op == "==":
|
||||
explanation = _compare_eq_any(left, right, verbose)
|
||||
elif op == "not in":
|
||||
if istext(left) and istext(right):
|
||||
explanation = _notin_text(left, right, verbose)
|
||||
except outcomes.Exit:
|
||||
raise
|
||||
except Exception:
|
||||
explanation = [
|
||||
"(pytest_assertion plugin: representation of details failed: {}.".format(
|
||||
_pytest._code.ExceptionInfo.from_current()._getreprcrash()
|
||||
),
|
||||
" Probably an object has a faulty __repr__.)",
|
||||
]
|
||||
|
||||
if not explanation:
|
||||
return None
|
||||
|
||||
return [summary] + explanation
|
||||
|
||||
|
||||
def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]:
|
||||
explanation = []
|
||||
if istext(left) and istext(right):
|
||||
explanation = _diff_text(left, right, verbose)
|
||||
else:
|
||||
from _pytest.python_api import ApproxBase
|
||||
|
||||
if isinstance(left, ApproxBase) or isinstance(right, ApproxBase):
|
||||
# Although the common order should be obtained == expected, this ensures both ways
|
||||
approx_side = left if isinstance(left, ApproxBase) else right
|
||||
other_side = right if isinstance(left, ApproxBase) else left
|
||||
|
||||
explanation = approx_side._repr_compare(other_side)
|
||||
elif type(left) == type(right) and (
|
||||
isdatacls(left) or isattrs(left) or isnamedtuple(left)
|
||||
):
|
||||
# Note: unlike dataclasses/attrs, namedtuples compare only the
|
||||
# field values, not the type or field names. But this branch
|
||||
# intentionally only handles the same-type case, which was often
|
||||
# used in older code bases before dataclasses/attrs were available.
|
||||
explanation = _compare_eq_cls(left, right, verbose)
|
||||
elif issequence(left) and issequence(right):
|
||||
explanation = _compare_eq_sequence(left, right, verbose)
|
||||
elif isset(left) and isset(right):
|
||||
explanation = _compare_eq_set(left, right, verbose)
|
||||
elif isdict(left) and isdict(right):
|
||||
explanation = _compare_eq_dict(left, right, verbose)
|
||||
|
||||
if isiterable(left) and isiterable(right):
|
||||
expl = _compare_eq_iterable(left, right, verbose)
|
||||
explanation.extend(expl)
|
||||
|
||||
return explanation
|
||||
|
||||
|
||||
def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
|
||||
"""Return the explanation for the diff between text.
|
||||
|
||||
Unless --verbose is used this will skip leading and trailing
|
||||
characters which are identical to keep the diff minimal.
|
||||
"""
|
||||
from difflib import ndiff
|
||||
|
||||
explanation: List[str] = []
|
||||
|
||||
if verbose < 1:
|
||||
i = 0 # just in case left or right has zero length
|
||||
for i in range(min(len(left), len(right))):
|
||||
if left[i] != right[i]:
|
||||
break
|
||||
if i > 42:
|
||||
i -= 10 # Provide some context
|
||||
explanation = [
|
||||
"Skipping %s identical leading characters in diff, use -v to show" % i
|
||||
]
|
||||
left = left[i:]
|
||||
right = right[i:]
|
||||
if len(left) == len(right):
|
||||
for i in range(len(left)):
|
||||
if left[-i] != right[-i]:
|
||||
break
|
||||
if i > 42:
|
||||
i -= 10 # Provide some context
|
||||
explanation += [
|
||||
"Skipping {} identical trailing "
|
||||
"characters in diff, use -v to show".format(i)
|
||||
]
|
||||
left = left[:-i]
|
||||
right = right[:-i]
|
||||
keepends = True
|
||||
if left.isspace() or right.isspace():
|
||||
left = repr(str(left))
|
||||
right = repr(str(right))
|
||||
explanation += ["Strings contain only whitespace, escaping them using repr()"]
|
||||
# "right" is the expected base against which we compare "left",
|
||||
# see https://github.com/pytest-dev/pytest/issues/3333
|
||||
explanation += [
|
||||
line.strip("\n")
|
||||
for line in ndiff(right.splitlines(keepends), left.splitlines(keepends))
|
||||
]
|
||||
return explanation
|
||||
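# Illustrative sketch (not part of the original pytest module): how the
# ndiff-based text diff above behaves, with "right" used as the expected
# base per pytest issue #3333. The function name below is hypothetical.
def _example_diff_text_behaviour() -> None:
    from difflib import ndiff

    left = "apple pie\n"
    right = "apple tart\n"
    # Mirrors the final step of _diff_text: diff right (expected) against left.
    for line in ndiff(right.splitlines(True), left.splitlines(True)):
        print(line.strip("\n"))
    # Prints roughly:
    #   - apple tart
    #   + apple pie
    # (plus "?" guide lines marking the changed characters)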
|
||||
|
||||
def _surrounding_parens_on_own_lines(lines: List[str]) -> None:
|
||||
"""Move opening/closing parenthesis/bracket to own lines."""
|
||||
opening = lines[0][:1]
|
||||
if opening in ["(", "[", "{"]:
|
||||
lines[0] = " " + lines[0][1:]
|
||||
lines[:] = [opening] + lines
|
||||
closing = lines[-1][-1:]
|
||||
if closing in [")", "]", "}"]:
|
||||
lines[-1] = lines[-1][:-1] + ","
|
||||
lines[:] = lines + [closing]
|
||||
|
||||
|
||||
def _compare_eq_iterable(
|
||||
left: Iterable[Any], right: Iterable[Any], verbose: int = 0
|
||||
) -> List[str]:
|
||||
if verbose <= 0 and not running_on_ci():
|
||||
return ["Use -v to get more diff"]
|
||||
# dynamic import to speedup pytest
|
||||
import difflib
|
||||
|
||||
left_formatting = pprint.pformat(left).splitlines()
|
||||
right_formatting = pprint.pformat(right).splitlines()
|
||||
|
||||
# Re-format for different output lengths.
|
||||
lines_left = len(left_formatting)
|
||||
lines_right = len(right_formatting)
|
||||
if lines_left != lines_right:
|
||||
left_formatting = _pformat_dispatch(left).splitlines()
|
||||
right_formatting = _pformat_dispatch(right).splitlines()
|
||||
|
||||
if lines_left > 1 or lines_right > 1:
|
||||
_surrounding_parens_on_own_lines(left_formatting)
|
||||
_surrounding_parens_on_own_lines(right_formatting)
|
||||
|
||||
explanation = ["Full diff:"]
|
||||
# "right" is the expected base against which we compare "left",
|
||||
# see https://github.com/pytest-dev/pytest/issues/3333
|
||||
explanation.extend(
|
||||
line.rstrip() for line in difflib.ndiff(right_formatting, left_formatting)
|
||||
)
|
||||
return explanation
|
||||
|
||||
|
||||
def _compare_eq_sequence(
|
||||
left: Sequence[Any], right: Sequence[Any], verbose: int = 0
|
||||
) -> List[str]:
|
||||
comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)
|
||||
explanation: List[str] = []
|
||||
len_left = len(left)
|
||||
len_right = len(right)
|
||||
for i in range(min(len_left, len_right)):
|
||||
if left[i] != right[i]:
|
||||
if comparing_bytes:
|
||||
# when comparing bytes, we want to see their ascii representation
|
||||
# instead of their numeric values (#5260)
|
||||
# using a slice gives us the ascii representation:
|
||||
# >>> s = b'foo'
|
||||
# >>> s[0]
|
||||
# 102
|
||||
# >>> s[0:1]
|
||||
# b'f'
|
||||
left_value = left[i : i + 1]
|
||||
right_value = right[i : i + 1]
|
||||
else:
|
||||
left_value = left[i]
|
||||
right_value = right[i]
|
||||
|
||||
explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"]
|
||||
break
|
||||
|
||||
if comparing_bytes:
|
||||
# when comparing bytes, it doesn't help to show the "sides contain one or more
|
||||
# items" longer explanation, so skip it
|
||||
|
||||
return explanation
|
||||
|
||||
len_diff = len_left - len_right
|
||||
if len_diff:
|
||||
if len_diff > 0:
|
||||
dir_with_more = "Left"
|
||||
extra = saferepr(left[len_right])
|
||||
else:
|
||||
len_diff = 0 - len_diff
|
||||
dir_with_more = "Right"
|
||||
extra = saferepr(right[len_left])
|
||||
|
||||
if len_diff == 1:
|
||||
explanation += [f"{dir_with_more} contains one more item: {extra}"]
|
||||
else:
|
||||
explanation += [
|
||||
"%s contains %d more items, first extra item: %s"
|
||||
% (dir_with_more, len_diff, extra)
|
||||
]
|
||||
return explanation
|
||||
|
||||
|
||||
def _compare_eq_set(
|
||||
left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0
|
||||
) -> List[str]:
|
||||
explanation = []
|
||||
diff_left = left - right
|
||||
diff_right = right - left
|
||||
if diff_left:
|
||||
explanation.append("Extra items in the left set:")
|
||||
for item in diff_left:
|
||||
explanation.append(saferepr(item))
|
||||
if diff_right:
|
||||
explanation.append("Extra items in the right set:")
|
||||
for item in diff_right:
|
||||
explanation.append(saferepr(item))
|
||||
return explanation
|
||||
|
||||
|
||||
def _compare_eq_dict(
|
||||
left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0
|
||||
) -> List[str]:
|
||||
explanation: List[str] = []
|
||||
set_left = set(left)
|
||||
set_right = set(right)
|
||||
common = set_left.intersection(set_right)
|
||||
same = {k: left[k] for k in common if left[k] == right[k]}
|
||||
if same and verbose < 2:
|
||||
explanation += ["Omitting %s identical items, use -vv to show" % len(same)]
|
||||
elif same:
|
||||
explanation += ["Common items:"]
|
||||
explanation += pprint.pformat(same).splitlines()
|
||||
diff = {k for k in common if left[k] != right[k]}
|
||||
if diff:
|
||||
explanation += ["Differing items:"]
|
||||
for k in diff:
|
||||
explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})]
|
||||
extra_left = set_left - set_right
|
||||
len_extra_left = len(extra_left)
|
||||
if len_extra_left:
|
||||
explanation.append(
|
||||
"Left contains %d more item%s:"
|
||||
% (len_extra_left, "" if len_extra_left == 1 else "s")
|
||||
)
|
||||
explanation.extend(
|
||||
pprint.pformat({k: left[k] for k in extra_left}).splitlines()
|
||||
)
|
||||
extra_right = set_right - set_left
|
||||
len_extra_right = len(extra_right)
|
||||
if len_extra_right:
|
||||
explanation.append(
|
||||
"Right contains %d more item%s:"
|
||||
% (len_extra_right, "" if len_extra_right == 1 else "s")
|
||||
)
|
||||
explanation.extend(
|
||||
pprint.pformat({k: right[k] for k in extra_right}).splitlines()
|
||||
)
|
||||
return explanation
|
||||
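# Illustrative sketch (not part of the original pytest module): roughly what
# _compare_eq_dict reports for two mappings; calling the private helper
# directly is for illustration only.
def _example_compare_eq_dict() -> None:
    left = {"a": 1, "b": 2, "c": 3}
    right = {"a": 1, "b": 20, "d": 4}
    for line in _compare_eq_dict(left, right, verbose=2):
        print(line)
    # Reports the common item {'a': 1}, the differing item
    # {'b': 2} != {'b': 20}, and one extra key on each side.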
|
||||
|
||||
def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]:
|
||||
if not has_default_eq(left):
|
||||
return []
|
||||
if isdatacls(left):
|
||||
import dataclasses
|
||||
|
||||
all_fields = dataclasses.fields(left)
|
||||
fields_to_check = [info.name for info in all_fields if info.compare]
|
||||
elif isattrs(left):
|
||||
all_fields = left.__attrs_attrs__
|
||||
fields_to_check = [field.name for field in all_fields if getattr(field, "eq")]
|
||||
elif isnamedtuple(left):
|
||||
fields_to_check = left._fields
|
||||
else:
|
||||
assert False
|
||||
|
||||
indent = " "
|
||||
same = []
|
||||
diff = []
|
||||
for field in fields_to_check:
|
||||
if getattr(left, field) == getattr(right, field):
|
||||
same.append(field)
|
||||
else:
|
||||
diff.append(field)
|
||||
|
||||
explanation = []
|
||||
if same or diff:
|
||||
explanation += [""]
|
||||
if same and verbose < 2:
|
||||
explanation.append("Omitting %s identical items, use -vv to show" % len(same))
|
||||
elif same:
|
||||
explanation += ["Matching attributes:"]
|
||||
explanation += pprint.pformat(same).splitlines()
|
||||
if diff:
|
||||
explanation += ["Differing attributes:"]
|
||||
explanation += pprint.pformat(diff).splitlines()
|
||||
for field in diff:
|
||||
field_left = getattr(left, field)
|
||||
field_right = getattr(right, field)
|
||||
explanation += [
|
||||
"",
|
||||
"Drill down into differing attribute %s:" % field,
|
||||
("%s%s: %r != %r") % (indent, field, field_left, field_right),
|
||||
]
|
||||
explanation += [
|
||||
indent + line
|
||||
for line in _compare_eq_any(field_left, field_right, verbose)
|
||||
]
|
||||
return explanation
|
||||
|
||||
|
||||
def _notin_text(term: str, text: str, verbose: int = 0) -> List[str]:
|
||||
index = text.find(term)
|
||||
head = text[:index]
|
||||
tail = text[index + len(term) :]
|
||||
correct_text = head + tail
|
||||
diff = _diff_text(text, correct_text, verbose)
|
||||
newdiff = ["%s is contained here:" % saferepr(term, maxsize=42)]
|
||||
for line in diff:
|
||||
if line.startswith("Skipping"):
|
||||
continue
|
||||
if line.startswith("- "):
|
||||
continue
|
||||
if line.startswith("+ "):
|
||||
newdiff.append(" " + line[2:])
|
||||
else:
|
||||
newdiff.append(line)
|
||||
return newdiff
|
||||
|
||||
|
||||
def running_on_ci() -> bool:
|
||||
"""Check if we're currently running on a CI system."""
|
||||
env_vars = ["CI", "BUILD_NUMBER"]
|
||||
return any(var in os.environ for var in env_vars)
|
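# Illustrative sketch (not part of the original pytest module): the sequence
# comparison above reports the first differing index and any length
# difference, e.g.:
def _example_compare_eq_sequence() -> None:
    lines = _compare_eq_sequence([1, 2, 3], [1, 9, 3, 4], verbose=0)
    print(lines)
    # Roughly: ['At index 1 diff: 2 != 9',
    #           'Right contains one more item: 4']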
598
venv/lib/python3.11/site-packages/_pytest/cacheprovider.py
Normal file
@@ -0,0 +1,598 @@
"""Implementation of the cache provider."""
|
||||
# This plugin was not named "cache" to avoid conflicts with the external
|
||||
# pytest-cache version.
|
||||
import dataclasses
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
from typing import Generator
|
||||
from typing import Iterable
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Set
|
||||
from typing import Union
|
||||
|
||||
from .pathlib import resolve_from_str
|
||||
from .pathlib import rm_rf
|
||||
from .reports import CollectReport
|
||||
from _pytest import nodes
|
||||
from _pytest._io import TerminalWriter
|
||||
from _pytest.compat import final
|
||||
from _pytest.config import Config
|
||||
from _pytest.config import ExitCode
|
||||
from _pytest.config import hookimpl
|
||||
from _pytest.config.argparsing import Parser
|
||||
from _pytest.deprecated import check_ispytest
|
||||
from _pytest.fixtures import fixture
|
||||
from _pytest.fixtures import FixtureRequest
|
||||
from _pytest.main import Session
|
||||
from _pytest.nodes import File
|
||||
from _pytest.python import Package
|
||||
from _pytest.reports import TestReport
|
||||
|
||||
README_CONTENT = """\
|
||||
# pytest cache directory #
|
||||
|
||||
This directory contains data from pytest's cache plugin,
|
||||
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
|
||||
|
||||
**Do not** commit this to version control.
|
||||
|
||||
See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information.
|
||||
"""
|
||||
|
||||
CACHEDIR_TAG_CONTENT = b"""\
|
||||
Signature: 8a477f597d28d172789f06886806bc55
|
||||
# This file is a cache directory tag created by pytest.
|
||||
# For information about cache directory tags, see:
|
||||
# https://bford.info/cachedir/spec.html
|
||||
"""
|
||||
|
||||
|
||||
@final
|
||||
@dataclasses.dataclass
|
||||
class Cache:
|
||||
"""Instance of the `cache` fixture."""
|
||||
|
||||
_cachedir: Path = dataclasses.field(repr=False)
|
||||
_config: Config = dataclasses.field(repr=False)
|
||||
|
||||
# Sub-directory under cache-dir for directories created by `mkdir()`.
|
||||
_CACHE_PREFIX_DIRS = "d"
|
||||
|
||||
# Sub-directory under cache-dir for values created by `set()`.
|
||||
_CACHE_PREFIX_VALUES = "v"
|
||||
|
||||
def __init__(
|
||||
self, cachedir: Path, config: Config, *, _ispytest: bool = False
|
||||
) -> None:
|
||||
check_ispytest(_ispytest)
|
||||
self._cachedir = cachedir
|
||||
self._config = config
|
||||
|
||||
@classmethod
|
||||
def for_config(cls, config: Config, *, _ispytest: bool = False) -> "Cache":
|
||||
"""Create the Cache instance for a Config.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
check_ispytest(_ispytest)
|
||||
cachedir = cls.cache_dir_from_config(config, _ispytest=True)
|
||||
if config.getoption("cacheclear") and cachedir.is_dir():
|
||||
cls.clear_cache(cachedir, _ispytest=True)
|
||||
return cls(cachedir, config, _ispytest=True)
|
||||
|
||||
@classmethod
|
||||
def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None:
|
||||
"""Clear the sub-directories used to hold cached directories and values.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
check_ispytest(_ispytest)
|
||||
for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
|
||||
d = cachedir / prefix
|
||||
if d.is_dir():
|
||||
rm_rf(d)
|
||||
|
||||
@staticmethod
|
||||
def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path:
|
||||
"""Get the path to the cache directory for a Config.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
check_ispytest(_ispytest)
|
||||
return resolve_from_str(config.getini("cache_dir"), config.rootpath)
|
||||
|
||||
def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None:
|
||||
"""Issue a cache warning.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
check_ispytest(_ispytest)
|
||||
import warnings
|
||||
from _pytest.warning_types import PytestCacheWarning
|
||||
|
||||
warnings.warn(
|
||||
PytestCacheWarning(fmt.format(**args) if args else fmt),
|
||||
self._config.hook,
|
||||
stacklevel=3,
|
||||
)
|
||||
|
||||
def mkdir(self, name: str) -> Path:
|
||||
"""Return a directory path object with the given name.
|
||||
|
||||
If the directory does not yet exist, it will be created. You can use
|
||||
it to manage files to e.g. store/retrieve database dumps across test
|
||||
sessions.
|
||||
|
||||
.. versionadded:: 7.0
|
||||
|
||||
:param name:
|
||||
Must be a string not containing a ``/`` separator.
|
||||
Make sure the name contains your plugin or application
|
||||
identifiers to prevent clashes with other cache users.
|
||||
"""
|
||||
path = Path(name)
|
||||
if len(path.parts) > 1:
|
||||
raise ValueError("name is not allowed to contain path separators")
|
||||
res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path)
|
||||
res.mkdir(exist_ok=True, parents=True)
|
||||
return res
|
||||
|
||||
def _getvaluepath(self, key: str) -> Path:
|
||||
return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key))
|
||||
|
||||
def get(self, key: str, default):
|
||||
"""Return the cached value for the given key.
|
||||
|
||||
If no value was yet cached or the value cannot be read, the specified
|
||||
default is returned.
|
||||
|
||||
:param key:
|
||||
Must be a ``/`` separated value. Usually the first
|
||||
name is the name of your plugin or your application.
|
||||
:param default:
|
||||
The value to return in case of a cache-miss or invalid cache value.
|
||||
"""
|
||||
path = self._getvaluepath(key)
|
||||
try:
|
||||
with path.open("r", encoding="UTF-8") as f:
|
||||
return json.load(f)
|
||||
except (ValueError, OSError):
|
||||
return default
|
||||
|
||||
def set(self, key: str, value: object) -> None:
|
||||
"""Save value for the given key.
|
||||
|
||||
:param key:
|
||||
Must be a ``/`` separated value. Usually the first
|
||||
name is the name of your plugin or your application.
|
||||
:param value:
|
||||
Must be of any combination of basic python types,
|
||||
including nested types like lists of dictionaries.
|
||||
"""
|
||||
path = self._getvaluepath(key)
|
||||
try:
|
||||
if path.parent.is_dir():
|
||||
cache_dir_exists_already = True
|
||||
else:
|
||||
cache_dir_exists_already = self._cachedir.exists()
|
||||
path.parent.mkdir(exist_ok=True, parents=True)
|
||||
except OSError as exc:
|
||||
self.warn(
|
||||
f"could not create cache path {path}: {exc}",
|
||||
_ispytest=True,
|
||||
)
|
||||
return
|
||||
if not cache_dir_exists_already:
|
||||
self._ensure_supporting_files()
|
||||
data = json.dumps(value, ensure_ascii=False, indent=2)
|
||||
try:
|
||||
f = path.open("w", encoding="UTF-8")
|
||||
except OSError as exc:
|
||||
self.warn(
|
||||
f"cache could not write path {path}: {exc}",
|
||||
_ispytest=True,
|
||||
)
|
||||
else:
|
||||
with f:
|
||||
f.write(data)
|
||||
|
||||
def _ensure_supporting_files(self) -> None:
|
||||
"""Create supporting files in the cache dir that are not really part of the cache."""
|
||||
readme_path = self._cachedir / "README.md"
|
||||
readme_path.write_text(README_CONTENT, encoding="UTF-8")
|
||||
|
||||
gitignore_path = self._cachedir.joinpath(".gitignore")
|
||||
msg = "# Created by pytest automatically.\n*\n"
|
||||
gitignore_path.write_text(msg, encoding="UTF-8")
|
||||
|
||||
cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
|
||||
cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
|
||||
|
||||
|
||||
class LFPluginCollWrapper:
|
||||
def __init__(self, lfplugin: "LFPlugin") -> None:
|
||||
self.lfplugin = lfplugin
|
||||
self._collected_at_least_one_failure = False
|
||||
|
||||
@hookimpl(hookwrapper=True)
|
||||
def pytest_make_collect_report(self, collector: nodes.Collector):
|
||||
if isinstance(collector, (Session, Package)):
|
||||
out = yield
|
||||
res: CollectReport = out.get_result()
|
||||
|
||||
# Sort any lf-paths to the beginning.
|
||||
lf_paths = self.lfplugin._last_failed_paths
|
||||
|
||||
# Use a stable sort to prioritize last-failed paths.
|
||||
def sort_key(node: Union[nodes.Item, nodes.Collector]) -> bool:
|
||||
# Package.path is the __init__.py file, we need the directory.
|
||||
if isinstance(node, Package):
|
||||
path = node.path.parent
|
||||
else:
|
||||
path = node.path
|
||||
return path in lf_paths
|
||||
|
||||
res.result = sorted(
|
||||
res.result,
|
||||
key=sort_key,
|
||||
reverse=True,
|
||||
)
|
||||
return
|
||||
|
||||
elif isinstance(collector, File):
|
||||
if collector.path in self.lfplugin._last_failed_paths:
|
||||
out = yield
|
||||
res = out.get_result()
|
||||
result = res.result
|
||||
lastfailed = self.lfplugin.lastfailed
|
||||
|
||||
# Only filter with known failures.
|
||||
if not self._collected_at_least_one_failure:
|
||||
if not any(x.nodeid in lastfailed for x in result):
|
||||
return
|
||||
self.lfplugin.config.pluginmanager.register(
|
||||
LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip"
|
||||
)
|
||||
self._collected_at_least_one_failure = True
|
||||
|
||||
session = collector.session
|
||||
result[:] = [
|
||||
x
|
||||
for x in result
|
||||
if x.nodeid in lastfailed
|
||||
# Include any passed arguments (not trivial to filter).
|
||||
or session.isinitpath(x.path)
|
||||
# Keep all sub-collectors.
|
||||
or isinstance(x, nodes.Collector)
|
||||
]
|
||||
return
|
||||
yield
|
||||
|
||||
|
||||
class LFPluginCollSkipfiles:
|
||||
def __init__(self, lfplugin: "LFPlugin") -> None:
|
||||
self.lfplugin = lfplugin
|
||||
|
||||
@hookimpl
|
||||
def pytest_make_collect_report(
|
||||
self, collector: nodes.Collector
|
||||
) -> Optional[CollectReport]:
|
||||
# Packages are Files, but we only want to skip test-bearing Files,
|
||||
# so don't filter Packages.
|
||||
if isinstance(collector, File) and not isinstance(collector, Package):
|
||||
if collector.path not in self.lfplugin._last_failed_paths:
|
||||
self.lfplugin._skipped_files += 1
|
||||
|
||||
return CollectReport(
|
||||
collector.nodeid, "passed", longrepr=None, result=[]
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
class LFPlugin:
|
||||
"""Plugin which implements the --lf (run last-failing) option."""
|
||||
|
||||
def __init__(self, config: Config) -> None:
|
||||
self.config = config
|
||||
active_keys = "lf", "failedfirst"
|
||||
self.active = any(config.getoption(key) for key in active_keys)
|
||||
assert config.cache
|
||||
self.lastfailed: Dict[str, bool] = config.cache.get("cache/lastfailed", {})
|
||||
self._previously_failed_count: Optional[int] = None
|
||||
self._report_status: Optional[str] = None
|
||||
self._skipped_files = 0 # count skipped files during collection due to --lf
|
||||
|
||||
if config.getoption("lf"):
|
||||
self._last_failed_paths = self.get_last_failed_paths()
|
||||
config.pluginmanager.register(
|
||||
LFPluginCollWrapper(self), "lfplugin-collwrapper"
|
||||
)
|
||||
|
||||
def get_last_failed_paths(self) -> Set[Path]:
|
||||
"""Return a set with all Paths of the previously failed nodeids and
|
||||
their parents."""
|
||||
rootpath = self.config.rootpath
|
||||
result = set()
|
||||
for nodeid in self.lastfailed:
|
||||
path = rootpath / nodeid.split("::")[0]
|
||||
result.add(path)
|
||||
result.update(path.parents)
|
||||
return {x for x in result if x.exists()}
|
||||
|
||||
def pytest_report_collectionfinish(self) -> Optional[str]:
|
||||
if self.active and self.config.getoption("verbose") >= 0:
|
||||
return "run-last-failure: %s" % self._report_status
|
||||
return None
|
||||
|
||||
def pytest_runtest_logreport(self, report: TestReport) -> None:
|
||||
if (report.when == "call" and report.passed) or report.skipped:
|
||||
self.lastfailed.pop(report.nodeid, None)
|
||||
elif report.failed:
|
||||
self.lastfailed[report.nodeid] = True
|
||||
|
||||
def pytest_collectreport(self, report: CollectReport) -> None:
|
||||
passed = report.outcome in ("passed", "skipped")
|
||||
if passed:
|
||||
if report.nodeid in self.lastfailed:
|
||||
self.lastfailed.pop(report.nodeid)
|
||||
self.lastfailed.update((item.nodeid, True) for item in report.result)
|
||||
else:
|
||||
self.lastfailed[report.nodeid] = True
|
||||
|
||||
@hookimpl(hookwrapper=True, tryfirst=True)
|
||||
def pytest_collection_modifyitems(
|
||||
self, config: Config, items: List[nodes.Item]
|
||||
) -> Generator[None, None, None]:
|
||||
yield
|
||||
|
||||
if not self.active:
|
||||
return
|
||||
|
||||
if self.lastfailed:
|
||||
previously_failed = []
|
||||
previously_passed = []
|
||||
for item in items:
|
||||
if item.nodeid in self.lastfailed:
|
||||
previously_failed.append(item)
|
||||
else:
|
||||
previously_passed.append(item)
|
||||
self._previously_failed_count = len(previously_failed)
|
||||
|
||||
if not previously_failed:
|
||||
# Running a subset of all tests, with all recorded failures
# lying outside that subset.
|
||||
self._report_status = "%d known failures not in selected tests" % (
|
||||
len(self.lastfailed),
|
||||
)
|
||||
else:
|
||||
if self.config.getoption("lf"):
|
||||
items[:] = previously_failed
|
||||
config.hook.pytest_deselected(items=previously_passed)
|
||||
else: # --failedfirst
|
||||
items[:] = previously_failed + previously_passed
|
||||
|
||||
noun = "failure" if self._previously_failed_count == 1 else "failures"
|
||||
suffix = " first" if self.config.getoption("failedfirst") else ""
|
||||
self._report_status = "rerun previous {count} {noun}{suffix}".format(
|
||||
count=self._previously_failed_count, suffix=suffix, noun=noun
|
||||
)
|
||||
|
||||
if self._skipped_files > 0:
|
||||
files_noun = "file" if self._skipped_files == 1 else "files"
|
||||
self._report_status += " (skipped {files} {files_noun})".format(
|
||||
files=self._skipped_files, files_noun=files_noun
|
||||
)
|
||||
else:
|
||||
self._report_status = "no previously failed tests, "
|
||||
if self.config.getoption("last_failed_no_failures") == "none":
|
||||
self._report_status += "deselecting all items."
|
||||
config.hook.pytest_deselected(items=items[:])
|
||||
items[:] = []
|
||||
else:
|
||||
self._report_status += "not deselecting items."
|
||||
|
||||
def pytest_sessionfinish(self, session: Session) -> None:
|
||||
config = self.config
|
||||
if config.getoption("cacheshow") or hasattr(config, "workerinput"):
|
||||
return
|
||||
|
||||
assert config.cache is not None
|
||||
saved_lastfailed = config.cache.get("cache/lastfailed", {})
|
||||
if saved_lastfailed != self.lastfailed:
|
||||
config.cache.set("cache/lastfailed", self.lastfailed)
|
||||
|
||||
|
||||
class NFPlugin:
|
||||
"""Plugin which implements the --nf (run new-first) option."""
|
||||
|
||||
def __init__(self, config: Config) -> None:
|
||||
self.config = config
|
||||
self.active = config.option.newfirst
|
||||
assert config.cache is not None
|
||||
self.cached_nodeids = set(config.cache.get("cache/nodeids", []))
|
||||
|
||||
@hookimpl(hookwrapper=True, tryfirst=True)
|
||||
def pytest_collection_modifyitems(
|
||||
self, items: List[nodes.Item]
|
||||
) -> Generator[None, None, None]:
|
||||
yield
|
||||
|
||||
if self.active:
|
||||
new_items: Dict[str, nodes.Item] = {}
|
||||
other_items: Dict[str, nodes.Item] = {}
|
||||
for item in items:
|
||||
if item.nodeid not in self.cached_nodeids:
|
||||
new_items[item.nodeid] = item
|
||||
else:
|
||||
other_items[item.nodeid] = item
|
||||
|
||||
items[:] = self._get_increasing_order(
|
||||
new_items.values()
|
||||
) + self._get_increasing_order(other_items.values())
|
||||
self.cached_nodeids.update(new_items)
|
||||
else:
|
||||
self.cached_nodeids.update(item.nodeid for item in items)
|
||||
|
||||
def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]:
|
||||
return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) # type: ignore[no-any-return]
|
||||
|
||||
def pytest_sessionfinish(self) -> None:
|
||||
config = self.config
|
||||
if config.getoption("cacheshow") or hasattr(config, "workerinput"):
|
||||
return
|
||||
|
||||
if config.getoption("collectonly"):
|
||||
return
|
||||
|
||||
assert config.cache is not None
|
||||
config.cache.set("cache/nodeids", sorted(self.cached_nodeids))
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser) -> None:
|
||||
group = parser.getgroup("general")
|
||||
group.addoption(
|
||||
"--lf",
|
||||
"--last-failed",
|
||||
action="store_true",
|
||||
dest="lf",
|
||||
help="Rerun only the tests that failed "
|
||||
"at the last run (or all if none failed)",
|
||||
)
|
||||
group.addoption(
|
||||
"--ff",
|
||||
"--failed-first",
|
||||
action="store_true",
|
||||
dest="failedfirst",
|
||||
help="Run all tests, but run the last failures first. "
|
||||
"This may re-order tests and thus lead to "
|
||||
"repeated fixture setup/teardown.",
|
||||
)
|
||||
group.addoption(
|
||||
"--nf",
|
||||
"--new-first",
|
||||
action="store_true",
|
||||
dest="newfirst",
|
||||
help="Run tests from new files first, then the rest of the tests "
|
||||
"sorted by file mtime",
|
||||
)
|
||||
group.addoption(
|
||||
"--cache-show",
|
||||
action="append",
|
||||
nargs="?",
|
||||
dest="cacheshow",
|
||||
help=(
|
||||
"Show cache contents, don't perform collection or tests. "
|
||||
"Optional argument: glob (default: '*')."
|
||||
),
|
||||
)
|
||||
group.addoption(
|
||||
"--cache-clear",
|
||||
action="store_true",
|
||||
dest="cacheclear",
|
||||
help="Remove all cache contents at start of test run",
|
||||
)
|
||||
cache_dir_default = ".pytest_cache"
|
||||
if "TOX_ENV_DIR" in os.environ:
|
||||
cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
|
||||
parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path")
|
||||
group.addoption(
|
||||
"--lfnf",
|
||||
"--last-failed-no-failures",
|
||||
action="store",
|
||||
dest="last_failed_no_failures",
|
||||
choices=("all", "none"),
|
||||
default="all",
|
||||
help="Which tests to run with no previously (known) failures",
|
||||
)
|
||||
|
||||
|
||||
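# Illustrative sketch (not part of the original pytest module): typical
# command lines for the options registered above.
#
#   pytest --lf            # rerun only the tests that failed last time
#   pytest --ff            # run everything, but failures first
#   pytest --nf            # run tests from new files first
#   pytest --cache-show    # dump cached values and directories, no test run
#   pytest --cache-clear   # wipe the cache at the start of the run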
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
|
||||
if config.option.cacheshow and not config.option.help:
|
||||
from _pytest.main import wrap_session
|
||||
|
||||
return wrap_session(config, cacheshow)
|
||||
return None
|
||||
|
||||
|
||||
@hookimpl(tryfirst=True)
|
||||
def pytest_configure(config: Config) -> None:
|
||||
config.cache = Cache.for_config(config, _ispytest=True)
|
||||
config.pluginmanager.register(LFPlugin(config), "lfplugin")
|
||||
config.pluginmanager.register(NFPlugin(config), "nfplugin")
|
||||
|
||||
|
||||
@fixture
|
||||
def cache(request: FixtureRequest) -> Cache:
|
||||
"""Return a cache object that can persist state between testing sessions.
|
||||
|
||||
cache.get(key, default)
|
||||
cache.set(key, value)
|
||||
|
||||
Keys must be ``/`` separated strings, where the first part is usually the
|
||||
name of your plugin or application to avoid clashes with other cache users.
|
||||
|
||||
Values can be any object handled by the json stdlib module.
|
||||
"""
|
||||
assert request.config.cache is not None
|
||||
return request.config.cache
|
||||
|
||||
|
||||
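# Illustrative sketch (not part of the original pytest module): a test using
# the `cache` fixture documented above; the key name is hypothetical.
def _example_test_uses_cache(cache) -> None:
    runs = cache.get("example_plugin/run_count", 0)
    cache.set("example_plugin/run_count", runs + 1)
    assert isinstance(runs, int)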
def pytest_report_header(config: Config) -> Optional[str]:
|
||||
"""Display cachedir with --cache-show and if non-default."""
|
||||
if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache":
|
||||
assert config.cache is not None
|
||||
cachedir = config.cache._cachedir
|
||||
# TODO: evaluate generating upward relative paths
|
||||
# starting with .., ../.. if sensible
|
||||
|
||||
try:
|
||||
displaypath = cachedir.relative_to(config.rootpath)
|
||||
except ValueError:
|
||||
displaypath = cachedir
|
||||
return f"cachedir: {displaypath}"
|
||||
return None
|
||||
|
||||
|
||||
def cacheshow(config: Config, session: Session) -> int:
|
||||
from pprint import pformat
|
||||
|
||||
assert config.cache is not None
|
||||
|
||||
tw = TerminalWriter()
|
||||
tw.line("cachedir: " + str(config.cache._cachedir))
|
||||
if not config.cache._cachedir.is_dir():
|
||||
tw.line("cache is empty")
|
||||
return 0
|
||||
|
||||
glob = config.option.cacheshow[0]
|
||||
if glob is None:
|
||||
glob = "*"
|
||||
|
||||
dummy = object()
|
||||
basedir = config.cache._cachedir
|
||||
vdir = basedir / Cache._CACHE_PREFIX_VALUES
|
||||
tw.sep("-", "cache values for %r" % glob)
|
||||
for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()):
|
||||
key = str(valpath.relative_to(vdir))
|
||||
val = config.cache.get(key, dummy)
|
||||
if val is dummy:
|
||||
tw.line("%s contains unreadable content, will be ignored" % key)
|
||||
else:
|
||||
tw.line("%s contains:" % key)
|
||||
for line in pformat(val).splitlines():
|
||||
tw.line(" " + line)
|
||||
|
||||
ddir = basedir / Cache._CACHE_PREFIX_DIRS
|
||||
if ddir.is_dir():
|
||||
contents = sorted(ddir.rglob(glob))
|
||||
tw.sep("-", "cache directories for %r" % glob)
|
||||
for p in contents:
|
||||
# if p.is_dir():
|
||||
# print("%s/" % p.relative_to(basedir))
|
||||
if p.is_file():
|
||||
key = str(p.relative_to(basedir))
|
||||
tw.line(f"{key} is a file of length {p.stat().st_size:d}")
|
||||
return 0
|
1082
venv/lib/python3.11/site-packages/_pytest/capture.py
Normal file
File diff suppressed because it is too large
426
venv/lib/python3.11/site-packages/_pytest/compat.py
Normal file
@@ -0,0 +1,426 @@
"""Python version compatibility code."""
|
||||
from __future__ import annotations
|
||||
|
||||
import dataclasses
|
||||
import enum
|
||||
import functools
|
||||
import inspect
|
||||
import os
|
||||
import sys
|
||||
from inspect import Parameter
|
||||
from inspect import signature
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import Callable
|
||||
from typing import Generic
|
||||
from typing import NoReturn
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import TypeVar
|
||||
|
||||
import py
|
||||
|
||||
# fmt: off
|
||||
# Workaround for https://github.com/sphinx-doc/sphinx/issues/10351.
|
||||
# If `overload` is imported from `compat` instead of from `typing`,
|
||||
# Sphinx doesn't recognize it as `overload` and the API docs for
|
||||
# overloaded functions look good again. But type checkers handle
|
||||
# it fine.
|
||||
# fmt: on
|
||||
if True:
|
||||
from typing import overload as overload
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing_extensions import Final
|
||||
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_S = TypeVar("_S")
|
||||
|
||||
#: Constant to prepare for replacing pylib paths with lazy proxies later on.
|
||||
# intended for removal in pytest 8.0 or 9.0
|
||||
|
||||
# fmt: off
|
||||
# intentional space to create a fake difference for the verification
|
||||
LEGACY_PATH = py.path. local
|
||||
# fmt: on
|
||||
|
||||
|
||||
def legacy_path(path: str | os.PathLike[str]) -> LEGACY_PATH:
|
||||
"""Internal wrapper to prepare lazy proxies for legacy_path instances"""
|
||||
return LEGACY_PATH(path)
|
||||
|
||||
|
||||
# fmt: off
|
||||
# Singleton type for NOTSET, as described in:
|
||||
# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
|
||||
class NotSetType(enum.Enum):
|
||||
token = 0
|
||||
NOTSET: Final = NotSetType.token # noqa: E305
|
||||
# fmt: on
|
||||
|
||||
if sys.version_info >= (3, 8):
|
||||
import importlib.metadata
|
||||
|
||||
importlib_metadata = importlib.metadata
|
||||
else:
|
||||
import importlib_metadata as importlib_metadata # noqa: F401
|
||||
|
||||
|
||||
def _format_args(func: Callable[..., Any]) -> str:
|
||||
return str(signature(func))
|
||||
|
||||
|
||||
def is_generator(func: object) -> bool:
|
||||
genfunc = inspect.isgeneratorfunction(func)
|
||||
return genfunc and not iscoroutinefunction(func)
|
||||
|
||||
|
||||
def iscoroutinefunction(func: object) -> bool:
|
||||
"""Return True if func is a coroutine function (a function defined with async
|
||||
def syntax, and doesn't contain yield), or a function decorated with
|
||||
@asyncio.coroutine.
|
||||
|
||||
Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
|
||||
importing asyncio directly, which in turn also initializes the "logging"
|
||||
module as a side-effect (see issue #8).
|
||||
"""
|
||||
return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
|
||||
|
||||
|
||||
def is_async_function(func: object) -> bool:
|
||||
"""Return True if the given function seems to be an async function or
|
||||
an async generator."""
|
||||
return iscoroutinefunction(func) or inspect.isasyncgenfunction(func)
|
||||
|
||||
|
||||
def getlocation(function, curdir: str | None = None) -> str:
|
||||
function = get_real_func(function)
|
||||
fn = Path(inspect.getfile(function))
|
||||
lineno = function.__code__.co_firstlineno
|
||||
if curdir is not None:
|
||||
try:
|
||||
relfn = fn.relative_to(curdir)
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
return "%s:%d" % (relfn, lineno + 1)
|
||||
return "%s:%d" % (fn, lineno + 1)
|
||||
|
||||
|
||||
def num_mock_patch_args(function) -> int:
|
||||
"""Return number of arguments used up by mock arguments (if any)."""
|
||||
patchings = getattr(function, "patchings", None)
|
||||
if not patchings:
|
||||
return 0
|
||||
|
||||
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
|
||||
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
|
||||
|
||||
return len(
|
||||
[
|
||||
p
|
||||
for p in patchings
|
||||
if not p.attribute_name
|
||||
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def getfuncargnames(
|
||||
function: Callable[..., Any],
|
||||
*,
|
||||
name: str = "",
|
||||
is_method: bool = False,
|
||||
cls: type | None = None,
|
||||
) -> tuple[str, ...]:
|
||||
"""Return the names of a function's mandatory arguments.
|
||||
|
||||
Should return the names of all function arguments that:
|
||||
* Aren't bound to an instance or type as in instance or class methods.
|
||||
* Don't have default values.
|
||||
* Aren't bound with functools.partial.
|
||||
* Aren't replaced with mocks.
|
||||
|
||||
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it is not one, except when
cls is given and the function is actually a static method.
|
||||
|
||||
The name parameter should be the original name in which the function was collected.
|
||||
"""
|
||||
# TODO(RonnyPfannschmidt): This function should be refactored when we
|
||||
# revisit fixtures. The fixture mechanism should ask the node for
|
||||
# the fixture names, and not try to obtain directly from the
|
||||
# function object well after collection has occurred.
|
||||
|
||||
# The parameters attribute of a Signature object contains an
|
||||
# ordered mapping of parameter names to Parameter instances. This
|
||||
# creates a tuple of the names of the parameters that don't have
|
||||
# defaults.
|
||||
try:
|
||||
parameters = signature(function).parameters
|
||||
except (ValueError, TypeError) as e:
|
||||
from _pytest.outcomes import fail
|
||||
|
||||
fail(
|
||||
f"Could not determine arguments of {function!r}: {e}",
|
||||
pytrace=False,
|
||||
)
|
||||
|
||||
arg_names = tuple(
|
||||
p.name
|
||||
for p in parameters.values()
|
||||
if (
|
||||
p.kind is Parameter.POSITIONAL_OR_KEYWORD
|
||||
or p.kind is Parameter.KEYWORD_ONLY
|
||||
)
|
||||
and p.default is Parameter.empty
|
||||
)
|
||||
if not name:
|
||||
name = function.__name__
|
||||
|
||||
# If this function should be treated as a bound method even though
|
||||
# it's passed as an unbound method or function, remove the first
|
||||
# parameter name.
|
||||
if is_method or (
|
||||
# Not using `getattr` because we don't want to resolve the staticmethod.
|
||||
# Not using `cls.__dict__` because we want to check the entire MRO.
|
||||
cls
|
||||
and not isinstance(
|
||||
inspect.getattr_static(cls, name, default=None), staticmethod
|
||||
)
|
||||
):
|
||||
arg_names = arg_names[1:]
|
||||
# Remove any names that will be replaced with mocks.
|
||||
if hasattr(function, "__wrapped__"):
|
||||
arg_names = arg_names[num_mock_patch_args(function) :]
|
||||
return arg_names
|
||||
|
||||
|
||||
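# Illustrative sketch (not part of the original pytest module): what
# getfuncargnames (and get_default_arg_names below) return for a plain
# function; the sample function is hypothetical.
def _example_getfuncargnames() -> None:
    def fixture_like(request, tmp_path, scope="function"):
        pass

    # Only mandatory (no-default) positional/keyword parameters are returned.
    assert getfuncargnames(fixture_like) == ("request", "tmp_path")
    assert get_default_arg_names(fixture_like) == ("scope",)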
def get_default_arg_names(function: Callable[..., Any]) -> tuple[str, ...]:
|
||||
# Note: this code intentionally mirrors the code at the beginning of
|
||||
# getfuncargnames, to get the arguments which were excluded from its result
|
||||
# because they had default values.
|
||||
return tuple(
|
||||
p.name
|
||||
for p in signature(function).parameters.values()
|
||||
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
|
||||
and p.default is not Parameter.empty
|
||||
)
|
||||
|
||||
|
||||
_non_printable_ascii_translate_table = {
|
||||
i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127)
|
||||
}
|
||||
_non_printable_ascii_translate_table.update(
|
||||
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
|
||||
)
|
||||
|
||||
|
||||
def _translate_non_printable(s: str) -> str:
|
||||
return s.translate(_non_printable_ascii_translate_table)
|
||||
|
||||
|
||||
STRING_TYPES = bytes, str
|
||||
|
||||
|
||||
def _bytes_to_ascii(val: bytes) -> str:
|
||||
return val.decode("ascii", "backslashreplace")
|
||||
|
||||
|
||||
def ascii_escaped(val: bytes | str) -> str:
|
||||
r"""If val is pure ASCII, return it as an str, otherwise, escape
|
||||
bytes objects into a sequence of escaped bytes:
|
||||
|
||||
b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'
|
||||
|
||||
and escapes unicode objects into a sequence of escaped unicode
|
||||
ids, e.g.:
|
||||
|
||||
r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
|
||||
|
||||
Note:
|
||||
The obvious "v.decode('unicode-escape')" will return
|
||||
valid UTF-8 unicode if it finds them in bytes, but we
|
||||
want to return escaped bytes for any byte, even if they match
|
||||
a UTF-8 string.
|
||||
"""
|
||||
if isinstance(val, bytes):
|
||||
ret = _bytes_to_ascii(val)
|
||||
else:
|
||||
ret = val.encode("unicode_escape").decode("ascii")
|
||||
return _translate_non_printable(ret)
|
||||
|
||||
|
||||
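# Illustrative sketch (not part of the original pytest module): the escaping
# behaviour documented above.
def _example_ascii_escaped() -> None:
    assert ascii_escaped(b"\xc3\xb4") == "\\xc3\\xb4"
    assert ascii_escaped("grün\n") == "gr\\xfcn\\n"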
@dataclasses.dataclass
|
||||
class _PytestWrapper:
|
||||
"""Dummy wrapper around a function object for internal use only.
|
||||
|
||||
Used to correctly unwrap the underlying function object when we are
|
||||
creating fixtures, because we wrap the function object ourselves with a
|
||||
decorator to issue warnings when the fixture function is called directly.
|
||||
"""
|
||||
|
||||
obj: Any
|
||||
|
||||
|
||||
def get_real_func(obj):
|
||||
"""Get the real function object of the (possibly) wrapped object by
|
||||
functools.wraps or functools.partial."""
|
||||
start_obj = obj
|
||||
for i in range(100):
|
||||
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
|
||||
# to trigger a warning if it gets called directly instead of by pytest: we don't
|
||||
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
|
||||
new_obj = getattr(obj, "__pytest_wrapped__", None)
|
||||
if isinstance(new_obj, _PytestWrapper):
|
||||
obj = new_obj.obj
|
||||
break
|
||||
new_obj = getattr(obj, "__wrapped__", None)
|
||||
if new_obj is None:
|
||||
break
|
||||
obj = new_obj
|
||||
else:
|
||||
from _pytest._io.saferepr import saferepr
|
||||
|
||||
raise ValueError(
|
||||
("could not find real function of {start}\nstopped at {current}").format(
|
||||
start=saferepr(start_obj), current=saferepr(obj)
|
||||
)
|
||||
)
|
||||
if isinstance(obj, functools.partial):
|
||||
obj = obj.func
|
||||
return obj
|
||||
|
||||
|
||||
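# Illustrative sketch (not part of the original pytest module): get_real_func
# unwraps functools.wraps and functools.partial layers.
def _example_get_real_func() -> None:
    def original():
        return 1

    @functools.wraps(original)
    def wrapper():
        return original()

    assert get_real_func(wrapper) is original
    assert get_real_func(functools.partial(original)) is original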
def get_real_method(obj, holder):
|
||||
"""Attempt to obtain the real function object that might be wrapping
|
||||
``obj``, while at the same time returning a bound method to ``holder`` if
|
||||
the original object was a bound method."""
|
||||
try:
|
||||
is_method = hasattr(obj, "__func__")
|
||||
obj = get_real_func(obj)
|
||||
except Exception: # pragma: no cover
|
||||
return obj
|
||||
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
|
||||
obj = obj.__get__(holder)
|
||||
return obj
|
||||
|
||||
|
||||
def getimfunc(func):
|
||||
try:
|
||||
return func.__func__
|
||||
except AttributeError:
|
||||
return func
|
||||
|
||||
|
||||
def safe_getattr(object: Any, name: str, default: Any) -> Any:
|
||||
"""Like getattr but return default upon any Exception or any OutcomeException.
|
||||
|
||||
Attribute access can potentially fail for 'evil' Python objects.
|
||||
See issue #214.
|
||||
It catches OutcomeException because of #2490 (issue #580), new outcomes
|
||||
are derived from BaseException instead of Exception (for more details
|
||||
check #2707).
|
||||
"""
|
||||
from _pytest.outcomes import TEST_OUTCOME
|
||||
|
||||
try:
|
||||
return getattr(object, name, default)
|
||||
except TEST_OUTCOME:
|
||||
return default
|
||||
|
||||
|
||||
def safe_isclass(obj: object) -> bool:
|
||||
"""Ignore any exception via isinstance on Python 3."""
|
||||
try:
|
||||
return inspect.isclass(obj)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
if sys.version_info >= (3, 8):
|
||||
from typing import final as final
|
||||
else:
|
||||
from typing_extensions import final as final
|
||||
elif sys.version_info >= (3, 8):
|
||||
from typing import final as final
|
||||
else:
|
||||
|
||||
def final(f):
|
||||
return f
|
||||
|
||||
|
||||
if sys.version_info >= (3, 8):
|
||||
from functools import cached_property as cached_property
|
||||
else:
|
||||
|
||||
class cached_property(Generic[_S, _T]):
|
||||
__slots__ = ("func", "__doc__")
|
||||
|
||||
def __init__(self, func: Callable[[_S], _T]) -> None:
|
||||
self.func = func
|
||||
self.__doc__ = func.__doc__
|
||||
|
||||
@overload
|
||||
def __get__(
|
||||
self, instance: None, owner: type[_S] | None = ...
|
||||
) -> cached_property[_S, _T]:
|
||||
...
|
||||
|
||||
@overload
|
||||
def __get__(self, instance: _S, owner: type[_S] | None = ...) -> _T:
|
||||
...
|
||||
|
||||
def __get__(self, instance, owner=None):
|
||||
if instance is None:
|
||||
return self
|
||||
value = instance.__dict__[self.func.__name__] = self.func(instance)
|
||||
return value
|
||||
|
||||
|
||||
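# Illustrative sketch (not part of the original pytest module): cached_property
# computes the value once per instance and stores it in the instance __dict__,
# matching functools.cached_property on newer Pythons.
def _example_cached_property() -> None:
    class Widget:
        calls = 0

        @cached_property
        def expensive(self) -> int:
            type(self).calls += 1
            return 42

    w = Widget()
    assert w.expensive == 42 and w.expensive == 42
    assert Widget.calls == 1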
def get_user_id() -> int | None:
|
||||
"""Return the current user id, or None if we cannot get it reliably on the current platform."""
|
||||
# win32 does not have a getuid() function.
|
||||
# On Emscripten, getuid() is a stub that always returns 0.
|
||||
if sys.platform in ("win32", "emscripten"):
|
||||
return None
|
||||
# getuid shouldn't fail, but cpython defines such a case.
|
||||
# Let's hope for the best.
|
||||
uid = os.getuid()
|
||||
return uid if uid != -1 else None
|
||||
|
||||
|
||||
# Perform exhaustiveness checking.
|
||||
#
|
||||
# Consider this example:
|
||||
#
|
||||
# MyUnion = Union[int, str]
|
||||
#
|
||||
# def handle(x: MyUnion) -> int:
|
||||
# if isinstance(x, int):
|
||||
# return 1
|
||||
# elif isinstance(x, str):
|
||||
# return 2
|
||||
# else:
|
||||
# raise Exception('unreachable')
|
||||
#
|
||||
# Now suppose we add a new variant:
|
||||
#
|
||||
# MyUnion = Union[int, str, bytes]
|
||||
#
|
||||
# After doing this, we must remember ourselves to go and update the handle
|
||||
# function to handle the new variant.
|
||||
#
|
||||
# With `assert_never` we can do better:
|
||||
#
|
||||
# // raise Exception('unreachable')
|
||||
# return assert_never(x)
|
||||
#
|
||||
# Now, if we forget to handle the new variant, the type-checker will emit a
|
||||
# compile-time error, instead of the runtime error we would have gotten
|
||||
# previously.
|
||||
#
|
||||
# This also works for Enums (if you use `is` to compare) and Literals.
|
||||
def assert_never(value: NoReturn) -> NoReturn:
|
||||
assert False, f"Unhandled value: {value} ({type(value).__name__})"
|
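# Illustrative sketch (not part of the original pytest module): exhaustiveness
# checking with assert_never, as described in the comment above.
def _example_handle(x: "int | str") -> int:
    if isinstance(x, int):
        return 1
    elif isinstance(x, str):
        return 2
    else:
        # A type checker flags this call if a new union variant is unhandled.
        return assert_never(x)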
1816
venv/lib/python3.11/site-packages/_pytest/config/__init__.py
Normal file
File diff suppressed because it is too large
551
venv/lib/python3.11/site-packages/_pytest/config/argparsing.py
Normal file
@@ -0,0 +1,551 @@
import argparse
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
from gettext import gettext
|
||||
from typing import Any
|
||||
from typing import Callable
|
||||
from typing import cast
|
||||
from typing import Dict
|
||||
from typing import List
|
||||
from typing import Mapping
|
||||
from typing import NoReturn
|
||||
from typing import Optional
|
||||
from typing import Sequence
|
||||
from typing import Tuple
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import Union
|
||||
|
||||
import _pytest._io
|
||||
from _pytest.compat import final
|
||||
from _pytest.config.exceptions import UsageError
|
||||
from _pytest.deprecated import ARGUMENT_PERCENT_DEFAULT
|
||||
from _pytest.deprecated import ARGUMENT_TYPE_STR
|
||||
from _pytest.deprecated import ARGUMENT_TYPE_STR_CHOICE
|
||||
from _pytest.deprecated import check_ispytest
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing_extensions import Literal
|
||||
|
||||
FILE_OR_DIR = "file_or_dir"
|
||||
|
||||
|
||||
@final
|
||||
class Parser:
|
||||
"""Parser for command line arguments and ini-file values.
|
||||
|
||||
:ivar extra_info: Dict of generic param -> value to display in case
|
||||
there's an error processing the command line arguments.
|
||||
"""
|
||||
|
||||
prog: Optional[str] = None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
usage: Optional[str] = None,
|
||||
processopt: Optional[Callable[["Argument"], None]] = None,
|
||||
*,
|
||||
_ispytest: bool = False,
|
||||
) -> None:
|
||||
check_ispytest(_ispytest)
|
||||
self._anonymous = OptionGroup("Custom options", parser=self, _ispytest=True)
|
||||
self._groups: List[OptionGroup] = []
|
||||
self._processopt = processopt
|
||||
self._usage = usage
|
||||
self._inidict: Dict[str, Tuple[str, Optional[str], Any]] = {}
|
||||
self._ininames: List[str] = []
|
||||
self.extra_info: Dict[str, Any] = {}
|
||||
|
||||
def processoption(self, option: "Argument") -> None:
|
||||
if self._processopt:
|
||||
if option.dest:
|
||||
self._processopt(option)
|
||||
|
||||
def getgroup(
|
||||
self, name: str, description: str = "", after: Optional[str] = None
|
||||
) -> "OptionGroup":
|
||||
"""Get (or create) a named option Group.
|
||||
|
||||
:param name: Name of the option group.
|
||||
:param description: Long description for --help output.
|
||||
:param after: Name of another group, used for ordering --help output.
|
||||
:returns: The option group.
|
||||
|
||||
The returned group object has an ``addoption`` method with the same
|
||||
signature as :func:`parser.addoption <pytest.Parser.addoption>` but
|
||||
will be shown in the respective group in the output of
|
||||
``pytest --help``.
|
||||
"""
|
||||
for group in self._groups:
|
||||
if group.name == name:
|
||||
return group
|
||||
group = OptionGroup(name, description, parser=self, _ispytest=True)
|
||||
i = 0
|
||||
for i, grp in enumerate(self._groups):
|
||||
if grp.name == after:
|
||||
break
|
||||
self._groups.insert(i + 1, group)
|
||||
return group
|
||||
|
||||
def addoption(self, *opts: str, **attrs: Any) -> None:
|
||||
"""Register a command line option.
|
||||
|
||||
:param opts:
|
||||
Option names, can be short or long options.
|
||||
:param attrs:
|
||||
Same attributes as the argparse library's :py:func:`add_argument()
|
||||
<argparse.ArgumentParser.add_argument>` function accepts.
|
||||
|
||||
After command line parsing, options are available on the pytest config
|
||||
object via ``config.option.NAME`` where ``NAME`` is usually set
|
||||
by passing a ``dest`` attribute, for example
|
||||
``addoption("--long", dest="NAME", ...)``.
|
||||
"""
|
||||
self._anonymous.addoption(*opts, **attrs)
|
||||
|
||||
def parse(
|
||||
self,
|
||||
args: Sequence[Union[str, "os.PathLike[str]"]],
|
||||
namespace: Optional[argparse.Namespace] = None,
|
||||
) -> argparse.Namespace:
|
||||
from _pytest._argcomplete import try_argcomplete
|
||||
|
||||
self.optparser = self._getparser()
|
||||
try_argcomplete(self.optparser)
|
||||
strargs = [os.fspath(x) for x in args]
|
||||
return self.optparser.parse_args(strargs, namespace=namespace)
|
||||
|
||||
def _getparser(self) -> "MyOptionParser":
|
||||
from _pytest._argcomplete import filescompleter
|
||||
|
||||
optparser = MyOptionParser(self, self.extra_info, prog=self.prog)
|
||||
groups = self._groups + [self._anonymous]
|
||||
for group in groups:
|
||||
if group.options:
|
||||
desc = group.description or group.name
|
||||
arggroup = optparser.add_argument_group(desc)
|
||||
for option in group.options:
|
||||
n = option.names()
|
||||
a = option.attrs()
|
||||
arggroup.add_argument(*n, **a)
|
||||
file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*")
|
||||
# bash like autocompletion for dirs (appending '/')
|
||||
# Type ignored because typeshed doesn't know about argcomplete.
|
||||
file_or_dir_arg.completer = filescompleter # type: ignore
|
||||
return optparser
|
||||
|
||||
def parse_setoption(
|
||||
self,
|
||||
args: Sequence[Union[str, "os.PathLike[str]"]],
|
||||
option: argparse.Namespace,
|
||||
namespace: Optional[argparse.Namespace] = None,
|
||||
) -> List[str]:
|
||||
parsedoption = self.parse(args, namespace=namespace)
|
||||
for name, value in parsedoption.__dict__.items():
|
||||
setattr(option, name, value)
|
||||
return cast(List[str], getattr(parsedoption, FILE_OR_DIR))
|
||||
|
||||
def parse_known_args(
|
||||
self,
|
||||
args: Sequence[Union[str, "os.PathLike[str]"]],
|
||||
namespace: Optional[argparse.Namespace] = None,
|
||||
) -> argparse.Namespace:
|
||||
"""Parse the known arguments at this point.
|
||||
|
||||
:returns: An argparse namespace object.
|
||||
"""
|
||||
return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
|
||||
|
||||
def parse_known_and_unknown_args(
|
||||
self,
|
||||
args: Sequence[Union[str, "os.PathLike[str]"]],
|
||||
namespace: Optional[argparse.Namespace] = None,
|
||||
) -> Tuple[argparse.Namespace, List[str]]:
|
||||
"""Parse the known arguments at this point, and also return the
|
||||
remaining unknown arguments.
|
||||
|
||||
:returns:
|
||||
A tuple containing an argparse namespace object for the known
|
||||
arguments, and a list of the unknown arguments.
|
||||
"""
|
||||
optparser = self._getparser()
|
||||
strargs = [os.fspath(x) for x in args]
|
||||
return optparser.parse_known_args(strargs, namespace=namespace)
|
||||
|
||||
def addini(
|
||||
self,
|
||||
name: str,
|
||||
help: str,
|
||||
type: Optional[
|
||||
"Literal['string', 'paths', 'pathlist', 'args', 'linelist', 'bool']"
|
||||
] = None,
|
||||
default: Any = None,
|
||||
) -> None:
|
||||
"""Register an ini-file option.
|
||||
|
||||
:param name:
|
||||
Name of the ini-variable.
|
||||
:param type:
|
||||
Type of the variable. Can be:
|
||||
|
||||
* ``string``: a string
|
||||
* ``bool``: a boolean
|
||||
* ``args``: a list of strings, separated as in a shell
|
||||
* ``linelist``: a list of strings, separated by line breaks
|
||||
* ``paths``: a list of :class:`pathlib.Path`, separated as in a shell
|
||||
* ``pathlist``: a list of ``py.path``, separated as in a shell
|
||||
|
||||
.. versionadded:: 7.0
|
||||
The ``paths`` variable type.
|
||||
|
||||
Defaults to ``string`` if ``None`` or not passed.
|
||||
:param default:
|
||||
Default value if no ini-file option exists but is queried.
|
||||
|
||||
The value of ini-variables can be retrieved via a call to
|
||||
:py:func:`config.getini(name) <pytest.Config.getini>`.
|
||||
"""
|
||||
assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool")
|
||||
self._inidict[name] = (help, type, default)
|
||||
self._ininames.append(name)
|
||||
|
||||
|
||||
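# Illustrative sketch (not part of the original pytest module): how a
# conftest.py/plugin typically uses the Parser above; option and ini names
# are hypothetical.
def _example_pytest_addoption(parser: Parser) -> None:
    group = parser.getgroup("example", "example plugin options")
    group.addoption(
        "--example-flag",
        action="store_true",
        dest="example_flag",
        help="Enable the example behaviour",
    )
    parser.addini("example_timeout", help="Timeout in seconds", default="10")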
class ArgumentError(Exception):
|
||||
"""Raised if an Argument instance is created with invalid or
|
||||
inconsistent arguments."""
|
||||
|
||||
def __init__(self, msg: str, option: Union["Argument", str]) -> None:
|
||||
self.msg = msg
|
||||
self.option_id = str(option)
|
||||
|
||||
def __str__(self) -> str:
|
||||
if self.option_id:
|
||||
return f"option {self.option_id}: {self.msg}"
|
||||
else:
|
||||
return self.msg
|
||||
|
||||
|
||||
class Argument:
|
||||
"""Class that mimics the necessary behaviour of optparse.Option.
|
||||
|
||||
It's currently a least-effort implementation that ignores choices
and integer prefixes.
|
||||
|
||||
https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
|
||||
"""
|
||||
|
||||
_typ_map = {"int": int, "string": str, "float": float, "complex": complex}
|
||||
|
||||
def __init__(self, *names: str, **attrs: Any) -> None:
|
||||
"""Store params in private vars for use in add_argument."""
|
||||
self._attrs = attrs
|
||||
self._short_opts: List[str] = []
|
||||
self._long_opts: List[str] = []
|
||||
if "%default" in (attrs.get("help") or ""):
|
||||
warnings.warn(ARGUMENT_PERCENT_DEFAULT, stacklevel=3)
|
||||
try:
|
||||
typ = attrs["type"]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
# This might raise a keyerror as well, don't want to catch that.
|
||||
if isinstance(typ, str):
|
||||
if typ == "choice":
|
||||
warnings.warn(
|
||||
ARGUMENT_TYPE_STR_CHOICE.format(typ=typ, names=names),
|
||||
stacklevel=4,
|
||||
)
|
||||
# argparse expects a type here take it from
|
||||
# the type of the first element
|
||||
attrs["type"] = type(attrs["choices"][0])
|
||||
else:
|
||||
warnings.warn(
|
||||
ARGUMENT_TYPE_STR.format(typ=typ, names=names), stacklevel=4
|
||||
)
|
||||
attrs["type"] = Argument._typ_map[typ]
|
||||
# Used in test_parseopt -> test_parse_defaultgetter.
|
||||
self.type = attrs["type"]
|
||||
else:
|
||||
self.type = typ
|
||||
try:
|
||||
# Attribute existence is tested in Config._processopt.
|
||||
self.default = attrs["default"]
|
||||
except KeyError:
|
||||
pass
|
||||
self._set_opt_strings(names)
|
||||
dest: Optional[str] = attrs.get("dest")
|
||||
if dest:
|
||||
self.dest = dest
|
||||
elif self._long_opts:
|
||||
self.dest = self._long_opts[0][2:].replace("-", "_")
|
||||
else:
|
||||
try:
|
||||
self.dest = self._short_opts[0][1:]
|
||||
except IndexError as e:
|
||||
self.dest = "???" # Needed for the error repr.
|
||||
raise ArgumentError("need a long or short option", self) from e
|
||||
|
||||
def names(self) -> List[str]:
|
||||
return self._short_opts + self._long_opts
|
||||
|
||||
def attrs(self) -> Mapping[str, Any]:
|
||||
# Update any attributes set by processopt.
|
||||
attrs = "default dest help".split()
|
||||
attrs.append(self.dest)
|
||||
for attr in attrs:
|
||||
try:
|
||||
self._attrs[attr] = getattr(self, attr)
|
||||
except AttributeError:
|
||||
pass
|
||||
if self._attrs.get("help"):
|
||||
a = self._attrs["help"]
|
||||
a = a.replace("%default", "%(default)s")
|
||||
# a = a.replace('%prog', '%(prog)s')
|
||||
self._attrs["help"] = a
|
||||
return self._attrs
|
||||
|
||||
def _set_opt_strings(self, opts: Sequence[str]) -> None:
|
||||
"""Directly from optparse.
|
||||
|
||||
Might not be necessary as this is passed to argparse later on.
|
||||
"""
|
||||
for opt in opts:
|
||||
if len(opt) < 2:
|
||||
raise ArgumentError(
|
||||
"invalid option string %r: "
|
||||
"must be at least two characters long" % opt,
|
||||
self,
|
||||
)
|
||||
elif len(opt) == 2:
|
||||
if not (opt[0] == "-" and opt[1] != "-"):
|
||||
raise ArgumentError(
|
||||
"invalid short option string %r: "
|
||||
"must be of the form -x, (x any non-dash char)" % opt,
|
||||
self,
|
||||
)
|
||||
self._short_opts.append(opt)
|
||||
else:
|
||||
if not (opt[0:2] == "--" and opt[2] != "-"):
|
||||
raise ArgumentError(
|
||||
"invalid long option string %r: "
|
||||
"must start with --, followed by non-dash" % opt,
|
||||
self,
|
||||
)
|
||||
self._long_opts.append(opt)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
args: List[str] = []
|
||||
if self._short_opts:
|
||||
args += ["_short_opts: " + repr(self._short_opts)]
|
||||
if self._long_opts:
|
||||
args += ["_long_opts: " + repr(self._long_opts)]
|
||||
args += ["dest: " + repr(self.dest)]
|
||||
if hasattr(self, "type"):
|
||||
args += ["type: " + repr(self.type)]
|
||||
if hasattr(self, "default"):
|
||||
args += ["default: " + repr(self.default)]
|
||||
return "Argument({})".format(", ".join(args))
|
||||
|
||||
|
||||
class OptionGroup:
|
||||
"""A group of options shown in its own section."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
description: str = "",
|
||||
parser: Optional[Parser] = None,
|
||||
*,
|
||||
_ispytest: bool = False,
|
||||
) -> None:
|
||||
check_ispytest(_ispytest)
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.options: List[Argument] = []
|
||||
self.parser = parser
|
||||
|
||||
def addoption(self, *opts: str, **attrs: Any) -> None:
|
||||
"""Add an option to this group.
|
||||
|
||||
If a shortened version of a long option is specified, it will
|
||||
be suppressed in the help. ``addoption('--twowords', '--two-words')``
|
||||
results in help showing ``--two-words`` only, but ``--twowords`` gets
|
||||
accepted **and** the automatic destination is in ``args.twowords``.
|
||||
|
||||
:param opts:
|
||||
Option names, can be short or long options.
|
||||
:param attrs:
|
||||
Same attributes as the argparse library's :py:func:`add_argument()
|
||||
<argparse.ArgumentParser.add_argument>` function accepts.
|
||||
"""
|
||||
conflict = set(opts).intersection(
|
||||
name for opt in self.options for name in opt.names()
|
||||
)
|
||||
if conflict:
|
||||
raise ValueError("option names %s already added" % conflict)
|
||||
option = Argument(*opts, **attrs)
|
||||
self._addoption_instance(option, shortupper=False)
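# Hedged usage sketch (hypothetical plugin code, not part of this file): this
# method is normally reached from a plugin's pytest_addoption hook.
#
#     def pytest_addoption(parser):
#         group = parser.getgroup("myplugin")
#         group.addoption(
#             "--two-words", "--twowords",
#             action="store", default="x",
#             help="both spellings are accepted; help shows only --two-words",
#         )
#     # The value is later read back as config.option.two_words.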
|
||||
|
||||
def _addoption(self, *opts: str, **attrs: Any) -> None:
|
||||
option = Argument(*opts, **attrs)
|
||||
self._addoption_instance(option, shortupper=True)
|
||||
|
||||
def _addoption_instance(self, option: "Argument", shortupper: bool = False) -> None:
|
||||
if not shortupper:
|
||||
for opt in option._short_opts:
|
||||
if opt[0] == "-" and opt[1].islower():
|
||||
raise ValueError("lowercase shortoptions reserved")
|
||||
if self.parser:
|
||||
self.parser.processoption(option)
|
||||
self.options.append(option)
|
||||
|
||||
|
||||
class MyOptionParser(argparse.ArgumentParser):
|
||||
def __init__(
|
||||
self,
|
||||
parser: Parser,
|
||||
extra_info: Optional[Dict[str, Any]] = None,
|
||||
prog: Optional[str] = None,
|
||||
) -> None:
|
||||
self._parser = parser
|
||||
super().__init__(
|
||||
prog=prog,
|
||||
usage=parser._usage,
|
||||
add_help=False,
|
||||
formatter_class=DropShorterLongHelpFormatter,
|
||||
allow_abbrev=False,
|
||||
)
|
||||
# extra_info is a dict of (param -> value) to display if there's
|
||||
# a usage error to provide more contextual information to the user.
|
||||
self.extra_info = extra_info if extra_info else {}
|
||||
|
||||
def error(self, message: str) -> NoReturn:
|
||||
"""Transform argparse error message into UsageError."""
|
||||
msg = f"{self.prog}: error: {message}"
|
||||
|
||||
if hasattr(self._parser, "_config_source_hint"):
|
||||
# Type ignored because the attribute is set dynamically.
|
||||
msg = f"{msg} ({self._parser._config_source_hint})" # type: ignore
|
||||
|
||||
raise UsageError(self.format_usage() + msg)
|
||||
|
||||
# Type ignored because typeshed has a very complex type in the superclass.
|
||||
def parse_args( # type: ignore
|
||||
self,
|
||||
args: Optional[Sequence[str]] = None,
|
||||
namespace: Optional[argparse.Namespace] = None,
|
||||
) -> argparse.Namespace:
|
||||
"""Allow splitting of positional arguments."""
|
||||
parsed, unrecognized = self.parse_known_args(args, namespace)
|
||||
if unrecognized:
|
||||
for arg in unrecognized:
|
||||
if arg and arg[0] == "-":
|
||||
lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))]
|
||||
for k, v in sorted(self.extra_info.items()):
|
||||
lines.append(f" {k}: {v}")
|
||||
self.error("\n".join(lines))
|
||||
getattr(parsed, FILE_OR_DIR).extend(unrecognized)
|
||||
return parsed
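# Sketch of the behaviour above: unrecognized positional arguments (typically
# test file or directory paths) are appended to the FILE_OR_DIR namespace
# attribute instead of being rejected, while an unrecognized flag such as
# "--no-such-option" still raises UsageError through self.error().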
|
||||
|
||||
if sys.version_info[:2] < (3, 9): # pragma: no cover
|
||||
# Backport of https://github.com/python/cpython/pull/14316 so we can
|
||||
# disable long --argument abbreviations without breaking short flags.
|
||||
def _parse_optional(
|
||||
self, arg_string: str
|
||||
) -> Optional[Tuple[Optional[argparse.Action], str, Optional[str]]]:
|
||||
if not arg_string:
|
||||
return None
|
||||
if not arg_string[0] in self.prefix_chars:
|
||||
return None
|
||||
if arg_string in self._option_string_actions:
|
||||
action = self._option_string_actions[arg_string]
|
||||
return action, arg_string, None
|
||||
if len(arg_string) == 1:
|
||||
return None
|
||||
if "=" in arg_string:
|
||||
option_string, explicit_arg = arg_string.split("=", 1)
|
||||
if option_string in self._option_string_actions:
|
||||
action = self._option_string_actions[option_string]
|
||||
return action, option_string, explicit_arg
|
||||
if self.allow_abbrev or not arg_string.startswith("--"):
|
||||
option_tuples = self._get_option_tuples(arg_string)
|
||||
if len(option_tuples) > 1:
|
||||
msg = gettext(
|
||||
"ambiguous option: %(option)s could match %(matches)s"
|
||||
)
|
||||
options = ", ".join(option for _, option, _ in option_tuples)
|
||||
self.error(msg % {"option": arg_string, "matches": options})
|
||||
elif len(option_tuples) == 1:
|
||||
(option_tuple,) = option_tuples
|
||||
return option_tuple
|
||||
if self._negative_number_matcher.match(arg_string):
|
||||
if not self._has_negative_number_optionals:
|
||||
return None
|
||||
if " " in arg_string:
|
||||
return None
|
||||
return None, arg_string, None
|
||||
|
||||
|
||||
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
|
||||
"""Shorten help for long options that differ only in extra hyphens.
|
||||
|
||||
- Collapse **long** options that are the same except for extra hyphens.
|
||||
- Shortcut if there are only two options and one of them is a short one.
|
||||
- Cache result on the action object as this is called at least 2 times.
|
||||
"""
|
||||
|
||||
def __init__(self, *args: Any, **kwargs: Any) -> None:
|
||||
# Use more accurate terminal width.
|
||||
if "width" not in kwargs:
|
||||
kwargs["width"] = _pytest._io.get_terminal_width()
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def _format_action_invocation(self, action: argparse.Action) -> str:
|
||||
orgstr = super()._format_action_invocation(action)
|
||||
if orgstr and orgstr[0] != "-": # only optional arguments
|
||||
return orgstr
|
||||
res: Optional[str] = getattr(action, "_formatted_action_invocation", None)
|
||||
if res:
|
||||
return res
|
||||
options = orgstr.split(", ")
|
||||
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
|
||||
# a shortcut for '-h, --help' or '--abc', '-a'
|
||||
action._formatted_action_invocation = orgstr # type: ignore
|
||||
return orgstr
|
||||
return_list = []
|
||||
short_long: Dict[str, str] = {}
|
||||
for option in options:
|
||||
if len(option) == 2 or option[2] == " ":
|
||||
continue
|
||||
if not option.startswith("--"):
|
||||
raise ArgumentError(
|
||||
'long optional argument without "--": [%s]' % (option), option
|
||||
)
|
||||
xxoption = option[2:]
|
||||
shortened = xxoption.replace("-", "")
|
||||
if shortened not in short_long or len(short_long[shortened]) < len(
|
||||
xxoption
|
||||
):
|
||||
short_long[shortened] = xxoption
|
||||
# now short_long has been filled out to the longest with dashes
|
||||
# **and** we keep the right option ordering from add_argument
|
||||
for option in options:
|
||||
if len(option) == 2 or option[2] == " ":
|
||||
return_list.append(option)
|
||||
if option[2:] == short_long.get(option.replace("-", "")):
|
||||
return_list.append(option.replace(" ", "=", 1))
|
||||
formatted_action_invocation = ", ".join(return_list)
|
||||
action._formatted_action_invocation = formatted_action_invocation # type: ignore
|
||||
return formatted_action_invocation
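# Sketch of the effect (assumed registration as in OptionGroup.addoption above):
# for an option added as "--two-words" with the "--twowords" alias, argparse
# would normally show "--two-words, --twowords" in --help; the loop above
# collapses that to just "--two-words", keeping the spelling with the dashes.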
|
||||
|
||||
def _split_lines(self, text, width):
|
||||
"""Wrap lines after splitting on original newlines.
|
||||
|
||||
This allows having explicit line breaks in the help text.
|
||||
"""
|
||||
import textwrap
|
||||
|
||||
lines = []
|
||||
for line in text.splitlines():
|
||||
lines.extend(textwrap.wrap(line.strip(), width))
|
||||
return lines
|
70
venv/lib/python3.11/site-packages/_pytest/config/compat.py
Normal file
|
@@ -0,0 +1,70 @@
|
|||
import functools
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from ..compat import LEGACY_PATH
|
||||
from ..compat import legacy_path
|
||||
from ..deprecated import HOOK_LEGACY_PATH_ARG
|
||||
from _pytest.nodes import _check_path
|
||||
|
||||
# hookname: (Path, LEGACY_PATH)
|
||||
imply_paths_hooks = {
|
||||
"pytest_ignore_collect": ("collection_path", "path"),
|
||||
"pytest_collect_file": ("file_path", "path"),
|
||||
"pytest_pycollect_makemodule": ("module_path", "path"),
|
||||
"pytest_report_header": ("start_path", "startdir"),
|
||||
"pytest_report_collectionfinish": ("start_path", "startdir"),
|
||||
}
|
||||
|
||||
|
||||
class PathAwareHookProxy:
|
||||
"""
|
||||
This helper wraps around hook callers
until pluggy supports fixing calls; this one will do.

It currently doesn't return full hook caller proxies for fixed hooks;
this may have to be changed later, depending on bugs.
|
||||
"""
|
||||
|
||||
def __init__(self, hook_caller):
|
||||
self.__hook_caller = hook_caller
|
||||
|
||||
def __dir__(self):
|
||||
return dir(self.__hook_caller)
|
||||
|
||||
def __getattr__(self, key, _wraps=functools.wraps):
|
||||
hook = getattr(self.__hook_caller, key)
|
||||
if key not in imply_paths_hooks:
|
||||
self.__dict__[key] = hook
|
||||
return hook
|
||||
else:
|
||||
path_var, fspath_var = imply_paths_hooks[key]
|
||||
|
||||
@_wraps(hook)
|
||||
def fixed_hook(**kw):
|
||||
path_value: Optional[Path] = kw.pop(path_var, None)
|
||||
fspath_value: Optional[LEGACY_PATH] = kw.pop(fspath_var, None)
|
||||
if fspath_value is not None:
|
||||
warnings.warn(
|
||||
HOOK_LEGACY_PATH_ARG.format(
|
||||
pylib_path_arg=fspath_var, pathlib_path_arg=path_var
|
||||
),
|
||||
stacklevel=2,
|
||||
)
|
||||
if path_value is not None:
|
||||
if fspath_value is not None:
|
||||
_check_path(path_value, fspath_value)
|
||||
else:
|
||||
fspath_value = legacy_path(path_value)
|
||||
else:
|
||||
assert fspath_value is not None
|
||||
path_value = Path(fspath_value)
|
||||
|
||||
kw[path_var] = path_value
|
||||
kw[fspath_var] = fspath_value
|
||||
return hook(**kw)
|
||||
|
||||
fixed_hook.__name__ = key
|
||||
self.__dict__[key] = fixed_hook
|
||||
return fixed_hook
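# Hedged sketch (hypothetical hook caller, not from this file): how the proxy
# back-fills the legacy ``path`` argument for a hook listed in imply_paths_hooks.
#
#     from pathlib import Path
#
#     class FakeHookCaller:
#         def pytest_collect_file(self, **kw):
#             return kw
#
#     proxy = PathAwareHookProxy(FakeHookCaller())
#     kw = proxy.pytest_collect_file(file_path=Path("tests/test_x.py"))
#     # kw["file_path"] is the pathlib.Path and kw["path"] is the derived
#     # legacy_path() equivalent; no deprecation warning is emitted because
#     # only the new-style argument was passed.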
|
|
@@ -0,0 +1,11 @@
|
|||
from _pytest.compat import final
|
||||
|
||||
|
||||
@final
|
||||
class UsageError(Exception):
|
||||
"""Error in pytest usage or invocation."""
|
||||
|
||||
|
||||
class PrintHelp(Exception):
|
||||
"""Raised when pytest should print its help to skip the rest of the
|
||||
argument parsing and validation."""
|
225
venv/lib/python3.11/site-packages/_pytest/config/findpaths.py
Normal file
|
@@ -0,0 +1,225 @@
|
|||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
from typing import Iterable
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Sequence
|
||||
from typing import Tuple
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import Union
|
||||
|
||||
import iniconfig
|
||||
|
||||
from .exceptions import UsageError
|
||||
from _pytest.outcomes import fail
|
||||
from _pytest.pathlib import absolutepath
|
||||
from _pytest.pathlib import commonpath
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from . import Config
|
||||
|
||||
|
||||
def _parse_ini_config(path: Path) -> iniconfig.IniConfig:
|
||||
"""Parse the given generic '.ini' file using legacy IniConfig parser, returning
|
||||
the parsed object.
|
||||
|
||||
Raise UsageError if the file cannot be parsed.
|
||||
"""
|
||||
try:
|
||||
return iniconfig.IniConfig(str(path))
|
||||
except iniconfig.ParseError as exc:
|
||||
raise UsageError(str(exc)) from exc
|
||||
|
||||
|
||||
def load_config_dict_from_file(
|
||||
filepath: Path,
|
||||
) -> Optional[Dict[str, Union[str, List[str]]]]:
|
||||
"""Load pytest configuration from the given file path, if supported.
|
||||
|
||||
Return None if the file does not contain valid pytest configuration.
|
||||
"""
|
||||
|
||||
# Configuration from ini files is obtained from the [pytest] section, if present.
|
||||
if filepath.suffix == ".ini":
|
||||
iniconfig = _parse_ini_config(filepath)
|
||||
|
||||
if "pytest" in iniconfig:
|
||||
return dict(iniconfig["pytest"].items())
|
||||
else:
|
||||
# "pytest.ini" files are always the source of configuration, even if empty.
|
||||
if filepath.name == "pytest.ini":
|
||||
return {}
|
||||
|
||||
# '.cfg' files are considered if they contain a "[tool:pytest]" section.
|
||||
elif filepath.suffix == ".cfg":
|
||||
iniconfig = _parse_ini_config(filepath)
|
||||
|
||||
if "tool:pytest" in iniconfig.sections:
|
||||
return dict(iniconfig["tool:pytest"].items())
|
||||
elif "pytest" in iniconfig.sections:
|
||||
# If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate to users that
|
||||
# plain "[pytest]" sections in setup.cfg files is no longer supported (#3086).
|
||||
fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False)
|
||||
|
||||
# '.toml' files are considered if they contain a [tool.pytest.ini_options] table.
|
||||
elif filepath.suffix == ".toml":
|
||||
if sys.version_info >= (3, 11):
|
||||
import tomllib
|
||||
else:
|
||||
import tomli as tomllib
|
||||
|
||||
toml_text = filepath.read_text(encoding="utf-8")
|
||||
try:
|
||||
config = tomllib.loads(toml_text)
|
||||
except tomllib.TOMLDecodeError as exc:
|
||||
raise UsageError(f"{filepath}: {exc}") from exc
|
||||
|
||||
result = config.get("tool", {}).get("pytest", {}).get("ini_options", None)
|
||||
if result is not None:
|
||||
# TOML supports richer data types than ini files (strings, arrays, floats, ints, etc),
|
||||
# however we need to convert all scalar values to str for compatibility with the rest
|
||||
# of the configuration system, which expects strings only.
|
||||
def make_scalar(v: object) -> Union[str, List[str]]:
|
||||
return v if isinstance(v, list) else str(v)
|
||||
|
||||
return {k: make_scalar(v) for k, v in result.items()}
|
||||
|
||||
return None
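# Hedged usage sketch (temporary files for illustration only):
#
#     import tempfile
#     from pathlib import Path
#
#     with tempfile.TemporaryDirectory() as tmp:
#         ini = Path(tmp) / "pytest.ini"
#         ini.write_text("[pytest]\naddopts = -ra\n", encoding="utf-8")
#         load_config_dict_from_file(ini)       # {"addopts": "-ra"}
#         other = Path(tmp) / "notes.txt"
#         other.touch()
#         load_config_dict_from_file(other)     # None (unsupported file)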
|
||||
|
||||
|
||||
def locate_config(
|
||||
args: Iterable[Path],
|
||||
) -> Tuple[Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]]]:
|
||||
"""Search in the list of arguments for a valid ini-file for pytest,
|
||||
and return a tuple of (rootdir, inifile, cfg-dict)."""
|
||||
config_names = [
|
||||
"pytest.ini",
|
||||
".pytest.ini",
|
||||
"pyproject.toml",
|
||||
"tox.ini",
|
||||
"setup.cfg",
|
||||
]
|
||||
args = [x for x in args if not str(x).startswith("-")]
|
||||
if not args:
|
||||
args = [Path.cwd()]
|
||||
for arg in args:
|
||||
argpath = absolutepath(arg)
|
||||
for base in (argpath, *argpath.parents):
|
||||
for config_name in config_names:
|
||||
p = base / config_name
|
||||
if p.is_file():
|
||||
ini_config = load_config_dict_from_file(p)
|
||||
if ini_config is not None:
|
||||
return base, p, ini_config
|
||||
return None, None, {}
|
||||
|
||||
|
||||
def get_common_ancestor(paths: Iterable[Path]) -> Path:
|
||||
common_ancestor: Optional[Path] = None
|
||||
for path in paths:
|
||||
if not path.exists():
|
||||
continue
|
||||
if common_ancestor is None:
|
||||
common_ancestor = path
|
||||
else:
|
||||
if common_ancestor in path.parents or path == common_ancestor:
|
||||
continue
|
||||
elif path in common_ancestor.parents:
|
||||
common_ancestor = path
|
||||
else:
|
||||
shared = commonpath(path, common_ancestor)
|
||||
if shared is not None:
|
||||
common_ancestor = shared
|
||||
if common_ancestor is None:
|
||||
common_ancestor = Path.cwd()
|
||||
elif common_ancestor.is_file():
|
||||
common_ancestor = common_ancestor.parent
|
||||
return common_ancestor
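# Sketch (assumed directory layout): for /repo/src and /repo/tests the common
# ancestor is /repo; paths that do not exist are skipped, and if nothing
# exists the current working directory is returned.
#
#     import tempfile
#     from pathlib import Path
#
#     with tempfile.TemporaryDirectory() as tmp:
#         root = Path(tmp)
#         (root / "src").mkdir()
#         (root / "tests").mkdir()
#         get_common_ancestor([root / "src", root / "tests"])   # == root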
|
||||
|
||||
|
||||
def get_dirs_from_args(args: Iterable[str]) -> List[Path]:
|
||||
def is_option(x: str) -> bool:
|
||||
return x.startswith("-")
|
||||
|
||||
def get_file_part_from_node_id(x: str) -> str:
|
||||
return x.split("::")[0]
|
||||
|
||||
def get_dir_from_path(path: Path) -> Path:
|
||||
if path.is_dir():
|
||||
return path
|
||||
return path.parent
|
||||
|
||||
def safe_exists(path: Path) -> bool:
|
||||
# This can throw on paths that contain characters unrepresentable at the OS level,
|
||||
# or with invalid syntax on Windows (https://bugs.python.org/issue35306)
|
||||
try:
|
||||
return path.exists()
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
# These look like paths but may not exist
|
||||
possible_paths = (
|
||||
absolutepath(get_file_part_from_node_id(arg))
|
||||
for arg in args
|
||||
if not is_option(arg)
|
||||
)
|
||||
|
||||
return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)]
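# Sketch (hypothetical arguments): options are skipped, node ids are reduced to
# their file part, and only existing paths survive.
#
#     get_dirs_from_args(["-q", "tests/test_x.py::TestX::test_y", "no/such/dir"])
#     # -> [absolute path of "tests"], assuming tests/test_x.py exists; "-q" is
#     #    ignored as an option and the non-existing path is filtered out.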
|
||||
|
||||
|
||||
CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead."
|
||||
|
||||
|
||||
def determine_setup(
|
||||
inifile: Optional[str],
|
||||
args: Sequence[str],
|
||||
rootdir_cmd_arg: Optional[str] = None,
|
||||
config: Optional["Config"] = None,
|
||||
) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]:
|
||||
rootdir = None
|
||||
dirs = get_dirs_from_args(args)
|
||||
if inifile:
|
||||
inipath_ = absolutepath(inifile)
|
||||
inipath: Optional[Path] = inipath_
|
||||
inicfg = load_config_dict_from_file(inipath_) or {}
|
||||
if rootdir_cmd_arg is None:
|
||||
rootdir = inipath_.parent
|
||||
else:
|
||||
ancestor = get_common_ancestor(dirs)
|
||||
rootdir, inipath, inicfg = locate_config([ancestor])
|
||||
if rootdir is None and rootdir_cmd_arg is None:
|
||||
for possible_rootdir in (ancestor, *ancestor.parents):
|
||||
if (possible_rootdir / "setup.py").is_file():
|
||||
rootdir = possible_rootdir
|
||||
break
|
||||
else:
|
||||
if dirs != [ancestor]:
|
||||
rootdir, inipath, inicfg = locate_config(dirs)
|
||||
if rootdir is None:
|
||||
if config is not None:
|
||||
cwd = config.invocation_params.dir
|
||||
else:
|
||||
cwd = Path.cwd()
|
||||
rootdir = get_common_ancestor([cwd, ancestor])
|
||||
if is_fs_root(rootdir):
|
||||
rootdir = ancestor
|
||||
if rootdir_cmd_arg:
|
||||
rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg))
|
||||
if not rootdir.is_dir():
|
||||
raise UsageError(
|
||||
"Directory '{}' not found. Check your '--rootdir' option.".format(
|
||||
rootdir
|
||||
)
|
||||
)
|
||||
assert rootdir is not None
|
||||
return rootdir, inipath, inicfg or {}
|
||||
|
||||
|
||||
def is_fs_root(p: Path) -> bool:
|
||||
r"""
|
||||
Return True if the given path is pointing to the root of the
|
||||
file system ("/" on Unix and "C:\\" on Windows for example).
|
||||
"""
|
||||
return os.path.splitdrive(str(p))[1] == os.sep
|
391
venv/lib/python3.11/site-packages/_pytest/debugging.py
Normal file
|
@@ -0,0 +1,391 @@
|
|||
"""Interactive debugging with PDB, the Python Debugger."""
|
||||
import argparse
|
||||
import functools
|
||||
import sys
|
||||
import types
|
||||
import unittest
|
||||
from typing import Any
|
||||
from typing import Callable
|
||||
from typing import Generator
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
from typing import Type
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import Union
|
||||
|
||||
from _pytest import outcomes
|
||||
from _pytest._code import ExceptionInfo
|
||||
from _pytest.config import Config
|
||||
from _pytest.config import ConftestImportFailure
|
||||
from _pytest.config import hookimpl
|
||||
from _pytest.config import PytestPluginManager
|
||||
from _pytest.config.argparsing import Parser
|
||||
from _pytest.config.exceptions import UsageError
|
||||
from _pytest.nodes import Node
|
||||
from _pytest.reports import BaseReport
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from _pytest.capture import CaptureManager
|
||||
from _pytest.runner import CallInfo
|
||||
|
||||
|
||||
def _validate_usepdb_cls(value: str) -> Tuple[str, str]:
|
||||
"""Validate syntax of --pdbcls option."""
|
||||
try:
|
||||
modname, classname = value.split(":")
|
||||
except ValueError as e:
|
||||
raise argparse.ArgumentTypeError(
|
||||
f"{value!r} is not in the format 'modname:classname'"
|
||||
) from e
|
||||
return (modname, classname)
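# Sketch: the validator only checks that the value contains exactly one ":".
#
#     _validate_usepdb_cls("IPython.terminal.debugger:TerminalPdb")
#     # -> ("IPython.terminal.debugger", "TerminalPdb")
#     _validate_usepdb_cls("no-colon-here")   # raises argparse.ArgumentTypeError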
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser) -> None:
|
||||
group = parser.getgroup("general")
|
||||
group._addoption(
|
||||
"--pdb",
|
||||
dest="usepdb",
|
||||
action="store_true",
|
||||
help="Start the interactive Python debugger on errors or KeyboardInterrupt",
|
||||
)
|
||||
group._addoption(
|
||||
"--pdbcls",
|
||||
dest="usepdb_cls",
|
||||
metavar="modulename:classname",
|
||||
type=_validate_usepdb_cls,
|
||||
help="Specify a custom interactive Python debugger for use with --pdb."
|
||||
"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
|
||||
)
|
||||
group._addoption(
|
||||
"--trace",
|
||||
dest="trace",
|
||||
action="store_true",
|
||||
help="Immediately break when running each test",
|
||||
)
|
||||
|
||||
|
||||
def pytest_configure(config: Config) -> None:
|
||||
import pdb
|
||||
|
||||
if config.getvalue("trace"):
|
||||
config.pluginmanager.register(PdbTrace(), "pdbtrace")
|
||||
if config.getvalue("usepdb"):
|
||||
config.pluginmanager.register(PdbInvoke(), "pdbinvoke")
|
||||
|
||||
pytestPDB._saved.append(
|
||||
(pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config)
|
||||
)
|
||||
pdb.set_trace = pytestPDB.set_trace
|
||||
pytestPDB._pluginmanager = config.pluginmanager
|
||||
pytestPDB._config = config
|
||||
|
||||
# NOTE: not using pytest_unconfigure, since it might get called although
|
||||
# pytest_configure was not (if another plugin raises UsageError).
|
||||
def fin() -> None:
|
||||
(
|
||||
pdb.set_trace,
|
||||
pytestPDB._pluginmanager,
|
||||
pytestPDB._config,
|
||||
) = pytestPDB._saved.pop()
|
||||
|
||||
config.add_cleanup(fin)
|
||||
|
||||
|
||||
class pytestPDB:
|
||||
"""Pseudo PDB that defers to the real pdb."""
|
||||
|
||||
_pluginmanager: Optional[PytestPluginManager] = None
|
||||
_config: Optional[Config] = None
|
||||
_saved: List[
|
||||
Tuple[Callable[..., None], Optional[PytestPluginManager], Optional[Config]]
|
||||
] = []
|
||||
_recursive_debug = 0
|
||||
_wrapped_pdb_cls: Optional[Tuple[Type[Any], Type[Any]]] = None
|
||||
|
||||
@classmethod
|
||||
def _is_capturing(cls, capman: Optional["CaptureManager"]) -> Union[str, bool]:
|
||||
if capman:
|
||||
return capman.is_capturing()
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def _import_pdb_cls(cls, capman: Optional["CaptureManager"]):
|
||||
if not cls._config:
|
||||
import pdb
|
||||
|
||||
# Happens when using pytest.set_trace outside of a test.
|
||||
return pdb.Pdb
|
||||
|
||||
usepdb_cls = cls._config.getvalue("usepdb_cls")
|
||||
|
||||
if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls:
|
||||
return cls._wrapped_pdb_cls[1]
|
||||
|
||||
if usepdb_cls:
|
||||
modname, classname = usepdb_cls
|
||||
|
||||
try:
|
||||
__import__(modname)
|
||||
mod = sys.modules[modname]
|
||||
|
||||
# Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).
|
||||
parts = classname.split(".")
|
||||
pdb_cls = getattr(mod, parts[0])
|
||||
for part in parts[1:]:
|
||||
pdb_cls = getattr(pdb_cls, part)
|
||||
except Exception as exc:
|
||||
value = ":".join((modname, classname))
|
||||
raise UsageError(
|
||||
f"--pdbcls: could not import {value!r}: {exc}"
|
||||
) from exc
|
||||
else:
|
||||
import pdb
|
||||
|
||||
pdb_cls = pdb.Pdb
|
||||
|
||||
wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman)
|
||||
cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls)
|
||||
return wrapped_cls
|
||||
|
||||
@classmethod
|
||||
def _get_pdb_wrapper_class(cls, pdb_cls, capman: Optional["CaptureManager"]):
|
||||
import _pytest.config
|
||||
|
||||
# Type ignored because mypy doesn't support "dynamic"
|
||||
# inheritance like this.
|
||||
class PytestPdbWrapper(pdb_cls): # type: ignore[valid-type,misc]
|
||||
_pytest_capman = capman
|
||||
_continued = False
|
||||
|
||||
def do_debug(self, arg):
|
||||
cls._recursive_debug += 1
|
||||
ret = super().do_debug(arg)
|
||||
cls._recursive_debug -= 1
|
||||
return ret
|
||||
|
||||
def do_continue(self, arg):
|
||||
ret = super().do_continue(arg)
|
||||
if cls._recursive_debug == 0:
|
||||
assert cls._config is not None
|
||||
tw = _pytest.config.create_terminal_writer(cls._config)
|
||||
tw.line()
|
||||
|
||||
capman = self._pytest_capman
|
||||
capturing = pytestPDB._is_capturing(capman)
|
||||
if capturing:
|
||||
if capturing == "global":
|
||||
tw.sep(">", "PDB continue (IO-capturing resumed)")
|
||||
else:
|
||||
tw.sep(
|
||||
">",
|
||||
"PDB continue (IO-capturing resumed for %s)"
|
||||
% capturing,
|
||||
)
|
||||
assert capman is not None
|
||||
capman.resume()
|
||||
else:
|
||||
tw.sep(">", "PDB continue")
|
||||
assert cls._pluginmanager is not None
|
||||
cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self)
|
||||
self._continued = True
|
||||
return ret
|
||||
|
||||
do_c = do_cont = do_continue
|
||||
|
||||
def do_quit(self, arg):
|
||||
"""Raise Exit outcome when quit command is used in pdb.
|
||||
|
||||
This is a bit of a hack; it would be better if BdbQuit
could be handled, but this would require wrapping the
whole pytest run and adjusting the report, etc.
|
||||
"""
|
||||
ret = super().do_quit(arg)
|
||||
|
||||
if cls._recursive_debug == 0:
|
||||
outcomes.exit("Quitting debugger")
|
||||
|
||||
return ret
|
||||
|
||||
do_q = do_quit
|
||||
do_exit = do_quit
|
||||
|
||||
def setup(self, f, tb):
|
||||
"""Suspend on setup().
|
||||
|
||||
Needed after do_continue resumed and another
breakpoint is entered again.
|
||||
"""
|
||||
ret = super().setup(f, tb)
|
||||
if not ret and self._continued:
|
||||
# pdb.setup() returns True if the command wants to exit
|
||||
# from the interaction: do not suspend capturing then.
|
||||
if self._pytest_capman:
|
||||
self._pytest_capman.suspend_global_capture(in_=True)
|
||||
return ret
|
||||
|
||||
def get_stack(self, f, t):
|
||||
stack, i = super().get_stack(f, t)
|
||||
if f is None:
|
||||
# Find last non-hidden frame.
|
||||
i = max(0, len(stack) - 1)
|
||||
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
|
||||
i -= 1
|
||||
return stack, i
|
||||
|
||||
return PytestPdbWrapper
|
||||
|
||||
@classmethod
|
||||
def _init_pdb(cls, method, *args, **kwargs):
|
||||
"""Initialize PDB debugging, dropping any IO capturing."""
|
||||
import _pytest.config
|
||||
|
||||
if cls._pluginmanager is None:
|
||||
capman: Optional[CaptureManager] = None
|
||||
else:
|
||||
capman = cls._pluginmanager.getplugin("capturemanager")
|
||||
if capman:
|
||||
capman.suspend(in_=True)
|
||||
|
||||
if cls._config:
|
||||
tw = _pytest.config.create_terminal_writer(cls._config)
|
||||
tw.line()
|
||||
|
||||
if cls._recursive_debug == 0:
|
||||
# Handle header similar to pdb.set_trace in py37+.
|
||||
header = kwargs.pop("header", None)
|
||||
if header is not None:
|
||||
tw.sep(">", header)
|
||||
else:
|
||||
capturing = cls._is_capturing(capman)
|
||||
if capturing == "global":
|
||||
tw.sep(">", f"PDB {method} (IO-capturing turned off)")
|
||||
elif capturing:
|
||||
tw.sep(
|
||||
">",
|
||||
"PDB %s (IO-capturing turned off for %s)"
|
||||
% (method, capturing),
|
||||
)
|
||||
else:
|
||||
tw.sep(">", f"PDB {method}")
|
||||
|
||||
_pdb = cls._import_pdb_cls(capman)(**kwargs)
|
||||
|
||||
if cls._pluginmanager:
|
||||
cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb)
|
||||
return _pdb
|
||||
|
||||
@classmethod
|
||||
def set_trace(cls, *args, **kwargs) -> None:
|
||||
"""Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing."""
|
||||
frame = sys._getframe().f_back
|
||||
_pdb = cls._init_pdb("set_trace", *args, **kwargs)
|
||||
_pdb.set_trace(frame)
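# Hedged usage sketch: pytest_configure above rebinds pdb.set_trace to this
# classmethod, so a plain pdb.set_trace() call inside a test (and, by default,
# the builtin breakpoint()) ends up here with IO capturing suspended.
#
#     def test_something():
#         import pdb
#         pdb.set_trace()   # enters the wrapped Pdb class defined above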
|
||||
|
||||
|
||||
class PdbInvoke:
|
||||
def pytest_exception_interact(
|
||||
self, node: Node, call: "CallInfo[Any]", report: BaseReport
|
||||
) -> None:
|
||||
capman = node.config.pluginmanager.getplugin("capturemanager")
|
||||
if capman:
|
||||
capman.suspend_global_capture(in_=True)
|
||||
out, err = capman.read_global_capture()
|
||||
sys.stdout.write(out)
|
||||
sys.stdout.write(err)
|
||||
assert call.excinfo is not None
|
||||
|
||||
if not isinstance(call.excinfo.value, unittest.SkipTest):
|
||||
_enter_pdb(node, call.excinfo, report)
|
||||
|
||||
def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None:
|
||||
tb = _postmortem_traceback(excinfo)
|
||||
post_mortem(tb)
|
||||
|
||||
|
||||
class PdbTrace:
|
||||
@hookimpl(hookwrapper=True)
|
||||
def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, None, None]:
|
||||
wrap_pytest_function_for_tracing(pyfuncitem)
|
||||
yield
|
||||
|
||||
|
||||
def wrap_pytest_function_for_tracing(pyfuncitem):
|
||||
"""Change the Python function object of the given Function item by a
|
||||
wrapper which actually enters pdb before calling the python function
|
||||
itself, effectively leaving the user at the pdb prompt on the first
|
||||
statement of the function."""
|
||||
_pdb = pytestPDB._init_pdb("runcall")
|
||||
testfunction = pyfuncitem.obj
|
||||
|
||||
# we can't just return `partial(pdb.runcall, testfunction)` because (on
|
||||
# python < 3.7.4) runcall's first param is `func`, which means we'd get
|
||||
# an exception if one of the kwargs to testfunction was called `func`.
|
||||
@functools.wraps(testfunction)
|
||||
def wrapper(*args, **kwargs):
|
||||
func = functools.partial(testfunction, *args, **kwargs)
|
||||
_pdb.runcall(func)
|
||||
|
||||
pyfuncitem.obj = wrapper
|
||||
|
||||
|
||||
def maybe_wrap_pytest_function_for_tracing(pyfuncitem):
|
||||
"""Wrap the given pytestfunct item for tracing support if --trace was given in
|
||||
the command line."""
|
||||
if pyfuncitem.config.getvalue("trace"):
|
||||
wrap_pytest_function_for_tracing(pyfuncitem)
|
||||
|
||||
|
||||
def _enter_pdb(
|
||||
node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport
|
||||
) -> BaseReport:
|
||||
# XXX we re-use the TerminalReporter's terminalwriter
|
||||
# because this seems to avoid some encoding related troubles
|
||||
# for not completely clear reasons.
|
||||
tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
|
||||
tw.line()
|
||||
|
||||
showcapture = node.config.option.showcapture
|
||||
|
||||
for sectionname, content in (
|
||||
("stdout", rep.capstdout),
|
||||
("stderr", rep.capstderr),
|
||||
("log", rep.caplog),
|
||||
):
|
||||
if showcapture in (sectionname, "all") and content:
|
||||
tw.sep(">", "captured " + sectionname)
|
||||
if content[-1:] == "\n":
|
||||
content = content[:-1]
|
||||
tw.line(content)
|
||||
|
||||
tw.sep(">", "traceback")
|
||||
rep.toterminal(tw)
|
||||
tw.sep(">", "entering PDB")
|
||||
tb = _postmortem_traceback(excinfo)
|
||||
rep._pdbshown = True # type: ignore[attr-defined]
|
||||
post_mortem(tb)
|
||||
return rep
|
||||
|
||||
|
||||
def _postmortem_traceback(excinfo: ExceptionInfo[BaseException]) -> types.TracebackType:
|
||||
from doctest import UnexpectedException
|
||||
|
||||
if isinstance(excinfo.value, UnexpectedException):
|
||||
# A doctest.UnexpectedException is not useful for post_mortem.
|
||||
# Use the underlying exception instead:
|
||||
return excinfo.value.exc_info[2]
|
||||
elif isinstance(excinfo.value, ConftestImportFailure):
|
||||
# A config.ConftestImportFailure is not useful for post_mortem.
|
||||
# Use the underlying exception instead:
|
||||
return excinfo.value.excinfo[2]
|
||||
else:
|
||||
assert excinfo._excinfo is not None
|
||||
return excinfo._excinfo[2]
|
||||
|
||||
|
||||
def post_mortem(t: types.TracebackType) -> None:
|
||||
p = pytestPDB._init_pdb("post_mortem")
|
||||
p.reset()
|
||||
p.interaction(None, t)
|
||||
if p.quitting:
|
||||
outcomes.exit("Quitting debugger")
|
146
venv/lib/python3.11/site-packages/_pytest/deprecated.py
Normal file
|
@@ -0,0 +1,146 @@
|
|||
"""Deprecation messages and bits of code used elsewhere in the codebase that
|
||||
are planned to be removed in the next pytest release.
|
||||
|
||||
Keeping it in a central location makes it easy to track what is deprecated and should
|
||||
be removed when the time comes.
|
||||
|
||||
All constants defined in this module should be either instances of
|
||||
:class:`PytestWarning`, or :class:`UnformattedWarning`
|
||||
in case of warnings which need to format their messages.
|
||||
"""
|
||||
from warnings import warn
|
||||
|
||||
from _pytest.warning_types import PytestDeprecationWarning
|
||||
from _pytest.warning_types import PytestRemovedIn8Warning
|
||||
from _pytest.warning_types import UnformattedWarning
|
||||
|
||||
# set of plugins which have been integrated into the core; we use this list to ignore
|
||||
# them during registration to avoid conflicts
|
||||
DEPRECATED_EXTERNAL_PLUGINS = {
|
||||
"pytest_catchlog",
|
||||
"pytest_capturelog",
|
||||
"pytest_faulthandler",
|
||||
}
|
||||
|
||||
NOSE_SUPPORT = UnformattedWarning(
|
||||
PytestRemovedIn8Warning,
|
||||
"Support for nose tests is deprecated and will be removed in a future release.\n"
|
||||
"{nodeid} is using nose method: `{method}` ({stage})\n"
|
||||
"See docs: https://docs.pytest.org/en/stable/deprecations.html#support-for-tests-written-for-nose",
|
||||
)
|
||||
|
||||
NOSE_SUPPORT_METHOD = UnformattedWarning(
|
||||
PytestRemovedIn8Warning,
|
||||
"Support for nose tests is deprecated and will be removed in a future release.\n"
|
||||
"{nodeid} is using nose-specific method: `{method}(self)`\n"
|
||||
"To remove this warning, rename it to `{method}_method(self)`\n"
|
||||
"See docs: https://docs.pytest.org/en/stable/deprecations.html#support-for-tests-written-for-nose",
|
||||
)
|
||||
|
||||
|
||||
# This can be* removed in pytest 8, but it's harmless and common, so no rush to remove.
|
||||
# * If you're in the future: "could have been".
|
||||
YIELD_FIXTURE = PytestDeprecationWarning(
|
||||
"@pytest.yield_fixture is deprecated.\n"
|
||||
"Use @pytest.fixture instead; they are the same."
|
||||
)
|
||||
|
||||
WARNING_CMDLINE_PREPARSE_HOOK = PytestRemovedIn8Warning(
|
||||
"The pytest_cmdline_preparse hook is deprecated and will be removed in a future release. \n"
|
||||
"Please use pytest_load_initial_conftests hook instead."
|
||||
)
|
||||
|
||||
FSCOLLECTOR_GETHOOKPROXY_ISINITPATH = PytestRemovedIn8Warning(
|
||||
"The gethookproxy() and isinitpath() methods of FSCollector and Package are deprecated; "
|
||||
"use self.session.gethookproxy() and self.session.isinitpath() instead. "
|
||||
)
|
||||
|
||||
STRICT_OPTION = PytestRemovedIn8Warning(
|
||||
"The --strict option is deprecated, use --strict-markers instead."
|
||||
)
|
||||
|
||||
# This deprecation is never really meant to be removed.
|
||||
PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.")
|
||||
|
||||
ARGUMENT_PERCENT_DEFAULT = PytestRemovedIn8Warning(
|
||||
'pytest now uses argparse. "%default" should be changed to "%(default)s"',
|
||||
)
|
||||
|
||||
ARGUMENT_TYPE_STR_CHOICE = UnformattedWarning(
|
||||
PytestRemovedIn8Warning,
|
||||
"`type` argument to addoption() is the string {typ!r}."
|
||||
" For choices this is optional and can be omitted, "
|
||||
" but when supplied should be a type (for example `str` or `int`)."
|
||||
" (options: {names})",
|
||||
)
|
||||
|
||||
ARGUMENT_TYPE_STR = UnformattedWarning(
|
||||
PytestRemovedIn8Warning,
|
||||
"`type` argument to addoption() is the string {typ!r}, "
|
||||
" but when supplied should be a type (for example `str` or `int`)."
|
||||
" (options: {names})",
|
||||
)
|
||||
|
||||
|
||||
HOOK_LEGACY_PATH_ARG = UnformattedWarning(
|
||||
PytestRemovedIn8Warning,
|
||||
"The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n"
|
||||
"see https://docs.pytest.org/en/latest/deprecations.html"
|
||||
"#py-path-local-arguments-for-hooks-replaced-with-pathlib-path",
|
||||
)
|
||||
|
||||
NODE_CTOR_FSPATH_ARG = UnformattedWarning(
|
||||
PytestRemovedIn8Warning,
|
||||
"The (fspath: py.path.local) argument to {node_type_name} is deprecated. "
|
||||
"Please use the (path: pathlib.Path) argument instead.\n"
|
||||
"See https://docs.pytest.org/en/latest/deprecations.html"
|
||||
"#fspath-argument-for-node-constructors-replaced-with-pathlib-path",
|
||||
)
|
||||
|
||||
WARNS_NONE_ARG = PytestRemovedIn8Warning(
|
||||
"Passing None has been deprecated.\n"
|
||||
"See https://docs.pytest.org/en/latest/how-to/capture-warnings.html"
|
||||
"#additional-use-cases-of-warnings-in-tests"
|
||||
" for alternatives in common use cases."
|
||||
)
|
||||
|
||||
KEYWORD_MSG_ARG = UnformattedWarning(
|
||||
PytestRemovedIn8Warning,
|
||||
"pytest.{func}(msg=...) is now deprecated, use pytest.{func}(reason=...) instead",
|
||||
)
|
||||
|
||||
INSTANCE_COLLECTOR = PytestRemovedIn8Warning(
|
||||
"The pytest.Instance collector type is deprecated and is no longer used. "
|
||||
"See https://docs.pytest.org/en/latest/deprecations.html#the-pytest-instance-collector",
|
||||
)
|
||||
HOOK_LEGACY_MARKING = UnformattedWarning(
|
||||
PytestDeprecationWarning,
|
||||
"The hook{type} {fullname} uses old-style configuration options (marks or attributes).\n"
|
||||
"Please use the pytest.hook{type}({hook_opts}) decorator instead\n"
|
||||
" to configure the hooks.\n"
|
||||
" See https://docs.pytest.org/en/latest/deprecations.html"
|
||||
"#configuring-hook-specs-impls-using-markers",
|
||||
)
|
||||
|
||||
# You want to make some `__init__` or function "private".
|
||||
#
|
||||
# def my_private_function(some, args):
|
||||
# ...
|
||||
#
|
||||
# Do this:
|
||||
#
|
||||
# def my_private_function(some, args, *, _ispytest: bool = False):
|
||||
# check_ispytest(_ispytest)
|
||||
# ...
|
||||
#
|
||||
# Change all internal/allowed calls to
|
||||
#
|
||||
# my_private_function(some, args, _ispytest=True)
|
||||
#
|
||||
# All other calls will get the default _ispytest=False and trigger
|
||||
# the warning (possibly error in the future).
|
||||
|
||||
|
||||
def check_ispytest(ispytest: bool) -> None:
|
||||
if not ispytest:
|
||||
warn(PRIVATE, stacklevel=3)
|
751
venv/lib/python3.11/site-packages/_pytest/doctest.py
Normal file
|
@@ -0,0 +1,751 @@
|
|||
"""Discover and run doctests in modules and test files."""
|
||||
import bdb
|
||||
import inspect
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
import traceback
|
||||
import types
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import Callable
|
||||
from typing import Dict
|
||||
from typing import Generator
|
||||
from typing import Iterable
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Pattern
|
||||
from typing import Sequence
|
||||
from typing import Tuple
|
||||
from typing import Type
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import Union
|
||||
|
||||
from _pytest import outcomes
|
||||
from _pytest._code.code import ExceptionInfo
|
||||
from _pytest._code.code import ReprFileLocation
|
||||
from _pytest._code.code import TerminalRepr
|
||||
from _pytest._io import TerminalWriter
|
||||
from _pytest.compat import safe_getattr
|
||||
from _pytest.config import Config
|
||||
from _pytest.config.argparsing import Parser
|
||||
from _pytest.fixtures import fixture
|
||||
from _pytest.fixtures import FixtureRequest
|
||||
from _pytest.nodes import Collector
|
||||
from _pytest.nodes import Item
|
||||
from _pytest.outcomes import OutcomeException
|
||||
from _pytest.outcomes import skip
|
||||
from _pytest.pathlib import fnmatch_ex
|
||||
from _pytest.pathlib import import_path
|
||||
from _pytest.python import Module
|
||||
from _pytest.python_api import approx
|
||||
from _pytest.warning_types import PytestWarning
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import doctest
|
||||
|
||||
DOCTEST_REPORT_CHOICE_NONE = "none"
|
||||
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
|
||||
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
|
||||
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
|
||||
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
|
||||
|
||||
DOCTEST_REPORT_CHOICES = (
|
||||
DOCTEST_REPORT_CHOICE_NONE,
|
||||
DOCTEST_REPORT_CHOICE_CDIFF,
|
||||
DOCTEST_REPORT_CHOICE_NDIFF,
|
||||
DOCTEST_REPORT_CHOICE_UDIFF,
|
||||
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
|
||||
)
|
||||
|
||||
# Lazy definition of runner class
|
||||
RUNNER_CLASS = None
|
||||
# Lazy definition of output checker class
|
||||
CHECKER_CLASS: Optional[Type["doctest.OutputChecker"]] = None
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser) -> None:
|
||||
parser.addini(
|
||||
"doctest_optionflags",
|
||||
"Option flags for doctests",
|
||||
type="args",
|
||||
default=["ELLIPSIS"],
|
||||
)
|
||||
parser.addini(
|
||||
"doctest_encoding", "Encoding used for doctest files", default="utf-8"
|
||||
)
|
||||
group = parser.getgroup("collect")
|
||||
group.addoption(
|
||||
"--doctest-modules",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Run doctests in all .py modules",
|
||||
dest="doctestmodules",
|
||||
)
|
||||
group.addoption(
|
||||
"--doctest-report",
|
||||
type=str.lower,
|
||||
default="udiff",
|
||||
help="Choose another output format for diffs on doctest failure",
|
||||
choices=DOCTEST_REPORT_CHOICES,
|
||||
dest="doctestreport",
|
||||
)
|
||||
group.addoption(
|
||||
"--doctest-glob",
|
||||
action="append",
|
||||
default=[],
|
||||
metavar="pat",
|
||||
help="Doctests file matching pattern, default: test*.txt",
|
||||
dest="doctestglob",
|
||||
)
|
||||
group.addoption(
|
||||
"--doctest-ignore-import-errors",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Ignore doctest ImportErrors",
|
||||
dest="doctest_ignore_import_errors",
|
||||
)
|
||||
group.addoption(
|
||||
"--doctest-continue-on-failure",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="For a given doctest, continue to run after the first failure",
|
||||
dest="doctest_continue_on_failure",
|
||||
)
|
||||
|
||||
|
||||
def pytest_unconfigure() -> None:
|
||||
global RUNNER_CLASS
|
||||
|
||||
RUNNER_CLASS = None
|
||||
|
||||
|
||||
def pytest_collect_file(
|
||||
file_path: Path,
|
||||
parent: Collector,
|
||||
) -> Optional[Union["DoctestModule", "DoctestTextfile"]]:
|
||||
config = parent.config
|
||||
if file_path.suffix == ".py":
|
||||
if config.option.doctestmodules and not any(
|
||||
(_is_setup_py(file_path), _is_main_py(file_path))
|
||||
):
|
||||
mod: DoctestModule = DoctestModule.from_parent(parent, path=file_path)
|
||||
return mod
|
||||
elif _is_doctest(config, file_path, parent):
|
||||
txt: DoctestTextfile = DoctestTextfile.from_parent(parent, path=file_path)
|
||||
return txt
|
||||
return None
|
||||
|
||||
|
||||
def _is_setup_py(path: Path) -> bool:
|
||||
if path.name != "setup.py":
|
||||
return False
|
||||
contents = path.read_bytes()
|
||||
return b"setuptools" in contents or b"distutils" in contents
|
||||
|
||||
|
||||
def _is_doctest(config: Config, path: Path, parent: Collector) -> bool:
|
||||
if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path):
|
||||
return True
|
||||
globs = config.getoption("doctestglob") or ["test*.txt"]
|
||||
return any(fnmatch_ex(glob, path) for glob in globs)
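# Sketch: with the default glob ("test*.txt") a file named test_api.txt is
# collected as a doctest text file, while notes.rst is only collected when it
# was passed directly on the command line (an initial path).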
|
||||
|
||||
|
||||
def _is_main_py(path: Path) -> bool:
|
||||
return path.name == "__main__.py"
|
||||
|
||||
|
||||
class ReprFailDoctest(TerminalRepr):
|
||||
def __init__(
|
||||
self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]]
|
||||
) -> None:
|
||||
self.reprlocation_lines = reprlocation_lines
|
||||
|
||||
def toterminal(self, tw: TerminalWriter) -> None:
|
||||
for reprlocation, lines in self.reprlocation_lines:
|
||||
for line in lines:
|
||||
tw.line(line)
|
||||
reprlocation.toterminal(tw)
|
||||
|
||||
|
||||
class MultipleDoctestFailures(Exception):
|
||||
def __init__(self, failures: Sequence["doctest.DocTestFailure"]) -> None:
|
||||
super().__init__()
|
||||
self.failures = failures
|
||||
|
||||
|
||||
def _init_runner_class() -> Type["doctest.DocTestRunner"]:
|
||||
import doctest
|
||||
|
||||
class PytestDoctestRunner(doctest.DebugRunner):
|
||||
"""Runner to collect failures.
|
||||
|
||||
Note that the out variable in this case is a list instead of a
|
||||
stdout-like object.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
checker: Optional["doctest.OutputChecker"] = None,
|
||||
verbose: Optional[bool] = None,
|
||||
optionflags: int = 0,
|
||||
continue_on_failure: bool = True,
|
||||
) -> None:
|
||||
super().__init__(checker=checker, verbose=verbose, optionflags=optionflags)
|
||||
self.continue_on_failure = continue_on_failure
|
||||
|
||||
def report_failure(
|
||||
self,
|
||||
out,
|
||||
test: "doctest.DocTest",
|
||||
example: "doctest.Example",
|
||||
got: str,
|
||||
) -> None:
|
||||
failure = doctest.DocTestFailure(test, example, got)
|
||||
if self.continue_on_failure:
|
||||
out.append(failure)
|
||||
else:
|
||||
raise failure
|
||||
|
||||
def report_unexpected_exception(
|
||||
self,
|
||||
out,
|
||||
test: "doctest.DocTest",
|
||||
example: "doctest.Example",
|
||||
exc_info: Tuple[Type[BaseException], BaseException, types.TracebackType],
|
||||
) -> None:
|
||||
if isinstance(exc_info[1], OutcomeException):
|
||||
raise exc_info[1]
|
||||
if isinstance(exc_info[1], bdb.BdbQuit):
|
||||
outcomes.exit("Quitting debugger")
|
||||
failure = doctest.UnexpectedException(test, example, exc_info)
|
||||
if self.continue_on_failure:
|
||||
out.append(failure)
|
||||
else:
|
||||
raise failure
|
||||
|
||||
return PytestDoctestRunner
|
||||
|
||||
|
||||
def _get_runner(
|
||||
checker: Optional["doctest.OutputChecker"] = None,
|
||||
verbose: Optional[bool] = None,
|
||||
optionflags: int = 0,
|
||||
continue_on_failure: bool = True,
|
||||
) -> "doctest.DocTestRunner":
|
||||
# We need this in order to do a lazy import on doctest
|
||||
global RUNNER_CLASS
|
||||
if RUNNER_CLASS is None:
|
||||
RUNNER_CLASS = _init_runner_class()
|
||||
# Type ignored because the continue_on_failure argument is only defined on
|
||||
# PytestDoctestRunner, which is lazily defined so can't be used as a type.
|
||||
return RUNNER_CLASS( # type: ignore
|
||||
checker=checker,
|
||||
verbose=verbose,
|
||||
optionflags=optionflags,
|
||||
continue_on_failure=continue_on_failure,
|
||||
)
|
||||
|
||||
|
||||
class DoctestItem(Item):
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
parent: "Union[DoctestTextfile, DoctestModule]",
|
||||
runner: Optional["doctest.DocTestRunner"] = None,
|
||||
dtest: Optional["doctest.DocTest"] = None,
|
||||
) -> None:
|
||||
super().__init__(name, parent)
|
||||
self.runner = runner
|
||||
self.dtest = dtest
|
||||
self.obj = None
|
||||
self.fixture_request: Optional[FixtureRequest] = None
|
||||
|
||||
@classmethod
|
||||
def from_parent( # type: ignore
|
||||
cls,
|
||||
parent: "Union[DoctestTextfile, DoctestModule]",
|
||||
*,
|
||||
name: str,
|
||||
runner: "doctest.DocTestRunner",
|
||||
dtest: "doctest.DocTest",
|
||||
):
|
||||
# incompatible signature due to imposed limits on subclass
|
||||
"""The public named constructor."""
|
||||
return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)
|
||||
|
||||
def setup(self) -> None:
|
||||
if self.dtest is not None:
|
||||
self.fixture_request = _setup_fixtures(self)
|
||||
globs = dict(getfixture=self.fixture_request.getfixturevalue)
|
||||
for name, value in self.fixture_request.getfixturevalue(
|
||||
"doctest_namespace"
|
||||
).items():
|
||||
globs[name] = value
|
||||
self.dtest.globs.update(globs)
|
||||
|
||||
def runtest(self) -> None:
|
||||
assert self.dtest is not None
|
||||
assert self.runner is not None
|
||||
_check_all_skipped(self.dtest)
|
||||
self._disable_output_capturing_for_darwin()
|
||||
failures: List["doctest.DocTestFailure"] = []
|
||||
# Type ignored because we change the type of `out` from what
|
||||
# doctest expects.
|
||||
self.runner.run(self.dtest, out=failures) # type: ignore[arg-type]
|
||||
if failures:
|
||||
raise MultipleDoctestFailures(failures)
|
||||
|
||||
def _disable_output_capturing_for_darwin(self) -> None:
|
||||
"""Disable output capturing. Otherwise, stdout is lost to doctest (#985)."""
|
||||
if platform.system() != "Darwin":
|
||||
return
|
||||
capman = self.config.pluginmanager.getplugin("capturemanager")
|
||||
if capman:
|
||||
capman.suspend_global_capture(in_=True)
|
||||
out, err = capman.read_global_capture()
|
||||
sys.stdout.write(out)
|
||||
sys.stderr.write(err)
|
||||
|
||||
# TODO: Type ignored -- breaks Liskov Substitution.
|
||||
def repr_failure( # type: ignore[override]
|
||||
self,
|
||||
excinfo: ExceptionInfo[BaseException],
|
||||
) -> Union[str, TerminalRepr]:
|
||||
import doctest
|
||||
|
||||
failures: Optional[
|
||||
Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]]
|
||||
] = None
|
||||
if isinstance(
|
||||
excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException)
|
||||
):
|
||||
failures = [excinfo.value]
|
||||
elif isinstance(excinfo.value, MultipleDoctestFailures):
|
||||
failures = excinfo.value.failures
|
||||
|
||||
if failures is None:
|
||||
return super().repr_failure(excinfo)
|
||||
|
||||
reprlocation_lines = []
|
||||
for failure in failures:
|
||||
example = failure.example
|
||||
test = failure.test
|
||||
filename = test.filename
|
||||
if test.lineno is None:
|
||||
lineno = None
|
||||
else:
|
||||
lineno = test.lineno + example.lineno + 1
|
||||
message = type(failure).__name__
|
||||
# TODO: ReprFileLocation doesn't expect a None lineno.
|
||||
reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type]
|
||||
checker = _get_checker()
|
||||
report_choice = _get_report_choice(self.config.getoption("doctestreport"))
|
||||
if lineno is not None:
|
||||
assert failure.test.docstring is not None
|
||||
lines = failure.test.docstring.splitlines(False)
|
||||
# add line numbers to the left of the error message
|
||||
assert test.lineno is not None
|
||||
lines = [
|
||||
"%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines)
|
||||
]
|
||||
# trim docstring error lines to 10
|
||||
lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
|
||||
else:
|
||||
lines = [
|
||||
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
|
||||
]
|
||||
indent = ">>>"
|
||||
for line in example.source.splitlines():
|
||||
lines.append(f"??? {indent} {line}")
|
||||
indent = "..."
|
||||
if isinstance(failure, doctest.DocTestFailure):
|
||||
lines += checker.output_difference(
|
||||
example, failure.got, report_choice
|
||||
).split("\n")
|
||||
else:
|
||||
inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info)
|
||||
lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
|
||||
lines += [
|
||||
x.strip("\n") for x in traceback.format_exception(*failure.exc_info)
|
||||
]
|
||||
reprlocation_lines.append((reprlocation, lines))
|
||||
return ReprFailDoctest(reprlocation_lines)
|
||||
|
||||
def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
|
||||
assert self.dtest is not None
|
||||
return self.path, self.dtest.lineno, "[doctest] %s" % self.name
|
||||
|
||||
|
||||
def _get_flag_lookup() -> Dict[str, int]:
|
||||
import doctest
|
||||
|
||||
return dict(
|
||||
DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
|
||||
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
|
||||
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
|
||||
ELLIPSIS=doctest.ELLIPSIS,
|
||||
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
|
||||
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
|
||||
ALLOW_UNICODE=_get_allow_unicode_flag(),
|
||||
ALLOW_BYTES=_get_allow_bytes_flag(),
|
||||
NUMBER=_get_number_flag(),
|
||||
)
|
||||
|
||||
|
||||
def get_optionflags(parent):
|
||||
optionflags_str = parent.config.getini("doctest_optionflags")
|
||||
flag_lookup_table = _get_flag_lookup()
|
||||
flag_acc = 0
|
||||
for flag in optionflags_str:
|
||||
flag_acc |= flag_lookup_table[flag]
|
||||
return flag_acc
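# Sketch (assumed ini contents): with ``doctest_optionflags = ELLIPSIS NUMBER``
# configured, the loop above ORs the individual doctest flags together, i.e.
# the result equals doctest.ELLIPSIS | _get_number_flag().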
|
||||
|
||||
|
||||
def _get_continue_on_failure(config):
|
||||
continue_on_failure = config.getvalue("doctest_continue_on_failure")
|
||||
if continue_on_failure:
|
||||
# We need to turn off this if we use pdb since we should stop at
|
||||
# the first failure.
|
||||
if config.getvalue("usepdb"):
|
||||
continue_on_failure = False
|
||||
return continue_on_failure
|
||||
|
||||
|
||||
class DoctestTextfile(Module):
|
||||
obj = None
|
||||
|
||||
def collect(self) -> Iterable[DoctestItem]:
|
||||
import doctest
|
||||
|
||||
# Inspired by doctest.testfile; ideally we would use it directly,
|
||||
# but it doesn't support passing a custom checker.
|
||||
encoding = self.config.getini("doctest_encoding")
|
||||
text = self.path.read_text(encoding)
|
||||
filename = str(self.path)
|
||||
name = self.path.name
|
||||
globs = {"__name__": "__main__"}
|
||||
|
||||
optionflags = get_optionflags(self)
|
||||
|
||||
runner = _get_runner(
|
||||
verbose=False,
|
||||
optionflags=optionflags,
|
||||
checker=_get_checker(),
|
||||
continue_on_failure=_get_continue_on_failure(self.config),
|
||||
)
|
||||
|
||||
parser = doctest.DocTestParser()
|
||||
test = parser.get_doctest(text, globs, name, filename, 0)
|
||||
if test.examples:
|
||||
yield DoctestItem.from_parent(
|
||||
self, name=test.name, runner=runner, dtest=test
|
||||
)
|
||||
|
||||
|
||||
def _check_all_skipped(test: "doctest.DocTest") -> None:
|
||||
"""Raise pytest.skip() if all examples in the given DocTest have the SKIP
|
||||
option set."""
|
||||
import doctest
|
||||
|
||||
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
|
||||
if all_skipped:
|
||||
skip("all tests skipped by +SKIP option")
|
||||
|
||||
|
||||
def _is_mocked(obj: object) -> bool:
|
||||
"""Return if an object is possibly a mock object by checking the
|
||||
existence of a highly improbable attribute."""
|
||||
return (
|
||||
safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
|
||||
is not None
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _patch_unwrap_mock_aware() -> Generator[None, None, None]:
|
||||
"""Context manager which replaces ``inspect.unwrap`` with a version
|
||||
that's aware of mock objects and doesn't recurse into them."""
|
||||
real_unwrap = inspect.unwrap
|
||||
|
||||
def _mock_aware_unwrap(
|
||||
func: Callable[..., Any], *, stop: Optional[Callable[[Any], Any]] = None
|
||||
) -> Any:
|
||||
try:
|
||||
if stop is None or stop is _is_mocked:
|
||||
return real_unwrap(func, stop=_is_mocked)
|
||||
_stop = stop
|
||||
return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func))
|
||||
except Exception as e:
|
||||
warnings.warn(
|
||||
"Got %r when unwrapping %r. This is usually caused "
|
||||
"by a violation of Python's object protocol; see e.g. "
|
||||
"https://github.com/pytest-dev/pytest/issues/5080" % (e, func),
|
||||
PytestWarning,
|
||||
)
|
||||
raise
|
||||
|
||||
inspect.unwrap = _mock_aware_unwrap
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
inspect.unwrap = real_unwrap
|
||||
|
||||
|
||||
class DoctestModule(Module):
|
||||
def collect(self) -> Iterable[DoctestItem]:
|
||||
import doctest
|
||||
|
||||
class MockAwareDocTestFinder(doctest.DocTestFinder):
|
||||
"""A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
|
||||
|
||||
https://github.com/pytest-dev/pytest/issues/3456
|
||||
https://bugs.python.org/issue25532
|
||||
"""
|
||||
|
||||
def _find_lineno(self, obj, source_lines):
|
||||
"""Doctest code does not take into account `@property`, this
|
||||
is a hackish way to fix it. https://bugs.python.org/issue17446
|
||||
|
||||
Wrapped Doctests will need to be unwrapped so the correct
|
||||
line number is returned. This will be reported upstream. #8796
|
||||
"""
|
||||
if isinstance(obj, property):
|
||||
obj = getattr(obj, "fget", obj)
|
||||
|
||||
if hasattr(obj, "__wrapped__"):
|
||||
# Get the main obj in case of it being wrapped
|
||||
obj = inspect.unwrap(obj)
|
||||
|
||||
# Type ignored because this is a private function.
|
||||
return super()._find_lineno( # type:ignore[misc]
|
||||
obj,
|
||||
source_lines,
|
||||
)
|
||||
|
||||
def _find(
|
||||
self, tests, obj, name, module, source_lines, globs, seen
|
||||
) -> None:
|
||||
if _is_mocked(obj):
|
||||
return
|
||||
with _patch_unwrap_mock_aware():
|
||||
# Type ignored because this is a private function.
|
||||
super()._find( # type:ignore[misc]
|
||||
tests, obj, name, module, source_lines, globs, seen
|
||||
)
|
||||
|
||||
if self.path.name == "conftest.py":
|
||||
module = self.config.pluginmanager._importconftest(
|
||||
self.path,
|
||||
self.config.getoption("importmode"),
|
||||
rootpath=self.config.rootpath,
|
||||
)
|
||||
else:
|
||||
try:
|
||||
module = import_path(
|
||||
self.path,
|
||||
root=self.config.rootpath,
|
||||
mode=self.config.getoption("importmode"),
|
||||
)
|
||||
except ImportError:
|
||||
if self.config.getvalue("doctest_ignore_import_errors"):
|
||||
skip("unable to import module %r" % self.path)
|
||||
else:
|
||||
raise
|
||||
# Uses internal doctest module parsing mechanism.
|
||||
finder = MockAwareDocTestFinder()
|
||||
optionflags = get_optionflags(self)
|
||||
runner = _get_runner(
|
||||
verbose=False,
|
||||
optionflags=optionflags,
|
||||
checker=_get_checker(),
|
||||
continue_on_failure=_get_continue_on_failure(self.config),
|
||||
)
|
||||
|
||||
for test in finder.find(module, module.__name__):
|
||||
if test.examples: # skip empty doctests
|
||||
yield DoctestItem.from_parent(
|
||||
self, name=test.name, runner=runner, dtest=test
|
||||
)
|
||||
|
||||
|
||||
def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest:
|
||||
"""Used by DoctestTextfile and DoctestItem to setup fixture information."""
|
||||
|
||||
def func() -> None:
|
||||
pass
|
||||
|
||||
doctest_item.funcargs = {} # type: ignore[attr-defined]
|
||||
fm = doctest_item.session._fixturemanager
|
||||
doctest_item._fixtureinfo = fm.getfixtureinfo( # type: ignore[attr-defined]
|
||||
node=doctest_item, func=func, cls=None, funcargs=False
|
||||
)
|
||||
fixture_request = FixtureRequest(doctest_item, _ispytest=True)
|
||||
fixture_request._fillfixtures()
|
||||
return fixture_request
|
||||
|
||||
|
||||
def _init_checker_class() -> Type["doctest.OutputChecker"]:
|
||||
import doctest
|
||||
import re
|
||||
|
||||
class LiteralsOutputChecker(doctest.OutputChecker):
|
||||
# Based on doctest_nose_plugin.py from the nltk project
|
||||
# (https://github.com/nltk/nltk) and on the "numtest" doctest extension
|
||||
# by Sebastien Boisgerault (https://github.com/boisgera/numtest).
|
||||
|
||||
_unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
|
||||
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
|
||||
_number_re = re.compile(
    r"""
    (?P<number>
      (?P<mantissa>
        (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
        |
        (?P<integer2> [+-]?\d+)\.
      )
      (?:
        [Ee]
        (?P<exponent1> [+-]?\d+)
      )?
      |
      (?P<integer3> [+-]?\d+)
      (?:
        [Ee]
        (?P<exponent2> [+-]?\d+)
      )
    )
    """,
    re.VERBOSE,
)
|
||||
|
||||
def check_output(self, want: str, got: str, optionflags: int) -> bool:
|
||||
if super().check_output(want, got, optionflags):
|
||||
return True
|
||||
|
||||
allow_unicode = optionflags & _get_allow_unicode_flag()
|
||||
allow_bytes = optionflags & _get_allow_bytes_flag()
|
||||
allow_number = optionflags & _get_number_flag()
|
||||
|
||||
if not allow_unicode and not allow_bytes and not allow_number:
|
||||
return False
|
||||
|
||||
def remove_prefixes(regex: Pattern[str], txt: str) -> str:
|
||||
return re.sub(regex, r"\1\2", txt)
|
||||
|
||||
if allow_unicode:
|
||||
want = remove_prefixes(self._unicode_literal_re, want)
|
||||
got = remove_prefixes(self._unicode_literal_re, got)
|
||||
|
||||
if allow_bytes:
|
||||
want = remove_prefixes(self._bytes_literal_re, want)
|
||||
got = remove_prefixes(self._bytes_literal_re, got)
|
||||
|
||||
if allow_number:
|
||||
got = self._remove_unwanted_precision(want, got)
|
||||
|
||||
return super().check_output(want, got, optionflags)
|
||||
|
||||
def _remove_unwanted_precision(self, want: str, got: str) -> str:
|
||||
wants = list(self._number_re.finditer(want))
|
||||
gots = list(self._number_re.finditer(got))
|
||||
if len(wants) != len(gots):
|
||||
return got
|
||||
offset = 0
|
||||
for w, g in zip(wants, gots):
|
||||
fraction: Optional[str] = w.group("fraction")
|
||||
exponent: Optional[str] = w.group("exponent1")
|
||||
if exponent is None:
|
||||
exponent = w.group("exponent2")
|
||||
precision = 0 if fraction is None else len(fraction)
|
||||
if exponent is not None:
|
||||
precision -= int(exponent)
|
||||
if float(w.group()) == approx(float(g.group()), abs=10**-precision):
|
||||
# They're close enough. Replace the text we actually
|
||||
# got with the text we want, so that it will match when we
|
||||
# check the string literally.
|
||||
got = (
|
||||
got[: g.start() + offset] + w.group() + got[g.end() + offset :]
|
||||
)
|
||||
offset += w.end() - w.start() - (g.end() - g.start())
|
||||
return got
|
||||
|
||||
return LiteralsOutputChecker
|
||||
|
||||
|
||||
def _get_checker() -> "doctest.OutputChecker":
|
||||
"""Return a doctest.OutputChecker subclass that supports some
|
||||
additional options:
|
||||
|
||||
* ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
|
||||
prefixes (respectively) in string literals. Useful when the same
|
||||
doctest should run in Python 2 and Python 3.
|
||||
|
||||
* NUMBER to ignore floating-point differences smaller than the
|
||||
precision of the literal number in the doctest.
|
||||
|
||||
An inner class is used to avoid importing "doctest" at the module
|
||||
level.
|
||||
"""
|
||||
global CHECKER_CLASS
|
||||
if CHECKER_CLASS is None:
|
||||
CHECKER_CLASS = _init_checker_class()
|
||||
return CHECKER_CLASS()
|
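# Usage sketch (illustrative, not part of this module): with the checker
# returned above, a doctest can opt into fuzzy float comparison through the
# NUMBER option flag, e.g.:
#
#     >>> 0.1 + 0.2  # doctest: +NUMBER
#     0.3
#
# ALLOW_UNICODE and ALLOW_BYTES likewise make u''/b'' prefixes in the expected
# output irrelevant when comparing.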
||||
|
||||
|
||||
def _get_allow_unicode_flag() -> int:
|
||||
"""Register and return the ALLOW_UNICODE flag."""
|
||||
import doctest
|
||||
|
||||
return doctest.register_optionflag("ALLOW_UNICODE")
|
||||
|
||||
|
||||
def _get_allow_bytes_flag() -> int:
|
||||
"""Register and return the ALLOW_BYTES flag."""
|
||||
import doctest
|
||||
|
||||
return doctest.register_optionflag("ALLOW_BYTES")
|
||||
|
||||
|
||||
def _get_number_flag() -> int:
|
||||
"""Register and return the NUMBER flag."""
|
||||
import doctest
|
||||
|
||||
return doctest.register_optionflag("NUMBER")
|
||||
|
||||
|
||||
def _get_report_choice(key: str) -> int:
|
||||
"""Return the actual `doctest` module flag value.
|
||||
|
||||
We want to do it as late as possible to avoid importing `doctest` and all
|
||||
its dependencies when parsing options, as it adds overhead and breaks tests.
|
||||
"""
|
||||
import doctest
|
||||
|
||||
return {
|
||||
DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
|
||||
DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
|
||||
DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
|
||||
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
|
||||
DOCTEST_REPORT_CHOICE_NONE: 0,
|
||||
}[key]
|
||||
|
||||
|
||||
@fixture(scope="session")
|
||||
def doctest_namespace() -> Dict[str, Any]:
|
||||
"""Fixture that returns a :py:class:`dict` that will be injected into the
|
||||
namespace of doctests.
|
||||
|
||||
Usually this fixture is used in conjunction with another ``autouse`` fixture:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def add_np(doctest_namespace):
|
||||
doctest_namespace["np"] = numpy
|
||||
|
||||
For more details: :ref:`doctest_namespace`.
|
||||
"""
|
||||
return dict()
|
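A usage sketch for the fixture above (assuming numpy is installed and the
``add_np`` conftest fixture from the docstring is in place; module and function
names are placeholders): a doctest collected with ``--doctest-modules`` can then
use ``np`` without importing it.

    # content of numpy_doc.py (hypothetical)
    def double(values):
        """
        >>> double(np.arange(3))  # "np" is injected via doctest_namespace
        array([0, 2, 4])
        """
        return values * 2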
95  venv/lib/python3.11/site-packages/_pytest/faulthandler.py  Normal file
@@ -0,0 +1,95 @@
import io
|
||||
import os
|
||||
import sys
|
||||
from typing import Generator
|
||||
|
||||
import pytest
|
||||
from _pytest.config import Config
|
||||
from _pytest.config.argparsing import Parser
|
||||
from _pytest.nodes import Item
|
||||
from _pytest.stash import StashKey
|
||||
|
||||
|
||||
fault_handler_stderr_fd_key = StashKey[int]()
|
||||
fault_handler_originally_enabled_key = StashKey[bool]()
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser) -> None:
|
||||
help = (
|
||||
"Dump the traceback of all threads if a test takes "
|
||||
"more than TIMEOUT seconds to finish"
|
||||
)
|
||||
parser.addini("faulthandler_timeout", help, default=0.0)
|
||||
|
||||
|
||||
def pytest_configure(config: Config) -> None:
|
||||
import faulthandler
|
||||
|
||||
config.stash[fault_handler_stderr_fd_key] = os.dup(get_stderr_fileno())
|
||||
config.stash[fault_handler_originally_enabled_key] = faulthandler.is_enabled()
|
||||
faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key])
|
||||
|
||||
|
||||
def pytest_unconfigure(config: Config) -> None:
|
||||
import faulthandler
|
||||
|
||||
faulthandler.disable()
|
||||
# Close the dup file installed during pytest_configure.
|
||||
if fault_handler_stderr_fd_key in config.stash:
|
||||
os.close(config.stash[fault_handler_stderr_fd_key])
|
||||
del config.stash[fault_handler_stderr_fd_key]
|
||||
if config.stash.get(fault_handler_originally_enabled_key, False):
|
||||
# Re-enable the faulthandler if it was originally enabled.
|
||||
faulthandler.enable(file=get_stderr_fileno())
|
||||
|
||||
|
||||
def get_stderr_fileno() -> int:
|
||||
try:
|
||||
fileno = sys.stderr.fileno()
|
||||
# The Twisted Logger will return an invalid file descriptor since it is not backed
|
||||
# by an FD. So, let's also forward this to the same code path as with pytest-xdist.
|
||||
if fileno == -1:
|
||||
raise AttributeError()
|
||||
return fileno
|
||||
except (AttributeError, io.UnsupportedOperation):
|
||||
# pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
|
||||
# https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
|
||||
# This is potentially dangerous, but the best we can do.
|
||||
return sys.__stderr__.fileno()
|
||||
|
||||
|
||||
def get_timeout_config_value(config: Config) -> float:
|
||||
return float(config.getini("faulthandler_timeout") or 0.0)
|
||||
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True, trylast=True)
|
||||
def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
|
||||
timeout = get_timeout_config_value(item.config)
|
||||
if timeout > 0:
|
||||
import faulthandler
|
||||
|
||||
stderr = item.config.stash[fault_handler_stderr_fd_key]
|
||||
faulthandler.dump_traceback_later(timeout, file=stderr)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
faulthandler.cancel_dump_traceback_later()
|
||||
else:
|
||||
yield
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
|
||||
def pytest_enter_pdb() -> None:
|
||||
"""Cancel any traceback dumping due to timeout before entering pdb."""
|
||||
import faulthandler
|
||||
|
||||
faulthandler.cancel_dump_traceback_later()
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
|
||||
def pytest_exception_interact() -> None:
|
||||
"""Cancel any traceback dumping due to an interactive exception being
|
||||
raised."""
|
||||
import faulthandler
|
||||
|
||||
faulthandler.cancel_dump_traceback_later()
|
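A usage sketch for the plugin above (path and timeout value are arbitrary): the
``faulthandler_timeout`` ini option can be supplied per run with ``-o``, making
pytest dump all thread tracebacks for any test that runs longer than the limit.

    # hypothetical driver script
    import pytest

    if __name__ == "__main__":
        raise SystemExit(pytest.main(["-o", "faulthandler_timeout=5", "tests/"]))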
1713  venv/lib/python3.11/site-packages/_pytest/fixtures.py  Normal file
      (File diff suppressed because it is too large)
44  venv/lib/python3.11/site-packages/_pytest/freeze_support.py  Normal file
@@ -0,0 +1,44 @@
"""Provides a function to report all internal modules for using freezing
|
||||
tools."""
|
||||
import types
|
||||
from typing import Iterator
|
||||
from typing import List
|
||||
from typing import Union
|
||||
|
||||
|
||||
def freeze_includes() -> List[str]:
|
||||
"""Return a list of module names used by pytest that should be
|
||||
included by cx_freeze."""
|
||||
import _pytest
|
||||
|
||||
result = list(_iter_all_modules(_pytest))
|
||||
return result
|
||||
|
||||
|
||||
def _iter_all_modules(
|
||||
package: Union[str, types.ModuleType],
|
||||
prefix: str = "",
|
||||
) -> Iterator[str]:
|
||||
"""Iterate over the names of all modules that can be found in the given
|
||||
package, recursively.
|
||||
|
||||
>>> import _pytest
|
||||
>>> list(_iter_all_modules(_pytest))
|
||||
['_pytest._argcomplete', '_pytest._code.code', ...]
|
||||
"""
|
||||
import os
|
||||
import pkgutil
|
||||
|
||||
if isinstance(package, str):
|
||||
path = package
|
||||
else:
|
||||
# Type ignored because typeshed doesn't define ModuleType.__path__
|
||||
# (only defined on packages).
|
||||
package_path = package.__path__ # type: ignore[attr-defined]
|
||||
path, prefix = package_path[0], package.__name__ + "."
|
||||
for _, name, is_package in pkgutil.iter_modules([path]):
|
||||
if is_package:
|
||||
for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."):
|
||||
yield prefix + m
|
||||
else:
|
||||
yield prefix + name
|
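A usage sketch (assuming cx_Freeze; the application and script names are
placeholders): ``pytest.freeze_includes()`` returns the module names a freezing
tool must bundle so that a frozen application can still run pytest.

    # hypothetical setup.py for a frozen application embedding a pytest runner
    from cx_Freeze import Executable, setup
    import pytest

    setup(
        name="app_main",
        executables=[Executable("app_main.py")],
        options={"build_exe": {"includes": pytest.freeze_includes()}},
    )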
266  venv/lib/python3.11/site-packages/_pytest/helpconfig.py  Normal file
@@ -0,0 +1,266 @@
"""Version info, help messages, tracing configuration."""
|
||||
import os
|
||||
import sys
|
||||
from argparse import Action
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Union
|
||||
|
||||
import pytest
|
||||
from _pytest.config import Config
|
||||
from _pytest.config import ExitCode
|
||||
from _pytest.config import PrintHelp
|
||||
from _pytest.config.argparsing import Parser
|
||||
|
||||
|
||||
class HelpAction(Action):
|
||||
"""An argparse Action that will raise an exception in order to skip the
|
||||
rest of the argument parsing when --help is passed.
|
||||
|
||||
This prevents argparse from quitting due to missing required arguments
|
||||
when any are defined, for example by ``pytest_addoption``.
|
||||
This is similar to the way that the builtin argparse --help option is
|
||||
implemented by raising SystemExit.
|
||||
"""
|
||||
|
||||
def __init__(self, option_strings, dest=None, default=False, help=None):
|
||||
super().__init__(
|
||||
option_strings=option_strings,
|
||||
dest=dest,
|
||||
const=True,
|
||||
default=default,
|
||||
nargs=0,
|
||||
help=help,
|
||||
)
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
setattr(namespace, self.dest, self.const)
|
||||
|
||||
# We should only skip the rest of the parsing after preparse is done.
|
||||
if getattr(parser._parser, "after_preparse", False):
|
||||
raise PrintHelp
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser) -> None:
|
||||
group = parser.getgroup("debugconfig")
|
||||
group.addoption(
|
||||
"--version",
|
||||
"-V",
|
||||
action="count",
|
||||
default=0,
|
||||
dest="version",
|
||||
help="Display pytest version and information about plugins. "
|
||||
"When given twice, also display information about plugins.",
|
||||
)
|
||||
group._addoption(
|
||||
"-h",
|
||||
"--help",
|
||||
action=HelpAction,
|
||||
dest="help",
|
||||
help="Show help message and configuration info",
|
||||
)
|
||||
group._addoption(
|
||||
"-p",
|
||||
action="append",
|
||||
dest="plugins",
|
||||
default=[],
|
||||
metavar="name",
|
||||
help="Early-load given plugin module name or entry point (multi-allowed). "
|
||||
"To avoid loading of plugins, use the `no:` prefix, e.g. "
|
||||
"`no:doctest`.",
|
||||
)
|
||||
group.addoption(
|
||||
"--traceconfig",
|
||||
"--trace-config",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Trace considerations of conftest.py files",
|
||||
)
|
||||
group.addoption(
|
||||
"--debug",
|
||||
action="store",
|
||||
nargs="?",
|
||||
const="pytestdebug.log",
|
||||
dest="debug",
|
||||
metavar="DEBUG_FILE_NAME",
|
||||
help="Store internal tracing debug information in this log file. "
|
||||
"This file is opened with 'w' and truncated as a result, care advised. "
|
||||
"Default: pytestdebug.log.",
|
||||
)
|
||||
group._addoption(
|
||||
"-o",
|
||||
"--override-ini",
|
||||
dest="override_ini",
|
||||
action="append",
|
||||
help='Override ini option with "option=value" style, '
|
||||
"e.g. `-o xfail_strict=True -o cache_dir=cache`.",
|
||||
)
|
||||
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_cmdline_parse():
|
||||
outcome = yield
|
||||
config: Config = outcome.get_result()
|
||||
|
||||
if config.option.debug:
|
||||
# --debug | --debug <file.log> was provided.
|
||||
path = config.option.debug
|
||||
debugfile = open(path, "w", encoding="utf-8")
|
||||
debugfile.write(
|
||||
"versions pytest-%s, "
|
||||
"python-%s\ncwd=%s\nargs=%s\n\n"
|
||||
% (
|
||||
pytest.__version__,
|
||||
".".join(map(str, sys.version_info)),
|
||||
os.getcwd(),
|
||||
config.invocation_params.args,
|
||||
)
|
||||
)
|
||||
config.trace.root.setwriter(debugfile.write)
|
||||
undo_tracing = config.pluginmanager.enable_tracing()
|
||||
sys.stderr.write("writing pytest debug information to %s\n" % path)
|
||||
|
||||
def unset_tracing() -> None:
|
||||
debugfile.close()
|
||||
sys.stderr.write("wrote pytest debug information to %s\n" % debugfile.name)
|
||||
config.trace.root.setwriter(None)
|
||||
undo_tracing()
|
||||
|
||||
config.add_cleanup(unset_tracing)
|
||||
|
||||
|
||||
def showversion(config: Config) -> None:
|
||||
if config.option.version > 1:
|
||||
sys.stdout.write(
|
||||
"This is pytest version {}, imported from {}\n".format(
|
||||
pytest.__version__, pytest.__file__
|
||||
)
|
||||
)
|
||||
plugininfo = getpluginversioninfo(config)
|
||||
if plugininfo:
|
||||
for line in plugininfo:
|
||||
sys.stdout.write(line + "\n")
|
||||
else:
|
||||
sys.stdout.write(f"pytest {pytest.__version__}\n")
|
||||
|
||||
|
||||
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
|
||||
if config.option.version > 0:
|
||||
showversion(config)
|
||||
return 0
|
||||
elif config.option.help:
|
||||
config._do_configure()
|
||||
showhelp(config)
|
||||
config._ensure_unconfigure()
|
||||
return 0
|
||||
return None
|
||||
|
||||
|
||||
def showhelp(config: Config) -> None:
|
||||
import textwrap
|
||||
|
||||
reporter = config.pluginmanager.get_plugin("terminalreporter")
|
||||
tw = reporter._tw
|
||||
tw.write(config._parser.optparser.format_help())
|
||||
tw.line()
|
||||
tw.line(
|
||||
"[pytest] ini-options in the first "
|
||||
"pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:"
|
||||
)
|
||||
tw.line()
|
||||
|
||||
columns = tw.fullwidth # costly call
|
||||
indent_len = 24 # based on argparse's max_help_position=24
|
||||
indent = " " * indent_len
|
||||
for name in config._parser._ininames:
|
||||
help, type, default = config._parser._inidict[name]
|
||||
if type is None:
|
||||
type = "string"
|
||||
if help is None:
|
||||
raise TypeError(f"help argument cannot be None for {name}")
|
||||
spec = f"{name} ({type}):"
|
||||
tw.write(" %s" % spec)
|
||||
spec_len = len(spec)
|
||||
if spec_len > (indent_len - 3):
|
||||
# Display help starting at a new line.
|
||||
tw.line()
|
||||
helplines = textwrap.wrap(
|
||||
help,
|
||||
columns,
|
||||
initial_indent=indent,
|
||||
subsequent_indent=indent,
|
||||
break_on_hyphens=False,
|
||||
)
|
||||
|
||||
for line in helplines:
|
||||
tw.line(line)
|
||||
else:
|
||||
# Display help starting after the spec, following lines indented.
|
||||
tw.write(" " * (indent_len - spec_len - 2))
|
||||
wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False)
|
||||
|
||||
if wrapped:
|
||||
tw.line(wrapped[0])
|
||||
for line in wrapped[1:]:
|
||||
tw.line(indent + line)
|
||||
|
||||
tw.line()
|
||||
tw.line("Environment variables:")
|
||||
vars = [
|
||||
("PYTEST_ADDOPTS", "Extra command line options"),
|
||||
("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"),
|
||||
("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"),
|
||||
("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"),
|
||||
]
|
||||
for name, help in vars:
|
||||
tw.line(f" {name:<24} {help}")
|
||||
tw.line()
|
||||
tw.line()
|
||||
|
||||
tw.line("to see available markers type: pytest --markers")
|
||||
tw.line("to see available fixtures type: pytest --fixtures")
|
||||
tw.line(
|
||||
"(shown according to specified file_or_dir or current dir "
|
||||
"if not specified; fixtures with leading '_' are only shown "
|
||||
"with the '-v' option"
|
||||
)
|
||||
|
||||
for warningreport in reporter.stats.get("warnings", []):
|
||||
tw.line("warning : " + warningreport.message, red=True)
|
||||
return
|
||||
|
||||
|
||||
conftest_options = [("pytest_plugins", "list of plugin names to load")]
|
||||
|
||||
|
||||
def getpluginversioninfo(config: Config) -> List[str]:
|
||||
lines = []
|
||||
plugininfo = config.pluginmanager.list_plugin_distinfo()
|
||||
if plugininfo:
|
||||
lines.append("setuptools registered plugins:")
|
||||
for plugin, dist in plugininfo:
|
||||
loc = getattr(plugin, "__file__", repr(plugin))
|
||||
content = f"{dist.project_name}-{dist.version} at {loc}"
|
||||
lines.append(" " + content)
|
||||
return lines
|
||||
|
||||
|
||||
def pytest_report_header(config: Config) -> List[str]:
|
||||
lines = []
|
||||
if config.option.debug or config.option.traceconfig:
|
||||
lines.append(f"using: pytest-{pytest.__version__}")
|
||||
|
||||
verinfo = getpluginversioninfo(config)
|
||||
if verinfo:
|
||||
lines.extend(verinfo)
|
||||
|
||||
if config.option.traceconfig:
|
||||
lines.append("active plugins:")
|
||||
items = config.pluginmanager.list_name_plugin()
|
||||
for name, plugin in items:
|
||||
if hasattr(plugin, "__file__"):
|
||||
r = plugin.__file__
|
||||
else:
|
||||
r = repr(plugin)
|
||||
lines.append(f" {name:<20}: {r}")
|
||||
return lines
|
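For illustration (the plugin name and log file name are arbitrary), the
debugging options registered above are typically combined like this:

    # hypothetical invocation: write internal tracing to a log file, show how
    # conftest files are considered, and disable one plugin via the no: prefix
    import pytest

    pytest.main(["--debug=pytestdebug.log", "--trace-config", "-p", "no:cacheprovider"])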
979  venv/lib/python3.11/site-packages/_pytest/hookspec.py  Normal file
@@ -0,0 +1,979 @@
"""Hook specifications for pytest plugins which are invoked by pytest itself
|
||||
and by builtin plugins."""
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import Dict
|
||||
from typing import List
|
||||
from typing import Mapping
|
||||
from typing import Optional
|
||||
from typing import Sequence
|
||||
from typing import Tuple
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import Union
|
||||
|
||||
from pluggy import HookspecMarker
|
||||
|
||||
from _pytest.deprecated import WARNING_CMDLINE_PREPARSE_HOOK
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import pdb
|
||||
import warnings
|
||||
from typing_extensions import Literal
|
||||
|
||||
from _pytest._code.code import ExceptionRepr
|
||||
from _pytest._code.code import ExceptionInfo
|
||||
from _pytest.config import Config
|
||||
from _pytest.config import ExitCode
|
||||
from _pytest.config import PytestPluginManager
|
||||
from _pytest.config import _PluggyPlugin
|
||||
from _pytest.config.argparsing import Parser
|
||||
from _pytest.fixtures import FixtureDef
|
||||
from _pytest.fixtures import SubRequest
|
||||
from _pytest.main import Session
|
||||
from _pytest.nodes import Collector
|
||||
from _pytest.nodes import Item
|
||||
from _pytest.outcomes import Exit
|
||||
from _pytest.python import Class
|
||||
from _pytest.python import Function
|
||||
from _pytest.python import Metafunc
|
||||
from _pytest.python import Module
|
||||
from _pytest.reports import CollectReport
|
||||
from _pytest.reports import TestReport
|
||||
from _pytest.runner import CallInfo
|
||||
from _pytest.terminal import TerminalReporter
|
||||
from _pytest.terminal import TestShortLogReport
|
||||
from _pytest.compat import LEGACY_PATH
|
||||
|
||||
|
||||
hookspec = HookspecMarker("pytest")
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Initialization hooks called for every plugin
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
@hookspec(historic=True)
|
||||
def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None:
|
||||
"""Called at plugin registration time to allow adding new hooks via a call to
|
||||
``pluginmanager.add_hookspecs(module_or_class, prefix)``.
|
||||
|
||||
:param pytest.PytestPluginManager pluginmanager: The pytest plugin manager.
|
||||
|
||||
.. note::
|
||||
This hook is incompatible with ``hookwrapper=True``.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(historic=True)
|
||||
def pytest_plugin_registered(
|
||||
plugin: "_PluggyPlugin", manager: "PytestPluginManager"
|
||||
) -> None:
|
||||
"""A new pytest plugin got registered.
|
||||
|
||||
:param plugin: The plugin module or instance.
|
||||
:param pytest.PytestPluginManager manager: pytest plugin manager.
|
||||
|
||||
.. note::
|
||||
This hook is incompatible with ``hookwrapper=True``.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(historic=True)
|
||||
def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> None:
|
||||
"""Register argparse-style options and ini-style config values,
|
||||
called once at the beginning of a test run.
|
||||
|
||||
.. note::
|
||||
|
||||
This function should be implemented only in plugins or ``conftest.py``
|
||||
files situated at the tests root directory due to how pytest
|
||||
:ref:`discovers plugins during startup <pluginorder>`.
|
||||
|
||||
:param pytest.Parser parser:
|
||||
To add command line options, call
|
||||
:py:func:`parser.addoption(...) <pytest.Parser.addoption>`.
|
||||
To add ini-file values call :py:func:`parser.addini(...)
|
||||
<pytest.Parser.addini>`.
|
||||
|
||||
:param pytest.PytestPluginManager pluginmanager:
|
||||
The pytest plugin manager, which can be used to install :py:func:`hookspec`'s
|
||||
or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks
|
||||
to change how command line options are added.
|
||||
|
||||
Options can later be accessed through the
|
||||
:py:class:`config <pytest.Config>` object, respectively:
|
||||
|
||||
- :py:func:`config.getoption(name) <pytest.Config.getoption>` to
|
||||
retrieve the value of a command line option.
|
||||
|
||||
- :py:func:`config.getini(name) <pytest.Config.getini>` to retrieve
|
||||
a value read from an ini-style file.
|
||||
|
||||
The config object is passed around on many internal objects via the ``.config``
|
||||
attribute or can be retrieved as the ``pytestconfig`` fixture.
|
||||
|
||||
.. note::
|
||||
This hook is incompatible with ``hookwrapper=True``.
|
||||
"""
|
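# Example (illustrative sketch, not part of the hookspec module): a conftest.py
# implementation might register one command line flag and one ini value, read
# back later via config.getoption()/config.getini():
#
#   def pytest_addoption(parser):
#       parser.addoption("--slow", action="store_true", default=False,
#                        help="also run tests marked as slow")
#       parser.addini("app_env", help="environment name used by the tests",
#                     default="dev")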
||||
|
||||
|
||||
@hookspec(historic=True)
|
||||
def pytest_configure(config: "Config") -> None:
|
||||
"""Allow plugins and conftest files to perform initial configuration.
|
||||
|
||||
This hook is called for every plugin and initial conftest file
|
||||
after command line options have been parsed.
|
||||
|
||||
After that, the hook is called for other conftest files as they are
|
||||
imported.
|
||||
|
||||
.. note::
|
||||
This hook is incompatible with ``hookwrapper=True``.
|
||||
|
||||
:param pytest.Config config: The pytest config object.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Bootstrapping hooks called for plugins registered early enough:
|
||||
# internal and 3rd party plugins.
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_cmdline_parse(
|
||||
pluginmanager: "PytestPluginManager", args: List[str]
|
||||
) -> Optional["Config"]:
|
||||
"""Return an initialized :class:`~pytest.Config`, parsing the specified args.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
.. note::
|
||||
This hook will only be called for plugin classes passed to the
|
||||
``plugins`` arg when using `pytest.main`_ to perform an in-process
|
||||
test run.
|
||||
|
||||
:param pluginmanager: The pytest plugin manager.
|
||||
:param args: List of arguments passed on the command line.
|
||||
:returns: A pytest config object.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(warn_on_impl=WARNING_CMDLINE_PREPARSE_HOOK)
|
||||
def pytest_cmdline_preparse(config: "Config", args: List[str]) -> None:
|
||||
"""(**Deprecated**) modify command line arguments before option parsing.
|
||||
|
||||
This hook is considered deprecated and will be removed in a future pytest version. Consider
|
||||
using :hook:`pytest_load_initial_conftests` instead.
|
||||
|
||||
.. note::
|
||||
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
|
||||
|
||||
:param config: The pytest config object.
|
||||
:param args: Arguments passed on the command line.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_cmdline_main(config: "Config") -> Optional[Union["ExitCode", int]]:
|
||||
"""Called for performing the main command line action. The default
|
||||
implementation will invoke the configure hooks and runtest_mainloop.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
:param config: The pytest config object.
|
||||
:returns: The exit code.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_load_initial_conftests(
|
||||
early_config: "Config", parser: "Parser", args: List[str]
|
||||
) -> None:
|
||||
"""Called to implement the loading of initial conftest files ahead
|
||||
of command line option parsing.
|
||||
|
||||
.. note::
|
||||
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
|
||||
|
||||
:param early_config: The pytest config object.
|
||||
:param args: Arguments passed on the command line.
|
||||
:param parser: To add command line options.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# collection hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_collection(session: "Session") -> Optional[object]:
|
||||
"""Perform the collection phase for the given session.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
The return value is not used, but only stops further processing.
|
||||
|
||||
The default collection phase is this (see individual hooks for full details):
|
||||
|
||||
1. Starting from ``session`` as the initial collector:
|
||||
|
||||
1. ``pytest_collectstart(collector)``
|
||||
2. ``report = pytest_make_collect_report(collector)``
|
||||
3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred
|
||||
4. For each collected node:
|
||||
|
||||
1. If an item, ``pytest_itemcollected(item)``
|
||||
2. If a collector, recurse into it.
|
||||
|
||||
5. ``pytest_collectreport(report)``
|
||||
|
||||
2. ``pytest_collection_modifyitems(session, config, items)``
|
||||
|
||||
1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times)
|
||||
|
||||
3. ``pytest_collection_finish(session)``
|
||||
4. Set ``session.items`` to the list of collected items
|
||||
5. Set ``session.testscollected`` to the number of collected items
|
||||
|
||||
You can implement this hook to only perform some action before collection,
|
||||
for example the terminal plugin uses it to start displaying the collection
|
||||
counter (and returns `None`).
|
||||
|
||||
:param session: The pytest session object.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(
|
||||
session: "Session", config: "Config", items: List["Item"]
|
||||
) -> None:
|
||||
"""Called after collection has been performed. May filter or re-order
|
||||
the items in-place.
|
||||
|
||||
:param session: The pytest session object.
|
||||
:param config: The pytest config object.
|
||||
:param items: List of item objects.
|
||||
"""
|
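# Example (illustrative sketch): a conftest.py implementation that deselects
# items carrying a hypothetical "slow" marker unless --runslow was given,
# announcing them through config.hook.pytest_deselected:
#
#   def pytest_collection_modifyitems(session, config, items):
#       if config.getoption("--runslow", default=False):
#           return
#       kept, deselected = [], []
#       for item in items:
#           (deselected if item.get_closest_marker("slow") else kept).append(item)
#       if deselected:
#           config.hook.pytest_deselected(items=deselected)
#           items[:] = kept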
||||
|
||||
|
||||
def pytest_collection_finish(session: "Session") -> None:
|
||||
"""Called after collection has been performed and modified.
|
||||
|
||||
:param session: The pytest session object.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_ignore_collect(
|
||||
collection_path: Path, path: "LEGACY_PATH", config: "Config"
|
||||
) -> Optional[bool]:
|
||||
"""Return True to prevent considering this path for collection.
|
||||
|
||||
This hook is consulted for all files and directories prior to calling
|
||||
more specific hooks.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
:param collection_path: The path to analyze.
|
||||
:param path: The path to analyze (deprecated).
|
||||
:param config: The pytest config object.
|
||||
|
||||
.. versionchanged:: 7.0.0
|
||||
The ``collection_path`` parameter was added as a :class:`pathlib.Path`
|
||||
equivalent of the ``path`` parameter. The ``path`` parameter
|
||||
has been deprecated.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_collect_file(
|
||||
file_path: Path, path: "LEGACY_PATH", parent: "Collector"
|
||||
) -> "Optional[Collector]":
|
||||
"""Create a :class:`~pytest.Collector` for the given path, or None if not relevant.
|
||||
|
||||
The new node needs to have the specified ``parent`` as a parent.
|
||||
|
||||
:param file_path: The path to analyze.
|
||||
:param path: The path to collect (deprecated).
|
||||
|
||||
.. versionchanged:: 7.0.0
|
||||
The ``file_path`` parameter was added as a :class:`pathlib.Path`
|
||||
equivalent of the ``path`` parameter. The ``path`` parameter
|
||||
has been deprecated.
|
||||
"""
|
||||
|
||||
|
||||
# logging hooks for collection
|
||||
|
||||
|
||||
def pytest_collectstart(collector: "Collector") -> None:
|
||||
"""Collector starts collecting.
|
||||
|
||||
:param collector:
|
||||
The collector.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_itemcollected(item: "Item") -> None:
|
||||
"""We just collected a test item.
|
||||
|
||||
:param item:
|
||||
The item.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_collectreport(report: "CollectReport") -> None:
|
||||
"""Collector finished collecting.
|
||||
|
||||
:param report:
|
||||
The collect report.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_deselected(items: Sequence["Item"]) -> None:
|
||||
"""Called for deselected test items, e.g. by keyword.
|
||||
|
||||
May be called multiple times.
|
||||
|
||||
:param items:
|
||||
The items.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_make_collect_report(collector: "Collector") -> "Optional[CollectReport]":
|
||||
"""Perform :func:`collector.collect() <pytest.Collector.collect>` and return
|
||||
a :class:`~pytest.CollectReport`.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
:param collector:
|
||||
The collector.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Python test function related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_pycollect_makemodule(
|
||||
module_path: Path, path: "LEGACY_PATH", parent
|
||||
) -> Optional["Module"]:
|
||||
"""Return a :class:`pytest.Module` collector or None for the given path.
|
||||
|
||||
This hook will be called for each matching test module path.
|
||||
The :hook:`pytest_collect_file` hook needs to be used if you want to
|
||||
create test modules for files that do not match as a test module.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
:param module_path: The path of the module to collect.
|
||||
:param path: The path of the module to collect (deprecated).
|
||||
|
||||
.. versionchanged:: 7.0.0
|
||||
The ``module_path`` parameter was added as a :class:`pathlib.Path`
|
||||
equivalent of the ``path`` parameter.
|
||||
|
||||
The ``path`` parameter has been deprecated in favor of ``fspath``.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_pycollect_makeitem(
|
||||
collector: Union["Module", "Class"], name: str, obj: object
|
||||
) -> Union[None, "Item", "Collector", List[Union["Item", "Collector"]]]:
|
||||
"""Return a custom item/collector for a Python object in a module, or None.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
:param collector:
|
||||
The module/class collector.
|
||||
:param name:
|
||||
The name of the object in the module/class.
|
||||
:param obj:
|
||||
The object.
|
||||
:returns:
|
||||
The created items/collectors.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]:
|
||||
"""Call underlying test function.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
:param pyfuncitem:
|
||||
The function item.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_generate_tests(metafunc: "Metafunc") -> None:
|
||||
"""Generate (multiple) parametrized calls to a test function.
|
||||
|
||||
:param metafunc:
|
||||
The :class:`~pytest.Metafunc` helper for the test function.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_make_parametrize_id(
|
||||
config: "Config", val: object, argname: str
|
||||
) -> Optional[str]:
|
||||
"""Return a user-friendly string representation of the given ``val``
|
||||
that will be used by @pytest.mark.parametrize calls, or None if the hook
|
||||
doesn't know about ``val``.
|
||||
|
||||
The parameter name is available as ``argname``, if required.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
:param config: The pytest config object.
|
||||
:param val: The parametrized value.
|
||||
:param str argname: The automatic parameter name produced by pytest.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# runtest related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_runtestloop(session: "Session") -> Optional[object]:
|
||||
"""Perform the main runtest loop (after collection finished).
|
||||
|
||||
The default hook implementation performs the runtest protocol for all items
|
||||
collected in the session (``session.items``), unless the collection failed
|
||||
or the ``collectonly`` pytest option is set.
|
||||
|
||||
If at any point :py:func:`pytest.exit` is called, the loop is
|
||||
terminated immediately.
|
||||
|
||||
If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the
|
||||
loop is terminated after the runtest protocol for the current item is finished.
|
||||
|
||||
:param session: The pytest session object.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
The return value is not used, but only stops further processing.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_runtest_protocol(
|
||||
item: "Item", nextitem: "Optional[Item]"
|
||||
) -> Optional[object]:
|
||||
"""Perform the runtest protocol for a single test item.
|
||||
|
||||
The default runtest protocol is this (see individual hooks for full details):
|
||||
|
||||
- ``pytest_runtest_logstart(nodeid, location)``
|
||||
|
||||
- Setup phase:
|
||||
- ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``)
|
||||
- ``report = pytest_runtest_makereport(item, call)``
|
||||
- ``pytest_runtest_logreport(report)``
|
||||
- ``pytest_exception_interact(call, report)`` if an interactive exception occurred
|
||||
|
||||
- Call phase, if the setup passed and the ``setuponly`` pytest option is not set:
|
||||
- ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``)
|
||||
- ``report = pytest_runtest_makereport(item, call)``
|
||||
- ``pytest_runtest_logreport(report)``
|
||||
- ``pytest_exception_interact(call, report)`` if an interactive exception occurred
|
||||
|
||||
- Teardown phase:
|
||||
- ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``)
|
||||
- ``report = pytest_runtest_makereport(item, call)``
|
||||
- ``pytest_runtest_logreport(report)``
|
||||
- ``pytest_exception_interact(call, report)`` if an interactive exception occurred
|
||||
|
||||
- ``pytest_runtest_logfinish(nodeid, location)``
|
||||
|
||||
:param item: Test item for which the runtest protocol is performed.
|
||||
:param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend).
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
The return value is not used, but only stops further processing.
|
||||
"""
|
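# Example (illustrative sketch): a plugin can wrap the whole protocol for each
# item, e.g. to measure the combined setup/call/teardown duration:
#
#   import time
#   import pytest
#
#   @pytest.hookimpl(hookwrapper=True)
#   def pytest_runtest_protocol(item, nextitem):
#       start = time.perf_counter()
#       yield
#       reporter = item.config.pluginmanager.get_plugin("terminalreporter")
#       reporter.write_line(
#           "%s took %.3fs including setup/teardown"
#           % (item.nodeid, time.perf_counter() - start)
#       )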
||||
|
||||
|
||||
def pytest_runtest_logstart(
|
||||
nodeid: str, location: Tuple[str, Optional[int], str]
|
||||
) -> None:
|
||||
"""Called at the start of running the runtest protocol for a single item.
|
||||
|
||||
See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
|
||||
|
||||
:param nodeid: Full node ID of the item.
|
||||
:param location: A tuple of ``(filename, lineno, testname)``
|
||||
where ``filename`` is a file path relative to ``config.rootpath``
|
||||
and ``lineno`` is 0-based.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_runtest_logfinish(
|
||||
nodeid: str, location: Tuple[str, Optional[int], str]
|
||||
) -> None:
|
||||
"""Called at the end of running the runtest protocol for a single item.
|
||||
|
||||
See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
|
||||
|
||||
:param nodeid: Full node ID of the item.
|
||||
:param location: A tuple of ``(filename, lineno, testname)``
|
||||
where ``filename`` is a file path relative to ``config.rootpath``
|
||||
and ``lineno`` is 0-based.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_runtest_setup(item: "Item") -> None:
|
||||
"""Called to perform the setup phase for a test item.
|
||||
|
||||
The default implementation runs ``setup()`` on ``item`` and all of its
|
||||
parents (which haven't been setup yet). This includes obtaining the
|
||||
values of fixtures required by the item (which haven't been obtained
|
||||
yet).
|
||||
|
||||
:param item:
|
||||
The item.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_runtest_call(item: "Item") -> None:
|
||||
"""Called to run the test for test item (the call phase).
|
||||
|
||||
The default implementation calls ``item.runtest()``.
|
||||
|
||||
:param item:
|
||||
The item.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None:
|
||||
"""Called to perform the teardown phase for a test item.
|
||||
|
||||
The default implementation runs the finalizers and calls ``teardown()``
|
||||
on ``item`` and all of its parents (which need to be torn down). This
|
||||
includes running the teardown phase of fixtures required by the item (if
|
||||
they go out of scope).
|
||||
|
||||
:param item:
|
||||
The item.
|
||||
:param nextitem:
|
||||
The scheduled-to-be-next test item (None if no further test item is
|
||||
scheduled). This argument is used to perform exact teardowns, i.e.
|
||||
calling just enough finalizers so that nextitem only needs to call
|
||||
setup functions.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_runtest_makereport(
|
||||
item: "Item", call: "CallInfo[None]"
|
||||
) -> Optional["TestReport"]:
|
||||
"""Called to create a :class:`~pytest.TestReport` for each of
|
||||
the setup, call and teardown runtest phases of a test item.
|
||||
|
||||
See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
|
||||
|
||||
:param item: The item.
|
||||
:param call: The :class:`~pytest.CallInfo` for the phase.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_runtest_logreport(report: "TestReport") -> None:
|
||||
"""Process the :class:`~pytest.TestReport` produced for each
|
||||
of the setup, call and teardown runtest phases of an item.
|
||||
|
||||
See :hook:`pytest_runtest_protocol` for a description of the runtest protocol.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_report_to_serializable(
|
||||
config: "Config",
|
||||
report: Union["CollectReport", "TestReport"],
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""Serialize the given report object into a data structure suitable for
|
||||
sending over the wire, e.g. converted to JSON.
|
||||
|
||||
:param config: The pytest config object.
|
||||
:param report: The report.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_report_from_serializable(
|
||||
config: "Config",
|
||||
data: Dict[str, Any],
|
||||
) -> Optional[Union["CollectReport", "TestReport"]]:
|
||||
"""Restore a report object previously serialized with
|
||||
:hook:`pytest_report_to_serializable`.
|
||||
|
||||
:param config: The pytest config object.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Fixture related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_fixture_setup(
|
||||
fixturedef: "FixtureDef[Any]", request: "SubRequest"
|
||||
) -> Optional[object]:
|
||||
"""Perform fixture setup execution.
|
||||
|
||||
:param fixturedef:
|
||||
The fixture definition object.
|
||||
:param request:
|
||||
The fixture request object.
|
||||
:returns:
|
||||
The return value of the call to the fixture function.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
|
||||
.. note::
|
||||
If the fixture function returns None, other implementations of
|
||||
this hook function will continue to be called, according to the
|
||||
behavior of the :ref:`firstresult` option.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_fixture_post_finalizer(
|
||||
fixturedef: "FixtureDef[Any]", request: "SubRequest"
|
||||
) -> None:
|
||||
"""Called after fixture teardown, but before the cache is cleared, so
|
||||
the fixture result ``fixturedef.cached_result`` is still available (not
|
||||
``None``).
|
||||
|
||||
:param fixturedef:
|
||||
The fixture definition object.
|
||||
:param request:
|
||||
The fixture request object.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# test session related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
def pytest_sessionstart(session: "Session") -> None:
|
||||
"""Called after the ``Session`` object has been created and before performing collection
|
||||
and entering the run test loop.
|
||||
|
||||
:param session: The pytest session object.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_sessionfinish(
|
||||
session: "Session",
|
||||
exitstatus: Union[int, "ExitCode"],
|
||||
) -> None:
|
||||
"""Called after whole test run finished, right before returning the exit status to the system.
|
||||
|
||||
:param session: The pytest session object.
|
||||
:param exitstatus: The status which pytest will return to the system.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_unconfigure(config: "Config") -> None:
|
||||
"""Called before test process is exited.
|
||||
|
||||
:param config: The pytest config object.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# hooks for customizing the assert methods
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
def pytest_assertrepr_compare(
|
||||
config: "Config", op: str, left: object, right: object
|
||||
) -> Optional[List[str]]:
|
||||
"""Return explanation for comparisons in failing assert expressions.
|
||||
|
||||
Return None for no custom explanation, otherwise return a list
|
||||
of strings. The strings will be joined by newlines but any newlines
|
||||
*in* a string will be escaped. Note that all but the first line will
|
||||
be indented slightly, the intention is for the first line to be a summary.
|
||||
|
||||
:param config: The pytest config object.
|
||||
:param op: The operator, e.g. `"=="`, `"!="`, `"not in"`.
|
||||
:param left: The left operand.
|
||||
:param right: The right operand.
|
||||
"""
|
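# Example (illustrative sketch; ``Money`` is a hypothetical user class): a
# conftest.py implementation returning a friendlier explanation for "==":
#
#   def pytest_assertrepr_compare(config, op, left, right):
#       if isinstance(left, Money) and isinstance(right, Money) and op == "==":
#           return [
#               "Comparing Money instances:",
#               "   amounts: %s != %s" % (left.amount, right.amount),
#           ]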
||||
|
||||
|
||||
def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> None:
|
||||
"""Called whenever an assertion passes.
|
||||
|
||||
.. versionadded:: 5.0
|
||||
|
||||
Use this hook to do some processing after a passing assertion.
|
||||
The original assertion information is available in the `orig` string
|
||||
and the pytest introspected assertion information is available in the
|
||||
`expl` string.
|
||||
|
||||
This hook must be explicitly enabled by the ``enable_assertion_pass_hook``
|
||||
ini-file option:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[pytest]
|
||||
enable_assertion_pass_hook=true
|
||||
|
||||
You need to **clean the .pyc** files in your project directory and interpreter libraries
|
||||
when enabling this option, as assertions will need to be re-written.
|
||||
|
||||
:param item: pytest item object of current test.
|
||||
:param lineno: Line number of the assert statement.
|
||||
:param orig: String with the original assertion.
|
||||
:param expl: String with the assert explanation.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Hooks for influencing reporting (invoked from _pytest_terminal).
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
def pytest_report_header( # type:ignore[empty-body]
|
||||
config: "Config", start_path: Path, startdir: "LEGACY_PATH"
|
||||
) -> Union[str, List[str]]:
|
||||
"""Return a string or list of strings to be displayed as header info for terminal reporting.
|
||||
|
||||
:param config: The pytest config object.
|
||||
:param start_path: The starting dir.
|
||||
:param startdir: The starting dir (deprecated).
|
||||
|
||||
.. note::
|
||||
|
||||
Lines returned by a plugin are displayed before those of plugins which
|
||||
ran before it.
|
||||
If you want to have your line(s) displayed first, use
|
||||
:ref:`trylast=True <plugin-hookorder>`.
|
||||
|
||||
.. note::
|
||||
|
||||
This function should be implemented only in plugins or ``conftest.py``
|
||||
files situated at the tests root directory due to how pytest
|
||||
:ref:`discovers plugins during startup <pluginorder>`.
|
||||
|
||||
.. versionchanged:: 7.0.0
|
||||
The ``start_path`` parameter was added as a :class:`pathlib.Path`
|
||||
equivalent of the ``startdir`` parameter. The ``startdir`` parameter
|
||||
has been deprecated.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_report_collectionfinish( # type:ignore[empty-body]
|
||||
config: "Config",
|
||||
start_path: Path,
|
||||
startdir: "LEGACY_PATH",
|
||||
items: Sequence["Item"],
|
||||
) -> Union[str, List[str]]:
|
||||
"""Return a string or list of strings to be displayed after collection
|
||||
has finished successfully.
|
||||
|
||||
These strings will be displayed after the standard "collected X items" message.
|
||||
|
||||
.. versionadded:: 3.2
|
||||
|
||||
:param config: The pytest config object.
|
||||
:param start_path: The starting dir.
|
||||
:param startdir: The starting dir (deprecated).
|
||||
:param items: List of pytest items that are going to be executed; this list should not be modified.
|
||||
|
||||
.. note::
|
||||
|
||||
Lines returned by a plugin are displayed before those of plugins which
|
||||
ran before it.
|
||||
If you want to have your line(s) displayed first, use
|
||||
:ref:`trylast=True <plugin-hookorder>`.
|
||||
|
||||
.. versionchanged:: 7.0.0
|
||||
The ``start_path`` parameter was added as a :class:`pathlib.Path`
|
||||
equivalent of the ``startdir`` parameter. The ``startdir`` parameter
|
||||
has been deprecated.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(firstresult=True)
|
||||
def pytest_report_teststatus( # type:ignore[empty-body]
|
||||
report: Union["CollectReport", "TestReport"], config: "Config"
|
||||
) -> "TestShortLogReport | Tuple[str, str, Union[str, Tuple[str, Mapping[str, bool]]]]":
|
||||
"""Return result-category, shortletter and verbose word for status
|
||||
reporting.
|
||||
|
||||
The result-category is a category in which to count the result, for
|
||||
example "passed", "skipped", "error" or the empty string.
|
||||
|
||||
The shortletter is shown as testing progresses, for example ".", "s",
|
||||
"E" or the empty string.
|
||||
|
||||
The verbose word is shown as testing progresses in verbose mode, for
|
||||
example "PASSED", "SKIPPED", "ERROR" or the empty string.
|
||||
|
||||
pytest may style these implicitly according to the report outcome.
|
||||
To provide explicit styling, return a tuple for the verbose word,
|
||||
for example ``"rerun", "R", ("RERUN", {"yellow": True})``.
|
||||
|
||||
:param report: The report object whose status is to be returned.
|
||||
:param config: The pytest config object.
|
||||
:returns: The test status.
|
||||
|
||||
Stops at first non-None result, see :ref:`firstresult`.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_terminal_summary(
|
||||
terminalreporter: "TerminalReporter",
|
||||
exitstatus: "ExitCode",
|
||||
config: "Config",
|
||||
) -> None:
|
||||
"""Add a section to terminal summary reporting.
|
||||
|
||||
:param terminalreporter: The internal terminal reporter object.
|
||||
:param exitstatus: The exit status that will be reported back to the OS.
|
||||
:param config: The pytest config object.
|
||||
|
||||
.. versionadded:: 4.2
|
||||
The ``config`` parameter.
|
||||
"""
|
||||
|
||||
|
||||
@hookspec(historic=True)
|
||||
def pytest_warning_recorded(
|
||||
warning_message: "warnings.WarningMessage",
|
||||
when: "Literal['config', 'collect', 'runtest']",
|
||||
nodeid: str,
|
||||
location: Optional[Tuple[str, int, str]],
|
||||
) -> None:
|
||||
"""Process a warning captured by the internal pytest warnings plugin.
|
||||
|
||||
:param warning_message:
|
||||
The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains
|
||||
the same attributes as the parameters of :py:func:`warnings.showwarning`.
|
||||
|
||||
:param when:
|
||||
Indicates when the warning was captured. Possible values:
|
||||
|
||||
* ``"config"``: during pytest configuration/initialization stage.
|
||||
* ``"collect"``: during test collection.
|
||||
* ``"runtest"``: during test execution.
|
||||
|
||||
:param nodeid:
|
||||
Full id of the item.
|
||||
|
||||
:param location:
|
||||
When available, holds information about the execution context of the captured
|
||||
warning (filename, linenumber, function). ``function`` evaluates to <module>
|
||||
when the execution context is at the module level.
|
||||
|
||||
.. versionadded:: 6.0
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Hooks for influencing skipping
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
def pytest_markeval_namespace( # type:ignore[empty-body]
|
||||
config: "Config",
|
||||
) -> Dict[str, Any]:
|
||||
"""Called when constructing the globals dictionary used for
|
||||
evaluating string conditions in xfail/skipif markers.
|
||||
|
||||
This is useful when the condition for a marker requires
|
||||
objects that are expensive or impossible to obtain during
|
||||
collection time, which is required by normal boolean
|
||||
conditions.
|
||||
|
||||
.. versionadded:: 6.2
|
||||
|
||||
:param config: The pytest config object.
|
||||
:returns: A dictionary of additional globals to add.
|
||||
"""
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# error handling and internal debugging hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
|
||||
def pytest_internalerror(
|
||||
excrepr: "ExceptionRepr",
|
||||
excinfo: "ExceptionInfo[BaseException]",
|
||||
) -> Optional[bool]:
|
||||
"""Called for internal errors.
|
||||
|
||||
Return True to suppress the fallback handling of printing an
|
||||
INTERNALERROR message directly to sys.stderr.
|
||||
|
||||
:param excrepr: The exception repr object.
|
||||
:param excinfo: The exception info.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_keyboard_interrupt(
|
||||
excinfo: "ExceptionInfo[Union[KeyboardInterrupt, Exit]]",
|
||||
) -> None:
|
||||
"""Called for keyboard interrupt.
|
||||
|
||||
:param excinfo: The exception info.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_exception_interact(
|
||||
node: Union["Item", "Collector"],
|
||||
call: "CallInfo[Any]",
|
||||
report: Union["CollectReport", "TestReport"],
|
||||
) -> None:
|
||||
"""Called when an exception was raised which can potentially be
|
||||
interactively handled.
|
||||
|
||||
May be called during collection (see :hook:`pytest_make_collect_report`),
|
||||
in which case ``report`` is a :class:`CollectReport`.
|
||||
|
||||
May be called during runtest of an item (see :hook:`pytest_runtest_protocol`),
|
||||
in which case ``report`` is a :class:`TestReport`.
|
||||
|
||||
This hook is not called if the exception that was raised is an internal
|
||||
exception like ``skip.Exception``.
|
||||
|
||||
:param node:
|
||||
The item or collector.
|
||||
:param call:
|
||||
The call information. Contains the exception.
|
||||
:param report:
|
||||
The collection or test report.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_enter_pdb(config: "Config", pdb: "pdb.Pdb") -> None:
|
||||
"""Called upon pdb.set_trace().
|
||||
|
||||
Can be used by plugins to take special action just before the python
|
||||
debugger enters interactive mode.
|
||||
|
||||
:param config: The pytest config object.
|
||||
:param pdb: The Pdb instance.
|
||||
"""
|
||||
|
||||
|
||||
def pytest_leave_pdb(config: "Config", pdb: "pdb.Pdb") -> None:
|
||||
"""Called when leaving pdb (e.g. with continue after pdb.set_trace()).
|
||||
|
||||
Can be used by plugins to take special action just after the python
|
||||
debugger leaves interactive mode.
|
||||
|
||||
:param config: The pytest config object.
|
||||
:param pdb: The Pdb instance.
|
||||
"""
|
699
venv/lib/python3.11/site-packages/_pytest/junitxml.py
Normal file
@@ -0,0 +1,699 @@
"""Report test results in JUnit-XML format, for use with Jenkins and build
|
||||
integration servers.
|
||||
|
||||
Based on initial code from Ross Lawley.
|
||||
|
||||
Output conforms to
|
||||
https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
|
||||
"""
|
||||
import functools
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import xml.etree.ElementTree as ET
|
||||
from datetime import datetime
|
||||
from typing import Callable
|
||||
from typing import Dict
|
||||
from typing import List
|
||||
from typing import Match
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
from typing import Union
|
||||
|
||||
import pytest
|
||||
from _pytest import nodes
|
||||
from _pytest import timing
|
||||
from _pytest._code.code import ExceptionRepr
|
||||
from _pytest._code.code import ReprFileLocation
|
||||
from _pytest.config import Config
|
||||
from _pytest.config import filename_arg
|
||||
from _pytest.config.argparsing import Parser
|
||||
from _pytest.fixtures import FixtureRequest
|
||||
from _pytest.reports import TestReport
|
||||
from _pytest.stash import StashKey
|
||||
from _pytest.terminal import TerminalReporter
|
||||
|
||||
|
||||
xml_key = StashKey["LogXML"]()
|
||||
|
||||
|
||||
def bin_xml_escape(arg: object) -> str:
|
||||
r"""Visually escape invalid XML characters.
|
||||
|
||||
For example, transforms
|
||||
'hello\aworld\b'
|
||||
into
|
||||
'hello#x07world#x08'
|
||||
Note that the #xABs are *not* XML escapes - they are missing the leading ampersand of a real XML character reference (``&#x07;``).
|
||||
The idea is to escape visually for the user rather than for XML itself.
|
||||
"""
|
||||
|
||||
def repl(matchobj: Match[str]) -> str:
|
||||
i = ord(matchobj.group())
|
||||
if i <= 0xFF:
|
||||
return "#x%02X" % i
|
||||
else:
|
||||
return "#x%04X" % i
|
||||
|
||||
# The spec range of valid chars is:
|
||||
# Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
|
||||
# For an unknown(?) reason, we disallow #x7F (DEL) as well.
|
||||
illegal_xml_re = (
|
||||
"[^\u0009\u000A\u000D\u0020-\u007E\u0080-\uD7FF\uE000-\uFFFD\u10000-\u10FFFF]"
|
||||
)
|
||||
return re.sub(illegal_xml_re, repl, str(arg))
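# Illustrative example (not part of the original source): control characters
# become visible "#x.." markers instead of being dropped or XML-escaped:
#
#     bin_xml_escape("hello\aworld")  # -> 'hello#x07world'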
|
||||
|
||||
|
||||
def merge_family(left, right) -> None:
|
||||
result = {}
|
||||
for kl, vl in left.items():
|
||||
for kr, vr in right.items():
|
||||
if not isinstance(vl, list):
|
||||
raise TypeError(type(vl))
|
||||
result[kl] = vl + vr
|
||||
left.update(result)
|
||||
|
||||
|
||||
families = {}
|
||||
families["_base"] = {"testcase": ["classname", "name"]}
|
||||
families["_base_legacy"] = {"testcase": ["file", "line", "url"]}
|
||||
|
||||
# xUnit 1.x inherits legacy attributes.
|
||||
families["xunit1"] = families["_base"].copy()
|
||||
merge_family(families["xunit1"], families["_base_legacy"])
|
||||
|
||||
# xUnit 2.x uses strict base attributes.
|
||||
families["xunit2"] = families["_base"]
|
||||
|
||||
|
||||
class _NodeReporter:
|
||||
def __init__(self, nodeid: Union[str, TestReport], xml: "LogXML") -> None:
|
||||
self.id = nodeid
|
||||
self.xml = xml
|
||||
self.add_stats = self.xml.add_stats
|
||||
self.family = self.xml.family
|
||||
self.duration = 0.0
|
||||
self.properties: List[Tuple[str, str]] = []
|
||||
self.nodes: List[ET.Element] = []
|
||||
self.attrs: Dict[str, str] = {}
|
||||
|
||||
def append(self, node: ET.Element) -> None:
|
||||
self.xml.add_stats(node.tag)
|
||||
self.nodes.append(node)
|
||||
|
||||
def add_property(self, name: str, value: object) -> None:
|
||||
self.properties.append((str(name), bin_xml_escape(value)))
|
||||
|
||||
def add_attribute(self, name: str, value: object) -> None:
|
||||
self.attrs[str(name)] = bin_xml_escape(value)
|
||||
|
||||
def make_properties_node(self) -> Optional[ET.Element]:
|
||||
"""Return a Junit node containing custom properties, if any."""
|
||||
if self.properties:
|
||||
properties = ET.Element("properties")
|
||||
for name, value in self.properties:
|
||||
properties.append(ET.Element("property", name=name, value=value))
|
||||
return properties
|
||||
return None
|
||||
|
||||
def record_testreport(self, testreport: TestReport) -> None:
|
||||
names = mangle_test_address(testreport.nodeid)
|
||||
existing_attrs = self.attrs
|
||||
classnames = names[:-1]
|
||||
if self.xml.prefix:
|
||||
classnames.insert(0, self.xml.prefix)
|
||||
attrs: Dict[str, str] = {
|
||||
"classname": ".".join(classnames),
|
||||
"name": bin_xml_escape(names[-1]),
|
||||
"file": testreport.location[0],
|
||||
}
|
||||
if testreport.location[1] is not None:
|
||||
attrs["line"] = str(testreport.location[1])
|
||||
if hasattr(testreport, "url"):
|
||||
attrs["url"] = testreport.url
|
||||
self.attrs = attrs
|
||||
self.attrs.update(existing_attrs) # Restore any user-defined attributes.
|
||||
|
||||
# Preserve legacy testcase behavior.
|
||||
if self.family == "xunit1":
|
||||
return
|
||||
|
||||
# Filter out attributes not permitted by this test family.
|
||||
# Including custom attributes because they are not valid here.
|
||||
temp_attrs = {}
|
||||
for key in self.attrs.keys():
|
||||
if key in families[self.family]["testcase"]:
|
||||
temp_attrs[key] = self.attrs[key]
|
||||
self.attrs = temp_attrs
|
||||
|
||||
def to_xml(self) -> ET.Element:
|
||||
testcase = ET.Element("testcase", self.attrs, time="%.3f" % self.duration)
|
||||
properties = self.make_properties_node()
|
||||
if properties is not None:
|
||||
testcase.append(properties)
|
||||
testcase.extend(self.nodes)
|
||||
return testcase
|
||||
|
||||
def _add_simple(self, tag: str, message: str, data: Optional[str] = None) -> None:
|
||||
node = ET.Element(tag, message=message)
|
||||
node.text = bin_xml_escape(data)
|
||||
self.append(node)
|
||||
|
||||
def write_captured_output(self, report: TestReport) -> None:
|
||||
if not self.xml.log_passing_tests and report.passed:
|
||||
return
|
||||
|
||||
content_out = report.capstdout
|
||||
content_log = report.caplog
|
||||
content_err = report.capstderr
|
||||
if self.xml.logging == "no":
|
||||
return
|
||||
content_all = ""
|
||||
if self.xml.logging in ["log", "all"]:
|
||||
content_all = self._prepare_content(content_log, " Captured Log ")
|
||||
if self.xml.logging in ["system-out", "out-err", "all"]:
|
||||
content_all += self._prepare_content(content_out, " Captured Out ")
|
||||
self._write_content(report, content_all, "system-out")
|
||||
content_all = ""
|
||||
if self.xml.logging in ["system-err", "out-err", "all"]:
|
||||
content_all += self._prepare_content(content_err, " Captured Err ")
|
||||
self._write_content(report, content_all, "system-err")
|
||||
content_all = ""
|
||||
if content_all:
|
||||
self._write_content(report, content_all, "system-out")
|
||||
|
||||
def _prepare_content(self, content: str, header: str) -> str:
|
||||
return "\n".join([header.center(80, "-"), content, ""])
|
||||
|
||||
def _write_content(self, report: TestReport, content: str, jheader: str) -> None:
|
||||
tag = ET.Element(jheader)
|
||||
tag.text = bin_xml_escape(content)
|
||||
self.append(tag)
|
||||
|
||||
def append_pass(self, report: TestReport) -> None:
|
||||
self.add_stats("passed")
|
||||
|
||||
def append_failure(self, report: TestReport) -> None:
|
||||
# msg = str(report.longrepr.reprtraceback.extraline)
|
||||
if hasattr(report, "wasxfail"):
|
||||
self._add_simple("skipped", "xfail-marked test passes unexpectedly")
|
||||
else:
|
||||
assert report.longrepr is not None
|
||||
reprcrash: Optional[ReprFileLocation] = getattr(
|
||||
report.longrepr, "reprcrash", None
|
||||
)
|
||||
if reprcrash is not None:
|
||||
message = reprcrash.message
|
||||
else:
|
||||
message = str(report.longrepr)
|
||||
message = bin_xml_escape(message)
|
||||
self._add_simple("failure", message, str(report.longrepr))
|
||||
|
||||
def append_collect_error(self, report: TestReport) -> None:
|
||||
# msg = str(report.longrepr.reprtraceback.extraline)
|
||||
assert report.longrepr is not None
|
||||
self._add_simple("error", "collection failure", str(report.longrepr))
|
||||
|
||||
def append_collect_skipped(self, report: TestReport) -> None:
|
||||
self._add_simple("skipped", "collection skipped", str(report.longrepr))
|
||||
|
||||
def append_error(self, report: TestReport) -> None:
|
||||
assert report.longrepr is not None
|
||||
reprcrash: Optional[ReprFileLocation] = getattr(
|
||||
report.longrepr, "reprcrash", None
|
||||
)
|
||||
if reprcrash is not None:
|
||||
reason = reprcrash.message
|
||||
else:
|
||||
reason = str(report.longrepr)
|
||||
|
||||
if report.when == "teardown":
|
||||
msg = f'failed on teardown with "{reason}"'
|
||||
else:
|
||||
msg = f'failed on setup with "{reason}"'
|
||||
self._add_simple("error", bin_xml_escape(msg), str(report.longrepr))
|
||||
|
||||
def append_skipped(self, report: TestReport) -> None:
|
||||
if hasattr(report, "wasxfail"):
|
||||
xfailreason = report.wasxfail
|
||||
if xfailreason.startswith("reason: "):
|
||||
xfailreason = xfailreason[8:]
|
||||
xfailreason = bin_xml_escape(xfailreason)
|
||||
skipped = ET.Element("skipped", type="pytest.xfail", message=xfailreason)
|
||||
self.append(skipped)
|
||||
else:
|
||||
assert isinstance(report.longrepr, tuple)
|
||||
filename, lineno, skipreason = report.longrepr
|
||||
if skipreason.startswith("Skipped: "):
|
||||
skipreason = skipreason[9:]
|
||||
details = f"{filename}:{lineno}: {skipreason}"
|
||||
|
||||
skipped = ET.Element("skipped", type="pytest.skip", message=skipreason)
|
||||
skipped.text = bin_xml_escape(details)
|
||||
self.append(skipped)
|
||||
self.write_captured_output(report)
|
||||
|
||||
def finalize(self) -> None:
|
||||
data = self.to_xml()
|
||||
self.__dict__.clear()
|
||||
# Type ignored because mypy doesn't like overriding a method.
|
||||
# Also the return value doesn't match...
|
||||
self.to_xml = lambda: data # type: ignore[assignment]
|
||||
|
||||
|
||||
def _warn_incompatibility_with_xunit2(
|
||||
request: FixtureRequest, fixture_name: str
|
||||
) -> None:
|
||||
"""Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions."""
|
||||
from _pytest.warning_types import PytestWarning
|
||||
|
||||
xml = request.config.stash.get(xml_key, None)
|
||||
if xml is not None and xml.family not in ("xunit1", "legacy"):
|
||||
request.node.warn(
|
||||
PytestWarning(
|
||||
"{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format(
|
||||
fixture_name=fixture_name, family=xml.family
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def record_property(request: FixtureRequest) -> Callable[[str, object], None]:
|
||||
"""Add extra properties to the calling test.
|
||||
|
||||
User properties become part of the test report and are available to the
|
||||
configured reporters, like JUnit XML.
|
||||
|
||||
The fixture is callable with ``name, value``. The value is automatically
|
||||
XML-encoded.
|
||||
|
||||
Example::
|
||||
|
||||
def test_function(record_property):
|
||||
record_property("example_key", 1)
|
||||
"""
|
||||
_warn_incompatibility_with_xunit2(request, "record_property")
|
||||
|
||||
def append_property(name: str, value: object) -> None:
|
||||
request.node.user_properties.append((name, value))
|
||||
|
||||
return append_property
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]:
|
||||
"""Add extra xml attributes to the tag for the calling test.
|
||||
|
||||
The fixture is callable with ``name, value``. The value is
|
||||
automatically XML-encoded.
|
||||
"""
|
||||
from _pytest.warning_types import PytestExperimentalApiWarning
|
||||
|
||||
request.node.warn(
|
||||
PytestExperimentalApiWarning("record_xml_attribute is an experimental feature")
|
||||
)
|
||||
|
||||
_warn_incompatibility_with_xunit2(request, "record_xml_attribute")
|
||||
|
||||
# Declare noop
|
||||
def add_attr_noop(name: str, value: object) -> None:
|
||||
pass
|
||||
|
||||
attr_func = add_attr_noop
|
||||
|
||||
xml = request.config.stash.get(xml_key, None)
|
||||
if xml is not None:
|
||||
node_reporter = xml.node_reporter(request.node.nodeid)
|
||||
attr_func = node_reporter.add_attribute
|
||||
|
||||
return attr_func
|
||||
|
||||
|
||||
def _check_record_param_type(param: str, v: str) -> None:
|
||||
"""Used by record_testsuite_property to check that the given parameter name is of the proper
|
||||
type."""
|
||||
__tracebackhide__ = True
|
||||
if not isinstance(v, str):
|
||||
msg = "{param} parameter needs to be a string, but {g} given" # type: ignore[unreachable]
|
||||
raise TypeError(msg.format(param=param, g=type(v).__name__))
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]:
|
||||
"""Record a new ``<property>`` tag as child of the root ``<testsuite>``.
|
||||
|
||||
This is suited to writing global information regarding the entire test
suite, and is compatible with the ``xunit2`` JUnit family.
|
||||
|
||||
This is a ``session``-scoped fixture which is called with ``(name, value)``. Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_foo(record_testsuite_property):
|
||||
record_testsuite_property("ARCH", "PPC")
|
||||
record_testsuite_property("STORAGE_TYPE", "CEPH")
|
||||
|
||||
:param name:
|
||||
The property name.
|
||||
:param value:
|
||||
The property value. Will be converted to a string.
|
||||
|
||||
.. warning::
|
||||
|
||||
Currently this fixture **does not work** with the
|
||||
`pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See
|
||||
:issue:`7767` for details.
|
||||
"""
|
||||
|
||||
__tracebackhide__ = True
|
||||
|
||||
def record_func(name: str, value: object) -> None:
|
||||
"""No-op function in case --junitxml was not passed in the command-line."""
|
||||
__tracebackhide__ = True
|
||||
_check_record_param_type("name", name)
|
||||
|
||||
xml = request.config.stash.get(xml_key, None)
|
||||
if xml is not None:
|
||||
record_func = xml.add_global_property # noqa
|
||||
return record_func
|
||||
|
||||
|
||||
def pytest_addoption(parser: Parser) -> None:
|
||||
group = parser.getgroup("terminal reporting")
|
||||
group.addoption(
|
||||
"--junitxml",
|
||||
"--junit-xml",
|
||||
action="store",
|
||||
dest="xmlpath",
|
||||
metavar="path",
|
||||
type=functools.partial(filename_arg, optname="--junitxml"),
|
||||
default=None,
|
||||
help="Create junit-xml style report file at given path",
|
||||
)
|
||||
group.addoption(
|
||||
"--junitprefix",
|
||||
"--junit-prefix",
|
||||
action="store",
|
||||
metavar="str",
|
||||
default=None,
|
||||
help="Prepend prefix to classnames in junit-xml output",
|
||||
)
|
||||
parser.addini(
|
||||
"junit_suite_name", "Test suite name for JUnit report", default="pytest"
|
||||
)
|
||||
parser.addini(
|
||||
"junit_logging",
|
||||
"Write captured log messages to JUnit report: "
|
||||
"one of no|log|system-out|system-err|out-err|all",
|
||||
default="no",
|
||||
)
|
||||
parser.addini(
|
||||
"junit_log_passing_tests",
|
||||
"Capture log information for passing tests to JUnit report: ",
|
||||
type="bool",
|
||||
default=True,
|
||||
)
|
||||
parser.addini(
|
||||
"junit_duration_report",
|
||||
"Duration time to report: one of total|call",
|
||||
default="total",
|
||||
) # choices=['total', 'call'])
|
||||
parser.addini(
|
||||
"junit_family",
|
||||
"Emit XML for schema: one of legacy|xunit1|xunit2",
|
||||
default="xunit2",
|
||||
)
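# Illustrative usage (not part of the original source): the options above are
# typically set on the command line or in an ini file, for example
#
#     pytest --junitxml=reports/junit.xml --junit-prefix=ci
#
# or in pytest.ini:
#
#     [pytest]
#     junit_suite_name = my_suite
#     junit_logging = system-out
#     junit_family = xunit2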
|
||||
|
||||
|
||||
def pytest_configure(config: Config) -> None:
|
||||
xmlpath = config.option.xmlpath
|
||||
# Prevent opening xmllog on worker nodes (xdist).
|
||||
if xmlpath and not hasattr(config, "workerinput"):
|
||||
junit_family = config.getini("junit_family")
|
||||
config.stash[xml_key] = LogXML(
|
||||
xmlpath,
|
||||
config.option.junitprefix,
|
||||
config.getini("junit_suite_name"),
|
||||
config.getini("junit_logging"),
|
||||
config.getini("junit_duration_report"),
|
||||
junit_family,
|
||||
config.getini("junit_log_passing_tests"),
|
||||
)
|
||||
config.pluginmanager.register(config.stash[xml_key])
|
||||
|
||||
|
||||
def pytest_unconfigure(config: Config) -> None:
|
||||
xml = config.stash.get(xml_key, None)
|
||||
if xml:
|
||||
del config.stash[xml_key]
|
||||
config.pluginmanager.unregister(xml)
|
||||
|
||||
|
||||
def mangle_test_address(address: str) -> List[str]:
|
||||
path, possible_open_bracket, params = address.partition("[")
|
||||
names = path.split("::")
|
||||
# Convert file path to dotted path.
|
||||
names[0] = names[0].replace(nodes.SEP, ".")
|
||||
names[0] = re.sub(r"\.py$", "", names[0])
|
||||
# Put any params back.
|
||||
names[-1] += possible_open_bracket + params
|
||||
return names
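# Illustrative example (not part of the original source):
#
#     mangle_test_address("tests/test_foo.py::TestBar::test_baz[case-1]")
#     # -> ["tests.test_foo", "TestBar", "test_baz[case-1]"]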
|
||||
|
||||
|
||||
class LogXML:
|
||||
def __init__(
|
||||
self,
|
||||
logfile,
|
||||
prefix: Optional[str],
|
||||
suite_name: str = "pytest",
|
||||
logging: str = "no",
|
||||
report_duration: str = "total",
|
||||
family="xunit1",
|
||||
log_passing_tests: bool = True,
|
||||
) -> None:
|
||||
logfile = os.path.expanduser(os.path.expandvars(logfile))
|
||||
self.logfile = os.path.normpath(os.path.abspath(logfile))
|
||||
self.prefix = prefix
|
||||
self.suite_name = suite_name
|
||||
self.logging = logging
|
||||
self.log_passing_tests = log_passing_tests
|
||||
self.report_duration = report_duration
|
||||
self.family = family
|
||||
self.stats: Dict[str, int] = dict.fromkeys(
|
||||
["error", "passed", "failure", "skipped"], 0
|
||||
)
|
||||
self.node_reporters: Dict[
|
||||
Tuple[Union[str, TestReport], object], _NodeReporter
|
||||
] = {}
|
||||
self.node_reporters_ordered: List[_NodeReporter] = []
|
||||
self.global_properties: List[Tuple[str, str]] = []
|
||||
|
||||
# List of reports that failed on call but teardown is pending.
|
||||
self.open_reports: List[TestReport] = []
|
||||
self.cnt_double_fail_tests = 0
|
||||
|
||||
# Replaces convenience family with real family.
|
||||
if self.family == "legacy":
|
||||
self.family = "xunit1"
|
||||
|
||||
def finalize(self, report: TestReport) -> None:
|
||||
nodeid = getattr(report, "nodeid", report)
|
||||
# Local hack to handle xdist report order.
|
||||
workernode = getattr(report, "node", None)
|
||||
reporter = self.node_reporters.pop((nodeid, workernode))
|
||||
if reporter is not None:
|
||||
reporter.finalize()
|
||||
|
||||
def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporter:
|
||||
nodeid: Union[str, TestReport] = getattr(report, "nodeid", report)
|
||||
# Local hack to handle xdist report order.
|
||||
workernode = getattr(report, "node", None)
|
||||
|
||||
key = nodeid, workernode
|
||||
|
||||
if key in self.node_reporters:
|
||||
# TODO: breaks for --dist=each
|
||||
return self.node_reporters[key]
|
||||
|
||||
reporter = _NodeReporter(nodeid, self)
|
||||
|
||||
self.node_reporters[key] = reporter
|
||||
self.node_reporters_ordered.append(reporter)
|
||||
|
||||
return reporter
|
||||
|
||||
def add_stats(self, key: str) -> None:
|
||||
if key in self.stats:
|
||||
self.stats[key] += 1
|
||||
|
||||
def _opentestcase(self, report: TestReport) -> _NodeReporter:
|
||||
reporter = self.node_reporter(report)
|
||||
reporter.record_testreport(report)
|
||||
return reporter
|
||||
|
||||
def pytest_runtest_logreport(self, report: TestReport) -> None:
|
||||
"""Handle a setup/call/teardown report, generating the appropriate
|
||||
XML tags as necessary.
|
||||
|
||||
Note: due to plugins like xdist, this hook may be called in interlaced
|
||||
order with reports from other nodes. For example:
|
||||
|
||||
Usual call order:
|
||||
-> setup node1
|
||||
-> call node1
|
||||
-> teardown node1
|
||||
-> setup node2
|
||||
-> call node2
|
||||
-> teardown node2
|
||||
|
||||
Possible call order in xdist:
|
||||
-> setup node1
|
||||
-> call node1
|
||||
-> setup node2
|
||||
-> call node2
|
||||
-> teardown node2
|
||||
-> teardown node1
|
||||
"""
|
||||
close_report = None
|
||||
if report.passed:
|
||||
if report.when == "call": # ignore setup/teardown
|
||||
reporter = self._opentestcase(report)
|
||||
reporter.append_pass(report)
|
||||
elif report.failed:
|
||||
if report.when == "teardown":
|
||||
# The following vars are needed when xdist plugin is used.
|
||||
report_wid = getattr(report, "worker_id", None)
|
||||
report_ii = getattr(report, "item_index", None)
|
||||
close_report = next(
|
||||
(
|
||||
rep
|
||||
for rep in self.open_reports
|
||||
if (
|
||||
rep.nodeid == report.nodeid
|
||||
and getattr(rep, "item_index", None) == report_ii
|
||||
and getattr(rep, "worker_id", None) == report_wid
|
||||
)
|
||||
),
|
||||
None,
|
||||
)
|
||||
if close_report:
|
||||
# We need to open new testcase in case we have failure in
|
||||
# call and error in teardown in order to follow junit
|
||||
# schema.
|
||||
self.finalize(close_report)
|
||||
self.cnt_double_fail_tests += 1
|
||||
reporter = self._opentestcase(report)
|
||||
if report.when == "call":
|
||||
reporter.append_failure(report)
|
||||
self.open_reports.append(report)
|
||||
if not self.log_passing_tests:
|
||||
reporter.write_captured_output(report)
|
||||
else:
|
||||
reporter.append_error(report)
|
||||
elif report.skipped:
|
||||
reporter = self._opentestcase(report)
|
||||
reporter.append_skipped(report)
|
||||
self.update_testcase_duration(report)
|
||||
if report.when == "teardown":
|
||||
reporter = self._opentestcase(report)
|
||||
reporter.write_captured_output(report)
|
||||
|
||||
for propname, propvalue in report.user_properties:
|
||||
reporter.add_property(propname, str(propvalue))
|
||||
|
||||
self.finalize(report)
|
||||
report_wid = getattr(report, "worker_id", None)
|
||||
report_ii = getattr(report, "item_index", None)
|
||||
close_report = next(
|
||||
(
|
||||
rep
|
||||
for rep in self.open_reports
|
||||
if (
|
||||
rep.nodeid == report.nodeid
|
||||
and getattr(rep, "item_index", None) == report_ii
|
||||
and getattr(rep, "worker_id", None) == report_wid
|
||||
)
|
||||
),
|
||||
None,
|
||||
)
|
||||
if close_report:
|
||||
self.open_reports.remove(close_report)
|
||||
|
||||
def update_testcase_duration(self, report: TestReport) -> None:
|
||||
"""Accumulate total duration for nodeid from given report and update
|
||||
the Junit.testcase with the new total if already created."""
|
||||
if self.report_duration == "total" or report.when == self.report_duration:
|
||||
reporter = self.node_reporter(report)
|
||||
reporter.duration += getattr(report, "duration", 0.0)
|
||||
|
||||
def pytest_collectreport(self, report: TestReport) -> None:
|
||||
if not report.passed:
|
||||
reporter = self._opentestcase(report)
|
||||
if report.failed:
|
||||
reporter.append_collect_error(report)
|
||||
else:
|
||||
reporter.append_collect_skipped(report)
|
||||
|
||||
def pytest_internalerror(self, excrepr: ExceptionRepr) -> None:
|
||||
reporter = self.node_reporter("internal")
|
||||
reporter.attrs.update(classname="pytest", name="internal")
|
||||
reporter._add_simple("error", "internal error", str(excrepr))
|
||||
|
||||
def pytest_sessionstart(self) -> None:
|
||||
self.suite_start_time = timing.time()
|
||||
|
||||
def pytest_sessionfinish(self) -> None:
|
||||
dirname = os.path.dirname(os.path.abspath(self.logfile))
|
||||
# exist_ok avoids filesystem race conditions between checking path existence and requesting creation
|
||||
os.makedirs(dirname, exist_ok=True)
|
||||
|
||||
with open(self.logfile, "w", encoding="utf-8") as logfile:
|
||||
suite_stop_time = timing.time()
|
||||
suite_time_delta = suite_stop_time - self.suite_start_time
|
||||
|
||||
numtests = (
|
||||
self.stats["passed"]
|
||||
+ self.stats["failure"]
|
||||
+ self.stats["skipped"]
|
||||
+ self.stats["error"]
|
||||
- self.cnt_double_fail_tests
|
||||
)
|
||||
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
|
||||
|
||||
suite_node = ET.Element(
|
||||
"testsuite",
|
||||
name=self.suite_name,
|
||||
errors=str(self.stats["error"]),
|
||||
failures=str(self.stats["failure"]),
|
||||
skipped=str(self.stats["skipped"]),
|
||||
tests=str(numtests),
|
||||
time="%.3f" % suite_time_delta,
|
||||
timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(),
|
||||
hostname=platform.node(),
|
||||
)
|
||||
global_properties = self._get_global_properties_node()
|
||||
if global_properties is not None:
|
||||
suite_node.append(global_properties)
|
||||
for node_reporter in self.node_reporters_ordered:
|
||||
suite_node.append(node_reporter.to_xml())
|
||||
testsuites = ET.Element("testsuites")
|
||||
testsuites.append(suite_node)
|
||||
logfile.write(ET.tostring(testsuites, encoding="unicode"))
|
||||
|
||||
def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
|
||||
terminalreporter.write_sep("-", f"generated xml file: {self.logfile}")
|
||||
|
||||
def add_global_property(self, name: str, value: object) -> None:
|
||||
__tracebackhide__ = True
|
||||
_check_record_param_type("name", name)
|
||||
self.global_properties.append((name, bin_xml_escape(value)))
|
||||
|
||||
def _get_global_properties_node(self) -> Optional[ET.Element]:
|
||||
"""Return a Junit node containing custom properties, if any."""
|
||||
if self.global_properties:
|
||||
properties = ET.Element("properties")
|
||||
for name, value in self.global_properties:
|
||||
properties.append(ET.Element("property", name=name, value=value))
|
||||
return properties
|
||||
return None
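# Illustrative sketch (not part of the original source): the report written by
# pytest_sessionfinish above has this overall shape (attribute values are
# examples only):
#
#     <testsuites>
#       <testsuite name="pytest" errors="0" failures="1" skipped="0" tests="3"
#                  time="0.123" timestamp="..." hostname="...">
#         <properties>...</properties>   <!-- from record_testsuite_property -->
#         <testcase classname="tests.test_foo" name="test_bar" time="0.001">
#           <failure message="...">...</failure>
#         </testcase>
#       </testsuite>
#     </testsuites>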
|
479
venv/lib/python3.11/site-packages/_pytest/legacypath.py
Normal file
@@ -0,0 +1,479 @@
"""Add backward compatibility support for the legacy py path type."""
|
||||
import dataclasses
|
||||
import shlex
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import Union
|
||||
|
||||
from iniconfig import SectionWrapper
|
||||
|
||||
from _pytest.cacheprovider import Cache
|
||||
from _pytest.compat import final
|
||||
from _pytest.compat import LEGACY_PATH
|
||||
from _pytest.compat import legacy_path
|
||||
from _pytest.config import Config
|
||||
from _pytest.config import hookimpl
|
||||
from _pytest.config import PytestPluginManager
|
||||
from _pytest.deprecated import check_ispytest
|
||||
from _pytest.fixtures import fixture
|
||||
from _pytest.fixtures import FixtureRequest
|
||||
from _pytest.main import Session
|
||||
from _pytest.monkeypatch import MonkeyPatch
|
||||
from _pytest.nodes import Collector
|
||||
from _pytest.nodes import Item
|
||||
from _pytest.nodes import Node
|
||||
from _pytest.pytester import HookRecorder
|
||||
from _pytest.pytester import Pytester
|
||||
from _pytest.pytester import RunResult
|
||||
from _pytest.terminal import TerminalReporter
|
||||
from _pytest.tmpdir import TempPathFactory
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing_extensions import Final
|
||||
|
||||
import pexpect
|
||||
|
||||
|
||||
@final
|
||||
class Testdir:
|
||||
"""
|
||||
Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead.
|
||||
|
||||
All methods just forward to an internal :class:`Pytester` instance, converting results
|
||||
to `legacy_path` objects as necessary.
|
||||
"""
|
||||
|
||||
__test__ = False
|
||||
|
||||
CLOSE_STDIN: "Final" = Pytester.CLOSE_STDIN
|
||||
TimeoutExpired: "Final" = Pytester.TimeoutExpired
|
||||
|
||||
def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None:
|
||||
check_ispytest(_ispytest)
|
||||
self._pytester = pytester
|
||||
|
||||
@property
|
||||
def tmpdir(self) -> LEGACY_PATH:
|
||||
"""Temporary directory where tests are executed."""
|
||||
return legacy_path(self._pytester.path)
|
||||
|
||||
@property
|
||||
def test_tmproot(self) -> LEGACY_PATH:
|
||||
return legacy_path(self._pytester._test_tmproot)
|
||||
|
||||
@property
|
||||
def request(self):
|
||||
return self._pytester._request
|
||||
|
||||
@property
|
||||
def plugins(self):
|
||||
return self._pytester.plugins
|
||||
|
||||
@plugins.setter
|
||||
def plugins(self, plugins):
|
||||
self._pytester.plugins = plugins
|
||||
|
||||
@property
|
||||
def monkeypatch(self) -> MonkeyPatch:
|
||||
return self._pytester._monkeypatch
|
||||
|
||||
def make_hook_recorder(self, pluginmanager) -> HookRecorder:
|
||||
"""See :meth:`Pytester.make_hook_recorder`."""
|
||||
return self._pytester.make_hook_recorder(pluginmanager)
|
||||
|
||||
def chdir(self) -> None:
|
||||
"""See :meth:`Pytester.chdir`."""
|
||||
return self._pytester.chdir()
|
||||
|
||||
def finalize(self) -> None:
|
||||
"""See :meth:`Pytester._finalize`."""
|
||||
return self._pytester._finalize()
|
||||
|
||||
def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.makefile`."""
|
||||
if ext and not ext.startswith("."):
|
||||
# pytester.makefile is going to throw a ValueError in a way that
# testdir.makefile did not, because pathlib.Path is stricter about
# suffixes than py.path. This ext argument is likely a user error,
# but since testdir has allowed it, we prepend "." as a workaround
# to avoid breaking testdir usage that worked before.
|
||||
ext = "." + ext
|
||||
return legacy_path(self._pytester.makefile(ext, *args, **kwargs))
|
||||
|
||||
def makeconftest(self, source) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.makeconftest`."""
|
||||
return legacy_path(self._pytester.makeconftest(source))
|
||||
|
||||
def makeini(self, source) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.makeini`."""
|
||||
return legacy_path(self._pytester.makeini(source))
|
||||
|
||||
def getinicfg(self, source: str) -> SectionWrapper:
|
||||
"""See :meth:`Pytester.getinicfg`."""
|
||||
return self._pytester.getinicfg(source)
|
||||
|
||||
def makepyprojecttoml(self, source) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.makepyprojecttoml`."""
|
||||
return legacy_path(self._pytester.makepyprojecttoml(source))
|
||||
|
||||
def makepyfile(self, *args, **kwargs) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.makepyfile`."""
|
||||
return legacy_path(self._pytester.makepyfile(*args, **kwargs))
|
||||
|
||||
def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.maketxtfile`."""
|
||||
return legacy_path(self._pytester.maketxtfile(*args, **kwargs))
|
||||
|
||||
def syspathinsert(self, path=None) -> None:
|
||||
"""See :meth:`Pytester.syspathinsert`."""
|
||||
return self._pytester.syspathinsert(path)
|
||||
|
||||
def mkdir(self, name) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.mkdir`."""
|
||||
return legacy_path(self._pytester.mkdir(name))
|
||||
|
||||
def mkpydir(self, name) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.mkpydir`."""
|
||||
return legacy_path(self._pytester.mkpydir(name))
|
||||
|
||||
def copy_example(self, name=None) -> LEGACY_PATH:
|
||||
"""See :meth:`Pytester.copy_example`."""
|
||||
return legacy_path(self._pytester.copy_example(name))
|
||||
|
||||
def getnode(self, config: Config, arg) -> Optional[Union[Item, Collector]]:
|
||||
"""See :meth:`Pytester.getnode`."""
|
||||
return self._pytester.getnode(config, arg)
|
||||
|
||||
def getpathnode(self, path):
|
||||
"""See :meth:`Pytester.getpathnode`."""
|
||||
return self._pytester.getpathnode(path)
|
||||
|
||||
def genitems(self, colitems: List[Union[Item, Collector]]) -> List[Item]:
|
||||
"""See :meth:`Pytester.genitems`."""
|
||||
return self._pytester.genitems(colitems)
|
||||
|
||||
def runitem(self, source):
|
||||
"""See :meth:`Pytester.runitem`."""
|
||||
return self._pytester.runitem(source)
|
||||
|
||||
def inline_runsource(self, source, *cmdlineargs):
|
||||
"""See :meth:`Pytester.inline_runsource`."""
|
||||
return self._pytester.inline_runsource(source, *cmdlineargs)
|
||||
|
||||
def inline_genitems(self, *args):
|
||||
"""See :meth:`Pytester.inline_genitems`."""
|
||||
return self._pytester.inline_genitems(*args)
|
||||
|
||||
def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False):
|
||||
"""See :meth:`Pytester.inline_run`."""
|
||||
return self._pytester.inline_run(
|
||||
*args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc
|
||||
)
|
||||
|
||||
def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
|
||||
"""See :meth:`Pytester.runpytest_inprocess`."""
|
||||
return self._pytester.runpytest_inprocess(*args, **kwargs)
|
||||
|
||||
def runpytest(self, *args, **kwargs) -> RunResult:
|
||||
"""See :meth:`Pytester.runpytest`."""
|
||||
return self._pytester.runpytest(*args, **kwargs)
|
||||
|
||||
def parseconfig(self, *args) -> Config:
|
||||
"""See :meth:`Pytester.parseconfig`."""
|
||||
return self._pytester.parseconfig(*args)
|
||||
|
||||
def parseconfigure(self, *args) -> Config:
|
||||
"""See :meth:`Pytester.parseconfigure`."""
|
||||
return self._pytester.parseconfigure(*args)
|
||||
|
||||
def getitem(self, source, funcname="test_func"):
|
||||
"""See :meth:`Pytester.getitem`."""
|
||||
return self._pytester.getitem(source, funcname)
|
||||
|
||||
def getitems(self, source):
|
||||
"""See :meth:`Pytester.getitems`."""
|
||||
return self._pytester.getitems(source)
|
||||
|
||||
def getmodulecol(self, source, configargs=(), withinit=False):
|
||||
"""See :meth:`Pytester.getmodulecol`."""
|
||||
return self._pytester.getmodulecol(
|
||||
source, configargs=configargs, withinit=withinit
|
||||
)
|
||||
|
||||
def collect_by_name(
|
||||
self, modcol: Collector, name: str
|
||||
) -> Optional[Union[Item, Collector]]:
|
||||
"""See :meth:`Pytester.collect_by_name`."""
|
||||
return self._pytester.collect_by_name(modcol, name)
|
||||
|
||||
def popen(
|
||||
self,
|
||||
cmdargs,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdin=CLOSE_STDIN,
|
||||
**kw,
|
||||
):
|
||||
"""See :meth:`Pytester.popen`."""
|
||||
return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)
|
||||
|
||||
def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult:
|
||||
"""See :meth:`Pytester.run`."""
|
||||
return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)
|
||||
|
||||
def runpython(self, script) -> RunResult:
|
||||
"""See :meth:`Pytester.runpython`."""
|
||||
return self._pytester.runpython(script)
|
||||
|
||||
def runpython_c(self, command):
|
||||
"""See :meth:`Pytester.runpython_c`."""
|
||||
return self._pytester.runpython_c(command)
|
||||
|
||||
def runpytest_subprocess(self, *args, timeout=None) -> RunResult:
|
||||
"""See :meth:`Pytester.runpytest_subprocess`."""
|
||||
return self._pytester.runpytest_subprocess(*args, timeout=timeout)
|
||||
|
||||
def spawn_pytest(
|
||||
self, string: str, expect_timeout: float = 10.0
|
||||
) -> "pexpect.spawn":
|
||||
"""See :meth:`Pytester.spawn_pytest`."""
|
||||
return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout)
|
||||
|
||||
def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn":
|
||||
"""See :meth:`Pytester.spawn`."""
|
||||
return self._pytester.spawn(cmd, expect_timeout=expect_timeout)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<Testdir {self.tmpdir!r}>"
|
||||
|
||||
def __str__(self) -> str:
|
||||
return str(self.tmpdir)
|
||||
|
||||
|
||||
class LegacyTestdirPlugin:
|
||||
@staticmethod
|
||||
@fixture
|
||||
def testdir(pytester: Pytester) -> Testdir:
|
||||
"""
|
||||
Identical to :fixture:`pytester`, and provides an instance whose methods return
|
||||
legacy ``LEGACY_PATH`` objects instead when applicable.
|
||||
|
||||
New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`.
|
||||
"""
|
||||
return Testdir(pytester, _ispytest=True)
|
||||
|
||||
|
||||
@final
|
||||
@dataclasses.dataclass
|
||||
class TempdirFactory:
|
||||
"""Backward compatibility wrapper that implements :class:`py.path.local`
|
||||
for :class:`TempPathFactory`.
|
||||
|
||||
.. note::
|
||||
These days, it is preferred to use ``tmp_path_factory``.
|
||||
|
||||
:ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
|
||||
|
||||
"""
|
||||
|
||||
_tmppath_factory: TempPathFactory
|
||||
|
||||
def __init__(
|
||||
self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False
|
||||
) -> None:
|
||||
check_ispytest(_ispytest)
|
||||
self._tmppath_factory = tmppath_factory
|
||||
|
||||
def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
|
||||
"""Same as :meth:`TempPathFactory.mktemp`, but returns a :class:`py.path.local` object."""
|
||||
return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
|
||||
|
||||
def getbasetemp(self) -> LEGACY_PATH:
|
||||
"""Same as :meth:`TempPathFactory.getbasetemp`, but returns a :class:`py.path.local` object."""
|
||||
return legacy_path(self._tmppath_factory.getbasetemp().resolve())
|
||||
|
||||
|
||||
class LegacyTmpdirPlugin:
|
||||
@staticmethod
|
||||
@fixture(scope="session")
|
||||
def tmpdir_factory(request: FixtureRequest) -> TempdirFactory:
|
||||
"""Return a :class:`pytest.TempdirFactory` instance for the test session."""
|
||||
# Set dynamically by pytest_configure().
|
||||
return request.config._tmpdirhandler # type: ignore
|
||||
|
||||
@staticmethod
|
||||
@fixture
|
||||
def tmpdir(tmp_path: Path) -> LEGACY_PATH:
|
||||
"""Return a temporary directory path object which is unique to each test
|
||||
function invocation, created as a sub directory of the base temporary
|
||||
directory.
|
||||
|
||||
By default, a new base temporary directory is created each test session,
|
||||
and old bases are removed after 3 sessions, to aid in debugging. If
|
||||
``--basetemp`` is used then it is cleared each session. See :ref:`base
|
||||
temporary directory`.
|
||||
|
||||
The returned object is a `legacy_path`_ object.
|
||||
|
||||
.. note::
|
||||
These days, it is preferred to use ``tmp_path``.
|
||||
|
||||
:ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
|
||||
|
||||
.. _legacy_path: https://py.readthedocs.io/en/latest/path.html
|
||||
"""
|
||||
return legacy_path(tmp_path)
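# Illustrative usage (not part of the original source): the legacy fixture
# returns a py.path.local-style object, so pre-pathlib test code keeps working:
#
#     def test_create_file(tmpdir):
#         p = tmpdir.join("hello.txt")
#         p.write("content")
#         assert p.read() == "content"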
|
||||
|
||||
|
||||
def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH:
|
||||
"""Return a directory path object with the given name.
|
||||
|
||||
Same as :func:`mkdir`, but returns a legacy py path instance.
|
||||
"""
|
||||
return legacy_path(self.mkdir(name))
|
||||
|
||||
|
||||
def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH:
|
||||
"""(deprecated) The file system path of the test module which collected this test."""
|
||||
return legacy_path(self.path)
|
||||
|
||||
|
||||
def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH:
|
||||
"""The directory from which pytest was invoked.
|
||||
|
||||
Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
|
||||
|
||||
:type: LEGACY_PATH
|
||||
"""
|
||||
return legacy_path(self.startpath)
|
||||
|
||||
|
||||
def Config_invocation_dir(self: Config) -> LEGACY_PATH:
|
||||
"""The directory from which pytest was invoked.
|
||||
|
||||
Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`,
|
||||
which is a :class:`pathlib.Path`.
|
||||
|
||||
:type: LEGACY_PATH
|
||||
"""
|
||||
return legacy_path(str(self.invocation_params.dir))
|
||||
|
||||
|
||||
def Config_rootdir(self: Config) -> LEGACY_PATH:
|
||||
"""The path to the :ref:`rootdir <rootdir>`.
|
||||
|
||||
Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`.
|
||||
|
||||
:type: LEGACY_PATH
|
||||
"""
|
||||
return legacy_path(str(self.rootpath))
|
||||
|
||||
|
||||
def Config_inifile(self: Config) -> Optional[LEGACY_PATH]:
|
||||
"""The path to the :ref:`configfile <configfiles>`.
|
||||
|
||||
Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`.
|
||||
|
||||
:type: Optional[LEGACY_PATH]
|
||||
"""
|
||||
return legacy_path(str(self.inipath)) if self.inipath else None
|
||||
|
||||
|
||||
def Session_stardir(self: Session) -> LEGACY_PATH:
|
||||
"""The path from which pytest was invoked.
|
||||
|
||||
Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
|
||||
|
||||
:type: LEGACY_PATH
|
||||
"""
|
||||
return legacy_path(self.startpath)
|
||||
|
||||
|
||||
def Config__getini_unknown_type(
|
||||
self, name: str, type: str, value: Union[str, List[str]]
|
||||
):
|
||||
if type == "pathlist":
|
||||
# TODO: This assert is probably not valid in all cases.
|
||||
assert self.inipath is not None
|
||||
dp = self.inipath.parent
|
||||
input_values = shlex.split(value) if isinstance(value, str) else value
|
||||
return [legacy_path(str(dp / x)) for x in input_values]
|
||||
else:
|
||||
raise ValueError(f"unknown configuration type: {type}", value)
|
||||
|
||||
|
||||
def Node_fspath(self: Node) -> LEGACY_PATH:
|
||||
"""(deprecated) returns a legacy_path copy of self.path"""
|
||||
return legacy_path(self.path)
|
||||
|
||||
|
||||
def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None:
|
||||
self.path = Path(value)
|
||||
|
||||
|
||||
@hookimpl(tryfirst=True)
|
||||
def pytest_load_initial_conftests(early_config: Config) -> None:
|
||||
"""Monkeypatch legacy path attributes in several classes, as early as possible."""
|
||||
mp = MonkeyPatch()
|
||||
early_config.add_cleanup(mp.undo)
|
||||
|
||||
# Add Cache.makedir().
|
||||
mp.setattr(Cache, "makedir", Cache_makedir, raising=False)
|
||||
|
||||
# Add FixtureRequest.fspath property.
|
||||
mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False)
|
||||
|
||||
# Add TerminalReporter.startdir property.
|
||||
mp.setattr(
|
||||
TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False
|
||||
)
|
||||
|
||||
# Add Config.{invocation_dir,rootdir,inifile} properties.
|
||||
mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False)
|
||||
mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False)
|
||||
mp.setattr(Config, "inifile", property(Config_inifile), raising=False)
|
||||
|
||||
# Add Session.startdir property.
|
||||
mp.setattr(Session, "startdir", property(Session_stardir), raising=False)
|
||||
|
||||
# Add pathlist configuration type.
|
||||
mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type)
|
||||
|
||||
# Add Node.fspath property.
|
||||
mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False)
|
||||
|
||||
|
||||
@hookimpl
|
||||
def pytest_configure(config: Config) -> None:
|
||||
"""Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed."""
|
||||
if config.pluginmanager.has_plugin("tmpdir"):
|
||||
mp = MonkeyPatch()
|
||||
config.add_cleanup(mp.undo)
|
||||
# Create TmpdirFactory and attach it to the config object.
|
||||
#
|
||||
# This is to comply with existing plugins which expect the handler to be
|
||||
# available at pytest_configure time, but ideally should be moved entirely
|
||||
# to the tmpdir_factory session fixture.
|
||||
try:
|
||||
tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined]
|
||||
except AttributeError:
|
||||
# tmpdir plugin is blocked.
|
||||
pass
|
||||
else:
|
||||
_tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True)
|
||||
mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False)
|
||||
|
||||
config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir")
|
||||
|
||||
|
||||
@hookimpl
|
||||
def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None:
|
||||
# pytester is not loaded by default and is commonly loaded from a conftest,
|
||||
# so checking for it in `pytest_configure` is not enough.
|
||||
is_pytester = plugin is manager.get_plugin("pytester")
|
||||
if is_pytester and not manager.is_registered(LegacyTestdirPlugin):
|
||||
manager.register(LegacyTestdirPlugin, "legacypath-pytester")
|
Some files were not shown because too many files have changed in this diff