1
0
Fork 0
forked from bton/matekasse

tests versuch 2

This commit is contained in:
2000-Trek 2023-07-28 23:30:45 +02:00
parent fdf385fe06
commit c88f7df83a
2363 changed files with 408191 additions and 0 deletions

View file

@@ -0,0 +1,6 @@
from .serving import run_simple as run_simple
from .test import Client as Client
from .wrappers import Request as Request
from .wrappers import Response as Response
__version__ = "2.3.6"

View file

@@ -0,0 +1,341 @@
from __future__ import annotations
import logging
import operator
import re
import sys
import typing as t
from datetime import datetime
from datetime import timezone
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIEnvironment
from .wrappers.request import Request
_logger: logging.Logger | None = None
class _Missing:
    """Sentinel type for "no value", used where ``None`` is a valid value."""

    def __repr__(self) -> str:
        return "no value"

    def __reduce__(self) -> str:
        # Pickling resolves to the module-level ``_missing`` singleton,
        # so identity checks survive a pickle round-trip.
        return "_missing"


# The shared singleton instance; compare with ``is _missing``.
_missing = _Missing()
@t.overload
def _make_encode_wrapper(reference: str) -> t.Callable[[str], str]:
...
@t.overload
def _make_encode_wrapper(reference: bytes) -> t.Callable[[str], bytes]:
...
def _make_encode_wrapper(reference: t.AnyStr) -> t.Callable[[str], t.AnyStr]:
"""Create a function that will be called with a string argument. If
the reference is bytes, values will be encoded to bytes.
"""
if isinstance(reference, str):
return lambda x: x
return operator.methodcaller("encode", "latin1")
def _check_str_tuple(value: tuple[t.AnyStr, ...]) -> None:
"""Ensure tuple items are all strings or all bytes."""
if not value:
return
item_type = str if isinstance(value[0], str) else bytes
if any(not isinstance(item, item_type) for item in value):
raise TypeError(f"Cannot mix str and bytes arguments (got {value!r})")
_default_encoding = sys.getdefaultencoding()
def _to_bytes(
x: str | bytes, charset: str = _default_encoding, errors: str = "strict"
) -> bytes:
if x is None or isinstance(x, bytes):
return x
if isinstance(x, (bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError("Expected bytes")
@t.overload
def _to_str(  # type: ignore
    x: None,
    charset: str | None = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> None:
    ...


@t.overload
def _to_str(
    x: t.Any,
    charset: str | None = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> str:
    ...


def _to_str(
    x: t.Any | None,
    charset: str | None = _default_encoding,
    errors: str = "strict",
    allow_none_charset: bool = False,
) -> str | bytes | None:
    """Coerce *x* to ``str``.

    ``None`` and ``str`` pass through, non bytes-like objects are
    stringified, and bytes-like objects are decoded with *charset*.
    With ``charset=None`` and ``allow_none_charset=True`` the raw bytes
    are returned unchanged.
    """
    if x is None or isinstance(x, str):
        return x

    if not isinstance(x, (bytes, bytearray)):
        return str(x)

    if charset is None and allow_none_charset:
        return x

    return x.decode(charset, errors)  # type: ignore
def _wsgi_decoding_dance(
s: str, charset: str = "utf-8", errors: str = "replace"
) -> str:
return s.encode("latin1").decode(charset, errors)
def _wsgi_encoding_dance(s: str, charset: str = "utf-8", errors: str = "strict") -> str:
return s.encode(charset).decode("latin1", errors)
def _get_environ(obj: WSGIEnvironment | Request) -> WSGIEnvironment:
env = getattr(obj, "environ", obj)
assert isinstance(
env, dict
), f"{type(obj).__name__!r} is not a WSGI environment (has to be a dict)"
return env
def _has_level_handler(logger: logging.Logger) -> bool:
"""Check if there is a handler in the logging chain that will handle
the given logger's effective level.
"""
level = logger.getEffectiveLevel()
current = logger
while current:
if any(handler.level <= level for handler in current.handlers):
return True
if not current.propagate:
break
current = current.parent # type: ignore
return False
class _ColorStreamHandler(logging.StreamHandler):
"""On Windows, wrap stream with Colorama for ANSI style support."""
def __init__(self) -> None:
try:
import colorama
except ImportError:
stream = None
else:
stream = colorama.AnsiToWin32(sys.stderr)
super().__init__(stream)
def _log(type: str, message: str, *args: t.Any, **kwargs: t.Any) -> None:
    """Log a message to the 'werkzeug' logger.
    The logger is created the first time it is needed. If there is no
    level set, it is set to :data:`logging.INFO`. If there is no handler
    for the logger's effective level, a :class:`logging.StreamHandler`
    is added.

    :param type: Name of the logger method to call: "info", "warning", ...
    :param message: Log message; a trailing newline is stripped.
    """
    global _logger
    if _logger is None:
        # First use: configure the shared module-level logger lazily so
        # an application-configured "werkzeug" logger is left untouched.
        _logger = logging.getLogger("werkzeug")
        if _logger.level == logging.NOTSET:
            _logger.setLevel(logging.INFO)
        if not _has_level_handler(_logger):
            _logger.addHandler(_ColorStreamHandler())
    # Dispatch to the named logger method (e.g. _logger.info).
    getattr(_logger, type)(message.rstrip(), *args, **kwargs)
@t.overload
def _dt_as_utc(dt: None) -> None:
...
@t.overload
def _dt_as_utc(dt: datetime) -> datetime:
...
def _dt_as_utc(dt: datetime | None) -> datetime | None:
if dt is None:
return dt
if dt.tzinfo is None:
return dt.replace(tzinfo=timezone.utc)
elif dt.tzinfo != timezone.utc:
return dt.astimezone(timezone.utc)
return dt
_TAccessorValue = t.TypeVar("_TAccessorValue")
class _DictAccessorProperty(t.Generic[_TAccessorValue]):
"""Baseclass for `environ_property` and `header_property`."""
read_only = False
def __init__(
self,
name: str,
default: _TAccessorValue | None = None,
load_func: t.Callable[[str], _TAccessorValue] | None = None,
dump_func: t.Callable[[_TAccessorValue], str] | None = None,
read_only: bool | None = None,
doc: str | None = None,
) -> None:
self.name = name
self.default = default
self.load_func = load_func
self.dump_func = dump_func
if read_only is not None:
self.read_only = read_only
self.__doc__ = doc
def lookup(self, instance: t.Any) -> t.MutableMapping[str, t.Any]:
raise NotImplementedError
@t.overload
def __get__(
self, instance: None, owner: type
) -> _DictAccessorProperty[_TAccessorValue]:
...
@t.overload
def __get__(self, instance: t.Any, owner: type) -> _TAccessorValue:
...
def __get__(
self, instance: t.Any | None, owner: type
) -> _TAccessorValue | _DictAccessorProperty[_TAccessorValue]:
if instance is None:
return self
storage = self.lookup(instance)
if self.name not in storage:
return self.default # type: ignore
value = storage[self.name]
if self.load_func is not None:
try:
return self.load_func(value)
except (ValueError, TypeError):
return self.default # type: ignore
return value # type: ignore
def __set__(self, instance: t.Any, value: _TAccessorValue) -> None:
if self.read_only:
raise AttributeError("read only property")
if self.dump_func is not None:
self.lookup(instance)[self.name] = self.dump_func(value)
else:
self.lookup(instance)[self.name] = value
def __delete__(self, instance: t.Any) -> None:
if self.read_only:
raise AttributeError("read only property")
self.lookup(instance).pop(self.name, None)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.name}>"
def _decode_idna(domain: str) -> str:
try:
data = domain.encode("ascii")
except UnicodeEncodeError:
# If the domain is not ASCII, it's decoded already.
return domain
try:
# Try decoding in one shot.
return data.decode("idna")
except UnicodeDecodeError:
pass
# Decode each part separately, leaving invalid parts as punycode.
parts = []
for part in data.split(b"."):
try:
parts.append(part.decode("idna"))
except UnicodeDecodeError:
parts.append(part.decode("ascii"))
return ".".join(parts)
_plain_int_re = re.compile(r"-?\d+", re.ASCII)
_plain_float_re = re.compile(r"-?\d+\.\d+", re.ASCII)
def _plain_int(value: str) -> int:
"""Parse an int only if it is only ASCII digits and ``-``.
This disallows ``+``, ``_``, and non-ASCII digits, which are accepted by ``int`` but
are not allowed in HTTP header values.
"""
if _plain_int_re.fullmatch(value) is None:
raise ValueError
return int(value)
def _plain_float(value: str) -> float:
"""Parse a float only if it is only ASCII digits and ``-``, and contains digits
before and after the ``.``.
This disallows ``+``, ``_``, non-ASCII digits, and ``.123``, which are accepted by
``float`` but are not allowed in HTTP header values.
"""
if _plain_float_re.fullmatch(value) is None:
raise ValueError
return float(value)

View file

@@ -0,0 +1,458 @@
from __future__ import annotations
import fnmatch
import os
import subprocess
import sys
import threading
import time
import typing as t
from itertools import chain
from pathlib import PurePath
from ._internal import _log
# The various system prefixes where imports are found. Base values are
# different when running in a virtualenv. All reloaders will ignore the
# base paths (usually the system installation). The stat reloader won't
# scan the virtualenv paths, it will only include modules that are
# already imported.
# Paths always ignored: the base interpreter installation.
_ignore_always = tuple({sys.base_prefix, sys.base_exec_prefix})
# All system prefixes, including a virtualenv's own prefix.
prefix = {*_ignore_always, sys.prefix, sys.exec_prefix}

if hasattr(sys, "real_prefix"):
    # virtualenv < 20
    prefix.add(sys.real_prefix)

# The stat reloader skips scanning anything under these prefixes.
_stat_ignore_scan = tuple(prefix)
del prefix
# Directory names that never contain watched source files (VCS metadata
# and tool caches); pruned during directory walks.
_ignore_common_dirs = {
    "__pycache__",
    ".git",
    ".hg",
    ".tox",
    ".nox",
    ".pytest_cache",
    ".mypy_cache",
}
def _iter_module_paths() -> t.Iterator[str]:
    """Find the filesystem paths associated with imported modules."""
    # List is in case the value is modified by the app while updating.
    for module in list(sys.modules.values()):
        name = getattr(module, "__file__", None)

        # Skip modules without a file (builtins, namespace packages) and
        # anything installed under the base interpreter prefixes.
        if name is None or name.startswith(_ignore_always):
            continue

        # For zipimported modules __file__ points inside the archive;
        # walk up until an existing file (the zip itself) is found.
        while not os.path.isfile(name):
            # Zip file, find the base file without the module path.
            old = name
            name = os.path.dirname(name)

            if name == old:  # skip if it was all directories somehow
                break
        else:
            # while/else: only reached when the loop exited normally,
            # i.e. an actual file was found.
            yield name
def _remove_by_pattern(paths: set[str], exclude_patterns: set[str]) -> None:
for pattern in exclude_patterns:
paths.difference_update(fnmatch.filter(paths, pattern))
def _find_stat_paths(
    extra_files: set[str], exclude_patterns: set[str]
) -> t.Iterable[str]:
    """Find paths for the stat reloader to watch. Returns imported
    module files, Python files under non-system paths. Extra files and
    Python files under extra directories can also be scanned.

    System paths have to be excluded for efficiency. Non-system paths,
    such as a project root or ``sys.path.insert``, should be the paths
    of interest to the user anyway.

    :param extra_files: Extra file paths to watch.
    :param exclude_patterns: fnmatch patterns removed from the result.
    """
    paths = set()

    for path in chain(list(sys.path), extra_files):
        path = os.path.abspath(path)

        if os.path.isfile(path):
            # zip file on sys.path, or extra file
            paths.add(path)
            continue

        # Seed with True so the top-level scanned directory itself is
        # never pruned by the parent check below.
        parent_has_py = {os.path.dirname(path): True}

        for root, dirs, files in os.walk(path):
            # Optimizations: ignore system prefixes, __pycache__ will
            # have a py or pyc module at the import path, ignore some
            # common known dirs such as version control and tool caches.
            if (
                root.startswith(_stat_ignore_scan)
                or os.path.basename(root) in _ignore_common_dirs
            ):
                # Clearing ``dirs`` stops os.walk from descending here.
                dirs.clear()
                continue

            has_py = False

            for name in files:
                if name.endswith((".py", ".pyc")):
                    has_py = True
                    paths.add(os.path.join(root, name))

            # Optimization: stop scanning a directory if neither it nor
            # its parent contained Python files.
            if not (has_py or parent_has_py[os.path.dirname(root)]):
                dirs.clear()
                continue

            parent_has_py[root] = has_py

    paths.update(_iter_module_paths())
    _remove_by_pattern(paths, exclude_patterns)
    return paths
def _find_watchdog_paths(
    extra_files: set[str], exclude_patterns: set[str]
) -> t.Iterable[str]:
    """Find directories for the watchdog reloader to watch. Looks at
    the same sources as the stat reloader, but watches everything under
    directories instead of individual files.
    """
    dirs = set()

    for candidate in chain(list(sys.path), extra_files):
        candidate = os.path.abspath(candidate)

        # Watch the containing directory for individual files.
        if os.path.isfile(candidate):
            candidate = os.path.dirname(candidate)

        dirs.add(candidate)

    dirs.update(os.path.dirname(mod_path) for mod_path in _iter_module_paths())
    _remove_by_pattern(dirs, exclude_patterns)
    # Collapse to common roots so each tree is only watched once.
    return _find_common_roots(dirs)
def _find_common_roots(paths: t.Iterable[str]) -> t.Iterable[str]:
root: dict[str, dict] = {}
for chunks in sorted((PurePath(x).parts for x in paths), key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node: t.Mapping[str, dict], path: tuple[str, ...]) -> None:
for prefix, child in node.items():
_walk(child, path + (prefix,))
if not node:
rv.add(os.path.join(*path))
_walk(root, ())
return rv
def _get_args_for_reloading() -> list[str]:
    """Determine how the script was executed, and return the args needed
    to execute it again in a new process.

    Handles plain scripts (``python app.py``), modules (``python -m pkg``),
    and Windows ``.exe`` entry points.
    """
    if sys.version_info >= (3, 10):
        # sys.orig_argv, added in Python 3.10, contains the exact args used to invoke
        # Python. Still replace argv[0] with sys.executable for accuracy.
        return [sys.executable, *sys.orig_argv[1:]]

    rv = [sys.executable]
    py_script = sys.argv[0]
    args = sys.argv[1:]
    # Need to look at main module to determine how it was executed.
    __main__ = sys.modules["__main__"]

    # The value of __package__ indicates how Python was called. It may
    # not exist if a setuptools script is installed as an egg. It may be
    # set incorrectly for entry points created with pip on Windows.
    if getattr(__main__, "__package__", None) is None or (
        os.name == "nt"
        and __main__.__package__ == ""
        and not os.path.exists(py_script)
        and os.path.exists(f"{py_script}.exe")
    ):
        # Executed a file, like "python app.py".
        py_script = os.path.abspath(py_script)

        if os.name == "nt":
            # Windows entry points have ".exe" extension and should be
            # called directly.
            if not os.path.exists(py_script) and os.path.exists(f"{py_script}.exe"):
                py_script += ".exe"

            if (
                os.path.splitext(sys.executable)[1] == ".exe"
                and os.path.splitext(py_script)[1] == ".exe"
            ):
                # Both are .exe: the script is itself an entry-point
                # executable, so drop the interpreter from the command.
                rv.pop(0)

        rv.append(py_script)
    else:
        # Executed a module, like "python -m werkzeug.serving".
        if os.path.isfile(py_script):
            # Rewritten by Python from "-m script" to "/path/to/script.py".
            py_module = t.cast(str, __main__.__package__)
            name = os.path.splitext(os.path.basename(py_script))[0]

            if name != "__main__":
                py_module += f".{name}"
        else:
            # Incorrectly rewritten by pydevd debugger from "-m script" to "script".
            py_module = py_script

        # lstrip(".") guards against a leading dot when __package__ is "".
        rv.extend(("-m", py_module.lstrip(".")))

    rv.extend(args)
    return rv
class ReloaderLoop:
    """Base class for the filesystem-watching reloader implementations."""

    name = ""

    def __init__(
        self,
        extra_files: t.Iterable[str] | None = None,
        exclude_patterns: t.Iterable[str] | None = None,
        interval: int | float = 1,
    ) -> None:
        # Normalize extra files to absolute paths up front.
        self.extra_files: set[str] = {os.path.abspath(f) for f in extra_files or ()}
        self.exclude_patterns: set[str] = set(exclude_patterns or ())
        self.interval = interval

    def __enter__(self) -> ReloaderLoop:
        """Do any setup, then run one step of the watch to populate the
        initial filesystem state.
        """
        self.run_step()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):  # type: ignore
        """Clean up any resources associated with the reloader."""

    def run(self) -> None:
        """Continually run the watch step, sleeping for the configured
        interval after each step.
        """
        while True:
            self.run_step()
            time.sleep(self.interval)

    def run_step(self) -> None:
        """Run one step for watching the filesystem. Called once to set
        up initial state, then repeatedly to update it.
        """

    def restart_with_reloader(self) -> int:
        """Spawn a new Python interpreter with the same arguments as the
        current one, but running the reloader thread. Loops as long as
        the child exits with code 3 (reload requested).
        """
        while True:
            _log("info", f" * Restarting with {self.name}")
            args = _get_args_for_reloading()
            # Mark the child so it runs the app instead of re-spawning.
            new_environ = {**os.environ, "WERKZEUG_RUN_MAIN": "true"}
            exit_code = subprocess.call(args, env=new_environ, close_fds=False)

            if exit_code != 3:
                return exit_code

    def trigger_reload(self, filename: str) -> None:
        self.log_reload(filename)
        # Exit code 3 tells the parent process to restart us.
        sys.exit(3)

    def log_reload(self, filename: str) -> None:
        filename = os.path.abspath(filename)
        _log("info", f" * Detected change in {filename!r}, reloading")
class StatReloaderLoop(ReloaderLoop):
    """Reloader that polls file modification times with ``os.stat``."""

    name = "stat"

    def __enter__(self) -> ReloaderLoop:
        # Maps watched file path -> last seen mtime.
        self.mtimes: dict[str, float] = {}
        return super().__enter__()

    def run_step(self) -> None:
        for path in _find_stat_paths(self.extra_files, self.exclude_patterns):
            try:
                mtime = os.stat(path).st_mtime
            except OSError:
                # File may vanish between discovery and stat; ignore.
                continue

            # First sighting records the baseline and never triggers.
            previous = self.mtimes.setdefault(path, mtime)

            if mtime > previous:
                self.trigger_reload(path)
class WatchdogReloaderLoop(ReloaderLoop):
    """Reloader driven by filesystem events from the third-party
    ``watchdog`` package instead of stat polling.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        from watchdog.observers import Observer
        from watchdog.events import PatternMatchingEventHandler
        from watchdog.events import EVENT_TYPE_OPENED
        from watchdog.events import FileModifiedEvent

        super().__init__(*args, **kwargs)
        # Bound method captured in a closure for the handler class below.
        trigger_reload = self.trigger_reload

        class EventHandler(PatternMatchingEventHandler):
            def on_any_event(self, event: FileModifiedEvent):  # type: ignore
                # "opened" events don't indicate a change; ignore them.
                if event.event_type == EVENT_TYPE_OPENED:
                    return

                trigger_reload(event.src_path)

        # Derive a display name such as "watchdog (inotify)" from the
        # platform-specific Observer class name.
        reloader_name = Observer.__name__.lower()  # type: ignore[attr-defined]

        if reloader_name.endswith("observer"):
            reloader_name = reloader_name[:-8]

        self.name = f"watchdog ({reloader_name})"
        self.observer = Observer()
        # Extra patterns can be non-Python files, match them in addition
        # to all Python files in default and extra directories. Ignore
        # __pycache__ since a change there will always have a change to
        # the source file (or initial pyc file) as well. Ignore Git and
        # Mercurial internal changes.
        extra_patterns = [p for p in self.extra_files if not os.path.isdir(p)]
        self.event_handler = EventHandler(
            patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
            ignore_patterns=[
                *[f"*/{d}/*" for d in _ignore_common_dirs],
                *self.exclude_patterns,
            ],
        )
        self.should_reload = False

    def trigger_reload(self, filename: str) -> None:
        # This is called inside an event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def __enter__(self) -> ReloaderLoop:
        # Maps watched directory -> watchdog watch handle (or None on error).
        self.watches: dict[str, t.Any] = {}
        self.observer.start()
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):  # type: ignore
        self.observer.stop()
        self.observer.join()

    def run(self) -> None:
        # The observer thread delivers events; this thread only polls
        # the flag set by trigger_reload().
        while not self.should_reload:
            self.run_step()
            time.sleep(self.interval)

        sys.exit(3)

    def run_step(self) -> None:
        to_delete = set(self.watches)

        for path in _find_watchdog_paths(self.extra_files, self.exclude_patterns):
            if path not in self.watches:
                try:
                    self.watches[path] = self.observer.schedule(
                        self.event_handler, path, recursive=True
                    )
                except OSError:
                    # Clear this path from the list of watches. We don't
                    # want the same error message showing again in the
                    # next iteration.
                    self.watches[path] = None

            to_delete.discard(path)

        # Unschedule watches for paths that no longer need watching.
        for path in to_delete:
            watch = self.watches.pop(path, None)

            if watch is not None:
                self.observer.unschedule(watch)
# Registry of reloader implementations, selected by name via the
# ``reloader_type`` argument of :func:`run_with_reloader`.
reloader_loops: dict[str, type[ReloaderLoop]] = {
    "stat": StatReloaderLoop,
    "watchdog": WatchdogReloaderLoop,
}

try:
    __import__("watchdog.observers")
except ImportError:
    # watchdog is optional; fall back to stat polling for "auto".
    reloader_loops["auto"] = reloader_loops["stat"]
else:
    reloader_loops["auto"] = reloader_loops["watchdog"]
def ensure_echo_on() -> None:
    """Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after a reload."""
    # tcgetattr will fail if stdin isn't a tty
    if sys.stdin is None or not sys.stdin.isatty():
        return

    try:
        import termios
    except ImportError:
        # termios is POSIX-only; nothing to restore elsewhere.
        return

    attributes = termios.tcgetattr(sys.stdin)
    lflag = attributes[3]

    if not lflag & termios.ECHO:
        attributes[3] = lflag | termios.ECHO
        termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
def run_with_reloader(
    main_func: t.Callable[[], None],
    extra_files: t.Iterable[str] | None = None,
    exclude_patterns: t.Iterable[str] | None = None,
    interval: int | float = 1,
    reloader_type: str = "auto",
) -> None:
    """Run the given function in an independent Python interpreter."""
    import signal

    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    reloader_cls = reloader_loops[reloader_type]
    reloader = reloader_cls(
        extra_files=extra_files, exclude_patterns=exclude_patterns, interval=interval
    )

    try:
        if os.environ.get("WERKZEUG_RUN_MAIN") != "true":
            # Parent process: spawn the child that actually runs the app
            # and exit with its final exit code.
            sys.exit(reloader.restart_with_reloader())

        # Child process: run the app in a daemon thread while the
        # reloader watches the filesystem in the main thread.
        ensure_echo_on()
        app_thread = threading.Thread(target=main_func, args=())
        app_thread.daemon = True

        # Enter the reloader to set up initial state, then start
        # the app thread and reloader update loop.
        with reloader:
            app_thread.start()
            reloader.run()
    except KeyboardInterrupt:
        pass

View file

@@ -0,0 +1,34 @@
from .accept import Accept as Accept
from .accept import CharsetAccept as CharsetAccept
from .accept import LanguageAccept as LanguageAccept
from .accept import MIMEAccept as MIMEAccept
from .auth import Authorization as Authorization
from .auth import WWWAuthenticate as WWWAuthenticate
from .cache_control import RequestCacheControl as RequestCacheControl
from .cache_control import ResponseCacheControl as ResponseCacheControl
from .csp import ContentSecurityPolicy as ContentSecurityPolicy
from .etag import ETags as ETags
from .file_storage import FileMultiDict as FileMultiDict
from .file_storage import FileStorage as FileStorage
from .headers import EnvironHeaders as EnvironHeaders
from .headers import Headers as Headers
from .mixins import ImmutableDictMixin as ImmutableDictMixin
from .mixins import ImmutableHeadersMixin as ImmutableHeadersMixin
from .mixins import ImmutableListMixin as ImmutableListMixin
from .mixins import ImmutableMultiDictMixin as ImmutableMultiDictMixin
from .mixins import UpdateDictMixin as UpdateDictMixin
from .range import ContentRange as ContentRange
from .range import IfRange as IfRange
from .range import Range as Range
from .structures import CallbackDict as CallbackDict
from .structures import CombinedMultiDict as CombinedMultiDict
from .structures import HeaderSet as HeaderSet
from .structures import ImmutableDict as ImmutableDict
from .structures import ImmutableList as ImmutableList
from .structures import ImmutableMultiDict as ImmutableMultiDict
from .structures import ImmutableOrderedMultiDict as ImmutableOrderedMultiDict
from .structures import ImmutableTypeConversionDict as ImmutableTypeConversionDict
from .structures import iter_multi_items as iter_multi_items
from .structures import MultiDict as MultiDict
from .structures import OrderedMultiDict as OrderedMultiDict
from .structures import TypeConversionDict as TypeConversionDict

View file

@@ -0,0 +1,326 @@
from __future__ import annotations
import codecs
import re
from .structures import ImmutableList
class Accept(ImmutableList):
    """An :class:`Accept` object is just a list subclass for lists of
    ``(value, quality)`` tuples. It is automatically sorted by specificity
    and quality.

    All :class:`Accept` objects work similar to a list but provide extra
    functionality for working with the data. Containment checks are
    normalized to the rules of that header:

    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
    >>> a.best
    'ISO-8859-1'
    >>> 'iso-8859-1' in a
    True
    >>> 'UTF8' in a
    True
    >>> 'utf7' in a
    False

    To get the quality for an item you can use normal item lookup:

    >>> print(a['utf-8'])
    0.7
    >>> a['utf7']
    0

    .. versionchanged:: 0.5
       :class:`Accept` objects are forced immutable now.

    .. versionchanged:: 1.0.0
       :class:`Accept` internal values are no longer ordered
       alphabetically for equal quality tags. Instead the initial
       order is preserved.
    """

    def __init__(self, values=()):
        # ``list.__init__`` is called directly throughout because
        # ImmutableList blocks the normal mutation API.
        if values is None:
            # Header absent: empty list, marked as not provided.
            list.__init__(self)
            self.provided = False
        elif isinstance(values, Accept):
            # Copy constructor: the source is already sorted.
            self.provided = values.provided
            list.__init__(self, values)
        else:
            self.provided = True
            # Sort best-first: higher specificity, then higher quality.
            # ``sorted`` is stable, so equal entries keep client order.
            values = sorted(
                values, key=lambda x: (self._specificity(x[0]), x[1]), reverse=True
            )
            list.__init__(self, values)

    def _specificity(self, value):
        """Returns a tuple describing the value's specificity."""
        # A concrete value is more specific than the "*" wildcard.
        return (value != "*",)

    def _value_matches(self, value, item):
        """Check if a value matches a given accept item."""
        return item == "*" or item.lower() == value.lower()

    def __getitem__(self, key):
        """Besides index lookup (getting item n) you can also pass it a string
        to get the quality for the item. If the item is not in the list, the
        returned quality is ``0``.
        """
        if isinstance(key, str):
            return self.quality(key)
        return list.__getitem__(self, key)

    def quality(self, key):
        """Returns the quality of the key.

        .. versionadded:: 0.6
           In previous versions you had to use the item-lookup syntax
           (eg: ``obj[key]`` instead of ``obj.quality(key)``)
        """
        for item, quality in self:
            if self._value_matches(key, item):
                return quality
        # Not accepted at all.
        return 0

    def __contains__(self, value):
        for item, _quality in self:
            if self._value_matches(value, item):
                return True
        return False

    def __repr__(self):
        pairs_str = ", ".join(f"({x!r}, {y})" for x, y in self)
        return f"{type(self).__name__}([{pairs_str}])"

    def index(self, key):
        """Get the position of an entry or raise :exc:`ValueError`.

        :param key: The key to be looked up.

        .. versionchanged:: 0.5
           This used to raise :exc:`IndexError`, which was inconsistent
           with the list API.
        """
        if isinstance(key, str):
            for idx, (item, _quality) in enumerate(self):
                if self._value_matches(key, item):
                    return idx
            raise ValueError(key)
        return list.index(self, key)

    def find(self, key):
        """Get the position of an entry or return -1.

        :param key: The key to be looked up.
        """
        try:
            return self.index(key)
        except ValueError:
            return -1

    def values(self):
        """Iterate over all values."""
        for item in self:
            yield item[0]

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        result = []
        for value, quality in self:
            # q=1 is the default and is omitted from the header.
            if quality != 1:
                value = f"{value};q={quality}"
            result.append(value)
        return ",".join(result)

    def __str__(self):
        return self.to_header()

    def _best_single_match(self, match):
        for client_item, quality in self:
            if self._value_matches(match, client_item):
                # self is sorted by specificity descending, we can exit
                return client_item, quality
        return None

    def best_match(self, matches, default=None):
        """Returns the best match from a list of possible matches based
        on the specificity and quality of the client. If two items have the
        same quality and specificity, the one is returned that comes first.

        :param matches: a list of matches to check for
        :param default: the value that is returned if none match
        """
        result = default
        best_quality = -1
        best_specificity = (-1,)
        for server_item in matches:
            match = self._best_single_match(server_item)
            if not match:
                continue
            client_item, quality = match
            specificity = self._specificity(client_item)
            # Equal quality deliberately falls through so the
            # specificity comparison below can break the tie.
            if quality <= 0 or quality < best_quality:
                continue
            # better quality or same quality but more specific => better match
            if quality > best_quality or specificity > best_specificity:
                result = server_item
                best_quality = quality
                best_specificity = specificity
        return result

    @property
    def best(self):
        """The best match as value."""
        # Entries are sorted best-first, so the head is the best value.
        if self:
            return self[0][0]
# Splits a mimetype on "/" and on ";"-delimited parameter boundaries.
_mime_split_re = re.compile(r"/|(?:\s*;\s*)")


def _normalize_mime(value):
    """Lowercase a mimetype and split it into type/subtype/param segments."""
    return _mime_split_re.split(value.lower())
class MIMEAccept(Accept):
    """Like :class:`Accept` but with special methods and behavior for
    mimetypes.
    """

    def _specificity(self, value):
        # One flag per segment: concrete segments beat "*" wildcards.
        return tuple(segment != "*" for segment in _mime_split_re.split(value))

    def _value_matches(self, value, item):
        # item comes from the client, can't match if it's invalid.
        if "/" not in item:
            return False

        # value comes from the application, tell the developer when it
        # doesn't look valid.
        if "/" not in value:
            raise ValueError(f"invalid mimetype {value!r}")

        # Split the application value into type, subtype, sorted params.
        value_segments = _normalize_mime(value)
        value_type, value_subtype = value_segments[:2]
        value_params = sorted(value_segments[2:])

        # "*/*" is the only valid value that can start with "*".
        if value_type == "*" and value_subtype != "*":
            raise ValueError(f"invalid mimetype {value!r}")

        # Split the client item the same way.
        item_segments = _normalize_mime(item)
        item_type, item_subtype = item_segments[:2]
        item_params = sorted(item_segments[2:])

        # "*/not-*" from the client is invalid, can't match.
        if item_type == "*" and item_subtype != "*":
            return False

        # A full wildcard on either side matches anything.
        if (item_type == "*" and item_subtype == "*") or (
            value_type == "*" and value_subtype == "*"
        ):
            return True

        # Otherwise types must agree exactly.
        if item_type != value_type:
            return False

        # Subtype wildcards match; concrete subtypes also compare params.
        if item_subtype == "*" or value_subtype == "*":
            return True

        return item_subtype == value_subtype and item_params == value_params

    @property
    def accept_html(self):
        """True if this object accepts HTML."""
        return (
            "text/html" in self or "application/xhtml+xml" in self or self.accept_xhtml
        )

    @property
    def accept_xhtml(self):
        """True if this object accepts XHTML."""
        return "application/xhtml+xml" in self or "application/xml" in self

    @property
    def accept_json(self):
        """True if this object accepts JSON."""
        return "application/json" in self
# Language tags may use "-" (BCP 47) or "_" (POSIX locale) delimiters.
_locale_delim_re = re.compile(r"[_-]")


def _normalize_lang(value):
    """Process a language tag for matching."""
    return _locale_delim_re.split(value.lower())
class LanguageAccept(Accept):
    """Like :class:`Accept` but with normalization for language tags."""

    def _value_matches(self, value, item):
        return item == "*" or _normalize_lang(value) == _normalize_lang(item)

    def best_match(self, matches, default=None):
        """Given a list of supported values, finds the best match from
        the list of accepted values.

        Language tags are normalized for the purpose of matching, but
        are returned unchanged.

        If no exact match is found, this will fall back to matching
        the first subtag (primary language only), first with the
        accepted values then with the match values. This partial is not
        applied to any other language subtags.

        The default is returned if no exact or fallback match is found.

        :param matches: A list of supported languages to find a match.
        :param default: The value that is returned if none match.
        """
        # Exact match: the client accepts "en-US" and "en-US" is offered.
        exact = super().best_match(matches)

        if exact is not None:
            return exact

        # Fallback 1, accepted side: a client accepting "en-US" also
        # accepts plain "en". The regex split handles "-" and "_".
        primary_accepted = Accept(
            [(_locale_delim_re.split(tag, 1)[0], q) for tag, q in self]
        )
        primary_result = primary_accepted.best_match(matches)

        if primary_result is not None:
            return primary_result

        # Fallback 2, offered side: a client accepting "en" also
        # matches an offered "en-US".
        primary_matches = [_locale_delim_re.split(tag, 1)[0] for tag in matches]
        matched_primary = super().best_match(primary_matches)

        if matched_primary is not None:
            # Map back to the first original offered value whose tag
            # starts with the matched primary tag.
            return next(m for m in matches if m.startswith(matched_primary))

        return default
class CharsetAccept(Accept):
    """Like :class:`Accept` but with normalization for charsets."""

    def _value_matches(self, value, item):
        def _canonical(name):
            # Resolve aliases ("UTF8" -> "utf-8") via the codecs
            # registry; unknown names fall back to lowercasing.
            try:
                return codecs.lookup(name).name
            except LookupError:
                return name.lower()

        return item == "*" or _canonical(value) == _canonical(item)

View file

@@ -0,0 +1,54 @@
from collections.abc import Iterable
from collections.abc import Iterator
from typing import overload
from .structures import ImmutableList
# Type stub mirroring the runtime ``accept.Accept``.
# NOTE(review): qualities are declared as ``int`` here although floats
# occur at runtime (e.g. q=0.7) — confirm against the implementation.
class Accept(ImmutableList[tuple[str, int]]):
    # Whether the client actually sent the header.
    provided: bool
    def __init__(
        self, values: Accept | Iterable[tuple[str, float]] | None = None
    ) -> None: ...
    def _specificity(self, value: str) -> tuple[bool, ...]: ...
    def _value_matches(self, value: str, item: str) -> bool: ...
    # String keys return the quality; int/slice behave like list indexing.
    @overload  # type: ignore
    def __getitem__(self, key: str) -> int: ...
    @overload
    def __getitem__(self, key: int) -> tuple[str, int]: ...
    @overload
    def __getitem__(self, key: slice) -> Iterable[tuple[str, int]]: ...
    def quality(self, key: str) -> int: ...
    def __contains__(self, value: str) -> bool: ...  # type: ignore
    def index(self, key: str) -> int: ...  # type: ignore
    def find(self, key: str) -> int: ...
    def values(self) -> Iterator[str]: ...
    def to_header(self) -> str: ...
    def _best_single_match(self, match: str) -> tuple[str, int] | None: ...
    @overload
    def best_match(self, matches: Iterable[str], default: str) -> str: ...
    @overload
    def best_match(
        self, matches: Iterable[str], default: str | None = None
    ) -> str | None: ...
    @property
    def best(self) -> str: ...
# Lowercases and splits a mimetype into type/subtype/parameter segments.
def _normalize_mime(value: str) -> list[str]: ...
class MIMEAccept(Accept):
    # Specificity is computed per mimetype segment (type/subtype/params).
    def _specificity(self, value: str) -> tuple[bool, ...]: ...
    def _value_matches(self, value: str, item: str) -> bool: ...
    @property
    def accept_html(self) -> bool: ...
    @property
    def accept_xhtml(self) -> bool: ...
    @property
    def accept_json(self) -> bool: ...
# Lowercases and splits a language tag on "-" or "_".
def _normalize_lang(value: str) -> list[str]: ...
class LanguageAccept(Accept):
    # Matching normalizes language tags; see the runtime implementation.
    def _value_matches(self, value: str, item: str) -> bool: ...
class CharsetAccept(Accept):
    # Matching canonicalizes charset names via the codecs registry.
    def _value_matches(self, value: str, item: str) -> bool: ...

View file

@ -0,0 +1,510 @@
from __future__ import annotations
import base64
import binascii
import typing as t
import warnings
from functools import wraps
from ..http import dump_header
from ..http import parse_dict_header
from ..http import parse_set_header
from ..http import quote_header_value
from .structures import CallbackDict
from .structures import HeaderSet
if t.TYPE_CHECKING:
import typing_extensions as te
class Authorization:
    """Represents the parts of an ``Authorization`` request header.
    :attr:`.Request.authorization` returns an instance if the header is set.
    An instance can be used with the test :class:`.Client` request methods' ``auth``
    parameter to send the header in test requests.
    Depending on the auth scheme, either :attr:`parameters` or :attr:`token` will be
    set. The ``Basic`` scheme's token is decoded into the ``username`` and ``password``
    parameters.
    For convenience, ``auth["key"]`` and ``auth.key`` both access the key in the
    :attr:`parameters` dict, along with ``auth.get("key")`` and ``"key" in auth``.
    .. versionchanged:: 2.3
    The ``token`` parameter and attribute was added to support auth schemes that use
    a token instead of parameters, such as ``Bearer``.
    .. versionchanged:: 2.3
    The object is no longer a ``dict``.
    .. versionchanged:: 0.5
    The object is an immutable dict.
    """
    def __init__(
        self,
        auth_type: str,
        data: dict[str, str] | None = None,
        token: str | None = None,
    ) -> None:
        self.type = auth_type
        """The authorization scheme, like ``Basic``, ``Digest``, or ``Bearer``."""
        if data is None:
            data = {}
        self.parameters = data
        """A dict of parameters parsed from the header. Either this or :attr:`token`
        will have a value for a give scheme.
        """
        self.token = token
        """A token parsed from the header. Either this or :attr:`parameters` will have a
        value for a given scheme.
        .. versionadded:: 2.3
        """
    # Unknown attributes fall through to the parsed parameters, so
    # ``auth.username`` is equivalent to ``auth.parameters.get("username")``.
    # Note this also means a missing parameter yields None, never AttributeError.
    def __getattr__(self, name: str) -> str | None:
        return self.parameters.get(name)
    def __getitem__(self, name: str) -> str | None:
        return self.parameters.get(name)
    def get(self, key: str, default: str | None = None) -> str | None:
        return self.parameters.get(key, default)
    def __contains__(self, key: str) -> bool:
        return key in self.parameters
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Authorization):
            return NotImplemented
        return (
            other.type == self.type
            and other.token == self.token
            and other.parameters == self.parameters
        )
    @classmethod
    def from_header(cls, value: str | None) -> te.Self | None:
        """Parse an ``Authorization`` header value and return an instance, or ``None``
        if the value is empty.
        :param value: The header value to parse.
        .. versionadded:: 2.3
        """
        if not value:
            return None
        scheme, _, rest = value.partition(" ")
        scheme = scheme.lower()
        rest = rest.strip()
        if scheme == "basic":
            # Basic credentials are base64("username:password"). Undecodable
            # values are treated as an absent header rather than an error.
            try:
                username, _, password = base64.b64decode(rest).decode().partition(":")
            except (binascii.Error, UnicodeError):
                return None
            return cls(scheme, {"username": username, "password": password})
        if "=" in rest.rstrip("="):
            # = that is not trailing, this is parameters.
            return cls(scheme, parse_dict_header(rest), None)
        # No = or only trailing =, this is a token.
        return cls(scheme, None, rest)
    def to_header(self) -> str:
        """Produce an ``Authorization`` header value representing this data.
        .. versionadded:: 2.0
        """
        if self.type == "basic":
            # ``username``/``password`` resolve via __getattr__; if either is
            # missing this encodes the string "None" — callers are expected to
            # have set both for Basic auth.
            value = base64.b64encode(
                f"{self.username}:{self.password}".encode()
            ).decode("utf8")
            return f"Basic {value}"
        if self.token is not None:
            return f"{self.type.title()} {self.token}"
        return f"{self.type.title()} {dump_header(self.parameters)}"
    def __str__(self) -> str:
        return self.to_header()
    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.to_header()}>"
def auth_property(name: str, doc: str | None = None) -> property:
    """A static helper function for Authentication subclasses to add
    extra authentication system properties onto a class::
        class FooAuthenticate(WWWAuthenticate):
            special_realm = auth_property('special_realm')
    .. deprecated:: 2.3
        Will be removed in Werkzeug 3.0.
    """
    warnings.warn(
        "'auth_property' is deprecated and will be removed in Werkzeug 3.0.",
        DeprecationWarning,
        stacklevel=2,
    )

    def _get_value(self):  # type: ignore[no-untyped-def]
        return self.get(name)

    def _set_value(self, value):  # type: ignore[no-untyped-def]
        # Assigning None removes the key; everything else is stored as text.
        if value is None:
            self.pop(name, None)
            return
        self[name] = str(value)

    return property(_get_value, _set_value, doc=doc)
class WWWAuthenticate:
    """Represents the parts of a ``WWW-Authenticate`` response header.

    Set :attr:`.Response.www_authenticate` to an instance of list of instances to set
    values for this header in the response. Modifying this instance will modify the
    header value.

    Depending on the auth scheme, either :attr:`parameters` or :attr:`token` should be
    set. The ``Basic`` scheme will encode ``username`` and ``password`` parameters to a
    token.

    For convenience, ``auth["key"]`` and ``auth.key`` both act on the :attr:`parameters`
    dict, and can be used to get, set, or delete parameters. ``auth.get("key")`` and
    ``"key" in auth`` are also provided.

    .. versionchanged:: 2.3
        The ``token`` parameter and attribute was added to support auth schemes that use
        a token instead of parameters, such as ``Bearer``.

    .. versionchanged:: 2.3
        The object is no longer a ``dict``.

    .. versionchanged:: 2.3
        The ``on_update`` parameter was removed.
    """

    def __init__(
        self,
        auth_type: str | None = None,
        values: dict[str, str] | None = None,
        token: str | None = None,
    ):
        if auth_type is None:
            warnings.warn(
                "An auth type must be given as the first parameter. Assuming 'basic' is"
                " deprecated and will be removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
            auth_type = "basic"
        self._type = auth_type.lower()
        # CallbackDict calls _trigger_on_update whenever a parameter is
        # mutated, so an attached Headers object stays in sync.
        self._parameters: dict[str, str] = CallbackDict(  # type: ignore[misc]
            values, lambda _: self._trigger_on_update()
        )
        self._token = token
        self._on_update: t.Callable[[WWWAuthenticate], None] | None = None

    def _trigger_on_update(self) -> None:
        # Notify the owner (e.g. the response headers) that the value changed.
        if self._on_update is not None:
            self._on_update(self)

    @property
    def type(self) -> str:
        """The authorization scheme, like ``Basic``, ``Digest``, or ``Bearer``."""
        return self._type

    @type.setter
    def type(self, value: str) -> None:
        self._type = value
        self._trigger_on_update()

    @property
    def parameters(self) -> dict[str, str]:
        """A dict of parameters for the header. Only one of this or :attr:`token` should
        have a value for a give scheme.
        """
        return self._parameters

    @parameters.setter
    def parameters(self, value: dict[str, str]) -> None:
        self._parameters = CallbackDict(  # type: ignore[misc]
            value, lambda _: self._trigger_on_update()
        )
        self._trigger_on_update()

    @property
    def token(self) -> str | None:
        """A token for the header. Only one of this or :attr:`parameters` should have a
        value for a given scheme.

        .. versionadded:: 2.3
        """
        # NOTE(review): this docstring previously duplicated the ``parameters``
        # docstring; the correct text sat on the setter, where it is invisible
        # to help(). Fixed by moving it here.
        return self._token

    @token.setter
    def token(self, value: str | None) -> None:
        self._token = value
        self._trigger_on_update()

    def set_basic(self, realm: str = "authentication required") -> None:
        """Clear any existing data and set a ``Basic`` challenge.

        .. deprecated:: 2.3
            Will be removed in Werkzeug 3.0. Create and assign an instance instead.
        """
        # Pass DeprecationWarning and stacklevel explicitly; without them this
        # was reported as a UserWarning at the wrong location, unlike every
        # other deprecation in this module.
        warnings.warn(
            "The 'set_basic' method is deprecated and will be removed in Werkzeug 3.0."
            " Create and assign an instance instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        self._type = "basic"
        # Use the plain dict methods so the CallbackDict update callback does
        # not fire for each intermediate mutation; one explicit trigger below.
        dict.clear(self.parameters)  # type: ignore[arg-type]
        dict.update(
            self.parameters,  # type: ignore[arg-type]
            {"realm": realm},  # type: ignore[dict-item]
        )
        self._token = None
        self._trigger_on_update()

    def set_digest(
        self,
        realm: str,
        nonce: str,
        qop: t.Sequence[str] = ("auth",),
        opaque: str | None = None,
        algorithm: str | None = None,
        stale: bool = False,
    ) -> None:
        """Clear any existing data and set a ``Digest`` challenge.

        .. deprecated:: 2.3
            Will be removed in Werkzeug 3.0. Create and assign an instance instead.
        """
        # Same warning fix as set_basic: explicit category and stacklevel.
        warnings.warn(
            "The 'set_digest' method is deprecated and will be removed in Werkzeug 3.0."
            " Create and assign an instance instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        self._type = "digest"
        dict.clear(self.parameters)  # type: ignore[arg-type]
        parameters = {
            "realm": realm,
            "nonce": nonce,
            "qop": ", ".join(qop),
            "stale": "TRUE" if stale else "FALSE",
        }
        if opaque is not None:
            parameters["opaque"] = opaque
        if algorithm is not None:
            parameters["algorithm"] = algorithm
        dict.update(self.parameters, parameters)  # type: ignore[arg-type]
        self._token = None
        self._trigger_on_update()

    def __getitem__(self, key: str) -> str | None:
        return self.parameters.get(key)

    def __setitem__(self, key: str, value: str | None) -> None:
        # Setting None removes the parameter instead of storing it.
        if value is None:
            if key in self.parameters:
                del self.parameters[key]
        else:
            self.parameters[key] = value
        self._trigger_on_update()

    def __delitem__(self, key: str) -> None:
        if key in self.parameters:
            del self.parameters[key]
            self._trigger_on_update()

    def __getattr__(self, name: str) -> str | None:
        return self[name]

    def __setattr__(self, name: str, value: str | None) -> None:
        # Internal slots go through normal attribute assignment; everything
        # else is stored as a header parameter.
        if name in {"_type", "_parameters", "_token", "_on_update"}:
            super().__setattr__(name, value)
        else:
            self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]

    def __contains__(self, key: str) -> bool:
        return key in self.parameters

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, WWWAuthenticate):
            return NotImplemented
        return (
            other.type == self.type
            and other.token == self.token
            and other.parameters == self.parameters
        )

    def get(self, key: str, default: str | None = None) -> str | None:
        return self.parameters.get(key, default)

    @classmethod
    def from_header(cls, value: str | None) -> te.Self | None:
        """Parse a ``WWW-Authenticate`` header value and return an instance, or ``None``
        if the value is empty.

        :param value: The header value to parse.

        .. versionadded:: 2.3
        """
        if not value:
            return None
        scheme, _, rest = value.partition(" ")
        scheme = scheme.lower()
        rest = rest.strip()
        if "=" in rest.rstrip("="):
            # = that is not trailing, this is parameters.
            return cls(scheme, parse_dict_header(rest), None)
        # No = or only trailing =, this is a token.
        return cls(scheme, None, rest)

    def to_header(self) -> str:
        """Produce a ``WWW-Authenticate`` header value representing this data."""
        if self.token is not None:
            return f"{self.type.title()} {self.token}"
        if self.type == "digest":
            # These Digest parameters must always be quoted strings; others
            # are quoted only when required.
            items = []
            for key, value in self.parameters.items():
                if key in {"realm", "domain", "nonce", "opaque", "qop"}:
                    value = quote_header_value(value, allow_token=False)
                else:
                    value = quote_header_value(value)
                items.append(f"{key}={value}")
            return f"Digest {', '.join(items)}"
        return f"{self.type.title()} {dump_header(self.parameters)}"

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.to_header()}>"

    @property
    def qop(self) -> set[str]:
        """The ``qop`` parameter as a set.

        .. deprecated:: 2.3
            Will be removed in Werkzeug 3.0. It will become the same as other
            parameters, returning a string.
        """
        warnings.warn(
            "The 'qop' property is deprecated and will be removed in Werkzeug 3.0."
            " It will become the same as other parameters, returning a string.",
            DeprecationWarning,
            stacklevel=2,
        )

        def on_update(value: HeaderSet) -> None:
            if not value:
                if "qop" in self:
                    del self["qop"]
                return
            self.parameters["qop"] = value.to_header()

        return parse_set_header(self.parameters.get("qop"), on_update)

    @property
    def stale(self) -> bool | None:
        """The ``stale`` parameter as a boolean.

        .. deprecated:: 2.3
            Will be removed in Werkzeug 3.0. It will become the same as other
            parameters, returning a string.
        """
        warnings.warn(
            "The 'stale' property is deprecated and will be removed in Werkzeug 3.0."
            " It will become the same as other parameters, returning a string.",
            DeprecationWarning,
            stacklevel=2,
        )
        if "stale" in self.parameters:
            return self.parameters["stale"].lower() == "true"
        return None

    @stale.setter
    def stale(self, value: bool | str | None) -> None:
        if value is None:
            if "stale" in self.parameters:
                del self.parameters["stale"]
            return
        if isinstance(value, bool):
            warnings.warn(
                "Setting the 'stale' property to a boolean is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.parameters["stale"] = "TRUE" if value else "FALSE"
        else:
            self.parameters["stale"] = value

    # Deprecated static helper kept for backwards compatibility.
    auth_property = staticmethod(auth_property)
def _deprecated_dict_method(f):  # type: ignore[no-untyped-def]
    """Wrap a ``dict`` method so calling it warns that dict-style access to
    ``Authorization``/``WWWAuthenticate`` is deprecated, then delegates.
    """
    message = (
        "Treating 'Authorization' and 'WWWAuthenticate' as a dict is deprecated and"
        " will be removed in Werkzeug 3.0. Use the 'parameters' attribute instead."
    )

    @wraps(f)
    def wrapper(*args, **kwargs):  # type: ignore[no-untyped-def]
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return f(*args, **kwargs)

    return wrapper
# Authorization and WWWAuthenticate used to subclass ``dict``. Patch warning
# wrappers for the old dict-style methods onto both classes so existing code
# keeps working through the deprecation period (see _deprecated_dict_method).
for name in (
    "__iter__",
    "clear",
    "copy",
    "items",
    "keys",
    "pop",
    "popitem",
    "setdefault",
    "update",
    "values",
):
    f = _deprecated_dict_method(getattr(dict, name))
    setattr(Authorization, name, f)
    setattr(WWWAuthenticate, name, f)

View file

@ -0,0 +1,175 @@
from __future__ import annotations
from .mixins import ImmutableDictMixin
from .mixins import UpdateDictMixin
def cache_control_property(key, empty, type):
    """Return a new property object for a cache header. Useful if you
    want to add support for a cache extension in a subclass.

    .. versionchanged:: 2.0
        Renamed from ``cache_property``.
    """

    def fget(obj):
        return obj._get_cache_value(key, empty, type)

    def fset(obj, value):
        obj._set_cache_value(key, value, type)

    def fdel(obj):
        obj._del_cache_value(key)

    return property(fget, fset, fdel, f"accessor for {key!r}")
class _CacheControl(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Cache-Control header. It
    has accessors for all the cache-control directives specified in RFC 2616.
    The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes the
    python descriptors use underscores for that.
    To get a header of the :class:`CacheControl` object again you can convert
    the object into a string or call the :meth:`to_header` method. If you plan
    to subclass it and add your own items have a look at the sourcecode for
    that class.
    .. versionchanged:: 2.1.0
        Setting int properties such as ``max_age`` will convert the
        value to an int.
    .. versionchanged:: 0.4
        Setting `no_cache` or `private` to boolean `True` will set the implicit
        none-value which is ``*``:
        >>> cc = ResponseCacheControl()
        >>> cc.no_cache = True
        >>> cc
        <ResponseCacheControl 'no-cache'>
        >>> cc.no_cache
        '*'
        >>> cc.no_cache = None
        >>> cc
        <ResponseCacheControl ''>
        In versions before 0.5 the behavior documented here affected the now
        no longer existing `CacheControl` class.
    """
    # Directives valid in both requests and responses.
    no_cache = cache_control_property("no-cache", "*", None)
    no_store = cache_control_property("no-store", None, bool)
    max_age = cache_control_property("max-age", -1, int)
    no_transform = cache_control_property("no-transform", None, None)
    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        # NOTE(review): with the default ``values=()`` this is True; only an
        # explicit ``values=None`` marks the header as not provided.
        self.provided = values is not None
    def _get_cache_value(self, key, empty, type):
        """Used internally by the accessor properties."""
        # Boolean directives are valueless flags: presence alone means True.
        if type is bool:
            return key in self
        if key in self:
            value = self[key]
            if value is None:
                # Directive present without a value: report its ``empty`` marker.
                return empty
            elif type is not None:
                try:
                    value = type(value)
                except ValueError:
                    # Unparseable values are returned as the raw string.
                    pass
            return value
        return None
    def _set_cache_value(self, key, value, type):
        """Used internally by the accessor properties."""
        if type is bool:
            if value:
                # Flag directives are stored with no value.
                self[key] = None
            else:
                self.pop(key, None)
        else:
            if value is None:
                self.pop(key, None)
            elif value is True:
                self[key] = None
            else:
                if type is not None:
                    self[key] = type(value)
                else:
                    self[key] = value
    def _del_cache_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]
    def to_header(self):
        """Convert the stored values into a cache control header."""
        # ``http`` is imported at the bottom of this module to break a
        # circular import.
        return http.dump_header(self)
    def __str__(self):
        return self.to_header()
    def __repr__(self):
        kv_str = " ".join(f"{k}={v!r}" for k, v in sorted(self.items()))
        return f"<{type(self).__name__} {kv_str}>"
    # Backwards-compatible alias for subclasses that add cache extensions.
    cache_property = staticmethod(cache_control_property)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
    """A cache control for requests. This is immutable and gives access
    to all the request-relevant cache control headers.
    To get a header of the :class:`RequestCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method. If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.
    .. versionchanged:: 2.1.0
        Setting int properties such as ``max_age`` will convert the
        value to an int.
    .. versionadded:: 0.5
        In previous versions a `CacheControl` class existed that was used
        both for request and response.
    """
    # Directives that are only valid in requests.
    max_stale = cache_control_property("max-stale", "*", int)
    min_fresh = cache_control_property("min-fresh", "*", int)
    only_if_cached = cache_control_property("only-if-cached", None, bool)
class ResponseCacheControl(_CacheControl):
    """A cache control for responses. Unlike :class:`RequestCacheControl`
    this is mutable and gives access to response-relevant cache control
    headers.
    To get a header of the :class:`ResponseCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method. If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.
    .. versionchanged:: 2.1.1
        ``s_maxage`` converts the value to an int.
    .. versionchanged:: 2.1.0
        Setting int properties such as ``max_age`` will convert the
        value to an int.
    .. versionadded:: 0.5
        In previous versions a `CacheControl` class existed that was used
        both for request and response.
    """
    # Directives that are only valid in responses.
    public = cache_control_property("public", None, bool)
    private = cache_control_property("private", "*", None)
    must_revalidate = cache_control_property("must-revalidate", None, bool)
    proxy_revalidate = cache_control_property("proxy-revalidate", None, bool)
    s_maxage = cache_control_property("s-maxage", None, int)
    immutable = cache_control_property("immutable", None, bool)
# circular dependencies
from .. import http

View file

@ -0,0 +1,109 @@
from collections.abc import Callable
from collections.abc import Iterable
from collections.abc import Mapping
from typing import TypeVar
from .mixins import ImmutableDictMixin
from .mixins import UpdateDictMixin
T = TypeVar("T")
_CPT = TypeVar("_CPT", str, int, bool)
_OptCPT = _CPT | None
# Stub for the property factory used by the cache-control classes below.
def cache_control_property(key: str, empty: _OptCPT, type: type[_CPT]) -> property: ...
# Type stub mirroring the runtime ``_CacheControl`` dict: one property triple
# (get/set/delete) per shared cache-control directive.
class _CacheControl(UpdateDictMixin[str, _OptCPT], dict[str, _OptCPT]):
    # True when the header was present (set by the runtime class).
    provided: bool
    def __init__(
        self,
        values: Mapping[str, _OptCPT] | Iterable[tuple[str, _OptCPT]] = (),
        on_update: Callable[[_CacheControl], None] | None = None,
    ) -> None: ...
    @property
    def no_cache(self) -> bool | None: ...
    @no_cache.setter
    def no_cache(self, value: bool | None) -> None: ...
    @no_cache.deleter
    def no_cache(self) -> None: ...
    @property
    def no_store(self) -> bool | None: ...
    @no_store.setter
    def no_store(self, value: bool | None) -> None: ...
    @no_store.deleter
    def no_store(self) -> None: ...
    @property
    def max_age(self) -> int | None: ...
    @max_age.setter
    def max_age(self, value: int | None) -> None: ...
    @max_age.deleter
    def max_age(self) -> None: ...
    @property
    def no_transform(self) -> bool | None: ...
    @no_transform.setter
    def no_transform(self, value: bool | None) -> None: ...
    @no_transform.deleter
    def no_transform(self) -> None: ...
    def _get_cache_value(self, key: str, empty: T | None, type: type[T]) -> T: ...
    def _set_cache_value(self, key: str, value: T | None, type: type[T]) -> None: ...
    def _del_cache_value(self, key: str) -> None: ...
    def to_header(self) -> str: ...
    @staticmethod
    def cache_property(key: str, empty: _OptCPT, type: type[_CPT]) -> property: ...
# Stub for the immutable, request-only cache-control mapping.
class RequestCacheControl(ImmutableDictMixin[str, _OptCPT], _CacheControl):
    @property
    def max_stale(self) -> int | None: ...
    @max_stale.setter
    def max_stale(self, value: int | None) -> None: ...
    @max_stale.deleter
    def max_stale(self) -> None: ...
    @property
    def min_fresh(self) -> int | None: ...
    @min_fresh.setter
    def min_fresh(self, value: int | None) -> None: ...
    @min_fresh.deleter
    def min_fresh(self) -> None: ...
    @property
    def only_if_cached(self) -> bool | None: ...
    @only_if_cached.setter
    def only_if_cached(self, value: bool | None) -> None: ...
    @only_if_cached.deleter
    def only_if_cached(self) -> None: ...
# Stub for the mutable, response-only cache-control mapping.
class ResponseCacheControl(_CacheControl):
    @property
    def public(self) -> bool | None: ...
    @public.setter
    def public(self, value: bool | None) -> None: ...
    @public.deleter
    def public(self) -> None: ...
    @property
    def private(self) -> bool | None: ...
    @private.setter
    def private(self, value: bool | None) -> None: ...
    @private.deleter
    def private(self) -> None: ...
    @property
    def must_revalidate(self) -> bool | None: ...
    @must_revalidate.setter
    def must_revalidate(self, value: bool | None) -> None: ...
    @must_revalidate.deleter
    def must_revalidate(self) -> None: ...
    @property
    def proxy_revalidate(self) -> bool | None: ...
    @proxy_revalidate.setter
    def proxy_revalidate(self, value: bool | None) -> None: ...
    @proxy_revalidate.deleter
    def proxy_revalidate(self) -> None: ...
    @property
    def s_maxage(self) -> int | None: ...
    @s_maxage.setter
    def s_maxage(self, value: int | None) -> None: ...
    @s_maxage.deleter
    def s_maxage(self) -> None: ...
    @property
    def immutable(self) -> bool | None: ...
    @immutable.setter
    def immutable(self, value: bool | None) -> None: ...
    @immutable.deleter
    def immutable(self) -> None: ...

View file

@ -0,0 +1,94 @@
from __future__ import annotations
from .mixins import UpdateDictMixin
def csp_property(key):
    """Return a new property object for a content security policy header.
    Useful if you want to add support for a csp extension in a
    subclass.
    """

    def fget(obj):
        return obj._get_value(key)

    def fset(obj, value):
        obj._set_value(key, value)

    def fdel(obj):
        obj._del_value(key)

    return property(fget, fset, fdel, f"accessor for {key!r}")
class ContentSecurityPolicy(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Content Security Policy
    header. It has accessors for all the level 3 policies.
    Because the csp directives in the HTTP header use dashes the
    python descriptors use underscores for that.
    To get a header of the :class:`ContentSecurityPolicy` object again
    you can convert the object into a string or call the
    :meth:`to_header` method. If you plan to subclass it and add your
    own items have a look at the sourcecode for that class.
    .. versionadded:: 1.0.0
        Support for Content Security Policy headers was added.
    """
    # One accessor per CSP level 3 directive; the dict key is the dashed
    # directive name.
    base_uri = csp_property("base-uri")
    child_src = csp_property("child-src")
    connect_src = csp_property("connect-src")
    default_src = csp_property("default-src")
    font_src = csp_property("font-src")
    form_action = csp_property("form-action")
    frame_ancestors = csp_property("frame-ancestors")
    frame_src = csp_property("frame-src")
    img_src = csp_property("img-src")
    manifest_src = csp_property("manifest-src")
    media_src = csp_property("media-src")
    navigate_to = csp_property("navigate-to")
    object_src = csp_property("object-src")
    prefetch_src = csp_property("prefetch-src")
    plugin_types = csp_property("plugin-types")
    report_to = csp_property("report-to")
    report_uri = csp_property("report-uri")
    sandbox = csp_property("sandbox")
    script_src = csp_property("script-src")
    script_src_attr = csp_property("script-src-attr")
    script_src_elem = csp_property("script-src-elem")
    style_src = csp_property("style-src")
    style_src_attr = csp_property("style-src-attr")
    style_src_elem = csp_property("style-src-elem")
    worker_src = csp_property("worker-src")
    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        # NOTE(review): with the default ``values=()`` this is True; only an
        # explicit ``values=None`` marks the header as not provided.
        self.provided = values is not None
    def _get_value(self, key):
        """Used internally by the accessor properties."""
        return self.get(key)
    def _set_value(self, key, value):
        """Used internally by the accessor properties."""
        # Setting None removes the directive instead of storing it.
        if value is None:
            self.pop(key, None)
        else:
            self[key] = value
    def _del_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]
    def to_header(self):
        """Convert the stored values into a Content Security Policy header."""
        # Imported lazily to avoid a circular import with ..http.
        from ..http import dump_csp_header
        return dump_csp_header(self)
    def __str__(self):
        return self.to_header()
    def __repr__(self):
        kv_str = " ".join(f"{k}={v!r}" for k, v in sorted(self.items()))
        return f"<{type(self).__name__} {kv_str}>"

View file

@ -0,0 +1,169 @@
from collections.abc import Callable
from collections.abc import Iterable
from collections.abc import Mapping
from .mixins import UpdateDictMixin
# Stub for the property factory used by ContentSecurityPolicy below.
def csp_property(key: str) -> property: ...
# Type stub mirroring the runtime ContentSecurityPolicy mapping: one property
# triple (get/set/delete) per CSP level 3 directive.
class ContentSecurityPolicy(UpdateDictMixin[str, str], dict[str, str]):
    @property
    def base_uri(self) -> str | None: ...
    @base_uri.setter
    def base_uri(self, value: str | None) -> None: ...
    @base_uri.deleter
    def base_uri(self) -> None: ...
    @property
    def child_src(self) -> str | None: ...
    @child_src.setter
    def child_src(self, value: str | None) -> None: ...
    @child_src.deleter
    def child_src(self) -> None: ...
    @property
    def connect_src(self) -> str | None: ...
    @connect_src.setter
    def connect_src(self, value: str | None) -> None: ...
    @connect_src.deleter
    def connect_src(self) -> None: ...
    @property
    def default_src(self) -> str | None: ...
    @default_src.setter
    def default_src(self, value: str | None) -> None: ...
    @default_src.deleter
    def default_src(self) -> None: ...
    @property
    def font_src(self) -> str | None: ...
    @font_src.setter
    def font_src(self, value: str | None) -> None: ...
    @font_src.deleter
    def font_src(self) -> None: ...
    @property
    def form_action(self) -> str | None: ...
    @form_action.setter
    def form_action(self, value: str | None) -> None: ...
    @form_action.deleter
    def form_action(self) -> None: ...
    @property
    def frame_ancestors(self) -> str | None: ...
    @frame_ancestors.setter
    def frame_ancestors(self, value: str | None) -> None: ...
    @frame_ancestors.deleter
    def frame_ancestors(self) -> None: ...
    @property
    def frame_src(self) -> str | None: ...
    @frame_src.setter
    def frame_src(self, value: str | None) -> None: ...
    @frame_src.deleter
    def frame_src(self) -> None: ...
    @property
    def img_src(self) -> str | None: ...
    @img_src.setter
    def img_src(self, value: str | None) -> None: ...
    @img_src.deleter
    def img_src(self) -> None: ...
    @property
    def manifest_src(self) -> str | None: ...
    @manifest_src.setter
    def manifest_src(self, value: str | None) -> None: ...
    @manifest_src.deleter
    def manifest_src(self) -> None: ...
    @property
    def media_src(self) -> str | None: ...
    @media_src.setter
    def media_src(self, value: str | None) -> None: ...
    @media_src.deleter
    def media_src(self) -> None: ...
    @property
    def navigate_to(self) -> str | None: ...
    @navigate_to.setter
    def navigate_to(self, value: str | None) -> None: ...
    @navigate_to.deleter
    def navigate_to(self) -> None: ...
    @property
    def object_src(self) -> str | None: ...
    @object_src.setter
    def object_src(self, value: str | None) -> None: ...
    @object_src.deleter
    def object_src(self) -> None: ...
    @property
    def prefetch_src(self) -> str | None: ...
    @prefetch_src.setter
    def prefetch_src(self, value: str | None) -> None: ...
    @prefetch_src.deleter
    def prefetch_src(self) -> None: ...
    @property
    def plugin_types(self) -> str | None: ...
    @plugin_types.setter
    def plugin_types(self, value: str | None) -> None: ...
    @plugin_types.deleter
    def plugin_types(self) -> None: ...
    @property
    def report_to(self) -> str | None: ...
    @report_to.setter
    def report_to(self, value: str | None) -> None: ...
    @report_to.deleter
    def report_to(self) -> None: ...
    @property
    def report_uri(self) -> str | None: ...
    @report_uri.setter
    def report_uri(self, value: str | None) -> None: ...
    @report_uri.deleter
    def report_uri(self) -> None: ...
    @property
    def sandbox(self) -> str | None: ...
    @sandbox.setter
    def sandbox(self, value: str | None) -> None: ...
    @sandbox.deleter
    def sandbox(self) -> None: ...
    @property
    def script_src(self) -> str | None: ...
    @script_src.setter
    def script_src(self, value: str | None) -> None: ...
    @script_src.deleter
    def script_src(self) -> None: ...
    @property
    def script_src_attr(self) -> str | None: ...
    @script_src_attr.setter
    def script_src_attr(self, value: str | None) -> None: ...
    @script_src_attr.deleter
    def script_src_attr(self) -> None: ...
    @property
    def script_src_elem(self) -> str | None: ...
    @script_src_elem.setter
    def script_src_elem(self, value: str | None) -> None: ...
    @script_src_elem.deleter
    def script_src_elem(self) -> None: ...
    @property
    def style_src(self) -> str | None: ...
    @style_src.setter
    def style_src(self, value: str | None) -> None: ...
    @style_src.deleter
    def style_src(self) -> None: ...
    @property
    def style_src_attr(self) -> str | None: ...
    @style_src_attr.setter
    def style_src_attr(self, value: str | None) -> None: ...
    @style_src_attr.deleter
    def style_src_attr(self) -> None: ...
    @property
    def style_src_elem(self) -> str | None: ...
    @style_src_elem.setter
    def style_src_elem(self, value: str | None) -> None: ...
    @style_src_elem.deleter
    def style_src_elem(self) -> None: ...
    @property
    def worker_src(self) -> str | None: ...
    @worker_src.setter
    def worker_src(self, value: str | None) -> None: ...
    @worker_src.deleter
    def worker_src(self) -> None: ...
    # True when the header was present (set by the runtime class).
    provided: bool
    def __init__(
        self,
        values: Mapping[str, str] | Iterable[tuple[str, str]] = (),
        on_update: Callable[[ContentSecurityPolicy], None] | None = None,
    ) -> None: ...
    def _get_value(self, key: str) -> str | None: ...
    def _set_value(self, key: str, value: str) -> None: ...
    def _del_value(self, key: str) -> None: ...
    def to_header(self) -> str: ...

View file

@ -0,0 +1,95 @@
from __future__ import annotations
from collections.abc import Collection
class ETags(Collection):
    """A set that can be used to check if one etag is present in a collection
    of etags.
    """

    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
        # A star tag (``If-Match: *``) matches everything, so any strong tags
        # passed alongside it are discarded.
        if star_tag or not strong_etags:
            self._strong = frozenset()
        else:
            self._strong = frozenset(strong_etags)
        self._weak = frozenset(weak_etags or ())
        self.star_tag = star_tag

    def as_set(self, include_weak=False):
        """Convert the `ETags` object into a python set. Per default all the
        weak etags are not part of this set."""
        result = set(self._strong)
        if include_weak:
            result |= self._weak
        return result

    def is_weak(self, etag):
        """Check if an etag is weak."""
        return etag in self._weak

    def is_strong(self, etag):
        """Check if an etag is strong."""
        return etag in self._strong

    def contains_weak(self, etag):
        """Check if an etag is part of the set including weak and strong tags."""
        return self.is_weak(etag) or self.contains(etag)

    def contains(self, etag):
        """Check if an etag is part of the set ignoring weak tags.
        It is also possible to use the ``in`` operator.
        """
        return True if self.star_tag else self.is_strong(etag)

    def contains_raw(self, etag):
        """When passed a quoted tag it will check if this tag is part of the
        set. If the tag is weak it is checked against weak and strong tags,
        otherwise strong only."""
        from ..http import unquote_etag

        etag, weak = unquote_etag(etag)
        return self.contains_weak(etag) if weak else self.contains(etag)

    def to_header(self):
        """Convert the etags set into a HTTP header string."""
        if self.star_tag:
            return "*"
        parts = [f'"{tag}"' for tag in self._strong]
        parts += [f'W/"{tag}"' for tag in self._weak]
        return ", ".join(parts)

    def __call__(self, etag=None, data=None, include_weak=False):
        # Exactly one of ``etag`` / ``data`` must be supplied.
        if (etag is None) == (data is None):
            raise TypeError("either tag or data required, but at least one")
        if etag is None:
            from ..http import generate_etag

            etag = generate_etag(data)
        if include_weak and etag in self._weak:
            return True
        return etag in self._strong

    def __bool__(self):
        return bool(self.star_tag or self._strong or self._weak)

    def __str__(self):
        return self.to_header()

    def __len__(self):
        return len(self._strong)

    def __iter__(self):
        return iter(self._strong)

    def __contains__(self, etag):
        return self.contains(etag)

    def __repr__(self):
        return f"<{type(self).__name__} {str(self)!r}>"

View file

@ -0,0 +1,30 @@
from collections.abc import Collection
from collections.abc import Iterable
from collections.abc import Iterator
# Type stub mirroring the runtime ``ETags`` collection of strong/weak etags.
class ETags(Collection[str]):
    _strong: frozenset[str]
    _weak: frozenset[str]
    # True when the header was ``*`` (matches everything).
    star_tag: bool
    def __init__(
        self,
        strong_etags: Iterable[str] | None = None,
        weak_etags: Iterable[str] | None = None,
        star_tag: bool = False,
    ) -> None: ...
    def as_set(self, include_weak: bool = False) -> set[str]: ...
    def is_weak(self, etag: str) -> bool: ...
    def is_strong(self, etag: str) -> bool: ...
    def contains_weak(self, etag: str) -> bool: ...
    def contains(self, etag: str) -> bool: ...
    def contains_raw(self, etag: str) -> bool: ...
    def to_header(self) -> str: ...
    def __call__(
        self,
        etag: str | None = None,
        data: bytes | None = None,
        include_weak: bool = False,
    ) -> bool: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
    def __contains__(self, item: str) -> bool: ... # type: ignore

View file

@ -0,0 +1,196 @@
from __future__ import annotations
import mimetypes
from io import BytesIO
from os import fsdecode
from os import fspath
from .._internal import _plain_int
from .structures import MultiDict
class FileStorage:
    """The :class:`FileStorage` class is a thin wrapper over incoming files.
    It is used by the request object to represent uploaded files. All the
    attributes of the wrapper stream are proxied by the file storage so
    it's possible to do ``storage.read()`` instead of the long form
    ``storage.stream.read()``.
    """

    def __init__(
        self,
        stream=None,
        filename=None,
        name=None,
        content_type=None,
        content_length=None,
        headers=None,
    ):
        # Name of the form field this file was submitted under.
        self.name = name
        # Fall back to an empty in-memory stream so proxied reads always work.
        self.stream = stream or BytesIO()
        # If no filename is provided, attempt to get the filename from
        # the stream object. Python names special streams like
        # ``<stderr>`` with angular brackets, skip these streams.
        if filename is None:
            filename = getattr(stream, "name", None)
            if filename is not None:
                filename = fsdecode(filename)
            if filename and filename[0] == "<" and filename[-1] == ">":
                filename = None
        else:
            filename = fsdecode(filename)
        self.filename = filename
        if headers is None:
            # Imported lazily: ``headers`` is a sibling module in this package.
            from .headers import Headers
            headers = Headers()
        self.headers = headers
        # Explicit keyword arguments override whatever ``headers`` contained.
        if content_type is not None:
            headers["Content-Type"] = content_type
        if content_length is not None:
            headers["Content-Length"] = str(content_length)

    def _parse_content_type(self):
        # Parse and cache the Content-Type header on first use.  ``http`` is
        # imported at the bottom of this module to avoid a circular import.
        if not hasattr(self, "_parsed_content_type"):
            self._parsed_content_type = http.parse_options_header(self.content_type)

    @property
    def content_type(self):
        """The content-type sent in the header. Usually not available"""
        # ``None`` when the client did not send a Content-Type header.
        return self.headers.get("content-type")

    @property
    def content_length(self):
        """The content-length sent in the header. Usually not available"""
        if "content-length" in self.headers:
            try:
                return _plain_int(self.headers["content-length"])
            except ValueError:
                pass
        # Missing or malformed header values fall through to 0.
        return 0

    @property
    def mimetype(self):
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase. For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    def save(self, dst, buffer_size=16384):
        """Save the file to a destination path or file object. If the
        destination is a file object you have to close it yourself after the
        call. The buffer size is the number of bytes held in memory during
        the copy process. It defaults to 16KB.
        For secure file saving also have a look at :func:`secure_filename`.
        :param dst: a filename, :class:`os.PathLike`, or open file
            object to write to.
        :param buffer_size: Passed as the ``length`` parameter of
            :func:`shutil.copyfileobj`.
        .. versionchanged:: 1.0
            Supports :mod:`pathlib`.
        """
        from shutil import copyfileobj

        # Only close the destination if we opened it ourselves.
        close_dst = False
        if hasattr(dst, "__fspath__"):
            dst = fspath(dst)
        if isinstance(dst, str):
            dst = open(dst, "wb")
            close_dst = True
        try:
            copyfileobj(self.stream, dst, buffer_size)
        finally:
            if close_dst:
                dst.close()

    def close(self):
        """Close the underlying file if possible."""
        # Best effort: swallow errors from exotic stream objects.
        try:
            self.stream.close()
        except Exception:
            pass

    def __bool__(self):
        # Truthy only when an actual upload happened (a filename was sent).
        return bool(self.filename)

    def __getattr__(self, name):
        # Proxy unknown attributes to the wrapped stream object.
        try:
            return getattr(self.stream, name)
        except AttributeError:
            # SpooledTemporaryFile doesn't implement IOBase, get the
            # attribute from its backing file instead.
            # https://github.com/python/cpython/pull/3249
            if hasattr(self.stream, "_file"):
                return getattr(self.stream._file, name)
            raise

    def __iter__(self):
        # Iterating yields lines from the underlying stream.
        return iter(self.stream)

    def __repr__(self):
        return f"<{type(self).__name__}: {self.filename!r} ({self.content_type!r})>"
class FileMultiDict(MultiDict):
    """A special :class:`MultiDict` that has convenience methods to add
    files to it. This is used for :class:`EnvironBuilder` and generally
    useful for unittesting.

    .. versionadded:: 0.5
    """

    def add_file(self, name, file, filename=None, content_type=None):
        """Add a new file to the dict.

        ``file`` may be a file name, a :class:`file`-like object, or an
        already built :class:`FileStorage`.

        :param name: the name of the field.
        :param file: a filename or :class:`file`-like object
        :param filename: an optional filename
        :param content_type: an optional content type
        """
        if not isinstance(file, FileStorage):
            if isinstance(file, str):
                # A plain path: use it as the default filename and open it.
                if filename is None:
                    filename = file
                file = open(file, "rb")
            if filename and content_type is None:
                # Guess from the filename, fall back to a generic binary type.
                content_type = (
                    mimetypes.guess_type(filename)[0] or "application/octet-stream"
                )
            file = FileStorage(file, filename, name, content_type)
        self.add(name, file)
# circular dependencies
from .. import http

View file

@ -0,0 +1,47 @@
from collections.abc import Iterator
from os import PathLike
from typing import Any
from typing import IO
from .headers import Headers
from .structures import MultiDict
class FileStorage:
    """Typing stub for the runtime ``FileStorage`` upload wrapper."""

    name: str | None
    stream: IO[bytes]
    filename: str | None
    headers: Headers
    # Cache filled lazily by ``_parse_content_type``.
    _parsed_content_type: tuple[str, dict[str, str]]
    def __init__(
        self,
        stream: IO[bytes] | None = None,
        filename: str | PathLike | None = None,
        name: str | None = None,
        content_type: str | None = None,
        content_length: int | None = None,
        headers: Headers | None = None,
    ) -> None: ...
    def _parse_content_type(self) -> None: ...
    @property
    def content_type(self) -> str | None: ...  # None when the header is absent
    @property
    def content_length(self) -> int: ...
    @property
    def mimetype(self) -> str: ...
    @property
    def mimetype_params(self) -> dict[str, str]: ...
    def save(self, dst: str | PathLike | IO[bytes], buffer_size: int = ...) -> None: ...
    def close(self) -> None: ...
    def __bool__(self) -> bool: ...
    def __getattr__(self, name: str) -> Any: ...
    def __iter__(self) -> Iterator[bytes]: ...
    def __repr__(self) -> str: ...
class FileMultiDict(MultiDict[str, FileStorage]):
    """Typing stub for the testing helper ``FileMultiDict``."""

    def add_file(
        self,
        name: str,
        file: FileStorage | str | IO[bytes],
        filename: str | None = None,
        content_type: str | None = None,
    ) -> None: ...

View file

@ -0,0 +1,566 @@
from __future__ import annotations
import re
import typing as t
import warnings
from .._internal import _missing
from ..exceptions import BadRequestKeyError
from .mixins import ImmutableHeadersMixin
from .structures import iter_multi_items
from .structures import MultiDict
class Headers:
    """An object that stores some headers. It has a dict-like interface,
    but is ordered, can store the same key multiple times, and iterating
    yields ``(key, value)`` pairs instead of only keys.
    This data structure is useful if you want a nicer way to handle WSGI
    headers which are stored as tuples in a list.
    From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
    also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
    and will render a page for a ``400 BAD REQUEST`` if caught in a
    catch-all for HTTP exceptions.
    Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
    class, with the exception of `__getitem__`. :mod:`wsgiref` will return
    `None` for ``headers['missing']``, whereas :class:`Headers` will raise
    a :class:`KeyError`.
    To create a new ``Headers`` object, pass it a list, dict, or
    other ``Headers`` object with default values. These values are
    validated the same way values added later are.
    :param defaults: The list of default values for the :class:`Headers`.
    .. versionchanged:: 2.1.0
        Default values are validated the same as values added later.
    .. versionchanged:: 0.9
        This data structure now stores unicode values similar to how the
        multi dicts do it. The main difference is that bytes can be set as
        well which will automatically be latin1 decoded.
    .. versionchanged:: 0.9
        The :meth:`linked` function was removed without replacement as it
        was an API that does not support the changes to the encoding model.
    """

    def __init__(self, defaults=None):
        # Internal storage: an ordered list of ``(key, value)`` tuples.
        self._list = []
        if defaults is not None:
            self.extend(defaults)

    def __getitem__(self, key, _get_mode=False):
        """Look up a header by name (case-insensitive), index, or slice."""
        if not _get_mode:
            if isinstance(key, int):
                return self._list[key]
            elif isinstance(key, slice):
                return self.__class__(self._list[key])
        if not isinstance(key, str):
            raise BadRequestKeyError(key)
        ikey = key.lower()
        for k, v in self._list:
            if k.lower() == ikey:
                return v
        # micro optimization: if we are in get mode we will catch that
        # exception one stack level down so we can raise a standard
        # key error instead of our special one.
        if _get_mode:
            raise KeyError()
        raise BadRequestKeyError(key)

    def __eq__(self, other):
        """Compare as unordered sets of case-folded ``(key, value)`` pairs.

        Note that duplicate counts and ordering are ignored by this
        comparison.
        """
        def lowered(item):
            return (item[0].lower(),) + item[1:]

        return other.__class__ is self.__class__ and set(
            map(lowered, other._list)
        ) == set(map(lowered, self._list))

    # Defining ``__eq__`` without ``__hash__`` makes instances unhashable.
    __hash__ = None

    def get(self, key, default=None, type=None, as_bytes=None):
        """Return the default value if the requested data doesn't exist.
        If `type` is provided and is a callable it should convert the value,
        return it or raise a :exc:`ValueError` if that is not possible. In
        this case the function will return the default as if the value was not
        found:
        >>> d = Headers([('Content-Length', '42')])
        >>> d.get('Content-Length', type=int)
        42
        :param key: The key to be looked up.
        :param default: The default value to be returned if the key can't
                        be looked up. If not further specified `None` is
                        returned.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`. If a :exc:`ValueError` is raised
                     by this callable the default value is returned.
        .. versionchanged:: 2.3
            The ``as_bytes`` parameter is deprecated and will be removed
            in Werkzeug 3.0.
        .. versionchanged:: 0.9
            The ``as_bytes`` parameter was added.
        """
        if as_bytes is not None:
            warnings.warn(
                "The 'as_bytes' parameter is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
        try:
            rv = self.__getitem__(key, _get_mode=True)
        except KeyError:
            return default
        if as_bytes:
            rv = rv.encode("latin1")
        if type is None:
            return rv
        try:
            return type(rv)
        except ValueError:
            return default

    def getlist(self, key, type=None, as_bytes=None):
        """Return the list of items for a given key. If that key is not in the
        :class:`Headers`, the return value will be an empty list. Just like
        :meth:`get`, :meth:`getlist` accepts a `type` parameter. All items will
        be converted with the callable defined there.
        :param key: The key to be looked up.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`. If a :exc:`ValueError` is raised
                     by this callable the value will be removed from the list.
        :return: a :class:`list` of all the values for the key.
        .. versionchanged:: 2.3
            The ``as_bytes`` parameter is deprecated and will be removed
            in Werkzeug 3.0.
        .. versionchanged:: 0.9
            The ``as_bytes`` parameter was added.
        """
        if as_bytes is not None:
            warnings.warn(
                "The 'as_bytes' parameter is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
        ikey = key.lower()
        result = []
        for k, v in self:
            if k.lower() == ikey:
                if as_bytes:
                    v = v.encode("latin1")
                if type is not None:
                    try:
                        v = type(v)
                    except ValueError:
                        # Values that fail conversion are dropped, not kept.
                        continue
                result.append(v)
        return result

    def get_all(self, name):
        """Return a list of all the values for the named field.
        This method is compatible with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.get_all` method.
        """
        return self.getlist(name)

    def items(self, lower=False):
        """Yield ``(key, value)`` pairs, lowercasing keys if *lower* is set."""
        for key, value in self:
            if lower:
                key = key.lower()
            yield key, value

    def keys(self, lower=False):
        """Yield header names, lowercased if *lower* is set."""
        for key, _ in self.items(lower):
            yield key

    def values(self):
        """Yield header values in insertion order."""
        for _, value in self.items():
            yield value

    def extend(self, *args, **kwargs):
        """Extend headers in this object with items from another object
        containing header items as well as keyword arguments.
        To replace existing keys instead of extending, use
        :meth:`update` instead.
        If provided, the first argument can be another :class:`Headers`
        object, a :class:`MultiDict`, :class:`dict`, or iterable of
        pairs.
        .. versionchanged:: 1.0
            Support :class:`MultiDict`. Allow passing ``kwargs``.
        """
        if len(args) > 1:
            raise TypeError(f"update expected at most 1 arguments, got {len(args)}")
        if args:
            for key, value in iter_multi_items(args[0]):
                self.add(key, value)
        for key, value in iter_multi_items(kwargs):
            self.add(key, value)

    def __delitem__(self, key, _index_operation=True):
        """Delete by index/slice, or remove every value for a header name."""
        if _index_operation and isinstance(key, (int, slice)):
            del self._list[key]
            return
        key = key.lower()
        new = []
        for k, v in self._list:
            if k.lower() != key:
                new.append((k, v))
        # Replace contents in place so external references stay valid.
        self._list[:] = new

    def remove(self, key):
        """Remove a key.
        :param key: The key to be removed.
        """
        return self.__delitem__(key, _index_operation=False)

    def pop(self, key=None, default=_missing):
        """Removes and returns a key or index.
        :param key: The key to be popped.  If this is an integer the item at
                    that position is removed, if it's a string the value for
                    that key is.  If the key is omitted or `None` the last
                    item is removed.
        :return: an item.
        """
        if key is None:
            return self._list.pop()
        if isinstance(key, int):
            return self._list.pop(key)
        try:
            rv = self[key]
            self.remove(key)
        except KeyError:
            if default is not _missing:
                return default
            raise
        return rv

    def popitem(self):
        """Removes a key or index and returns a (key, value) item."""
        return self.pop()

    def __contains__(self, key):
        """Check if a key is present."""
        try:
            self.__getitem__(key, _get_mode=True)
        except KeyError:
            return False
        return True

    def __iter__(self):
        """Yield ``(key, value)`` tuples."""
        return iter(self._list)

    def __len__(self):
        return len(self._list)

    def add(self, _key, _value, **kw):
        """Add a new header tuple to the list.
        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes::
        >>> d = Headers()
        >>> d.add('Content-Type', 'text/plain')
        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')
        The keyword argument dumping uses :func:`dump_options_header`
        behind the scenes.
        .. versionadded:: 0.4.1
            keyword arguments were added for :mod:`wsgiref` compatibility.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _key = _str_header_key(_key)
        _value = _str_header_value(_value)
        self._list.append((_key, _value))

    def add_header(self, _key, _value, **_kw):
        """Add a new header tuple to the list.
        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.add_header` method.
        """
        self.add(_key, _value, **_kw)

    def clear(self):
        """Clears all headers."""
        del self._list[:]

    def set(self, _key, _value, **kw):
        """Remove all header tuples for `key` and add a new one.  The newly
        added key either appears at the end of the list if there was no
        entry or replaces the first one.
        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes.  See :meth:`add` for
        more information.
        .. versionchanged:: 0.6.1
            :meth:`set` now accepts the same arguments as :meth:`add`.
        :param key: The key to be inserted.
        :param value: The value to be inserted.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _key = _str_header_key(_key)
        _value = _str_header_value(_value)
        if not self._list:
            self._list.append((_key, _value))
            return
        listiter = iter(self._list)
        ikey = _key.lower()
        for idx, (old_key, _old_value) in enumerate(listiter):
            if old_key.lower() == ikey:
                # replace first occurrence
                self._list[idx] = (_key, _value)
                break
        else:
            # No existing entry: append and we are done.
            self._list.append((_key, _value))
            return
        # ``listiter`` has already advanced past ``idx``; keep only the
        # remaining entries whose key does not match (drops duplicates).
        self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey]

    def setlist(self, key, values):
        """Remove any existing values for a header and add new ones.
        :param key: The header key to set.
        :param values: An iterable of values to set for the key.
        .. versionadded:: 1.0
        """
        if values:
            values_iter = iter(values)
            # The first value replaces, the rest are appended.
            self.set(key, next(values_iter))
            for value in values_iter:
                self.add(key, value)
        else:
            self.remove(key)

    def setdefault(self, key, default):
        """Return the first value for the key if it is in the headers,
        otherwise set the header to the value given by ``default`` and
        return that.
        :param key: The header key to get.
        :param default: The value to set for the key if it is not in the
            headers.
        """
        if key in self:
            return self[key]
        self.set(key, default)
        return default

    def setlistdefault(self, key, default):
        """Return the list of values for the key if it is in the
        headers, otherwise set the header to the list of values given
        by ``default`` and return that.
        Unlike :meth:`MultiDict.setlistdefault`, modifying the returned
        list will not affect the headers.
        :param key: The header key to get.
        :param default: An iterable of values to set for the key if it
            is not in the headers.
        .. versionadded:: 1.0
        """
        if key not in self:
            self.setlist(key, default)
        return self.getlist(key)

    def __setitem__(self, key, value):
        """Like :meth:`set` but also supports index/slice based setting."""
        if isinstance(key, (slice, int)):
            if isinstance(key, int):
                value = [value]
            # Validate all new pairs before mutating the list.
            value = [(_str_header_key(k), _str_header_value(v)) for (k, v) in value]
            if isinstance(key, int):
                self._list[key] = value[0]
            else:
                self._list[key] = value
        else:
            self.set(key, value)

    def update(self, *args, **kwargs):
        """Replace headers in this object with items from another
        headers object and keyword arguments.
        To extend existing keys instead of replacing, use :meth:`extend`
        instead.
        If provided, the first argument can be another :class:`Headers`
        object, a :class:`MultiDict`, :class:`dict`, or iterable of
        pairs.
        .. versionadded:: 1.0
        """
        if len(args) > 1:
            raise TypeError(f"update expected at most 1 arguments, got {len(args)}")
        if args:
            mapping = args[0]
            if isinstance(mapping, (Headers, MultiDict)):
                for key in mapping.keys():
                    self.setlist(key, mapping.getlist(key))
            elif isinstance(mapping, dict):
                for key, value in mapping.items():
                    if isinstance(value, (list, tuple)):
                        self.setlist(key, value)
                    else:
                        self.set(key, value)
            else:
                for key, value in mapping:
                    self.set(key, value)
        for key, value in kwargs.items():
            if isinstance(value, (list, tuple)):
                self.setlist(key, value)
            else:
                self.set(key, value)

    def to_wsgi_list(self):
        """Convert the headers into a list suitable for WSGI.
        :return: list
        """
        return list(self)

    def copy(self):
        # A shallow copy: the tuple entries themselves are immutable.
        return self.__class__(self._list)

    def __copy__(self):
        return self.copy()

    def __str__(self):
        """Returns formatted headers suitable for HTTP transmission."""
        strs = []
        for key, value in self.to_wsgi_list():
            strs.append(f"{key}: {value}")
        # The empty trailing entry produces the blank line that
        # terminates the header block.
        strs.append("\r\n")
        return "\r\n".join(strs)

    def __repr__(self):
        return f"{type(self).__name__}({list(self)!r})"
def _options_header_vkw(value: str, kw: dict[str, t.Any]):
    """Build a header value with options, mapping ``_`` in keys to ``-``."""
    options = {key.replace("_", "-"): val for key, val in kw.items()}
    return http.dump_options_header(value, options)
def _str_header_key(key: t.Any) -> str:
    """Coerce a header key to ``str``.

    Non-string keys are deprecated: bytes are latin-1 decoded, anything
    else goes through ``str()``.
    """
    if isinstance(key, str):
        return key
    warnings.warn(
        "Header keys must be strings. Passing other types is deprecated and will"
        " not be supported in Werkzeug 3.0.",
        DeprecationWarning,
        stacklevel=2,
    )
    if isinstance(key, bytes):
        return key.decode("latin-1")
    return str(key)
_newline_re = re.compile(r"[\r\n]")


def _str_header_value(value: t.Any) -> str:
    """Coerce a header value to ``str`` and reject embedded newlines."""
    if isinstance(value, bytes):
        warnings.warn(
            "Passing bytes as a header value is deprecated and will not be supported in"
            " Werkzeug 3.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        value = value.decode("latin-1")
    elif not isinstance(value, str):
        value = str(value)
    # Newlines would allow header injection when the value is emitted.
    if _newline_re.search(value) is not None:
        raise ValueError("Header values must not contain newline characters.")
    return value
class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Read only version of the headers from a WSGI environment.  This
    provides the same interface as `Headers` and is constructed from
    a WSGI environment.
    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
    HTTP exceptions.
    """

    def __init__(self, environ):
        self.environ = environ

    def __eq__(self, other):
        # Equal only when both objects wrap the very same environ dict.
        return self.environ is other.environ

    __hash__ = None

    def __getitem__(self, key, _get_mode=False):
        # ``_get_mode`` exists only because ``Headers.get`` passes it;
        # there is no positional indexing on a WSGI environ.
        if not isinstance(key, str):
            raise KeyError(key)
        name = key.upper().replace("-", "_")
        if name in {"CONTENT_TYPE", "CONTENT_LENGTH"}:
            return self.environ[name]
        return self.environ[f"HTTP_{name}"]

    def __len__(self):
        # Count via our own iterator; ``list(self)`` would consult this
        # ``__len__`` again and recurse.
        return sum(1 for _ in iter(self))

    def __iter__(self):
        for key, value in self.environ.items():
            if key.startswith("HTTP_"):
                if key not in ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH"):
                    yield key[5:].replace("_", "-").title(), value
            elif key in ("CONTENT_TYPE", "CONTENT_LENGTH") and value:
                yield key.replace("_", "-").title(), value

    def copy(self):
        raise TypeError(f"cannot create {type(self).__name__!r} copies")
# circular dependencies
from .. import http

View file

@ -0,0 +1,109 @@
from collections.abc import Callable
from collections.abc import Iterable
from collections.abc import Iterator
from collections.abc import Mapping
from typing import Literal
from typing import NoReturn
from typing import overload
from typing import TypeVar
from _typeshed import SupportsKeysAndGetItem
from _typeshed.wsgi import WSGIEnvironment
from .mixins import ImmutableHeadersMixin
D = TypeVar("D")
T = TypeVar("T")
class Headers(dict[str, str]):
    """Typing stub for :class:`Headers`.

    The runtime class does not actually inherit from ``dict``; the base
    class is declared here only so type checkers accept dict-style use.
    """

    # Ordered internal storage of ``(key, value)`` tuples.
    _list: list[tuple[str, str]]
    def __init__(
        self,
        defaults: Mapping[str, str | Iterable[str]]
        | Iterable[tuple[str, str]]
        | None = None,
    ) -> None: ...
    @overload
    def __getitem__(self, key: str) -> str: ...
    @overload
    def __getitem__(self, key: int) -> tuple[str, str]: ...
    @overload
    def __getitem__(self, key: slice) -> Headers: ...
    @overload
    def __getitem__(self, key: str, _get_mode: Literal[True] = ...) -> str: ...
    def __eq__(self, other: object) -> bool: ...
    @overload  # type: ignore
    def get(self, key: str, default: str) -> str: ...
    @overload
    def get(self, key: str, default: str | None = None) -> str | None: ...
    @overload
    def get(
        self, key: str, default: T | None = None, type: Callable[[str], T] = ...
    ) -> T | None: ...
    @overload
    def getlist(self, key: str) -> list[str]: ...
    @overload
    def getlist(self, key: str, type: Callable[[str], T]) -> list[T]: ...
    def get_all(self, name: str) -> list[str]: ...
    def items(  # type: ignore
        self, lower: bool = False
    ) -> Iterator[tuple[str, str]]: ...
    def keys(self, lower: bool = False) -> Iterator[str]: ...  # type: ignore
    def values(self) -> Iterator[str]: ...  # type: ignore
    def extend(
        self,
        *args: Mapping[str, str | Iterable[str]] | Iterable[tuple[str, str]],
        **kwargs: str | Iterable[str],
    ) -> None: ...
    @overload
    def __delitem__(self, key: str | int | slice) -> None: ...
    @overload
    def __delitem__(self, key: str, _index_operation: Literal[False]) -> None: ...
    def remove(self, key: str) -> None: ...
    @overload  # type: ignore
    def pop(self, key: str, default: str | None = None) -> str: ...
    @overload
    def pop(
        self, key: int | None = None, default: tuple[str, str] | None = None
    ) -> tuple[str, str]: ...
    def popitem(self) -> tuple[str, str]: ...
    def __contains__(self, key: str) -> bool: ...  # type: ignore
    def has_key(self, key: str) -> bool: ...
    def __iter__(self) -> Iterator[tuple[str, str]]: ...  # type: ignore
    def add(self, _key: str, _value: str, **kw: str) -> None: ...
    def _validate_value(self, value: str) -> None: ...
    def add_header(self, _key: str, _value: str, **_kw: str) -> None: ...
    def clear(self) -> None: ...
    def set(self, _key: str, _value: str, **kw: str) -> None: ...
    def setlist(self, key: str, values: Iterable[str]) -> None: ...
    def setdefault(self, key: str, default: str) -> str: ...
    def setlistdefault(self, key: str, default: Iterable[str]) -> None: ...
    @overload
    def __setitem__(self, key: str, value: str) -> None: ...
    @overload
    def __setitem__(self, key: int, value: tuple[str, str]) -> None: ...
    @overload
    def __setitem__(self, key: slice, value: Iterable[tuple[str, str]]) -> None: ...
    @overload
    def update(
        self, __m: SupportsKeysAndGetItem[str, str], **kwargs: str | Iterable[str]
    ) -> None: ...
    @overload
    def update(
        self, __m: Iterable[tuple[str, str]], **kwargs: str | Iterable[str]
    ) -> None: ...
    @overload
    def update(self, **kwargs: str | Iterable[str]) -> None: ...
    def to_wsgi_list(self) -> list[tuple[str, str]]: ...
    def copy(self) -> Headers: ...
    def __copy__(self) -> Headers: ...
class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Typing stub for the read-only WSGI environ view of ``Headers``."""

    environ: WSGIEnvironment
    def __init__(self, environ: WSGIEnvironment) -> None: ...
    def __eq__(self, other: object) -> bool: ...
    def __getitem__(  # type: ignore
        self, key: str, _get_mode: Literal[False] = False
    ) -> str: ...
    def __iter__(self) -> Iterator[tuple[str, str]]: ...  # type: ignore
    def copy(self) -> NoReturn: ...

View file

@ -0,0 +1,242 @@
from __future__ import annotations
from itertools import repeat
from .._internal import _missing
def is_immutable(self):
    """Raise :exc:`TypeError` reporting that *self*'s type is immutable."""
    raise TypeError(f"{type(self).__name__!r} objects are immutable")
class ImmutableListMixin:
    """Makes a :class:`list` immutable.
    .. versionadded:: 0.5
    :private:
    """

    # Cached hash value; computed once from the tuple of contents.
    _hash_cache = None

    def __hash__(self):
        if self._hash_cache is not None:
            return self._hash_cache
        rv = self._hash_cache = hash(tuple(self))
        return rv

    def __reduce_ex__(self, protocol):
        # Pickle as a plain list of the contents.
        return type(self), (list(self),)

    # All mutating list APIs below raise TypeError via ``is_immutable``.
    def __delitem__(self, key):
        is_immutable(self)

    def __iadd__(self, other):
        is_immutable(self)

    def __imul__(self, other):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def append(self, item):
        is_immutable(self)

    def remove(self, item):
        is_immutable(self)

    def extend(self, iterable):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, index=-1):
        is_immutable(self)

    def reverse(self):
        is_immutable(self)

    def sort(self, key=None, reverse=False):
        is_immutable(self)
class ImmutableDictMixin:
    """Makes a :class:`dict` immutable.
    .. versionadded:: 0.5
    :private:
    """

    # Cached hash value; computed once from the frozen item set.
    _hash_cache = None

    @classmethod
    def fromkeys(cls, keys, value=None):
        # Bypass ``cls(...)`` so subclasses with different constructor
        # signatures still work.
        instance = super().__new__(cls)
        instance.__init__(zip(keys, repeat(value)))
        return instance

    def __reduce_ex__(self, protocol):
        # Pickle as a plain dict of the contents.
        return type(self), (dict(self),)

    def _iter_hashitems(self):
        # Hook for subclasses to feed different items into the hash.
        return self.items()

    def __hash__(self):
        if self._hash_cache is not None:
            return self._hash_cache
        rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
        return rv

    # All mutating dict APIs below raise TypeError via ``is_immutable``.
    def setdefault(self, key, default=None):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def pop(self, key, default=None):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def __delitem__(self, key):
        is_immutable(self)

    def clear(self):
        is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
    """Makes a :class:`MultiDict` immutable.
    .. versionadded:: 0.5
    :private:
    """

    def __reduce_ex__(self, protocol):
        # Pickle with multi-values preserved.
        return type(self), (list(self.items(multi=True)),)

    def _iter_hashitems(self):
        # Hash over all values per key, not only the first ones.
        return self.items(multi=True)

    # MultiDict-specific mutators are disabled as well.
    def add(self, key, value):
        is_immutable(self)

    def popitemlist(self):
        is_immutable(self)

    def poplist(self, key):
        is_immutable(self)

    def setlist(self, key, new_list):
        is_immutable(self)

    def setlistdefault(self, key, default_list=None):
        is_immutable(self)
class ImmutableHeadersMixin:
    """Makes a :class:`Headers` immutable.  We do not mark them as
    hashable though since the only usecase for this datastructure
    in Werkzeug is a view on a mutable structure.
    .. versionadded:: 0.5
    :private:
    """

    # Every Headers mutator raises TypeError via ``is_immutable``.
    def __delitem__(self, key, **kwargs):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def set(self, _key, _value, **kwargs):
        is_immutable(self)

    def setlist(self, key, values):
        is_immutable(self)

    def add(self, _key, _value, **kwargs):
        is_immutable(self)

    def add_header(self, _key, _value, **_kwargs):
        is_immutable(self)

    def remove(self, key):
        is_immutable(self)

    def extend(self, *args, **kwargs):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, key=None, default=_missing):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def setdefault(self, key, default):
        is_immutable(self)

    def setlistdefault(self, key, default):
        is_immutable(self)
def _calls_update(name):
    # Build a dict method that forwards to ``dict.<name>`` and then
    # notifies ``self.on_update`` (used by ``UpdateDictMixin`` below).
    def oncall(self, *args, **kw):
        rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
        if self.on_update is not None:
            self.on_update(self)
        return rv

    # Keep introspection friendly: the proxy reports the wrapped name.
    oncall.__name__ = name
    return oncall
class UpdateDictMixin(dict):
    """Calls ``self.on_update`` whenever the dict is modified in place.

    .. versionadded:: 0.5

    :private:
    """

    # No callback registered by default.
    on_update = None

    def setdefault(self, key, default=None):
        missing = key not in self
        value = super().setdefault(key, default)
        # Only an actual insertion counts as a modification.
        if missing and self.on_update is not None:
            self.on_update(self)
        return value

    def pop(self, key, default=_missing):
        had_key = key in self
        if default is _missing:
            value = super().pop(key)
        else:
            value = super().pop(key, default)
        # Only an actual removal counts as a modification.
        if had_key and self.on_update is not None:
            self.on_update(self)
        return value

    # The remaining mutators are plain proxies that always notify.
    __setitem__ = _calls_update("__setitem__")
    __delitem__ = _calls_update("__delitem__")
    clear = _calls_update("clear")
    popitem = _calls_update("popitem")
    update = _calls_update("update")

View file

@ -0,0 +1,97 @@
from collections.abc import Callable
from collections.abc import Hashable
from collections.abc import Iterable
from typing import Any
from typing import NoReturn
from typing import overload
from typing import SupportsIndex
from typing import TypeVar
from _typeshed import SupportsKeysAndGetItem
from .headers import Headers
K = TypeVar("K")
T = TypeVar("T")
V = TypeVar("V")
# Always raises TypeError at runtime, hence the ``NoReturn`` annotation.
def is_immutable(self: object) -> NoReturn: ...
class ImmutableListMixin(list[V]):
    """Typing stub: a :class:`list` whose mutators all raise."""

    _hash_cache: int | None
    def __hash__(self) -> int: ...  # type: ignore
    def __delitem__(self, key: SupportsIndex | slice) -> NoReturn: ...
    # Fixed: this module imports ``Any`` directly; ``t`` was never defined.
    def __iadd__(self, other: Any) -> NoReturn: ...  # type: ignore
    def __imul__(self, other: SupportsIndex) -> NoReturn: ...
    def __setitem__(self, key: int | slice, value: V) -> NoReturn: ...  # type: ignore
    def append(self, value: V) -> NoReturn: ...
    def remove(self, value: V) -> NoReturn: ...
    def extend(self, values: Iterable[V]) -> NoReturn: ...
    def insert(self, pos: SupportsIndex, value: V) -> NoReturn: ...
    def pop(self, index: SupportsIndex = -1) -> NoReturn: ...
    def reverse(self) -> NoReturn: ...
    def sort(
        self, key: Callable[[V], Any] | None = None, reverse: bool = False
    ) -> NoReturn: ...
class ImmutableDictMixin(dict[K, V]):
    """Typing stub: a :class:`dict` whose mutators all raise."""

    _hash_cache: int | None
    @classmethod
    def fromkeys(  # type: ignore
        cls, keys: Iterable[K], value: V | None = None
    ) -> ImmutableDictMixin[K, V]: ...
    def _iter_hashitems(self) -> Iterable[Hashable]: ...
    def __hash__(self) -> int: ...  # type: ignore
    def setdefault(self, key: K, default: V | None = None) -> NoReturn: ...
    def update(self, *args: Any, **kwargs: V) -> NoReturn: ...
    def pop(self, key: K, default: V | None = None) -> NoReturn: ...  # type: ignore
    def popitem(self) -> NoReturn: ...
    def __setitem__(self, key: K, value: V) -> NoReturn: ...
    def __delitem__(self, key: K) -> NoReturn: ...
    def clear(self) -> NoReturn: ...
class ImmutableMultiDictMixin(ImmutableDictMixin[K, V]):
    """Typing stub: a :class:`MultiDict` whose mutators all raise."""

    def _iter_hashitems(self) -> Iterable[Hashable]: ...
    def add(self, key: K, value: V) -> NoReturn: ...
    def popitemlist(self) -> NoReturn: ...
    def poplist(self, key: K) -> NoReturn: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> NoReturn: ...
    def setlistdefault(
        self, key: K, default_list: Iterable[V] | None = None
    ) -> NoReturn: ...
class ImmutableHeadersMixin(Headers):
    """Mixin making a :class:`Headers` instance immutable: every mutating
    method raises (stub)."""
    def __delitem__(self, key: Any, _index_operation: bool = True) -> NoReturn: ...
    def __setitem__(self, key: Any, value: Any) -> NoReturn: ...
    def set(self, _key: Any, _value: Any, **kw: Any) -> NoReturn: ...
    def setlist(self, key: Any, values: Any) -> NoReturn: ...
    def add(self, _key: Any, _value: Any, **kw: Any) -> NoReturn: ...
    def add_header(self, _key: Any, _value: Any, **_kw: Any) -> NoReturn: ...
    def remove(self, key: Any) -> NoReturn: ...
    def extend(self, *args: Any, **kwargs: Any) -> NoReturn: ...
    def update(self, *args: Any, **kwargs: Any) -> NoReturn: ...
    def insert(self, pos: Any, value: Any) -> NoReturn: ...
    def pop(self, key: Any = None, default: Any = ...) -> NoReturn: ...
    def popitem(self) -> NoReturn: ...
    def setdefault(self, key: Any, default: Any) -> NoReturn: ...
    def setlistdefault(self, key: Any, default: Any) -> NoReturn: ...
# Factory for dict methods that perform the named operation, then invoke
# the instance's on_update callback (stub).
def _calls_update(name: str) -> Callable[[UpdateDictMixin[K, V]], Any]: ...
class UpdateDictMixin(dict[K, V]):
    """Dict mixin that calls ``on_update`` after every mutating operation
    (stub)."""
    on_update: Callable[[UpdateDictMixin[K, V] | None, None], None]
    def setdefault(self, key: K, default: V | None = None) -> V: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: V | T = ...) -> V | T: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def __delitem__(self, key: K) -> None: ...
    def clear(self) -> None: ...
    def popitem(self) -> tuple[K, V]: ...
    @overload
    def update(self, __m: SupportsKeysAndGetItem[K, V], **kwargs: V) -> None: ...
    @overload
    def update(self, __m: Iterable[tuple[K, V]], **kwargs: V) -> None: ...
    @overload
    def update(self, **kwargs: V) -> None: ...

View file

@ -0,0 +1,180 @@
from __future__ import annotations
class IfRange:
    """Parsed representation of the HTTP ``If-Range`` header.

    Carries at most one of ``etag`` or ``date``; both may be ``None``,
    but never both set at once.

    .. versionadded:: 0.7
    """

    def __init__(self, etag=None, date=None):
        #: Unquoted etag. Ranges always operate on strong etags, so the
        #: weakness flag is irrelevant here.
        self.etag = etag
        #: Parsed date value, or `None`.
        self.date = date

    def to_header(self):
        """Serialize back into an HTTP header value."""
        if self.date is None:
            if self.etag is None:
                return ""
            return http.quote_etag(self.etag)
        return http.http_date(self.date)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return f"<{type(self).__name__} {str(self)!r}>"
class Range:
    """Parsed ``Range`` header. The helper methods support only ``bytes``
    as the unit. Any number of ranges may be stored, but the helpers only
    operate when exactly one range is present.

    :raise ValueError: If the ranges provided are invalid.

    .. versionchanged:: 0.15
        The ranges passed in are validated.

    .. versionadded:: 0.7
    """

    def __init__(self, units, ranges):
        #: The units of this range. Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges
        for begin, stop in ranges:
            if begin is None or (stop is not None and (begin < 0 or begin >= stop)):
                raise ValueError(f"{(begin, stop)} is not a valid range.")

    def range_for_length(self, length):
        """Resolve to a concrete ``(start, stop)`` tuple for a body of
        ``length`` bytes, or ``None`` when that is not possible (non-byte
        unit, unknown length, or not exactly one satisfiable range).
        """
        if self.units != "bytes" or length is None or len(self.ranges) != 1:
            return None
        begin, stop = self.ranges[0]
        if stop is None:
            stop = length
        if begin < 0:
            # Suffix range: count from the end of the body.
            begin += length
        if not http.is_byte_range_valid(begin, stop, length):
            return None
        return begin, min(stop, length)

    def make_content_range(self, length):
        """Build a :class:`~werkzeug.datastructures.ContentRange` for the
        given content length, or ``None`` if the range cannot be resolved.
        """
        resolved = self.range_for_length(length)
        if resolved is None:
            return None
        return ContentRange(self.units, resolved[0], resolved[1], length)

    def to_header(self):
        """Converts the object back into an HTTP header."""
        parts = []
        for begin, stop in self.ranges:
            if stop is not None:
                # Stored ranges are exclusive; the wire format is inclusive.
                parts.append(f"{begin}-{stop - 1}")
            elif begin >= 0:
                parts.append(f"{begin}-")
            else:
                parts.append(str(begin))
        return f"{self.units}={','.join(parts)}"

    def to_content_range_header(self, length):
        """Render a ``Content-Range`` header value for the given length,
        or ``None`` if the range cannot be resolved.
        """
        resolved = self.range_for_length(length)
        if resolved is None:
            return None
        return f"{self.units} {resolved[0]}-{resolved[1] - 1}/{length}"

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return f"<{type(self).__name__} {str(self)!r}>"
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
class ContentRange:
    """Represents the ``Content-Range`` header.

    .. versionadded:: 0.7
    """

    def __init__(self, units, start, stop, length=None, on_update=None):
        assert http.is_byte_range_valid(start, stop, length), "Bad range provided"
        #: Callback fired with ``self`` whenever a component changes.
        self.on_update = on_update
        self.set(start, stop, length, units)

    #: The units to use, usually "bytes"
    units = _callback_property("_units")
    #: The start point of the range or `None`.
    start = _callback_property("_start")
    #: The stop point of the range (non-inclusive) or `None`. Can only be
    #: `None` if also start is `None`.
    stop = _callback_property("_stop")
    #: The length of the range or `None`.
    length = _callback_property("_length")

    def set(self, start, stop, length=None, units="bytes"):
        """Update all components at once, firing ``on_update`` once."""
        assert http.is_byte_range_valid(start, stop, length), "Bad range provided"
        self._units, self._start, self._stop, self._length = units, start, stop, length
        if self.on_update is not None:
            self.on_update(self)

    def unset(self):
        """Sets the units to `None` which indicates that the header should
        no longer be used.
        """
        self.set(None, None, units=None)

    def to_header(self):
        if self.units is None:
            return ""
        length = "*" if self.length is None else self.length
        if self.start is None:
            return f"{self.units} */{length}"
        return f"{self.units} {self.start}-{self.stop - 1}/{length}"

    def __bool__(self):
        return self.units is not None

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return f"<{type(self).__name__} {str(self)!r}>"
# circular dependencies
from .. import http

View file

@ -0,0 +1,57 @@
from collections.abc import Callable
from datetime import datetime
class IfRange:
    """Stub for the parsed ``If-Range`` header object."""
    etag: str | None
    date: datetime | None
    def __init__(
        self, etag: str | None = None, date: datetime | None = None
    ) -> None: ...
    def to_header(self) -> str: ...
class Range:
    """Stub for the parsed ``Range`` header object."""
    units: str
    ranges: list[tuple[int, int | None]]
    def __init__(self, units: str, ranges: list[tuple[int, int | None]]) -> None: ...
    def range_for_length(self, length: int | None) -> tuple[int, int] | None: ...
    def make_content_range(self, length: int | None) -> ContentRange | None: ...
    def to_header(self) -> str: ...
    def to_content_range_header(self, length: int | None) -> str | None: ...
# Factory for properties that fire on_update on assignment (stub).
def _callback_property(name: str) -> property: ...
class ContentRange:
    """Stub for the parsed ``Content-Range`` header object."""
    on_update: Callable[[ContentRange], None] | None
    def __init__(
        self,
        units: str | None,
        start: int | None,
        stop: int | None,
        length: int | None = None,
        on_update: Callable[[ContentRange], None] | None = None,
    ) -> None: ...
    @property
    def units(self) -> str | None: ...
    @units.setter
    def units(self, value: str | None) -> None: ...
    @property
    def start(self) -> int | None: ...
    @start.setter
    def start(self, value: int | None) -> None: ...
    @property
    def stop(self) -> int | None: ...
    @stop.setter
    def stop(self, value: int | None) -> None: ...
    @property
    def length(self) -> int | None: ...
    @length.setter
    def length(self, value: int | None) -> None: ...
    def set(
        self,
        start: int | None,
        stop: int | None,
        length: int | None = None,
        units: str | None = "bytes",
    ) -> None: ...
    def unset(self) -> None: ...
    def to_header(self) -> str: ...

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,208 @@
from collections.abc import Callable
from collections.abc import Iterable
from collections.abc import Iterator
from collections.abc import Mapping
from typing import Any
from typing import Generic
from typing import Literal
from typing import NoReturn
from typing import overload
from typing import TypeVar
from .mixins import (
ImmutableDictMixin,
ImmutableListMixin,
ImmutableMultiDictMixin,
UpdateDictMixin,
)
D = TypeVar("D")
K = TypeVar("K")
T = TypeVar("T")
V = TypeVar("V")
_CD = TypeVar("_CD", bound="CallbackDict")
# Raise TypeError to signal that the given object rejects mutation (stub).
def is_immutable(self: object) -> NoReturn: ...
# Yield (key, value) pairs, expanding iterable values into one pair per item.
def iter_multi_items(
    mapping: Mapping[K, V | Iterable[V]] | Iterable[tuple[K, V]]
) -> Iterator[tuple[K, V]]: ...
# Concrete immutable list type (stub).
class ImmutableList(ImmutableListMixin[V]): ...
class TypeConversionDict(dict[K, V]):
    """Dict whose ``get`` can coerce the value through a ``type`` callable
    (stub)."""
    @overload
    def get(self, key: K, default: None = ..., type: None = ...) -> V | None: ...
    @overload
    def get(self, key: K, default: D, type: None = ...) -> D | V: ...
    @overload
    def get(self, key: K, default: D, type: Callable[[V], T]) -> D | T: ...
    @overload
    def get(self, key: K, type: Callable[[V], T]) -> T | None: ...
class ImmutableTypeConversionDict(ImmutableDictMixin[K, V], TypeConversionDict[K, V]):
    """Immutable variant of :class:`TypeConversionDict` (stub)."""
    def copy(self) -> TypeConversionDict[K, V]: ...
    def __copy__(self) -> ImmutableTypeConversionDict: ...
class MultiDict(TypeConversionDict[K, V]):
    """Dict subclass that can hold multiple values per key (stub)."""
    def __init__(
        self,
        mapping: Mapping[K, Iterable[V] | V] | Iterable[tuple[K, V]] | None = None,
    ) -> None: ...
    def __getitem__(self, item: K) -> V: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def add(self, key: K, value: V) -> None: ...
    @overload
    def getlist(self, key: K) -> list[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> list[T]: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> None: ...
    def setdefault(self, key: K, default: V | None = None) -> V: ...
    def setlistdefault(
        self, key: K, default_list: Iterable[V] | None = None
    ) -> list[V]: ...
    def items(self, multi: bool = False) -> Iterator[tuple[K, V]]: ... # type: ignore
    def lists(self) -> Iterator[tuple[K, list[V]]]: ...
    def values(self) -> Iterator[V]: ... # type: ignore
    def listvalues(self) -> Iterator[list[V]]: ...
    def copy(self) -> MultiDict[K, V]: ...
    def deepcopy(self, memo: Any = None) -> MultiDict[K, V]: ...
    @overload
    def to_dict(self) -> dict[K, V]: ...
    @overload
    def to_dict(self, flat: Literal[False]) -> dict[K, list[V]]: ...
    def update( # type: ignore
        self, mapping: Mapping[K, Iterable[V] | V] | Iterable[tuple[K, V]]
    ) -> None: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: V | T = ...) -> V | T: ...
    def popitem(self) -> tuple[K, V]: ...
    def poplist(self, key: K) -> list[V]: ...
    def popitemlist(self) -> tuple[K, list[V]]: ...
    def __copy__(self) -> MultiDict[K, V]: ...
    def __deepcopy__(self, memo: Any) -> MultiDict[K, V]: ...
class _omd_bucket(Generic[K, V]):
    """Doubly-linked-list node holding one key/value pair of an
    :class:`OrderedMultiDict` (stub)."""
    prev: _omd_bucket | None
    next: _omd_bucket | None
    key: K
    value: V
    def __init__(self, omd: OrderedMultiDict, key: K, value: V) -> None: ...
    def unlink(self, omd: OrderedMultiDict) -> None: ...
class OrderedMultiDict(MultiDict[K, V]):
    """MultiDict that preserves insertion order across all keys (stub)."""
    _first_bucket: _omd_bucket | None
    _last_bucket: _omd_bucket | None
    def __init__(self, mapping: Mapping[K, V] | None = None) -> None: ...
    def __eq__(self, other: object) -> bool: ...
    def __getitem__(self, key: K) -> V: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def __delitem__(self, key: K) -> None: ...
    def keys(self) -> Iterator[K]: ... # type: ignore
    def __iter__(self) -> Iterator[K]: ...
    def values(self) -> Iterator[V]: ... # type: ignore
    def items(self, multi: bool = False) -> Iterator[tuple[K, V]]: ... # type: ignore
    def lists(self) -> Iterator[tuple[K, list[V]]]: ...
    def listvalues(self) -> Iterator[list[V]]: ...
    def add(self, key: K, value: V) -> None: ...
    @overload
    def getlist(self, key: K) -> list[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> list[T]: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> None: ...
    def setlistdefault(
        self, key: K, default_list: Iterable[V] | None = None
    ) -> list[V]: ...
    def update( # type: ignore
        self, mapping: Mapping[K, V] | Iterable[tuple[K, V]]
    ) -> None: ...
    def poplist(self, key: K) -> list[V]: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: V | T = ...) -> V | T: ...
    def popitem(self) -> tuple[K, V]: ...
    def popitemlist(self) -> tuple[K, list[V]]: ...
class CombinedMultiDict(ImmutableMultiDictMixin[K, V], MultiDict[K, V]): # type: ignore
    """Read-only MultiDict view over several dicts, searched in order
    (stub)."""
    dicts: list[MultiDict[K, V]]
    def __init__(self, dicts: Iterable[MultiDict[K, V]] | None) -> None: ...
    @classmethod
    def fromkeys(cls, keys: Any, value: Any = None) -> NoReturn: ...
    def __getitem__(self, key: K) -> V: ...
    @overload # type: ignore
    def get(self, key: K) -> V | None: ...
    @overload
    def get(self, key: K, default: V | T = ...) -> V | T: ...
    @overload
    def get(
        self, key: K, default: T | None = None, type: Callable[[V], T] = ...
    ) -> T | None: ...
    @overload
    def getlist(self, key: K) -> list[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> list[T]: ...
    def _keys_impl(self) -> set[K]: ...
    def keys(self) -> set[K]: ... # type: ignore
    def __iter__(self) -> set[K]: ... # type: ignore
    def items(self, multi: bool = False) -> Iterator[tuple[K, V]]: ... # type: ignore
    def values(self) -> Iterator[V]: ... # type: ignore
    def lists(self) -> Iterator[tuple[K, list[V]]]: ...
    def listvalues(self) -> Iterator[list[V]]: ...
    def copy(self) -> MultiDict[K, V]: ...
    @overload
    def to_dict(self) -> dict[K, V]: ...
    @overload
    def to_dict(self, flat: Literal[False]) -> dict[K, list[V]]: ...
    def __contains__(self, key: K) -> bool: ... # type: ignore
    def has_key(self, key: K) -> bool: ...
class ImmutableDict(ImmutableDictMixin[K, V], dict[K, V]):
    """Concrete immutable dict type (stub)."""
    def copy(self) -> dict[K, V]: ...
    def __copy__(self) -> ImmutableDict[K, V]: ...
class ImmutableMultiDict( # type: ignore
    ImmutableMultiDictMixin[K, V], MultiDict[K, V]
):
    """Concrete immutable MultiDict type (stub)."""
    def copy(self) -> MultiDict[K, V]: ...
    def __copy__(self) -> ImmutableMultiDict[K, V]: ...
class ImmutableOrderedMultiDict( # type: ignore
    ImmutableMultiDictMixin[K, V], OrderedMultiDict[K, V]
):
    """Concrete immutable, order-preserving MultiDict type (stub)."""
    def _iter_hashitems(self) -> Iterator[tuple[int, tuple[K, V]]]: ...
    def copy(self) -> OrderedMultiDict[K, V]: ...
    def __copy__(self) -> ImmutableOrderedMultiDict[K, V]: ...
class CallbackDict(UpdateDictMixin[K, V], dict[K, V]):
    """Dict that calls ``on_update`` after each mutation (stub)."""
    def __init__(
        self,
        initial: Mapping[K, V] | Iterable[tuple[K, V]] | None = None,
        on_update: Callable[[_CD], None] | None = None,
    ) -> None: ...
class HeaderSet(set[str]):
    """Ordered, case-insensitive set of header values with an
    ``on_update`` callback (stub)."""
    _headers: list[str]
    _set: set[str]
    on_update: Callable[[HeaderSet], None] | None
    def __init__(
        self,
        headers: Iterable[str] | None = None,
        on_update: Callable[[HeaderSet], None] | None = None,
    ) -> None: ...
    def add(self, header: str) -> None: ...
    def remove(self, header: str) -> None: ...
    def update(self, iterable: Iterable[str]) -> None: ... # type: ignore
    def discard(self, header: str) -> None: ...
    def find(self, header: str) -> int: ...
    def index(self, header: str) -> int: ...
    def clear(self) -> None: ...
    def as_set(self, preserve_casing: bool = False) -> set[str]: ...
    def to_header(self) -> str: ...
    def __getitem__(self, idx: int) -> str: ...
    def __delitem__(self, idx: int) -> None: ...
    def __setitem__(self, idx: int, value: str) -> None: ...
    def __contains__(self, header: str) -> bool: ... # type: ignore
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...

View file

@ -0,0 +1,534 @@
from __future__ import annotations
import getpass
import hashlib
import json
import os
import pkgutil
import re
import sys
import time
import typing as t
import uuid
from contextlib import ExitStack
from io import BytesIO
from itertools import chain
from os.path import basename
from os.path import join
from zlib import adler32
from .._internal import _log
from ..exceptions import NotFound
from ..http import parse_cookie
from ..security import gen_salt
from ..utils import send_file
from ..wrappers.request import Request
from ..wrappers.response import Response
from .console import Console
from .tbtools import DebugFrameSummary
from .tbtools import DebugTraceback
from .tbtools import render_console_html
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
# Lifetime of a successful pin-auth cookie: one week, in seconds.
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin: str) -> str:
    """Return a short, stable digest of the debugger PIN for storage in
    the trust cookie (first 12 hex chars of a salted SHA-1).
    """
    salted = f"{pin} added salt".encode("utf-8", "replace")
    return hashlib.sha1(salted).hexdigest()[:12]
# Process-wide cache for get_machine_id(); populated on first call.
_machine_id: str | bytes | None = None
def get_machine_id() -> str | bytes | None:
    """Return a stable machine-specific identifier, or ``None`` when no
    platform source is available. The result is cached for the process;
    it is used to derive the debugger PIN and cookie name.
    """
    global _machine_id
    if _machine_id is not None:
        return _machine_id
    def _generate() -> str | bytes | None:
        linux = b""
        # machine-id is stable across boots, boot_id is not.
        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
            try:
                with open(filename, "rb") as f:
                    value = f.readline().strip()
            except OSError:
                continue
            if value:
                linux += value
                break
        # Containers share the same machine id, add some cgroup
        # information. This is used outside containers too but should be
        # relatively stable across boots.
        try:
            with open("/proc/self/cgroup", "rb") as f:
                linux += f.readline().strip().rpartition(b"/")[2]
        except OSError:
            pass
        if linux:
            return linux
        # On OS X, use ioreg to get the computer's serial number.
        try:
            # subprocess may not be available, e.g. Google App Engine
            # https://github.com/pallets/werkzeug/issues/925
            from subprocess import Popen, PIPE
            dump = Popen(
                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
            ).communicate()[0]
            match = re.search(b'"serial-number" = <([^>]+)', dump)
            if match is not None:
                return match.group(1)
        except (OSError, ImportError):
            pass
        # On Windows, use winreg to get the machine guid.
        if sys.platform == "win32":
            import winreg
            try:
                with winreg.OpenKey(
                    winreg.HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Microsoft\\Cryptography",
                    0,
                    winreg.KEY_READ | winreg.KEY_WOW64_64KEY,
                ) as rk:
                    guid: str | bytes
                    guid_type: int
                    guid, guid_type = winreg.QueryValueEx(rk, "MachineGuid")
                    if guid_type == winreg.REG_SZ:
                        return guid.encode("utf-8")
                    return guid
            except OSError:
                pass
        # No usable source found on this platform.
        return None
    _machine_id = _generate()
    return _machine_id
class _ConsoleFrame:
    """Helper class so that we can reuse the frame console code for the
    standalone console.
    """
    def __init__(self, namespace: dict[str, t.Any]):
        # Interactive console evaluating in the given namespace.
        self.console = Console(namespace)
        # Frame id; the standalone console is always registered under 0.
        self.id = 0
    def eval(self, code: str) -> t.Any:
        # Delegate evaluation to the wrapped console.
        return self.console.eval(code)
def get_pin_and_cookie_name(
    app: WSGIApplication,
) -> tuple[str, str] | tuple[None, None]:
    """Given an application object this returns a semi-stable 9 digit pin
    code and a random key. The hope is that this is stable between
    restarts to not make debugging particularly frustrating. If the pin
    was forcefully disabled this returns `None`.

    Second item in the resulting tuple is the cookie name for remembering.
    """
    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
    rv = None
    num = None
    # Pin was explicitly disabled
    if pin == "off":
        return None, None
    # Pin was provided explicitly
    if pin is not None and pin.replace("-", "").isdecimal():
        # If there are separators in the pin, return it directly
        if "-" in pin:
            rv = pin
        else:
            num = pin
    modname = getattr(app, "__module__", t.cast(object, app).__class__.__module__)
    username: str | None
    try:
        # getuser imports the pwd module, which does not exist in Google
        # App Engine. It may also raise a KeyError if the UID does not
        # have a username, such as in Docker.
        username = getpass.getuser()
    except (ImportError, KeyError):
        username = None
    mod = sys.modules.get(modname)
    # This information only exists to make the cookie unique on the
    # computer, not as a security feature.
    probably_public_bits = [
        username,
        modname,
        getattr(app, "__name__", type(app).__name__),
        getattr(mod, "__file__", None),
    ]
    # This information is here to make it harder for an attacker to
    # guess the cookie name. They are unlikely to be contained anywhere
    # within the unauthenticated debug page.
    private_bits = [str(uuid.getnode()), get_machine_id()]
    h = hashlib.sha1()
    # Feed order matters: the digest (and thus the pin and cookie name)
    # must stay stable across restarts on the same machine.
    for bit in chain(probably_public_bits, private_bits):
        if not bit:
            continue
        if isinstance(bit, str):
            bit = bit.encode("utf-8")
        h.update(bit)
    h.update(b"cookiesalt")
    cookie_name = f"__wzd{h.hexdigest()[:20]}"
    # If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate out 9 digits
    if num is None:
        h.update(b"pinsalt")
        num = f"{int(h.hexdigest(), 16):09d}"[:9]
    # Format the pincode in groups of digits for easier remembering if
    # we don't have a result yet.
    if rv is None:
        for group_size in 5, 4, 3:
            if len(num) % group_size == 0:
                rv = "-".join(
                    num[x : x + group_size].rjust(group_size, "0")
                    for x in range(0, len(num), group_size)
                )
                break
        else:
            rv = num
    return rv, cookie_name
class DebuggedApplication:
    """Enables debugging support for a given application::

        from werkzeug.debug import DebuggedApplication
        from myapp import app
        app = DebuggedApplication(app, evalex=True)

    The ``evalex`` argument allows evaluating expressions in any frame
    of a traceback. This works by preserving each frame with its local
    state. Some state, such as context globals, cannot be restored with
    the frame by default. When ``evalex`` is enabled,
    ``environ["werkzeug.debug.preserve_context"]`` will be a callable
    that takes a context manager, and can be called multiple times.
    Each context manager will be entered before evaluating code in the
    frame, then exited again, so they can perform setup and cleanup for
    each call.

    :param app: the WSGI application to run debugged.
    :param evalex: enable exception evaluation feature (interactive
                   debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in this
                        environment. This parameter is ignored in current
                        versions.
    :param console_path: the URL for a general purpose console.
    :param console_init_func: the function that is executed before starting
                              the general purpose console. The return value
                              is used as initial namespace.
    :param show_hidden_frames: by default hidden traceback frames are skipped.
                               You can show them by setting this parameter
                               to `True`.
    :param pin_security: can be used to disable the pin based security system.
    :param pin_logging: enables the logging of the pin system.

    .. versionchanged:: 2.2
        Added the ``werkzeug.debug.preserve_context`` environ key.
    """
    # Filled lazily by the ``pin`` / ``pin_cookie_name`` properties.
    _pin: str
    _pin_cookie: str
    def __init__(
        self,
        app: WSGIApplication,
        evalex: bool = False,
        request_key: str = "werkzeug.request",
        console_path: str = "/console",
        console_init_func: t.Callable[[], dict[str, t.Any]] | None = None,
        show_hidden_frames: bool = False,
        pin_security: bool = True,
        pin_logging: bool = True,
    ) -> None:
        # Normalize falsy values to None so display_console can fall back
        # to its default namespace.
        if not console_init_func:
            console_init_func = None
        self.app = app
        self.evalex = evalex
        # Traceback frames (and the standalone console frame, id 0) keyed
        # by id(), so they can be re-entered from the browser.
        self.frames: dict[int, DebugFrameSummary | _ConsoleFrame] = {}
        self.frame_contexts: dict[int, list[t.ContextManager[None]]] = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # Per-process random secret; required on every debugger request.
        self.secret = gen_salt(20)
        self._failed_pin_auth = 0
        self.pin_logging = pin_logging
        if pin_security:
            # Print out the pin for the debugger on standard out.
            if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
                _log("warning", " * Debugger is active!")
            if self.pin is None:
                _log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
            else:
                _log("info", " * Debugger PIN: %s", self.pin)
        else:
            self.pin = None
    @property
    def pin(self) -> str | None:
        # Computed lazily; also populates the cookie name as a side effect.
        if not hasattr(self, "_pin"):
            pin_cookie = get_pin_and_cookie_name(self.app)
            self._pin, self._pin_cookie = pin_cookie # type: ignore
        return self._pin
    @pin.setter
    def pin(self, value: str) -> None:
        self._pin = value
    @property
    def pin_cookie_name(self) -> str:
        """The name of the pin cookie."""
        if not hasattr(self, "_pin_cookie"):
            pin_cookie = get_pin_and_cookie_name(self.app)
            self._pin, self._pin_cookie = pin_cookie # type: ignore
        return self._pin_cookie
    def debug_application(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterator[bytes]:
        """Run the application and conserve the traceback frames."""
        contexts: list[t.ContextManager[t.Any]] = []
        if self.evalex:
            environ["werkzeug.debug.preserve_context"] = contexts.append
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            yield from app_iter
            if hasattr(app_iter, "close"):
                app_iter.close()
        except Exception as e:
            if hasattr(app_iter, "close"):
                app_iter.close() # type: ignore
            # Keep every frame (and its preserved contexts) around so the
            # browser can evaluate expressions in them later.
            tb = DebugTraceback(e, skip=1, hide=not self.show_hidden_frames)
            for frame in tb.all_frames:
                self.frames[id(frame)] = frame
                self.frame_contexts[id(frame)] = contexts
            is_trusted = bool(self.check_pin_trust(environ))
            html = tb.render_debugger_html(
                evalex=self.evalex,
                secret=self.secret,
                evalex_trusted=is_trusted,
            )
            response = Response(html, status=500, mimetype="text/html")
            try:
                yield from response(environ, start_response)
            except Exception:
                # if we end up here there has been output but an error
                # occurred. in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ["wsgi.errors"].write(
                    "Debugging middleware caught exception in streamed "
                    "response at a point where response headers were already "
                    "sent.\n"
                )
                environ["wsgi.errors"].write("".join(tb.render_traceback_text()))
    def execute_command( # type: ignore[return]
        self,
        request: Request,
        command: str,
        frame: DebugFrameSummary | _ConsoleFrame,
    ) -> Response:
        """Execute a command in a console."""
        # Re-enter any context managers preserved for this frame so the
        # evaluation sees the same ambient state as the original request.
        contexts = self.frame_contexts.get(id(frame), [])
        with ExitStack() as exit_stack:
            for cm in contexts:
                exit_stack.enter_context(cm)
            return Response(frame.eval(command), mimetype="text/html")
    def display_console(self, request: Request) -> Response:
        """Display a standalone shell."""
        if 0 not in self.frames:
            if self.console_init_func is None:
                ns = {}
            else:
                ns = dict(self.console_init_func())
            ns.setdefault("app", self.app)
            self.frames[0] = _ConsoleFrame(ns)
        is_trusted = bool(self.check_pin_trust(request.environ))
        return Response(
            render_console_html(secret=self.secret, evalex_trusted=is_trusted),
            mimetype="text/html",
        )
    def get_resource(self, request: Request, filename: str) -> Response:
        """Return a static resource from the shared folder."""
        # basename() prevents path traversal via the requested filename.
        path = join("shared", basename(filename))
        try:
            data = pkgutil.get_data(__package__, path)
        except OSError:
            return NotFound() # type: ignore[return-value]
        else:
            if data is None:
                return NotFound() # type: ignore[return-value]
            etag = str(adler32(data) & 0xFFFFFFFF)
            return send_file(
                BytesIO(data), request.environ, download_name=filename, etag=etag
            )
    def check_pin_trust(self, environ: WSGIEnvironment) -> bool | None:
        """Checks if the request passed the pin test. This returns `True` if the
        request is trusted on a pin/cookie basis and returns `False` if not.
        Additionally if the cookie's stored pin hash is wrong it will return
        `None` so that appropriate action can be taken.
        """
        if self.pin is None:
            return True
        val = parse_cookie(environ).get(self.pin_cookie_name)
        if not val or "|" not in val:
            return False
        ts_str, pin_hash = val.split("|", 1)
        try:
            ts = int(ts_str)
        except ValueError:
            return False
        if pin_hash != hash_pin(self.pin):
            return None
        # Cookie is only trusted for PIN_TIME seconds after it was set.
        return (time.time() - PIN_TIME) < ts
    def _fail_pin_auth(self) -> None:
        # Throttle brute-force attempts: after 5 failures, each attempt
        # costs a 5 second delay.
        time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
        self._failed_pin_auth += 1
    def pin_auth(self, request: Request) -> Response:
        """Authenticates with the pin."""
        exhausted = False
        auth = False
        trust = self.check_pin_trust(request.environ)
        pin = t.cast(str, self.pin)
        # If the trust return value is `None` it means that the cookie is
        # set but the stored pin hash value is bad. This means that the
        # pin was changed. In this case we count a bad auth and unset the
        # cookie. This way it becomes harder to guess the cookie name
        # instead of the pin as we still count up failures.
        bad_cookie = False
        if trust is None:
            self._fail_pin_auth()
            bad_cookie = True
        # If we're trusted, we're authenticated.
        elif trust:
            auth = True
        # If we failed too many times, then we're locked out.
        elif self._failed_pin_auth > 10:
            exhausted = True
        # Otherwise go through pin based authentication
        else:
            entered_pin = request.args["pin"]
            if entered_pin.strip().replace("-", "") == pin.replace("-", ""):
                self._failed_pin_auth = 0
                auth = True
            else:
                self._fail_pin_auth()
        rv = Response(
            json.dumps({"auth": auth, "exhausted": exhausted}),
            mimetype="application/json",
        )
        if auth:
            rv.set_cookie(
                self.pin_cookie_name,
                f"{int(time.time())}|{hash_pin(pin)}",
                httponly=True,
                samesite="Strict",
                secure=request.is_secure,
            )
        elif bad_cookie:
            rv.delete_cookie(self.pin_cookie_name)
        return rv
    def log_pin_request(self) -> Response:
        """Log the pin if needed."""
        if self.pin_logging and self.pin is not None:
            _log(
                "info", " * To enable the debugger you need to enter the security pin:"
            )
            _log("info", " * Debugger pin code: %s", self.pin)
        return Response("")
    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Dispatch the requests."""
        # important: don't ever access a function here that reads the incoming
        # form data! Otherwise the application won't have access to that data
        # any more!
        request = Request(environ)
        response = self.debug_application
        if request.args.get("__debugger__") == "yes":
            cmd = request.args.get("cmd")
            arg = request.args.get("f")
            secret = request.args.get("s")
            frame = self.frames.get(request.args.get("frm", type=int)) # type: ignore
            if cmd == "resource" and arg:
                response = self.get_resource(request, arg) # type: ignore
            elif cmd == "pinauth" and secret == self.secret:
                response = self.pin_auth(request) # type: ignore
            elif cmd == "printpin" and secret == self.secret:
                response = self.log_pin_request() # type: ignore
            elif (
                self.evalex
                and cmd is not None
                and frame is not None
                and self.secret == secret
                and self.check_pin_trust(environ)
            ):
                # Arbitrary code execution — gated on the per-process
                # secret AND a trusted pin cookie.
                response = self.execute_command(request, cmd, frame) # type: ignore
        elif (
            self.evalex
            and self.console_path is not None
            and request.path == self.console_path
        ):
            response = self.display_console(request) # type: ignore
        return response(environ, start_response)

View file

@ -0,0 +1,219 @@
from __future__ import annotations
import code
import sys
import typing as t
from contextvars import ContextVar
from types import CodeType
from markupsafe import escape
from .repr import debug_repr
from .repr import dump
from .repr import helper
# Per-context capture stream for console output (set by ThreadedStream.push).
_stream: ContextVar[HTMLStringO] = ContextVar("werkzeug.debug.console.stream")
# Per-context interactive console instance, used by the display hook.
_ipy: ContextVar = ContextVar("werkzeug.debug.console.ipy")
class HTMLStringO:
    """An in-memory text stream that HTML-escapes everything written
    through the public ``write`` methods."""

    def __init__(self) -> None:
        self._buffer: list[str] = []

    def isatty(self) -> bool:
        return False

    def close(self) -> None:
        pass

    def flush(self) -> None:
        pass

    def seek(self, n: int, mode: int = 0) -> None:
        pass

    def readline(self) -> str:
        # Consume and return the oldest chunk, or "" when empty.
        if not self._buffer:
            return ""
        return self._buffer.pop(0)

    def reset(self) -> str:
        """Return everything buffered so far and clear the buffer."""
        pending = "".join(self._buffer)
        self._buffer.clear()
        return pending

    def _write(self, x: str) -> None:
        # Raw append, bypassing HTML escaping.
        self._buffer.append(x)

    def write(self, x: str) -> None:
        self._write(escape(x))

    def writelines(self, x: t.Iterable[str]) -> None:
        self._write(escape("".join(x)))
class ThreadedStream:
    """Thread-local wrapper for sys.stdout for the interactive console."""
    @staticmethod
    def push() -> None:
        # Install the proxy as sys.stdout (once) and bind a fresh capture
        # stream for the current context.
        if not isinstance(sys.stdout, ThreadedStream):
            sys.stdout = t.cast(t.TextIO, ThreadedStream())
        _stream.set(HTMLStringO())
    @staticmethod
    def fetch() -> str:
        # Return and clear the current context's captured output;
        # "" when no capture stream is active.
        try:
            stream = _stream.get()
        except LookupError:
            return ""
        return stream.reset()
    @staticmethod
    def displayhook(obj: object) -> None:
        # Fall back to the original display hook outside a console context.
        try:
            stream = _stream.get()
        except LookupError:
            return _displayhook(obj) # type: ignore
        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            _ipy.get().locals["_"] = obj
            stream._write(debug_repr(obj))
    def __setattr__(self, name: str, value: t.Any) -> None:
        raise AttributeError(f"read only attribute {name}")
    def __dir__(self) -> list[str]:
        return dir(sys.__stdout__)
    def __getattribute__(self, name: str) -> t.Any:
        # Proxy *every* attribute access to the context's capture stream,
        # or to the real stdout when no capture is active.
        try:
            stream = _stream.get()
        except LookupError:
            stream = sys.__stdout__ # type: ignore[assignment]
        return getattr(stream, name)
    def __repr__(self) -> str:
        return repr(sys.__stdout__)
# Install the HTML-aware display hook, keeping a reference to the original
# so ThreadedStream.displayhook can fall back to it outside the console.
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader:
    """Maps code objects compiled in the console back to their source text."""

    def __init__(self) -> None:
        self._storage: dict[int, str] = {}

    def register(self, code: CodeType, source: str) -> None:
        """Remember *source* for *code* and for any code objects directly
        nested in it (e.g. bodies of functions defined in the console).
        """
        self._storage[id(code)] = source

        for const in code.co_consts:
            if isinstance(const, CodeType):
                self._storage[id(const)] = source

    def get_source_by_code(self, code: CodeType) -> str | None:
        """Return the registered source for *code*, or ``None``."""
        return self._storage.get(id(code))
class _InteractiveConsole(code.InteractiveInterpreter):
    """Interpreter backing the web debugger's console.

    Output is captured as HTML via ``ThreadedStream`` and the source of
    every compiled snippet is registered with a ``_ConsoleLoader`` so
    tracebacks can display console code.
    """

    locals: dict[str, t.Any]

    def __init__(self, globals: dict[str, t.Any], locals: dict[str, t.Any]) -> None:
        self.loader = _ConsoleLoader()
        # Merge globals into locals and expose the debugger helpers; the
        # loader is installed as __loader__ so source lookup can find it.
        locals = {
            **globals,
            **locals,
            "dump": dump,
            "help": helper,
            "__loader__": self.loader,
        }
        super().__init__(locals)
        original_compile = self.compile

        # Wrap the compiler so each successfully compiled code object is
        # registered together with its source text.
        def compile(source: str, filename: str, symbol: str) -> CodeType | None:
            code = original_compile(source, filename, symbol)

            if code is not None:
                self.loader.register(code, source)

            return code

        self.compile = compile  # type: ignore[assignment]
        # more/buffer implement multi-line statement continuation.
        self.more = False
        self.buffer: list[str] = []

    def runsource(self, source: str, **kwargs: t.Any) -> str:  # type: ignore
        """Run one line of input and return the HTML for prompt + echo + output."""
        source = f"{source.rstrip()}\n"
        ThreadedStream.push()
        prompt = "... " if self.more else ">>> "

        try:
            # Re-evaluate the whole pending buffer plus the new line.
            source_to_eval = "".join(self.buffer + [source])

            if super().runsource(source_to_eval, "<debugger>", "single"):
                # Incomplete statement: keep buffering lines.
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            output = ThreadedStream.fetch()

        return f"{prompt}{escape(source)}{output}"

    def runcode(self, code: CodeType) -> None:
        try:
            exec(code, self.locals)
        except Exception:
            self.showtraceback()

    def showtraceback(self) -> None:
        # Render runtime errors as debugger HTML; skip=1 hides this frame.
        from .tbtools import DebugTraceback

        exc = t.cast(BaseException, sys.exc_info()[1])
        te = DebugTraceback(exc, skip=1)
        sys.stdout._write(te.render_traceback_html())  # type: ignore

    def showsyntaxerror(self, filename: str | None = None) -> None:
        # skip=4 hides the codeop/code-module machinery above user code.
        from .tbtools import DebugTraceback

        exc = t.cast(BaseException, sys.exc_info()[1])
        te = DebugTraceback(exc, skip=4)
        sys.stdout._write(te.render_traceback_html())  # type: ignore

    def write(self, data: str) -> None:
        sys.stdout.write(data)
class Console:
    """An interactive console."""

    def __init__(
        self,
        globals: dict[str, t.Any] | None = None,
        locals: dict[str, t.Any] | None = None,
    ) -> None:
        globals = {} if globals is None else globals
        locals = {} if locals is None else locals
        self._ipy = _InteractiveConsole(globals, locals)

    def eval(self, code: str) -> str:
        """Run *code* in the console's interpreter and return rendered HTML."""
        _ipy.set(self._ipy)
        saved_stdout = sys.stdout

        try:
            return self._ipy.runsource(code)
        finally:
            # runsource may have swapped in the ThreadedStream proxy;
            # always restore whatever stdout was before.
            sys.stdout = saved_stdout

View file

@ -0,0 +1,283 @@
"""Object representations for debugging purposes. Unlike the default
repr, these expose more information and produce HTML instead of ASCII.
Together with the CSS and JavaScript of the debugger this gives a
colorful and more compact output.
"""
from __future__ import annotations
import codecs
import re
import sys
import typing as t
from collections import deque
from traceback import format_exception_only
from markupsafe import escape
missing = object()
_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
RegexType = type(_paragraph_re)
HELP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
<pre class=help>%(text)s</pre>
</div>\
"""
OBJECT_DUMP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
%(repr)s
<table>%(items)s</table>
</div>\
"""
def debug_repr(obj: object) -> str:
    """Create the HTML debug representation of *obj*."""
    generator = DebugReprGenerator()
    return generator.repr(obj)
def dump(obj: object = missing) -> None:
    """Print the object details to ``stdout._write`` (for the interactive
    console of the web debugger).

    Without an argument, the caller's local variables are dumped instead.
    """
    gen = DebugReprGenerator()
    if obj is missing:
        # No argument: inspect the calling frame for its locals.
        rv = gen.dump_locals(sys._getframe(1).f_locals)
    else:
        rv = gen.dump_object(obj)
    sys.stdout._write(rv)  # type: ignore
class _Helper:
    """Displays an HTML version of the normal help, for the interactive
    debugger only because it requires a patched sys.stdout.
    """

    def __repr__(self) -> str:
        return "Type help(object) for help about object."

    def __call__(self, topic: t.Any | None = None) -> None:
        if topic is None:
            sys.stdout._write(f"<span class=help>{self!r}</span>")  # type: ignore
            return
        import pydoc

        # pydoc writes to the patched sys.stdout; collect what it printed.
        pydoc.help(topic)
        rv = sys.stdout.reset()  # type: ignore
        # First paragraph becomes the box title, the rest the body.
        paragraphs = _paragraph_re.split(rv)
        if len(paragraphs) > 1:
            title = paragraphs[0]
            text = "\n\n".join(paragraphs[1:])
        else:
            title = "Help"
            text = paragraphs[0]
        sys.stdout._write(HELP_HTML % {"title": title, "text": text})  # type: ignore


# Singleton exposed as ``help`` in the console namespace.
helper = _Helper()
def _add_subclass_info(
    inner: str, obj: object, base: t.Type | tuple[t.Type, ...]
) -> str:
    """Wrap *inner* in a ``Module.Class(...)`` prefix when *obj* is an
    instance of a subclass of *base* rather than exactly *base*.

    :param inner: already-rendered HTML for the object's contents.
    :param obj: the object being represented.
    :param base: the expected exact type, or a tuple of such types.
    """
    if isinstance(base, tuple):
        for cls in base:
            if type(obj) is cls:
                return inner
    elif type(obj) is base:
        return inner

    module = ""

    # "__builtin__" and "exceptions" are the Python 2 module names;
    # "builtins" is the Python 3 spelling.  Subclasses living in any of
    # them get no module prefix, everything else is fully qualified.
    if obj.__class__.__module__ not in ("__builtin__", "builtins", "exceptions"):
        module = f'<span class="module">{obj.__class__.__module__}.</span>'

    return f"{module}{type(obj).__name__}({inner})"
def _sequence_repr_maker(
    left: str, right: str, base: t.Type, limit: int = 8
) -> t.Callable[[DebugReprGenerator, t.Iterable, bool], str]:
    """Create a repr method for a sequence type delimited by *left* and
    *right*.  Items past *limit* go into an "extended" span the debugger
    renders collapsed.
    """

    def proxy(self: DebugReprGenerator, obj: t.Iterable, recursive: bool) -> str:
        if recursive:
            return _add_subclass_info(f"{left}...{right}", obj, base)

        parts = [left]
        extended = False

        for index, item in enumerate(obj):
            if index:
                parts.append(", ")

            if index == limit:
                # Open the collapsible tail section.
                parts.append('<span class="extended">')
                extended = True

            parts.append(self.repr(item))

        if extended:
            parts.append("</span>")

        parts.append(right)
        return _add_subclass_info("".join(parts), obj, base)

    return proxy
class DebugReprGenerator:
    """Render HTML debug representations of arbitrary objects, tracking
    the objects currently being rendered to cut off self-references.
    """

    def __init__(self) -> None:
        # Objects in the middle of being rendered (recursion detection).
        self._stack: list[t.Any] = []

    list_repr = _sequence_repr_maker("[", "]", list)
    tuple_repr = _sequence_repr_maker("(", ")", tuple)
    set_repr = _sequence_repr_maker("set([", "])", set)
    frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
    deque_repr = _sequence_repr_maker(
        '<span class="module">collections.</span>deque([', "])", deque
    )

    def regex_repr(self, obj: t.Pattern) -> str:
        # Render the pattern as an r"" literal, undoing repr's escaping.
        pattern = repr(obj.pattern)
        pattern = codecs.decode(pattern, "unicode-escape", "ignore")
        pattern = f"r{pattern}"
        return f're.compile(<span class="string regex">{pattern}</span>)'

    def string_repr(self, obj: str | bytes, limit: int = 70) -> str:
        buf = ['<span class="string">']
        r = repr(obj)

        # shorten the repr when the hidden part would be at least 3 chars
        if len(r) - limit > 2:
            buf.extend(
                (
                    escape(r[:limit]),
                    '<span class="extended">',
                    escape(r[limit:]),
                    "</span>",
                )
            )
        else:
            buf.append(escape(r))

        buf.append("</span>")
        out = "".join(buf)

        # if the repr looks like a standard string, add subclass info if needed
        if r[0] in "'\"" or (r[0] == "b" and r[1] in "'\""):
            return _add_subclass_info(out, obj, (bytes, str))

        # otherwise, assume the repr distinguishes the subclass already
        return out

    def dict_repr(
        self,
        d: dict[int, None] | dict[str, int] | dict[str | int, int],
        recursive: bool,
        limit: int = 5,
    ) -> str:
        if recursive:
            return _add_subclass_info("{...}", d, dict)

        buf = ["{"]
        have_extended_section = False

        for idx, (key, value) in enumerate(d.items()):
            if idx:
                buf.append(", ")

            # Entries past the limit go into a collapsed "extended" span.
            if idx == limit - 1:
                buf.append('<span class="extended">')
                have_extended_section = True

            buf.append(
                f'<span class="pair"><span class="key">{self.repr(key)}</span>:'
                f' <span class="value">{self.repr(value)}</span></span>'
            )

        if have_extended_section:
            buf.append("</span>")

        buf.append("}")
        return _add_subclass_info("".join(buf), d, dict)

    def object_repr(self, obj: type[dict] | t.Callable | type[list] | None) -> str:
        # Fallback: escaped plain repr.
        r = repr(obj)
        return f'<span class="object">{escape(r)}</span>'

    def dispatch_repr(self, obj: t.Any, recursive: bool) -> str:
        # Type-based dispatch to the specialized repr methods above.
        if obj is helper:
            return f'<span class="help">{helper!r}</span>'

        if isinstance(obj, (int, float, complex)):
            return f'<span class="number">{obj!r}</span>'

        if isinstance(obj, str) or isinstance(obj, bytes):
            return self.string_repr(obj)

        if isinstance(obj, RegexType):
            return self.regex_repr(obj)

        if isinstance(obj, list):
            return self.list_repr(obj, recursive)

        if isinstance(obj, tuple):
            return self.tuple_repr(obj, recursive)

        if isinstance(obj, set):
            return self.set_repr(obj, recursive)

        if isinstance(obj, frozenset):
            return self.frozenset_repr(obj, recursive)

        if isinstance(obj, dict):
            return self.dict_repr(obj, recursive)

        if isinstance(obj, deque):
            return self.deque_repr(obj, recursive)

        return self.object_repr(obj)

    def fallback_repr(self) -> str:
        # Used when the object's own repr (or our rendering) raised.
        try:
            info = "".join(format_exception_only(*sys.exc_info()[:2]))
        except Exception:
            info = "?"

        return (
            '<span class="brokenrepr">'
            f"&lt;broken repr ({escape(info.strip())})&gt;</span>"
        )

    def repr(self, obj: object) -> str:
        """Render *obj*, detecting recursion by identity against the
        in-progress stack.
        """
        recursive = False

        for item in self._stack:
            if item is obj:
                recursive = True
                break

        self._stack.append(obj)

        try:
            try:
                return self.dispatch_repr(obj, recursive)
            except Exception:
                return self.fallback_repr()
        finally:
            self._stack.pop()

    def dump_object(self, obj: object) -> str:
        """Render a full dump box for *obj* (contents or attributes)."""
        repr = None
        items: list[tuple[str, str]] | None = None

        # A dict with only string keys is shown as its key/value contents.
        if isinstance(obj, dict):
            title = "Contents of"
            items = []

            for key, value in obj.items():
                if not isinstance(key, str):
                    items = None
                    break

                items.append((key, self.repr(value)))

        # Everything else (or a dict with non-string keys) is shown as
        # its attributes from dir().
        if items is None:
            items = []
            repr = self.repr(obj)

            for key in dir(obj):
                try:
                    items.append((key, self.repr(getattr(obj, key))))
                except Exception:
                    pass

            title = "Details for"

        title += f" {object.__repr__(obj)[1:-1]}"
        return self.render_object_dump(items, title, repr)

    def dump_locals(self, d: dict[str, t.Any]) -> str:
        """Render a dump box for a frame's local variables."""
        items = [(key, self.repr(value)) for key, value in d.items()]
        return self.render_object_dump(items, "Local variables in frame")

    def render_object_dump(
        self, items: list[tuple[str, str]], title: str, repr: str | None = None
    ) -> str:
        # Fill the OBJECT_DUMP_HTML template; values are already HTML.
        html_items = []

        for key, value in items:
            html_items.append(f"<tr><th>{escape(key)}<td><pre class=repr>{value}</pre>")

        if not html_items:
            html_items.append("<tr><td><em>Nothing</em>")

        return OBJECT_DUMP_HTML % {
            "title": escape(title),
            "repr": f"<pre class=repr>{repr if repr else ''}</pre>",
            "items": "\n".join(html_items),
        }

View file

@ -0,0 +1,6 @@
Silk icon set 1.3 by Mark James <mjames@gmail.com>
http://www.famfamfam.com/lab/icons/silk/
License: [CC-BY-2.5](https://creativecommons.org/licenses/by/2.5/)
or [CC-BY-3.0](https://creativecommons.org/licenses/by/3.0/)

Binary file not shown.

After

Width:  |  Height:  |  Size: 507 B

View file

@ -0,0 +1,359 @@
// Entry point: wire up the debugger UI once the DOM is ready.
docReady(() => {
  if (!EVALEX_TRUSTED) {
    initPinBox();
  }
  // if we are in console mode, show the console.
  if (CONSOLE_MODE && EVALEX) {
    createInteractiveConsole();
  }

  const frames = document.querySelectorAll("div.traceback div.frame");
  if (EVALEX) {
    addConsoleIconToFrames(frames);
  }

  // Clicking the error summary scrolls the traceback into view.
  addEventListenersToElements(document.querySelectorAll("div.detail"), "click", () =>
    document.querySelector("div.traceback").scrollIntoView(false)
  );
  addToggleFrameTraceback(frames);
  addToggleTraceTypesOnClick(document.querySelectorAll("h2.traceback"));
  addInfoPrompt(document.querySelectorAll("span.nojavascript"));
  wrapPlainTraceback();
});
// Expand/collapse a frame's surrounding source lines when it is clicked.
function addToggleFrameTraceback(frames) {
  frames.forEach((frame) => {
    frame.addEventListener("click", () => {
      frame.getElementsByTagName("pre")[0].parentElement.classList.toggle("expanded");
    });
  })
}
// Replace the copy/paste <textarea> with a <pre> so the plain traceback wraps.
function wrapPlainTraceback() {
  const plainTraceback = document.querySelector("div.plain textarea");
  const wrapper = document.createElement("pre");
  const textNode = document.createTextNode(plainTraceback.textContent);
  wrapper.appendChild(textNode);
  plainTraceback.replaceWith(wrapper);
}
// Wire up the PIN form: POST-free auth via a GET to the debugger backend,
// unlocking the console on success.
function initPinBox() {
  document.querySelector(".pin-prompt form").addEventListener(
    "submit",
    function (event) {
      event.preventDefault();

      const pin = encodeURIComponent(this.pin.value);
      const encodedSecret = encodeURIComponent(SECRET);
      const btn = this.btn;
      // Disable the button while the request is in flight.
      btn.disabled = true;

      fetch(
        `${document.location.pathname}?__debugger__=yes&cmd=pinauth&pin=${pin}&s=${encodedSecret}`
      )
        .then((res) => res.json())
        .then(({auth, exhausted}) => {
          if (auth) {
            EVALEX_TRUSTED = true;
            fadeOut(document.getElementsByClassName("pin-prompt")[0]);
          } else {
            alert(
              `Error: ${
                exhausted
                  ? "too many attempts. Restart server to retry."
                  : "incorrect pin"
              }`
            );
          }
        })
        .catch((err) => {
          alert("Error: Could not verify PIN. Network error?");
          console.error(err);
        })
        .finally(() => (btn.disabled = false));
    },
    false
  );
}
// Ask the backend to print the PIN on the server's stdout and show the
// PIN entry overlay (only while the console is still locked).
function promptForPin() {
  if (!EVALEX_TRUSTED) {
    const encodedSecret = encodeURIComponent(SECRET);
    fetch(
      `${document.location.pathname}?__debugger__=yes&cmd=printpin&s=${encodedSecret}`
    );
    const pinPrompt = document.getElementsByClassName("pin-prompt")[0];
    fadeIn(pinPrompt);
    document.querySelector('.pin-prompt input[name="pin"]').focus();
  }
}
/**
 * Open (or toggle) an interactive console attached to *target*.
 *
 * Builds the console DOM on first use, keeps a simple input history, and
 * submits commands for *frameID* via handleConsoleSubmit. Returns the
 * console element so the caller can cache and re-toggle it.
 */
function openShell(consoleNode, target, frameID) {
  promptForPin();
  if (consoleNode) {
    // Already built: just show/hide it.
    slideToggle(consoleNode);
    return consoleNode;
  }
  let historyPos = 0;
  const history = [""];
  const consoleElement = createConsole();
  const output = createConsoleOutput();
  const form = createConsoleInputForm();
  const command = createConsoleInput();

  target.parentNode.appendChild(consoleElement);
  consoleElement.append(output);
  consoleElement.append(form);
  form.append(command);
  command.focus();
  slideToggle(consoleElement);

  form.addEventListener("submit", (e) => {
    handleConsoleSubmit(e, command, frameID).then((consoleOutput) => {
      output.append(consoleOutput);
      command.focus();
      consoleElement.scrollTo(0, consoleElement.scrollHeight);
      // Append the command to the history, keeping the trailing ""
      // entry as the fresh-input slot.
      const old = history.pop();
      history.push(command.value);
      if (typeof old !== "undefined") {
        history.push(old);
      }
      historyPos = history.length - 1;
      command.value = "";
    });
  });

  command.addEventListener("keydown", (e) => {
    if (e.key === "l" && e.ctrlKey) {
      // Ctrl+L clears the screen, terminal-style.
      output.innerText = "--- screen cleared ---";
    } else if (e.key === "ArrowUp" || e.key === "ArrowDown") {
      // Handle up arrow and down arrow.
      if (e.key === "ArrowUp" && historyPos > 0) {
        e.preventDefault();
        historyPos--;
      } else if (e.key === "ArrowDown" && historyPos < history.length - 1) {
        historyPos++;
      }
      command.value = history[historyPos];
    }
    return false;
  });

  return consoleElement;
}
// Attach *listener* for *event* to every element in *elements*.
function addEventListenersToElements(elements, event, listener) {
  elements.forEach((el) => el.addEventListener(event, listener));
}
/**
 * Replace the "no JavaScript" notices with usage hints for the
 * interactive debugger (including console help when EVALEX is on).
 */
function addInfoPrompt(elements) {
  for (let i = 0; i < elements.length; i++) {
    elements[i].innerHTML =
      "<p>To switch between the interactive traceback and the plaintext " +
      'one, you can click on the "Traceback" headline. From the text ' +
      "traceback you can also create a paste of it. " +
      (!EVALEX
        ? ""
        : "For code execution mouse-over the frame you want to debug and " +
          "click on the console icon on the right side." +
          "<p>You can execute arbitrary Python code in the stack frames and " +
          "there are some extra helpers available for introspection:" +
          "<ul><li><code>dump()</code> shows all variables in the frame" +
          "<li><code>dump(obj)</code> dumps all that's known about the object</ul>");
    elements[i].classList.remove("nojavascript");
  }
}
// Add a console icon to each source <pre> of every frame; clicking it
// opens an interactive shell scoped to that frame.
function addConsoleIconToFrames(frames) {
  for (let i = 0; i < frames.length; i++) {
    let consoleNode = null;
    const target = frames[i];
    // Frame element ids look like "frame-<n>"; strip the prefix.
    const frameID = frames[i].id.substring(6);

    for (let j = 0; j < target.getElementsByTagName("pre").length; j++) {
      const img = createIconForConsole();
      img.addEventListener("click", (e) => {
        e.stopPropagation();
        consoleNode = openShell(consoleNode, target, frameID);
        return false;
      });

      target.getElementsByTagName("pre")[j].append(img);
    }
  }
}
// Show/hide *target* by toggling its "active" class (styled in style.css).
function slideToggle(target) {
  target.classList.toggle("active");
}
/**
 * Clicking a traceback headline switches between the interactive and
 * plain-text traceback views.
 */
function addToggleTraceTypesOnClick(elements) {
  for (let i = 0; i < elements.length; i++) {
    elements[i].addEventListener("click", () => {
      document.querySelector("div.traceback").classList.toggle("hidden");
      document.querySelector("div.plain").classList.toggle("hidden");
    });
    elements[i].style.cursor = "pointer";
    // Start with the plain traceback hidden.
    document.querySelector("div.plain").classList.toggle("hidden");
  }
}
// Build the (initially active) <pre> element hosting a console.
function createConsole() {
  const consoleNode = document.createElement("pre");
  consoleNode.classList.add("console");
  consoleNode.classList.add("active");
  return consoleNode;
}
// Build a console's output area.
function createConsoleOutput() {
  const output = document.createElement("div");
  output.classList.add("output");
  output.innerHTML = "[console ready]";
  return output;
}
// Build a console's ">>> " input form.
function createConsoleInputForm() {
  const form = document.createElement("form");
  form.innerHTML = "&gt;&gt;&gt; ";
  return form;
}
// Build a console's text input with browser assists disabled.
function createConsoleInput() {
  const command = document.createElement("input");
  command.type = "text";
  command.setAttribute("autocomplete", "off");
  command.setAttribute("spellcheck", false);
  command.setAttribute("autocapitalize", "off");
  command.setAttribute("autocorrect", "off");
  return command;
}
// Build the per-frame console icon, served by the debugger backend.
function createIconForConsole() {
  const img = document.createElement("img");
  img.setAttribute("src", "?__debugger__=yes&cmd=resource&f=console.png");
  img.setAttribute("title", "Open an interactive python shell in this frame");
  return img;
}
// Build the toggle link that expands/collapses long output sections.
function createExpansionButtonForConsole() {
  const expansionButton = document.createElement("a");
  expansionButton.setAttribute("href", "#");
  expansionButton.setAttribute("class", "toggle");
  expansionButton.innerHTML = "&nbsp;&nbsp;";
  return expansionButton;
}
// Console-mode page: replace the placeholder content with a live shell.
function createInteractiveConsole() {
  const target = document.querySelector("div.console div.inner");
  while (target.firstChild) {
    target.removeChild(target.firstChild);
  }
  openShell(null, target, 0);
}
/**
 * Submit a console command for *frameID* to the debugger backend and
 * resolve with a DOM node containing the rendered HTML output.
 */
function handleConsoleSubmit(e, command, frameID) {
  // Prevent page from refreshing.
  e.preventDefault();

  return new Promise((resolve) => {
    // Get input command.
    const cmd = command.value;

    // Setup GET request.
    const urlPath = "";
    const params = {
      __debugger__: "yes",
      cmd: cmd,
      frm: frameID,
      s: SECRET,
    };
    const paramString = Object.keys(params)
      .map((key) => {
        return "&" + encodeURIComponent(key) + "=" + encodeURIComponent(params[key]);
      })
      .join("");

    fetch(urlPath + "?" + paramString)
      .then((res) => {
        return res.text();
      })
      .then((data) => {
        const tmp = document.createElement("div");
        tmp.innerHTML = data;
        resolve(tmp);

        // Handle expandable span for long list outputs.
        // Example to test: list(range(13))
        let wrapperAdded = false;
        const wrapperSpan = document.createElement("span");
        const expansionButton = createExpansionButtonForConsole();

        tmp.querySelectorAll("span.extended").forEach((spanToWrap) => {
          const parentDiv = spanToWrap.parentNode;

          if (!wrapperAdded) {
            parentDiv.insertBefore(wrapperSpan, spanToWrap);
            wrapperAdded = true;
          }

          parentDiv.removeChild(spanToWrap);
          wrapperSpan.append(spanToWrap);
          spanToWrap.hidden = true;

          expansionButton.addEventListener("click", () => {
            spanToWrap.hidden = !spanToWrap.hidden;
            expansionButton.classList.toggle("open");
            return false;
          });
        });

        // Add expansion button at end of wrapper.
        if (wrapperAdded) {
          wrapperSpan.append(expansionButton);
        }
      })
      .catch((err) => {
        console.error(err);
      });
    return false;
  });
}
// Fade *element* out by stepping opacity down each animation frame, then
// remove it from layout.
function fadeOut(element) {
  element.style.opacity = 1;

  (function fade() {
    element.style.opacity -= 0.1;

    if (element.style.opacity < 0) {
      element.style.display = "none";
    } else {
      requestAnimationFrame(fade);
    }
  })();
}
// Fade *element* in, showing it with *display* (defaults to "block").
function fadeIn(element, display) {
  element.style.opacity = 0;
  element.style.display = display || "block";

  (function fade() {
    let val = parseFloat(element.style.opacity) + 0.1;

    if (val <= 1) {
      element.style.opacity = val;
      requestAnimationFrame(fade);
    }
  })();
}
// Run *fn* once the DOM is ready (immediately, if it already is).
function docReady(fn) {
  if (document.readyState === "complete" || document.readyState === "interactive") {
    setTimeout(fn, 1);
  } else {
    document.addEventListener("DOMContentLoaded", fn);
  }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 200 B

View file

@ -0,0 +1,150 @@
/* Base typography and page layout. */
body, input { font-family: sans-serif; color: #000; text-align: center;
    margin: 1em; padding: 0; font-size: 15px; }
h1, h2, h3 { font-weight: normal; }
input { background-color: #fff; margin: 0; text-align: left;
    outline: none !important; }
input[type="submit"] { padding: 3px 6px; }
a { color: #11557C; }
a:hover { color: #177199; }
pre, code,
textarea { font-family: monospace; font-size: 14px; }

/* Debugger page chrome: heading, error detail, footer. */
div.debugger { text-align: left; padding: 12px; margin: auto;
    background-color: white; }
h1 { font-size: 36px; margin: 0 0 0.3em 0; }
div.detail { cursor: pointer; }
div.detail p { margin: 0 0 8px 13px; font-size: 14px; white-space: pre-wrap;
    font-family: monospace; }
div.explanation { margin: 20px 13px; font-size: 15px; color: #555; }
div.footer { font-size: 13px; text-align: right; margin: 30px 0;
    color: #86989B; }
h2 { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 9px;
    background-color: #11557C; color: white; }
h2 em, h3 em { font-style: normal; color: #A5D6D9; font-weight: normal; }

/* Interactive and plain-text traceback views. */
div.traceback, div.plain { border: 1px solid #ddd; margin: 0 0 1em 0; padding: 10px; }
div.plain p { margin: 0; }
div.plain textarea,
div.plain pre { margin: 10px 0 0 0; padding: 4px;
    background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.plain textarea { width: 99%; height: 300px; }
div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
    background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.traceback .library .current { background: white; color: #555; }
div.traceback .expanded .current { background: #E8EFF0; color: black; }
div.traceback pre:hover { background-color: #DDECEE; color: black; cursor: pointer; }
div.traceback div.source.expanded pre + pre { border-top: none; }
/* Context lines around a frame are hidden until the frame is expanded. */
div.traceback span.ws { display: none; }
div.traceback pre.before, div.traceback pre.after { display: none; background: white; }
div.traceback div.source.expanded pre.before,
div.traceback div.source.expanded pre.after {
    display: block;
}
div.traceback div.source.expanded span.ws {
    display: inline;
}
div.traceback blockquote { margin: 1em 0 0 0; padding: 0; white-space: pre-line; }
/* The per-frame console icon only shows on hover. */
div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
div.traceback img:hover { background-color: #ddd; cursor: pointer;
    border-color: #BFDDE0; }
div.traceback pre:hover img { display: block; }
div.traceback cite.filename { font-style: normal; color: #3B666B; }

/* Interactive console and its dump/help boxes. */
pre.console { border: 1px solid #ccc; background: white!important;
    color: black; padding: 5px!important;
    margin: 3px 0 0 0!important; cursor: default!important;
    max-height: 400px; overflow: auto; }
pre.console form { color: #555; }
pre.console input { background-color: transparent; color: #555;
    width: 90%; font-family: monospace; font-size: 14px;
    border: none!important; }
span.string { color: #30799B; }
span.number { color: #9C1A1C; }
span.help { color: #3A7734; }
span.object { color: #485F6E; }
span.extended { opacity: 0.5; }
span.extended:hover { opacity: 1; }
a.toggle { text-decoration: none; background-repeat: no-repeat;
    background-position: center center;
    background-image: url(?__debugger__=yes&cmd=resource&f=more.png); }
a.toggle:hover { background-color: #444; }
a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }
pre.console div.traceback,
pre.console div.box { margin: 5px 10px; white-space: normal;
    border: 1px solid #11557C; padding: 10px;
    font-family: sans-serif;  }
pre.console div.box h3,
pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
    background: #11557C; color: white; }
pre.console div.traceback pre:hover { cursor: default; background: #E8EFF0; }
pre.console div.traceback pre.syntaxerror { background: inherit; border: none;
    margin: 20px -10px -10px -10px;
    padding: 10px; border-top: 1px solid #BFDDE0;
    background: #E8EFF0; }
pre.console div.noframe-traceback pre.syntaxerror { margin-top: -10px; border: none; }
pre.console div.box pre.repr { padding: 0; margin: 0; background-color: white; border: none; }
pre.console div.box table { margin-top: 6px; }
pre.console div.box pre { border: none; }
pre.console div.box pre.help { background-color: white; }
pre.console div.box pre.help:hover { cursor: default; }
pre.console table tr { vertical-align: top; }
div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }

/* Wrap long traceback/console lines across browsers. */
div.traceback pre, div.console pre {
    white-space: pre-wrap;      /* css-3 should we be so lucky... */
    white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
    white-space: -pre-wrap;     /* Opera 4-6 ?? */
    white-space: -o-pre-wrap;   /* Opera 7 ?? */
    word-wrap: break-word;      /* Internet Explorer 5.5+ */
    _white-space: pre;          /* IE only hack to re-specify in
                                   addition to word-wrap  */
}

/* Full-screen overlay asking for the console PIN. */
div.pin-prompt {
    position: absolute;
    display: none;
    top: 0;
    bottom: 0;
    left: 0;
    right: 0;
    background: rgba(255, 255, 255, 0.8);
}
div.pin-prompt .inner {
    background: #eee;
    padding: 10px 50px;
    width: 350px;
    margin: 10% auto 0 auto;
    border: 1px solid #ccc;
    border-radius: 2px;
}

/* Divider between chained exceptions in the traceback list. */
div.exc-divider {
    margin: 0.7em 0 0 -1em;
    padding: 0.5em;
    background: #11557C;
    color: #ddd;
    border: 1px solid #ddd;
}

.console.active {
    max-height: 0!important;
    display: none;
}

.hidden {
    display: none;
}

View file

@ -0,0 +1,437 @@
from __future__ import annotations
import itertools
import linecache
import os
import re
import sys
import sysconfig
import traceback
import typing as t
from markupsafe import escape
from ..utils import cached_property
from .console import Console
HEADER = """\
<!doctype html>
<html lang=en>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&amp;cmd=resource&amp;f=style.css">
<link rel="shortcut icon"
href="?__debugger__=yes&amp;cmd=resource&amp;f=console.png">
<script src="?__debugger__=yes&amp;cmd=resource&amp;f=debugger.js"></script>
<script>
var CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
</script>
</head>
<body style="background-color: #fff">
<div class="debugger">
"""
FOOTER = """\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
<div class="pin-prompt">
<div class="inner">
<h3>Console Locked</h3>
<p>
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.
<form>
<p>PIN:
<input type=text name=pin size=14>
<input type=submit name=btn value="Confirm Pin">
</form>
</div>
</div>
</body>
</html>
"""
PAGE_HTML = (
HEADER
+ """\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<p>
This is the Copy/Paste friendly version of the traceback.
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
"""
+ FOOTER
+ """
<!--
%(plaintext_cs)s
-->
"""
)
CONSOLE_HTML = (
HEADER
+ """\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
"""
+ FOOTER
)
SUMMARY_HTML = """\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
"""
FRAME_HTML = """\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<div class="source %(library)s">%(lines)s</div>
</div>
"""
def _process_traceback(
    exc: BaseException,
    te: traceback.TracebackException | None = None,
    *,
    skip: int = 0,
    hide: bool = True,
) -> traceback.TracebackException:
    """Build a ``TracebackException`` whose stack entries are
    :class:`DebugFrameSummary` objects carrying the live frame namespaces.

    :param exc: the exception being rendered.
    :param te: an existing ``TracebackException`` to augment; created
        from *exc* when not given.
    :param skip: drop this many frames from the top of the stack.
    :param hide: honor ``__traceback_hide__`` frame-local markers.
    """
    if te is None:
        te = traceback.TracebackException.from_exception(exc, lookup_lines=False)

    # Get the frames the same way StackSummary.extract did, in order
    # to match each frame with the FrameSummary to augment.
    frame_gen = traceback.walk_tb(exc.__traceback__)
    limit = getattr(sys, "tracebacklimit", None)

    if limit is not None:
        if limit < 0:
            limit = 0

        frame_gen = itertools.islice(frame_gen, limit)

    if skip:
        frame_gen = itertools.islice(frame_gen, skip, None)
        del te.stack[:skip]

    new_stack: list[DebugFrameSummary] = []
    hidden = False

    # Match each frame with the FrameSummary that was generated.
    # Hide frames using Paste's __traceback_hide__ rules. Replace
    # all visible FrameSummary with DebugFrameSummary.
    for (f, _), fs in zip(frame_gen, te.stack):
        if hide:
            hide_value = f.f_locals.get("__traceback_hide__", False)

            if hide_value in {"before", "before_and_this"}:
                # Discard everything collected so far.
                new_stack = []
                hidden = False

                if hide_value == "before_and_this":
                    continue
            elif hide_value in {"reset", "reset_and_this"}:
                hidden = False

                if hide_value == "reset_and_this":
                    continue
            elif hide_value in {"after", "after_and_this"}:
                hidden = True

                if hide_value == "after_and_this":
                    continue
            elif hide_value or hidden:
                continue

        frame_args: dict[str, t.Any] = {
            "filename": fs.filename,
            "lineno": fs.lineno,
            "name": fs.name,
            "locals": f.f_locals,
            "globals": f.f_globals,
        }

        # Newer Pythons add column information to frame summaries.
        if hasattr(fs, "colno"):
            frame_args["colno"] = fs.colno
            frame_args["end_colno"] = fs.end_colno  # type: ignore[attr-defined]

        new_stack.append(DebugFrameSummary(**frame_args))

    # The codeop module is used to compile code from the interactive
    # debugger. Hide any codeop frames from the bottom of the traceback.
    while new_stack:
        module = new_stack[0].global_ns.get("__name__")

        if module is None:
            module = new_stack[0].local_ns.get("__name__")

        if module == "codeop":
            del new_stack[0]
        else:
            break

    te.stack[:] = new_stack

    # Recursively process chained exceptions.
    if te.__context__:
        context_exc = t.cast(BaseException, exc.__context__)
        te.__context__ = _process_traceback(context_exc, te.__context__, hide=hide)

    if te.__cause__:
        cause_exc = t.cast(BaseException, exc.__cause__)
        te.__cause__ = _process_traceback(cause_exc, te.__cause__, hide=hide)

    return te
class DebugTraceback:
    """Wrap a processed :class:`traceback.TracebackException` for
    rendering as plain text or as the interactive debugger page.
    """

    __slots__ = ("_te", "_cache_all_tracebacks", "_cache_all_frames")

    def __init__(
        self,
        exc: BaseException,
        te: traceback.TracebackException | None = None,
        *,
        skip: int = 0,
        hide: bool = True,
    ) -> None:
        self._te = _process_traceback(exc, te, skip=skip, hide=hide)

    def __str__(self) -> str:
        return f"<{type(self).__name__} {self._te}>"

    @cached_property
    def all_tracebacks(
        self,
    ) -> list[tuple[str | None, traceback.TracebackException]]:
        """The traceback and its chained predecessors, newest first, each
        paired with the message describing how it chains to the previous.
        """
        out = []
        current = self._te

        while current is not None:
            if current.__cause__ is not None:
                chained_msg = (
                    "The above exception was the direct cause of the"
                    " following exception"
                )
                chained_exc = current.__cause__
            elif current.__context__ is not None and not current.__suppress_context__:
                chained_msg = (
                    "During handling of the above exception, another"
                    " exception occurred"
                )
                chained_exc = current.__context__
            else:
                chained_msg = None
                chained_exc = None

            out.append((chained_msg, current))
            current = chained_exc

        return out

    @cached_property
    def all_frames(self) -> list[DebugFrameSummary]:
        # All frames flattened across the whole exception chain.
        return [
            f for _, te in self.all_tracebacks for f in te.stack  # type: ignore[misc]
        ]

    def render_traceback_text(self) -> str:
        return "".join(self._te.format())

    def render_traceback_html(self, include_title: bool = True) -> str:
        """Render the frame list (summary section) as HTML."""
        # Only dim library frames when user and library frames are mixed.
        library_frames = [f.is_library for f in self.all_frames]
        mark_library = 0 < sum(library_frames) < len(library_frames)
        rows = []

        if not library_frames:
            classes = "traceback noframe-traceback"
        else:
            classes = "traceback"

        for msg, current in reversed(self.all_tracebacks):
            row_parts = []

            if msg is not None:
                row_parts.append(f'<li><div class="exc-divider">{msg}:</div>')

            for frame in current.stack:
                frame = t.cast(DebugFrameSummary, frame)
                info = f' title="{escape(frame.info)}"' if frame.info else ""
                row_parts.append(f"<li{info}>{frame.render_html(mark_library)}")

            rows.append("\n".join(row_parts))

        is_syntax_error = issubclass(self._te.exc_type, SyntaxError)

        if include_title:
            if is_syntax_error:
                title = "Syntax Error"
            else:
                title = "Traceback <em>(most recent call last)</em>:"
        else:
            title = ""

        exc_full = escape("".join(self._te.format_exception_only()))

        if is_syntax_error:
            description = f"<pre class=syntaxerror>{exc_full}</pre>"
        else:
            description = f"<blockquote>{exc_full}</blockquote>"

        return SUMMARY_HTML % {
            "classes": classes,
            "title": f"<h3>{title}</h3>",
            "frames": "\n".join(rows),
            "description": description,
        }

    def render_debugger_html(
        self, evalex: bool, secret: str, evalex_trusted: bool
    ) -> str:
        """Render the complete interactive debugger page."""
        exc_lines = list(self._te.format_exception_only())
        plaintext = "".join(self._te.format())
        return PAGE_HTML % {
            "evalex": "true" if evalex else "false",
            "evalex_trusted": "true" if evalex_trusted else "false",
            "console": "false",
            "title": escape(exc_lines[0]),
            "exception": escape("".join(exc_lines)),
            "exception_type": escape(self._te.exc_type.__name__),
            "summary": self.render_traceback_html(include_title=False),
            "plaintext": escape(plaintext),
            # De-dashed copy inside an HTML comment ("--" would end it).
            "plaintext_cs": re.sub("-{2,}", "-", plaintext),
            "secret": secret,
        }
class DebugFrameSummary(traceback.FrameSummary):
    """A :class:`traceback.FrameSummary` that can evaluate code in the
    frame's namespace.
    """
    __slots__ = (
        "local_ns",
        "global_ns",
        "_cache_info",
        "_cache_is_library",
        "_cache_console",
    )
    def __init__(
        self,
        *,
        locals: dict[str, t.Any],
        globals: dict[str, t.Any],
        **kwargs: t.Any,
    ) -> None:
        # Pass locals=None so the base class does not snapshot reprs; the
        # live namespaces are kept instead, for evaluation in the console.
        super().__init__(locals=None, **kwargs)
        self.local_ns = locals
        self.global_ns = globals
    @cached_property
    def info(self) -> str | None:
        """Extra frame info, taken from a ``__traceback_info__`` local in
        the traced code, if present."""
        return self.local_ns.get("__traceback_info__")
    @cached_property
    def is_library(self) -> bool:
        """Whether the frame's file lives under one of the interpreter's
        install paths as reported by :mod:`sysconfig`."""
        return any(
            self.filename.startswith((path, os.path.realpath(path)))
            for path in sysconfig.get_paths().values()
        )
    @cached_property
    def console(self) -> Console:
        """An interactive console bound to this frame's namespaces."""
        return Console(self.global_ns, self.local_ns)
    def eval(self, code: str) -> t.Any:
        """Evaluate ``code`` in this frame's console and return the result."""
        return self.console.eval(code)
    def render_html(self, mark_library: bool) -> str:
        """Render this frame as HTML with surrounding source context.

        :param mark_library: Whether library frames should receive the
            ``library`` CSS class.
        """
        # Show up to 5 lines of context on each side of the active line.
        context = 5
        lines = linecache.getlines(self.filename)
        line_idx = self.lineno - 1  # type: ignore[operator]
        start_idx = max(0, line_idx - context)
        stop_idx = min(len(lines), line_idx + context + 1)
        rendered_lines = []
        def render_line(line: str, cls: str) -> None:
            # Leading whitespace is emitted as its own span so the
            # template can style indentation separately.
            line = line.expandtabs().rstrip()
            stripped_line = line.strip()
            prefix = len(line) - len(stripped_line)
            colno = getattr(self, "colno", 0)
            end_colno = getattr(self, "end_colno", 0)
            # When fine-grained column info is available (present on the
            # summary only on newer Pythons, hence getattr), underline the
            # exact columns of the failing expression on the current line.
            if cls == "current" and colno and end_colno:
                arrow = (
                    f'\n<span class="ws">{" " * prefix}</span>'
                    f'{" " * (colno - prefix)}{"^" * (end_colno - colno)}'
                )
            else:
                arrow = ""
            rendered_lines.append(
                f'<pre class="line {cls}"><span class="ws">{" " * prefix}</span>'
                f"{escape(stripped_line) if stripped_line else ' '}"
                f"{arrow if arrow else ''}</pre>"
            )
        if lines:
            for line in lines[start_idx:line_idx]:
                render_line(line, "before")
            render_line(lines[line_idx], "current")
            for line in lines[line_idx + 1 : stop_idx]:
                render_line(line, "after")
        return FRAME_HTML % {
            "id": id(self),
            "filename": escape(self.filename),
            "lineno": self.lineno,
            "function_name": escape(self.name),
            "lines": "\n".join(rendered_lines),
            "library": "library" if mark_library and self.is_library else "",
        }
def render_console_html(secret: str, evalex_trusted: bool) -> str:
    """Render the standalone interactive console page.

    :param secret: The debugger secret embedded in the page.
    :param evalex_trusted: Whether code evaluation is trusted (unlocked).
    """
    values = {
        "evalex": "true",
        "evalex_trusted": "true" if evalex_trusted else "false",
        "console": "true",
        "title": "Console",
        "secret": secret,
    }
    return CONSOLE_HTML % values

View file

@ -0,0 +1,879 @@
"""Implements a number of Python exceptions which can be raised from within
a view to trigger a standard HTTP non-200 response.
Usage Example
-------------
.. code-block:: python
from werkzeug.wrappers.request import Request
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@Request.application
def application(request):
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. However, they are not Werkzeug response objects. You
can get a response object by calling ``get_response()`` on an HTTP
exception.
Keep in mind that you may have to pass an environ (WSGI) or scope
(ASGI) to ``get_response()`` because some errors fetch additional
information relating to the request.
If you want to hook in a different exception page for, say, a 404 status
code, you can add a second ``except`` clause for that specific subclass of error:
.. code-block:: python
@Request.application
def application(request):
try:
return view(request)
except NotFound as e:
return not_found(request)
except HTTPException as e:
return e
"""
from __future__ import annotations
import typing as t
from datetime import datetime
from markupsafe import escape
from markupsafe import Markup
from ._internal import _get_environ
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIEnvironment
from .datastructures import WWWAuthenticate
from .sansio.response import Response
from .wrappers.request import Request as WSGIRequest
from .wrappers.response import Response as WSGIResponse
class HTTPException(Exception):
    """The base class for all HTTP exceptions. This exception can be called as a WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.

    :param description: Override the class-level default description for
        this instance.
    :param response: A response object to return unchanged from
        :meth:`get_response` instead of rendering the default error page.

    .. versionchanged:: 2.1
        Removed the ``wrap`` class method.
    """

    #: The HTTP status code. ``None`` on this base class; subclasses set it.
    code: int | None = None
    #: The default description rendered in the error page body.
    description: str | None = None

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        super().__init__()

        if description is not None:
            self.description = description

        self.response = response

    @property
    def name(self) -> str:
        """The status name."""
        from .http import HTTP_STATUS_CODES

        return HTTP_STATUS_CODES.get(self.code, "Unknown Error")  # type: ignore

    def get_description(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict | None = None,
    ) -> str:
        """Get the description as an escaped HTML ``<p>`` element.

        :param environ: The WSGI environ for the current request, if any.
        :param scope: The ASGI scope for the current request, if any.
        """
        if self.description is None:
            description = ""
        else:
            description = self.description

        # Escape HTML, but keep intentional line breaks as <br>.
        description = escape(description).replace("\n", Markup("<br>"))
        return f"<p>{description}</p>"

    def get_body(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict | None = None,
    ) -> str:
        """Get the HTML body.

        :param environ: The WSGI environ for the current request, if any.
        :param scope: The ASGI scope for the current request, if any.
        """
        return (
            "<!doctype html>\n"
            "<html lang=en>\n"
            f"<title>{self.code} {escape(self.name)}</title>\n"
            f"<h1>{escape(self.name)}</h1>\n"
            # Fix: forward ``scope`` too, so subclasses that use the ASGI
            # scope in ``get_description`` receive it from here as well.
            f"{self.get_description(environ, scope)}\n"
        )

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict | None = None,
    ) -> list[tuple[str, str]]:
        """Get a list of headers."""
        return [("Content-Type", "text/html; charset=utf-8")]

    def get_response(
        self,
        environ: WSGIEnvironment | WSGIRequest | None = None,
        scope: dict | None = None,
    ) -> Response:
        """Get a response object. If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request. This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        from .wrappers.response import Response as WSGIResponse  # noqa: F811

        if self.response is not None:
            return self.response

        if environ is not None:
            environ = _get_environ(environ)

        headers = self.get_headers(environ, scope)
        return WSGIResponse(self.get_body(environ, scope), self.code, headers)

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = t.cast("WSGIResponse", self.get_response(environ))
        return response(environ, start_response)

    def __str__(self) -> str:
        code = self.code if self.code is not None else "???"
        return f"{code} {self.name}: {self.description}"

    def __repr__(self) -> str:
        code = self.code if self.code is not None else "???"
        return f"<{type(self).__name__} '{code}: {self.name}'>"
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raised when the client sent something the application or server is
    unable to handle.
    """

    #: HTTP status code.
    code = 400
    #: Default response body text.
    description = (
        "The browser (or proxy) sent a request that this server could "
        "not understand."
    )
class BadRequestKeyError(BadRequest, KeyError):
    """An exception that is used to signal both a :exc:`KeyError` and a
    :exc:`BadRequest`. Used by many of the datastructures.
    """
    # Backing storage for the ``description`` property below.
    _description = BadRequest.description
    #: Show the KeyError along with the HTTP error message in the
    #: response. This should be disabled in production, but can be
    #: useful in a debug mode.
    show_exception = False
    def __init__(self, arg: str | None = None, *args: t.Any, **kwargs: t.Any):
        # Initialize the HTTPException side via the MRO, then initialize
        # KeyError explicitly so ``KeyError.__str__`` has its usual args.
        super().__init__(*args, **kwargs)
        if arg is None:
            KeyError.__init__(self)
        else:
            KeyError.__init__(self, arg)
    @property  # type: ignore
    def description(self) -> str:
        # Append the missing-key message only when debugging is enabled.
        if self.show_exception:
            return (
                f"{self._description}\n"
                f"{KeyError.__name__}: {KeyError.__str__(self)}"
            )
        return self._description
    @description.setter
    def description(self, value: str) -> None:
        self._description = value
class ClientDisconnected(BadRequest):
    """Internal exception raised when Werkzeug detects that the client
    disconnected. Since the peer is already gone, trying to deliver the
    error message may fail and in turn raise inside the server; mainly
    for that reason this error is silenced by default as far as Werkzeug
    is concerned.

    WSGI leaves disconnect detection largely unspecified and it cannot be
    done reliably, so this may or may not be raised for a vanished client.

    .. versionadded:: 0.8
    """
class SecurityError(BadRequest):
    """Raised when something triggers a security error. Otherwise behaves
    exactly like a bad request error.

    .. versionadded:: 0.9
    """
class BadHost(BadRequest):
    """Raised when the submitted host header is badly formatted.

    .. versionadded:: 0.11.2
    """
class Unauthorized(HTTPException):
    """*401* ``Unauthorized``

    Raised when the user must authenticate to access the resource.

    The ``www_authenticate`` argument populates the ``WWW-Authenticate``
    header, used by HTTP basic auth and other schemes. Build correctly
    formatted values with :class:`~werkzeug.datastructures.WWWAuthenticate`.
    Strictly speaking, a 401 response without at least one value for this
    header is invalid, although real clients typically don't care.

    :param description: Override the default message used for the body
        of the response.
    :param www-authenticate: A single value, or list of values, for the
        WWW-Authenticate header(s).

    .. versionchanged:: 2.0
        Serialize multiple ``www_authenticate`` items into multiple
        ``WWW-Authenticate`` headers, rather than joining them
        into a single value, for better interoperability.

    .. versionchanged:: 0.15.3
        If the ``www_authenticate`` argument is not set, the
        ``WWW-Authenticate`` header is not set.

    .. versionchanged:: 0.15.3
        The ``response`` argument was restored.

    .. versionchanged:: 0.15.1
        ``description`` was moved back as the first argument, restoring
        its previous position.

    .. versionchanged:: 0.15.0
        ``www_authenticate`` was added as the first argument, ahead of
        ``description``.
    """

    #: HTTP status code.
    code = 401
    #: Default response body text.
    description = (
        "The server could not verify that you are authorized to access"
        " the URL requested. You either supplied the wrong credentials"
        " (e.g. a bad password), or your browser doesn't understand"
        " how to supply the credentials required."
    )

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        www_authenticate: None | (WWWAuthenticate | t.Iterable[WWWAuthenticate]) = None,
    ) -> None:
        super().__init__(description, response)

        from .datastructures import WWWAuthenticate

        # Normalize a single value to a one-element tuple so the header
        # loop below can treat both shapes uniformly.
        if isinstance(www_authenticate, WWWAuthenticate):
            www_authenticate = (www_authenticate,)

        self.www_authenticate = www_authenticate

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict | None = None,
    ) -> list[tuple[str, str]]:
        result = super().get_headers(environ, scope)

        # Emit one WWW-Authenticate header per value.
        if self.www_authenticate:
            for value in self.www_authenticate:
                result.append(("WWW-Authenticate", str(value)))

        return result
class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raised when the user was authenticated but lacks permission for the
    requested resource.
    """

    #: HTTP status code.
    code = 403
    #: Default response body text.
    description = (
        "You don't have the permission to access the requested"
        " resource. It is either read-protected or not readable by the"
        " server."
    )
class NotFound(HTTPException):
    """*404* `Not Found`

    Raised when a resource does not exist and never existed.
    """

    #: HTTP status code.
    code = 404
    #: Default response body text.
    description = (
        "The requested URL was not found on the server. If you entered"
        " the URL manually please check your spelling and try again."
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raised when the resource does not support the request method, for
    example ``POST`` on a view-only resource. Especially useful for REST.

    Pass the list of allowed methods as the first argument; without it
    the response is, strictly speaking, invalid, since the ``Allow``
    header cannot be populated.
    """

    #: HTTP status code.
    code = 405
    #: Default response body text.
    description = "The method is not allowed for the requested URL."

    def __init__(
        self,
        valid_methods: t.Iterable[str] | None = None,
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        """Takes an optional list of valid http methods
        starting with werkzeug 0.3 the list will be mandatory."""
        super().__init__(description=description, response=response)
        self.valid_methods = valid_methods

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict | None = None,
    ) -> list[tuple[str, str]]:
        result = super().get_headers(environ, scope)

        # Advertise the supported methods via the Allow header.
        if self.valid_methods:
            result.append(("Allow", ", ".join(self.valid_methods)))

        return result
class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raised when the server cannot produce any content matching the
    client's `Accept` headers.
    """

    #: HTTP status code.
    code = 406
    #: Default response body text.
    description = (
        "The resource identified by the request is only capable of"
        " generating response entities which have content"
        " characteristics not acceptable according to the accept"
        " headers sent in the request."
    )
class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raised to signal that the request timed out.
    """

    #: HTTP status code.
    code = 408
    #: Default response body text.
    description = (
        "The server closed the network connection because the browser"
        " didn't finish the request within the specified time."
    )
class Conflict(HTTPException):
    """*409* `Conflict`

    Raised to signal that the request cannot be completed because it
    conflicts with the current state on the server.

    .. versionadded:: 0.7
    """

    #: HTTP status code.
    code = 409
    #: Default response body text.
    description = (
        "A conflict happened while processing the request. The"
        " resource might have been modified while the request was being"
        " processed."
    )
class Gone(HTTPException):
    """*410* `Gone`

    Raised when a resource previously existed but was removed without a
    new location.
    """

    #: HTTP status code.
    code = 410
    #: Default response body text.
    description = (
        "The requested URL is no longer available on this server and"
        " there is no forwarding address. If you followed a link from a"
        " foreign page, please contact the author of this page."
    )
class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raised when the browser submitted data without the ``Content-Length``
    header that this kind of processing requires.
    """

    #: HTTP status code.
    code = 411
    #: Default response body text.
    description = (
        "A request with this method requires a valid <code>Content-"
        "Length</code> header."
    )
class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Used with conditional requests carrying ``If-Match``,
    ``If-None-Match``, or ``If-Unmodified-Since``.
    """

    #: HTTP status code.
    code = 412
    #: Default response body text.
    description = (
        "The precondition on the request for the URL failed positive evaluation."
    )
class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    Returned when the submitted data exceeded a configured limit.
    """

    #: HTTP status code.
    code = 413
    #: Default response body text.
    description = "The data value transmitted exceeds the capacity limit."
class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413*, but for overly long URLs.
    """

    #: HTTP status code.
    code = 414
    #: Default response body text.
    description = (
        "The length of the requested URL exceeds the capacity limit for"
        " this server. The request cannot be processed."
    )
class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    Returned when the server cannot handle the media type the client
    transmitted.
    """

    #: HTTP status code.
    code = 415
    #: Default response body text.
    description = (
        "The server does not support the media type transmitted in the request."
    )
class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    Raised when the client asked for an invalid part of the file.

    .. versionadded:: 0.7
    """

    #: HTTP status code.
    code = 416
    #: Default response body text.
    description = "The server cannot provide the requested range."

    def __init__(
        self,
        length: int | None = None,
        units: str = "bytes",
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        """Takes an optional `Content-Range` header value based on ``length``
        parameter.
        """
        super().__init__(description=description, response=response)
        self.length = length
        self.units = units

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict | None = None,
    ) -> list[tuple[str, str]]:
        result = super().get_headers(environ, scope)

        # Report the actual resource size, e.g. "bytes */1234".
        if self.length is not None:
            result.append(("Content-Range", f"{self.units} */{self.length}"))

        return result
class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    Raised when the server cannot meet the requirements of the Expect
    request-header.

    .. versionadded:: 0.7
    """

    #: HTTP status code.
    code = 417
    #: Default response body text.
    description = "The server could not meet the requirements of the Expect header"
class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    Returned when the server is a teapot and someone attempted to brew
    coffee with it.

    .. versionadded:: 0.7
    """

    #: HTTP status code.
    code = 418
    #: Default response body text.
    description = "This server is a teapot, not a coffee machine"
class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`

    Used when the request is well formed but its instructions are
    otherwise incorrect.
    """

    #: HTTP status code.
    code = 422
    #: Default response body text.
    description = (
        "The request was well-formed but was unable to be followed due"
        " to semantic errors."
    )
class Locked(HTTPException):
    """*423* `Locked`

    Used when the resource being accessed is locked.
    """

    #: HTTP status code.
    code = 423
    #: Default response body text.
    description = "The resource that is being accessed is locked."
class FailedDependency(HTTPException):
    """*424* `Failed Dependency`

    Used when the method could not be performed because the requested
    action depended on another action and that action failed.
    """

    #: HTTP status code.
    code = 424
    #: Default response body text.
    description = (
        "The method could not be performed on the resource because the"
        " requested action depended on another action and that action"
        " failed."
    )
class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`

    The server requires the request to be conditional, typically to avoid
    the lost-update problem: a race between two or more clients updating a
    resource via PUT or DELETE. By requiring each client to include a
    conditional header ("If-Match" or "If-Unmodified-Since") with the
    proper value retained from a recent GET request, the server ensures
    each client has at least seen the previous revision of the resource.
    """

    #: HTTP status code.
    code = 428
    #: Default response body text.
    description = (
        "This request is required to be conditional; try using"
        ' "If-Match" or "If-Unmodified-Since".'
    )
class _RetryAfter(HTTPException):
    """Mixin base adding an optional ``retry_after`` parameter that sets
    the ``Retry-After`` header. May be an :class:`int` number of seconds
    or a :class:`~datetime.datetime`.
    """

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        retry_after: datetime | int | None = None,
    ) -> None:
        super().__init__(description, response)
        self.retry_after = retry_after

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict | None = None,
    ) -> list[tuple[str, str]]:
        result = super().get_headers(environ, scope)

        if not self.retry_after:
            return result

        # A datetime becomes an HTTP date; an int is emitted as seconds.
        if isinstance(self.retry_after, datetime):
            from .http import http_date

            value = http_date(self.retry_after)
        else:
            value = str(self.retry_after)

        result.append(("Retry-After", value))
        return result
class TooManyRequests(_RetryAfter):
    """*429* `Too Many Requests`

    The server is rate-limiting this user and the request exceeds the
    allowed rate. (The server may use any convenient method to identify
    users and their request rates). A "Retry-After" header may indicate
    how long the user should wait before retrying.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    #: HTTP status code.
    code = 429
    #: Default response body text.
    description = "This user has exceeded an allotted request count. Try again later."
class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`

    The server refuses to process the request because its header fields
    are too large — individually or taken together.
    """

    #: HTTP status code.
    code = 431
    #: Default response body text.
    description = "One or more header fields exceeds the maximum size."
class UnavailableForLegalReasons(HTTPException):
    """*451* `Unavailable For Legal Reasons`

    Indicates that the server is denying access to the resource as a
    consequence of a legal demand.
    """

    #: HTTP status code.
    code = 451
    #: Default response body text.
    description = "Unavailable for legal reasons."
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raised when an internal server error occurred. A good fallback for
    unknown errors in the dispatcher.

    .. versionchanged:: 1.0.0
        Added the :attr:`original_exception` attribute.
    """

    #: HTTP status code.
    code = 500
    #: Default response body text.
    description = (
        "The server encountered an internal error and was unable to"
        " complete your request. Either the server is overloaded or"
        " there is an error in the application."
    )

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        original_exception: BaseException | None = None,
    ) -> None:
        super().__init__(description=description, response=response)

        #: The original exception that caused this 500 error. Can be
        #: used by frameworks to provide context when handling
        #: unexpected errors.
        self.original_exception = original_exception
# Deliberately shadows the ``NotImplemented`` builtin constant; the name
# matches the HTTP status phrase and is kept for backward compatibility.
class NotImplemented(HTTPException):
    """*501* `Not Implemented`

    Raised when the application does not support the action requested by
    the browser.
    """

    #: HTTP status code.
    code = 501
    #: Default response body text.
    description = "The server does not support the action requested by the browser."
class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    If your application does proxying, return this status code when an
    upstream server accessed while fulfilling the request produced an
    invalid response.
    """

    #: HTTP status code.
    code = 502
    #: Default response body text.
    description = (
        "The proxy server received an invalid response from an upstream server."
    )
class ServiceUnavailable(_RetryAfter):
    """*503* `Service Unavailable`

    Return this status code when a service is temporarily unavailable.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    #: HTTP status code.
    code = 503
    #: Default response body text.
    description = (
        "The server is temporarily unable to service your request due"
        " to maintenance downtime or capacity problems. Please try"
        " again later."
    )
class GatewayTimeout(HTTPException):
    """*504* `Gateway Timeout`

    Return this status code when a connection to an upstream server
    times out.
    """

    #: HTTP status code.
    code = 504
    #: Default response body text.
    description = "The connection to an upstream server timed out."
class HTTPVersionNotSupported(HTTPException):
    """*505* `HTTP Version Not Supported`

    The server does not support the HTTP protocol version used in the
    request.
    """

    #: HTTP status code.
    code = 505
    #: Default response body text.
    description = (
        "The server does not support the HTTP protocol version used in the request."
    )
#: Mapping of status code to the default exception class for that code.
default_exceptions: dict[int, type[HTTPException]] = {}


def _find_exceptions() -> None:
    """Populate ``default_exceptions`` with every ``HTTPException``
    subclass in this module that defines a status code, preferring a
    more specific class already registered for the same code.
    """
    for value in globals().values():
        # Skip non-classes and classes outside the HTTPException tree.
        if not isinstance(value, type) or not issubclass(value, HTTPException):
            continue

        if value.code is None:
            continue

        registered = default_exceptions.get(value.code, None)

        # Keep an already-registered subclass over its parent.
        if registered is not None and issubclass(value, registered):
            continue

        default_exceptions[value.code] = value


_find_exceptions()
del _find_exceptions
class Aborter:
    """A callable that raises HTTP exceptions from a code -> exception
    mapping. Called with an integer, the matching exception class from
    the mapping is raised; called with a WSGI application (response), it
    is raised wrapped in a proxy exception. Any further arguments are
    forwarded to the exception constructor.
    """

    def __init__(
        self,
        mapping: dict[int, type[HTTPException]] | None = None,
        extra: dict[int, type[HTTPException]] | None = None,
    ) -> None:
        # Copy so later mutations never affect the shared default table.
        base = default_exceptions if mapping is None else mapping
        self.mapping = dict(base)

        if extra is not None:
            self.mapping.update(extra)

    def __call__(
        self, code: int | Response, *args: t.Any, **kwargs: t.Any
    ) -> t.NoReturn:
        from .sansio.response import Response

        # A response object is raised directly, wrapped in the base class.
        if isinstance(code, Response):
            raise HTTPException(response=code)

        if code not in self.mapping:
            raise LookupError(f"no exception for {code!r}")

        raise self.mapping[code](*args, **kwargs)
def abort(status: int | Response, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
    """Raise an :py:exc:`HTTPException` for the given status code or WSGI
    application.

    An integer status is looked up in the table of known exceptions and
    the matching one is raised. A WSGI application is wrapped in a proxy
    WSGI exception and raised::

        abort(404)  # 404 Not Found
        abort(Response('Hello World'))
    """
    _aborter(status, *args, **kwargs)


_aborter: Aborter = Aborter()

View file

@ -0,0 +1,547 @@
from __future__ import annotations
import typing as t
import warnings
from io import BytesIO
from urllib.parse import parse_qsl
from ._internal import _plain_int
from .datastructures import FileStorage
from .datastructures import Headers
from .datastructures import MultiDict
from .exceptions import RequestEntityTooLarge
from .http import parse_options_header
from .sansio.multipart import Data
from .sansio.multipart import Epilogue
from .sansio.multipart import Field
from .sansio.multipart import File
from .sansio.multipart import MultipartDecoder
from .sansio.multipart import NeedData
from .wsgi import get_content_length
from .wsgi import get_input_stream
# there are some platforms where SpooledTemporaryFile is not available.
# In that case we need to provide a fallback.
try:
from tempfile import SpooledTemporaryFile
except ImportError:
from tempfile import TemporaryFile
SpooledTemporaryFile = None # type: ignore
if t.TYPE_CHECKING:
import typing as te
from _typeshed.wsgi import WSGIEnvironment
t_parse_result = t.Tuple[t.IO[bytes], MultiDict, MultiDict]
class TStreamFactory(te.Protocol):
def __call__(
self,
total_content_length: int | None,
content_type: str | None,
filename: str | None,
content_length: int | None = None,
) -> t.IO[bytes]:
...
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def default_stream_factory(
    total_content_length: int | None,
    content_type: str | None,
    filename: str | None,
    content_length: int | None = None,
) -> t.IO[bytes]:
    """Return a writable binary stream for buffering uploaded data.

    Prefers a :class:`~tempfile.SpooledTemporaryFile`, which stays in
    memory until it grows past ~500 kB. On platforms without it, small
    bodies of known size use :class:`~io.BytesIO` and anything larger (or
    of unknown size) goes to a plain temporary file.
    """
    spool_limit = 1024 * 500

    if SpooledTemporaryFile is not None:
        return t.cast(
            t.IO[bytes], SpooledTemporaryFile(max_size=spool_limit, mode="rb+")
        )

    if total_content_length is None or total_content_length > spool_limit:
        return t.cast(t.IO[bytes], TemporaryFile("rb+"))

    return BytesIO()
def parse_form_data(
    environ: WSGIEnvironment,
    stream_factory: TStreamFactory | None = None,
    charset: str | None = None,
    errors: str | None = None,
    max_form_memory_size: int | None = None,
    max_content_length: int | None = None,
    cls: type[MultiDict] | None = None,
    silent: bool = True,
    *,
    max_form_parts: int | None = None,
) -> t_parse_result:
    """Parse the form data in the environ and return it as a
    ``(stream, form, files)`` tuple. Only call this when the transport
    method is `POST`, `PUT`, or `PATCH`.

    For `multipart/form-data` bodies the files multidict is filled with
    `FileStorage` objects. For unknown mimetypes the wrapped input stream
    is returned as the first element; otherwise that stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`Response._get_file_stream`.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :param max_form_parts: The maximum number of multipart parts to be parsed. If this
        is exceeded, a :exc:`~exceptions.RequestEntityTooLarge` exception is raised.
    :return: A tuple in the form ``(stream, form, files)``.

    .. versionchanged:: 2.3
        Added the ``max_form_parts`` parameter.

    .. versionchanged:: 2.3
        The ``charset`` and ``errors`` parameters are deprecated and will be removed in
        Werkzeug 3.0.

    .. versionadded:: 0.5.1
        Added the ``silent`` parameter.

    .. versionadded:: 0.5
        Added the ``max_form_memory_size``, ``max_content_length``, and ``cls``
        parameters.
    """
    parser = FormDataParser(
        stream_factory=stream_factory,
        charset=charset,
        errors=errors,
        max_form_memory_size=max_form_memory_size,
        max_content_length=max_content_length,
        max_form_parts=max_form_parts,
        silent=silent,
        cls=cls,
    )
    return parser.parse_from_environ(environ)
class FormDataParser:
    """This class implements parsing of form data for Werkzeug. By itself
    it can parse multipart and url encoded form data. It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    :param stream_factory: An optional callable that returns a new read and
        writeable file descriptor. This callable works the same as
        :meth:`Response._get_file_stream`.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
        in-memory stored form data. If the data exceeds the value specified
        an :exc:`~exceptions.RequestEntityTooLarge` exception is raised.
    :param max_content_length: If this is provided and the transmitted data
        is longer than this value an
        :exc:`~exceptions.RequestEntityTooLarge` exception is raised.
    :param cls: an optional dict class to use. If this is not specified
        or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :param max_form_parts: The maximum number of multipart parts to be parsed. If this
        is exceeded, a :exc:`~exceptions.RequestEntityTooLarge` exception is raised.

    .. versionchanged:: 2.3
        The ``charset`` and ``errors`` parameters are deprecated and will be removed in
        Werkzeug 3.0.

    .. versionchanged:: 2.3
        The ``parse_functions`` attribute and ``get_parse_func`` methods are deprecated
        and will be removed in Werkzeug 3.0.

    .. versionchanged:: 2.2.3
        Added the ``max_form_parts`` parameter.

    .. versionadded:: 0.8
    """

    def __init__(
        self,
        stream_factory: TStreamFactory | None = None,
        charset: str | None = None,
        errors: str | None = None,
        max_form_memory_size: int | None = None,
        max_content_length: int | None = None,
        cls: type[MultiDict] | None = None,
        silent: bool = True,
        *,
        max_form_parts: int | None = None,
    ) -> None:
        # Default factory spools request bodies to a stream created by
        # ``default_stream_factory`` when the caller doesn't supply one.
        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory

        # Passing an explicit charset is deprecated; "utf-8" is the default.
        if charset is not None:
            warnings.warn(
                "The 'charset' parameter is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            charset = "utf-8"

        self.charset = charset

        # Passing explicit errors is deprecated; "replace" is the default.
        if errors is not None:
            warnings.warn(
                "The 'errors' parameter is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            errors = "replace"

        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        self.max_form_parts = max_form_parts

        if cls is None:
            cls = MultiDict

        self.cls = cls
        self.silent = silent

    def get_parse_func(
        self, mimetype: str, options: dict[str, str]
    ) -> None | (
        t.Callable[
            [FormDataParser, t.IO[bytes], str, int | None, dict[str, str]],
            t_parse_result,
        ]
    ):
        """Return the unbound parse method for the given mimetype, or
        ``None`` if the mimetype is not handled.

        .. deprecated:: 2.3
            Will be removed in Werkzeug 3.0. ``parse`` dispatches
            directly instead.
        """
        warnings.warn(
            "The 'get_parse_func' method is deprecated and will be"
            " removed in Werkzeug 3.0.",
            DeprecationWarning,
            stacklevel=2,
        )

        if mimetype == "multipart/form-data":
            return type(self)._parse_multipart
        elif mimetype == "application/x-www-form-urlencoded":
            return type(self)._parse_urlencoded
        elif mimetype == "application/x-url-encoded":
            # Historical misspelling of the urlencoded mimetype; still
            # accepted here but scheduled for removal.
            warnings.warn(
                "The 'application/x-url-encoded' mimetype is invalid, and will not be"
                " treated as 'application/x-www-form-urlencoded' in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
            return type(self)._parse_urlencoded
        elif mimetype in self.parse_functions:
            warnings.warn(
                "The 'parse_functions' attribute is deprecated and will be removed in"
                " Werkzeug 3.0. Override 'parse' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            return self.parse_functions[mimetype]

        return None

    def parse_from_environ(self, environ: WSGIEnvironment) -> t_parse_result:
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        # ``get_input_stream`` applies the max_content_length limit while
        # wrapping the raw WSGI input.
        stream = get_input_stream(environ, max_content_length=self.max_content_length)
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(environ.get("CONTENT_TYPE"))
        return self.parse(
            stream,
            content_length=content_length,
            mimetype=mimetype,
            options=options,
        )

    def parse(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str] | None = None,
    ) -> t_parse_result:
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.

        .. versionchanged:: 2.3
            The ``application/x-url-encoded`` content type is deprecated and will not be
            treated as ``application/x-www-form-urlencoded`` in Werkzeug 3.0.
        """
        if mimetype == "multipart/form-data":
            parse_func = self._parse_multipart
        elif mimetype == "application/x-www-form-urlencoded":
            parse_func = self._parse_urlencoded
        elif mimetype == "application/x-url-encoded":
            warnings.warn(
                "The 'application/x-url-encoded' mimetype is invalid, and will not be"
                " treated as 'application/x-www-form-urlencoded' in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
            parse_func = self._parse_urlencoded
        elif mimetype in self.parse_functions:
            warnings.warn(
                "The 'parse_functions' attribute is deprecated and will be removed in"
                " Werkzeug 3.0. Override 'parse' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            # Entries in parse_functions are unbound; bind to this instance.
            parse_func = self.parse_functions[mimetype].__get__(self, type(self))
        else:
            # Unknown mimetype: return the untouched stream and empty
            # form/files containers.
            return stream, self.cls(), self.cls()

        if options is None:
            options = {}

        try:
            return parse_func(stream, mimetype, content_length, options)
        except ValueError:
            # With silent=True (the default) a parse error yields empty
            # results rather than propagating.
            if not self.silent:
                raise

        return stream, self.cls(), self.cls()

    def _parse_multipart(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str],
    ) -> t_parse_result:
        # Only forward non-default charset/errors so MultiPartParser does
        # not emit its own deprecation warnings for the defaults.
        charset = self.charset if self.charset != "utf-8" else None
        errors = self.errors if self.errors != "replace" else None
        parser = MultiPartParser(
            stream_factory=self.stream_factory,
            charset=charset,
            errors=errors,
            max_form_memory_size=self.max_form_memory_size,
            max_form_parts=self.max_form_parts,
            cls=self.cls,
        )
        # The multipart boundary comes from the Content-Type parameters
        # and must be present.
        boundary = options.get("boundary", "").encode("ascii")

        if not boundary:
            raise ValueError("Missing boundary")

        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    def _parse_urlencoded(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str],
    ) -> t_parse_result:
        # Reject oversized bodies before reading them into memory.
        if (
            self.max_form_memory_size is not None
            and content_length is not None
            and content_length > self.max_form_memory_size
        ):
            raise RequestEntityTooLarge()

        try:
            items = parse_qsl(
                stream.read().decode(),
                keep_blank_values=True,
                encoding=self.charset,
                # Custom codec error handler registered elsewhere in
                # Werkzeug — presumably quotes invalid bytes; confirm in
                # the urls module.
                errors="werkzeug.url_quote",
            )
        except ValueError as e:
            raise RequestEntityTooLarge() from e

        return stream, self.cls(items), self.cls()

    # Deprecated extension point mapping mimetype -> unbound parse method.
    # Checked by ``parse``/``get_parse_func``; override ``parse`` instead.
    parse_functions: dict[
        str,
        t.Callable[
            [FormDataParser, t.IO[bytes], str, int | None, dict[str, str]],
            t_parse_result,
        ],
    ] = {}
class MultiPartParser:
    """Parse ``multipart/form-data`` bodies into ``(form, files)``
    multidicts using the sans-io :class:`MultipartDecoder` events.
    """

    def __init__(
        self,
        stream_factory: TStreamFactory | None = None,
        charset: str | None = None,
        errors: str | None = None,
        max_form_memory_size: int | None = None,
        cls: type[MultiDict] | None = None,
        buffer_size: int = 64 * 1024,
        max_form_parts: int | None = None,
    ) -> None:
        # Passing an explicit charset is deprecated; "utf-8" is the default.
        if charset is not None:
            warnings.warn(
                "The 'charset' parameter is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            charset = "utf-8"

        self.charset = charset

        # Passing explicit errors is deprecated; "replace" is the default.
        if errors is not None:
            warnings.warn(
                "The 'errors' parameter is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            errors = "replace"

        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_form_parts = max_form_parts

        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory

        if cls is None:
            cls = MultiDict

        self.cls = cls
        # Number of bytes read from the input stream per iteration.
        self.buffer_size = buffer_size

    def fail(self, message: str) -> te.NoReturn:
        """Abort parsing by raising a :exc:`ValueError` with ``message``."""
        raise ValueError(message)

    def get_part_charset(self, headers: Headers) -> str:
        """Return the charset to decode a part's data with, taken from
        the part's own ``Content-Type`` header if it names a safe
        encoding, otherwise this parser's default charset.
        """
        # Figure out input charset for current part
        content_type = headers.get("content-type")

        if content_type:
            parameters = parse_options_header(content_type)[1]
            ct_charset = parameters.get("charset", "").lower()

            # A safe list of encodings. Modern clients should only send ASCII or UTF-8.
            # This list will not be extended further.
            if ct_charset in {"ascii", "us-ascii", "utf-8", "iso-8859-1"}:
                return ct_charset

        return self.charset

    def start_file_streaming(
        self, event: File, total_content_length: int | None
    ) -> t.IO[bytes]:
        """Create the writable stream an uploaded file part is spooled
        into, via ``stream_factory``.
        """
        content_type = event.headers.get("content-type")

        try:
            content_length = _plain_int(event.headers["content-length"])
        except (KeyError, ValueError):
            # Missing or malformed per-part Content-Length; report zero.
            content_length = 0

        container = self.stream_factory(
            total_content_length=total_content_length,
            filename=event.filename,
            content_type=content_type,
            content_length=content_length,
        )
        return container

    def parse(
        self, stream: t.IO[bytes], boundary: bytes, content_length: int | None
    ) -> tuple[MultiDict, MultiDict]:
        """Read ``stream`` and return ``(form_fields, files)`` multidicts.

        :param stream: binary input stream containing the multipart body.
        :param boundary: the multipart boundary from the Content-Type header.
        :param content_length: total body length, passed on to the
            stream factory for file parts.
        :raises ValueError: on malformed multipart data (via the decoder).
        """
        # State for the part currently being assembled. These are bound
        # when a Field/File event arrives; the loop assumes the decoder
        # emits a Field or File event before that part's Data events.
        current_part: Field | File
        container: t.IO[bytes] | list[bytes]
        _write: t.Callable[[bytes], t.Any]

        parser = MultipartDecoder(
            boundary,
            max_form_memory_size=self.max_form_memory_size,
            max_parts=self.max_form_parts,
        )

        fields = []
        files = []

        for data in _chunk_iter(stream.read, self.buffer_size):
            parser.receive_data(data)
            event = parser.next_event()
            while not isinstance(event, (Epilogue, NeedData)):
                if isinstance(event, Field):
                    # Text field: buffer chunks in memory.
                    current_part = event
                    container = []
                    _write = container.append
                elif isinstance(event, File):
                    # File upload: spool chunks into a stream.
                    current_part = event
                    container = self.start_file_streaming(event, content_length)
                    _write = container.write
                elif isinstance(event, Data):
                    _write(event.data)
                    if not event.more_data:
                        # Part complete: finalize as a decoded string or
                        # a FileStorage wrapping the rewound stream.
                        if isinstance(current_part, Field):
                            value = b"".join(container).decode(
                                self.get_part_charset(current_part.headers), self.errors
                            )
                            fields.append((current_part.name, value))
                        else:
                            container = t.cast(t.IO[bytes], container)
                            container.seek(0)
                            files.append(
                                (
                                    current_part.name,
                                    FileStorage(
                                        container,
                                        current_part.filename,
                                        current_part.name,
                                        headers=current_part.headers,
                                    ),
                                )
                            )

                event = parser.next_event()

        return self.cls(fields), self.cls(files)
def _chunk_iter(read: t.Callable[[int], bytes], size: int) -> t.Iterator[bytes | None]:
"""Read data in chunks for multipart/form-data parsing. Stop if no data is read.
Yield ``None`` at the end to signal end of parsing.
"""
while True:
data = read(size)
if not data:
break
yield data
yield None

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,643 @@
from __future__ import annotations
import copy
import math
import operator
import typing as t
from contextvars import ContextVar
from functools import partial
from functools import update_wrapper
from operator import attrgetter
from .wsgi import ClosingIterator
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
T = t.TypeVar("T")
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def release_local(local: Local | LocalStack) -> None:
    """Clear the data stored for the current context in a :class:`Local`
    or :class:`LocalStack`, without going through a
    :class:`LocalManager`.

    This should not be needed for modern use cases, and may be removed
    in the future.

    .. versionadded:: 0.6.1
    """
    local.__release_local__()
class Local:
    """A namespace whose attributes are stored per context, backed by a
    single :class:`ContextVar` holding a :class:`dict`.

    Compared to one context var per attribute this may be slower, since
    the dict is copied on every write to keep nested contexts isolated.

    :param context_var: The :class:`~contextvars.ContextVar` to use as
        storage for this local. If not given, one will be created.
        Context vars not created at the global scope may interfere with
        garbage collection.

    .. versionchanged:: 2.0
        Uses ``ContextVar`` instead of a custom storage implementation.
    """

    __slots__ = ("__storage",)

    def __init__(self, context_var: ContextVar[dict[str, t.Any]] | None = None) -> None:
        if context_var is None:
            # A ContextVar not created at global scope interferes with
            # Python's garbage collection. However, a local only makes
            # sense defined at the global scope as well, in which case
            # the GC issue doesn't seem relevant.
            context_var = ContextVar(f"werkzeug.Local<{id(self)}>.storage")

        # Must bypass our own __setattr__, which writes into the
        # namespace; "_Local__storage" is the mangled __slots__ name.
        object.__setattr__(self, "_Local__storage", context_var)

    def __iter__(self) -> t.Iterator[tuple[str, t.Any]]:
        # Iterate (name, value) pairs stored for the current context.
        return iter(self.__storage.get({}).items())

    def __call__(self, name: str, *, unbound_message: str | None = None) -> LocalProxy:
        """Create a :class:`LocalProxy` that accesses an attribute on
        this local namespace.

        :param name: Proxy this attribute.
        :param unbound_message: The error message that the proxy will
            show if the attribute isn't set.
        """
        return LocalProxy(self, name, unbound_message=unbound_message)

    def __release_local__(self) -> None:
        # Reset this namespace for the current context only.
        self.__storage.set({})

    def __getattr__(self, name: str) -> t.Any:
        try:
            return self.__storage.get({})[name]
        except KeyError:
            raise AttributeError(name) from None

    def __setattr__(self, name: str, value: t.Any) -> None:
        # Copy-on-write: never mutate a dict another context may share.
        self.__storage.set({**self.__storage.get({}), name: value})

    def __delattr__(self, name: str) -> None:
        values = self.__storage.get({})

        if name not in values:
            raise AttributeError(name)

        values = values.copy()
        del values[name]
        self.__storage.set(values)
class LocalStack(t.Generic[T]):
    """A stack of context-local data, backed by a single
    :class:`ContextVar` holding a :class:`list`.

    Compared to individual context vars this may be slower, since the
    list is copied on every push/pop to keep nested contexts isolated.

    :param context_var: The :class:`~contextvars.ContextVar` to use as
        storage for this local. If not given, one will be created.
        Context vars not created at the global scope may interfere with
        garbage collection.

    .. versionchanged:: 2.0
        Uses ``ContextVar`` instead of a custom storage implementation.

    .. versionadded:: 0.6.1
    """

    __slots__ = ("_storage",)

    def __init__(self, context_var: ContextVar[list[T]] | None = None) -> None:
        if context_var is None:
            # A ContextVar not created at global scope interferes with
            # Python's garbage collection. However, a local only makes
            # sense defined at the global scope as well, in which case
            # the GC issue doesn't seem relevant.
            context_var = ContextVar(f"werkzeug.LocalStack<{id(self)}>.storage")

        self._storage = context_var

    def __release_local__(self) -> None:
        # Reset the stack for the current context only.
        self._storage.set([])

    def push(self, obj: T) -> list[T]:
        """Add a new item to the top of the stack."""
        # Copy-on-write: build a fresh list rather than mutating one a
        # parent context may share.
        new_stack = [*self._storage.get([]), obj]
        self._storage.set(new_stack)
        return new_stack

    def pop(self) -> T | None:
        """Remove the top item from the stack and return it. If the
        stack is empty, return ``None``.
        """
        stack = self._storage.get([])

        if not stack:
            return None

        *remainder, top_item = stack
        self._storage.set(remainder)
        return top_item

    @property
    def top(self) -> T | None:
        """The topmost item on the stack. If the stack is empty,
        `None` is returned.
        """
        stack = self._storage.get([])
        return stack[-1] if stack else None

    def __call__(
        self, name: str | None = None, *, unbound_message: str | None = None
    ) -> LocalProxy:
        """Create a :class:`LocalProxy` that accesses the top of this
        local stack.

        :param name: If given, the proxy access this attribute of the
            top item, rather than the item itself.
        :param unbound_message: The error message that the proxy will
            show if the stack is empty.
        """
        return LocalProxy(self, name, unbound_message=unbound_message)
class LocalManager:
    """Manage releasing the data for the current context in one or more
    :class:`Local` and :class:`LocalStack` objects.

    This should not be needed for modern use cases, and may be removed
    in the future.

    :param locals: A local or list of locals to manage.

    .. versionchanged:: 2.1
        The ``ident_func`` was removed.

    .. versionchanged:: 0.7
        The ``ident_func`` parameter was added.

    .. versionchanged:: 0.6.1
        The :func:`release_local` function can be used instead of a
        manager.
    """

    __slots__ = ("locals",)

    def __init__(
        self,
        locals: None | (Local | LocalStack | t.Iterable[Local | LocalStack]) = None,
    ) -> None:
        # Normalize to a list: nothing, a single local, or an iterable.
        if locals is None:
            managed = []
        elif isinstance(locals, Local):
            managed = [locals]
        else:
            managed = list(locals)  # type: ignore[arg-type]

        self.locals = managed

    def cleanup(self) -> None:
        """Release the data in the locals for this context. Call this at
        the end of each request or use :meth:`make_middleware`.
        """
        for managed in self.locals:
            release_local(managed)

    def make_middleware(self, app: WSGIApplication) -> WSGIApplication:
        """Wrap a WSGI application so that local data is released
        automatically after the response has been sent for a request.
        """

        def application(
            environ: WSGIEnvironment, start_response: StartResponse
        ) -> t.Iterable[bytes]:
            # cleanup runs when the response iterable is closed.
            return ClosingIterator(app(environ, start_response), self.cleanup)

        return application

    def middleware(self, func: WSGIApplication) -> WSGIApplication:
        """Like :meth:`make_middleware` but used as a decorator on the
        WSGI application function.

        .. code-block:: python

            @manager.middleware
            def application(environ, start_response):
                ...
        """
        return update_wrapper(self.make_middleware(func), func)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} storages: {len(self.locals)}>"
class _ProxyLookup:
    """Descriptor that handles proxied attribute lookup for
    :class:`LocalProxy`.

    :param f: The built-in function this attribute is accessed through.
        Instead of looking up the special method, the function call
        is redone on the object.
    :param fallback: Return this function if the proxy is unbound
        instead of raising a :exc:`RuntimeError`.
    :param is_attr: This proxied name is an attribute, not a function.
        Call the fallback immediately to get the value.
    :param class_value: Value to return when accessed from the
        ``LocalProxy`` class directly. Used for ``__doc__`` so building
        docs still works.
    """

    __slots__ = ("bind_f", "fallback", "is_attr", "class_value", "name")

    def __init__(
        self,
        f: t.Callable | None = None,
        fallback: t.Callable | None = None,
        class_value: t.Any | None = None,
        is_attr: bool = False,
    ) -> None:
        # bind_f(instance, obj) produces a callable with ``obj`` bound as
        # the first argument; how depends on what kind of callable f is.
        bind_f: t.Callable[[LocalProxy, t.Any], t.Callable] | None

        if hasattr(f, "__get__"):
            # A Python function, can be turned into a bound method.

            def bind_f(instance: LocalProxy, obj: t.Any) -> t.Callable:
                return f.__get__(obj, type(obj))  # type: ignore

        elif f is not None:
            # A C function, use partial to bind the first argument.

            def bind_f(instance: LocalProxy, obj: t.Any) -> t.Callable:
                return partial(f, obj)

        else:
            # Use getattr, which will produce a bound method.
            bind_f = None

        self.bind_f = bind_f
        self.fallback = fallback
        self.class_value = class_value
        self.is_attr = is_attr

    def __set_name__(self, owner: LocalProxy, name: str) -> None:
        # Remember the attribute name this descriptor is assigned to, for
        # the getattr fall-through in __get__ and for __repr__.
        self.name = name

    def __get__(self, instance: LocalProxy, owner: type | None = None) -> t.Any:
        # Accessed on the class itself (not an instance).
        if instance is None:
            if self.class_value is not None:
                return self.class_value

            return self

        try:
            obj = instance._get_current_object()
        except RuntimeError:
            # Proxy is unbound: use the fallback if one was given.
            if self.fallback is None:
                raise

            fallback = self.fallback.__get__(instance, owner)

            if self.is_attr:
                # __class__ and __doc__ are attributes, not methods.
                # Call the fallback to get the value.
                return fallback()

            return fallback

        if self.bind_f is not None:
            return self.bind_f(instance, obj)

        return getattr(obj, self.name)

    def __repr__(self) -> str:
        return f"proxy {self.name}"

    def __call__(self, instance: LocalProxy, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Support calling unbound methods from the class. For example,
        this happens with ``copy.copy``, which does
        ``type(x).__copy__(x)``. ``type(x)`` can't be proxied, so it
        returns the proxy type and descriptor.
        """
        return self.__get__(instance, type(instance))(*args, **kwargs)
class _ProxyIOp(_ProxyLookup):
    """Look up an augmented assignment method on a proxied object. The
    wrapped method applies the operation to the bound object, then
    returns the proxy itself, as the in-place operator protocol expects.
    """

    __slots__ = ()

    def __init__(
        self, f: t.Callable | None = None, fallback: t.Callable | None = None
    ) -> None:
        super().__init__(f, fallback)

        # Replace the binder installed by the base class: an i-op must
        # return the proxy, not the raw result of the operation.
        def _bind(instance: LocalProxy, obj: t.Any) -> t.Callable:
            def i_op(self: t.Any, other: t.Any) -> LocalProxy:
                f(self, other)  # type: ignore
                return instance

            return i_op.__get__(obj, type(obj))  # type: ignore

        self.bind_f = _bind
def _l_to_r_op(op: F) -> F:
"""Swap the argument order to turn an l-op into an r-op."""
def r_op(obj: t.Any, other: t.Any) -> t.Any:
return op(other, obj)
return t.cast(F, r_op)
def _identity(o: T) -> T:
return o
class LocalProxy(t.Generic[T]):
    """A proxy to the object bound to a context-local object. All
    operations on the proxy are forwarded to the bound object. If no
    object is bound, a ``RuntimeError`` is raised.

    :param local: The context-local object that provides the proxied
        object.
    :param name: Proxy this attribute from the proxied object.
    :param unbound_message: The error message to show if the
        context-local object is unbound.

    Proxy a :class:`~contextvars.ContextVar` to make it easier to
    access. Pass a name to proxy that attribute.

    .. code-block:: python

        _request_var = ContextVar("request")
        request = LocalProxy(_request_var)
        session = LocalProxy(_request_var, "session")

    Proxy an attribute on a :class:`Local` namespace by calling the
    local with the attribute name:

    .. code-block:: python

        data = Local()
        user = data("user")

    Proxy the top item on a :class:`LocalStack` by calling the local.
    Pass a name to proxy that attribute.

    .. code-block::

        app_stack = LocalStack()
        current_app = app_stack()
        g = app_stack("g")

    Pass a function to proxy the return value from that function. This
    was previously used to access attributes of local objects before
    that was supported directly.

    .. code-block:: python

        session = LocalProxy(lambda: request.session)

    ``__repr__`` and ``__class__`` are proxied, so ``repr(x)`` and
    ``isinstance(x, cls)`` will look like the proxied object. Use
    ``issubclass(type(x), LocalProxy)`` to check if an object is a
    proxy.

    .. code-block:: python

        repr(user)  # <User admin>
        isinstance(user, User)  # True
        issubclass(type(user), LocalProxy)  # True

    .. versionchanged:: 2.2.2
        ``__wrapped__`` is set when wrapping an object, not only when
        wrapping a function, to prevent doctest from failing.

    .. versionchanged:: 2.2
        Can proxy a ``ContextVar`` or ``LocalStack`` directly.

    .. versionchanged:: 2.2
        The ``name`` parameter can be used with any proxied object, not
        only ``Local``.

    .. versionchanged:: 2.2
        Added the ``unbound_message`` parameter.

    .. versionchanged:: 2.0
        Updated proxied attributes and methods to reflect the current
        data model.

    .. versionchanged:: 0.6.1
        The class can be instantiated with a callable.
    """

    __slots__ = ("__wrapped", "_get_current_object")

    _get_current_object: t.Callable[[], T]
    """Return the current object this proxy is bound to. If the proxy is
    unbound, this raises a ``RuntimeError``.

    This should be used if you need to pass the object to something that
    doesn't understand the proxy. It can also be useful for performance
    if you are accessing the object multiple times in a function, rather
    than going through the proxy multiple times.
    """

    def __init__(
        self,
        local: ContextVar[T] | Local | LocalStack[T] | t.Callable[[], T],
        name: str | None = None,
        *,
        unbound_message: str | None = None,
    ) -> None:
        # get_name maps the resolved object to the proxied value: either
        # the object itself, or one of its attributes when ``name`` is set.
        if name is None:
            get_name = _identity
        else:
            get_name = attrgetter(name)  # type: ignore[assignment]

        if unbound_message is None:
            unbound_message = "object is not bound"

        # Build _get_current_object for the specific kind of source.
        if isinstance(local, Local):
            if name is None:
                raise TypeError("'name' is required when proxying a 'Local' object.")

            def _get_current_object() -> T:
                try:
                    return get_name(local)  # type: ignore[return-value]
                except AttributeError:
                    # Attribute not set in this context -> unbound.
                    raise RuntimeError(unbound_message) from None

        elif isinstance(local, LocalStack):

            def _get_current_object() -> T:
                obj = local.top  # type: ignore[union-attr]

                if obj is None:
                    # Empty stack -> unbound.
                    raise RuntimeError(unbound_message)

                return get_name(obj)

        elif isinstance(local, ContextVar):

            def _get_current_object() -> T:
                try:
                    obj = local.get()  # type: ignore[union-attr]
                except LookupError:
                    # Context var has no value in this context -> unbound.
                    raise RuntimeError(unbound_message) from None

                return get_name(obj)

        elif callable(local):

            def _get_current_object() -> T:
                return get_name(local())  # type: ignore

        else:
            raise TypeError(f"Don't know how to proxy '{type(local)}'.")

        # Bypass the proxied __setattr__; "_LocalProxy__wrapped" is the
        # mangled __slots__ name for __wrapped.
        object.__setattr__(self, "_LocalProxy__wrapped", local)
        object.__setattr__(self, "_get_current_object", _get_current_object)

    # Every special method below is forwarded to the bound object via a
    # _ProxyLookup descriptor (or _ProxyIOp for in-place operators).
    __doc__ = _ProxyLookup(  # type: ignore
        class_value=__doc__, fallback=lambda self: type(self).__doc__, is_attr=True
    )
    __wrapped__ = _ProxyLookup(
        fallback=lambda self: self._LocalProxy__wrapped, is_attr=True
    )
    # __del__ should only delete the proxy
    __repr__ = _ProxyLookup(  # type: ignore
        repr, fallback=lambda self: f"<{type(self).__name__} unbound>"
    )
    __str__ = _ProxyLookup(str)  # type: ignore
    __bytes__ = _ProxyLookup(bytes)
    __format__ = _ProxyLookup()  # type: ignore
    __lt__ = _ProxyLookup(operator.lt)
    __le__ = _ProxyLookup(operator.le)
    __eq__ = _ProxyLookup(operator.eq)  # type: ignore
    __ne__ = _ProxyLookup(operator.ne)  # type: ignore
    __gt__ = _ProxyLookup(operator.gt)
    __ge__ = _ProxyLookup(operator.ge)
    __hash__ = _ProxyLookup(hash)  # type: ignore
    __bool__ = _ProxyLookup(bool, fallback=lambda self: False)
    __getattr__ = _ProxyLookup(getattr)
    # __getattribute__ triggered through __getattr__
    __setattr__ = _ProxyLookup(setattr)  # type: ignore
    __delattr__ = _ProxyLookup(delattr)  # type: ignore
    __dir__ = _ProxyLookup(dir, fallback=lambda self: [])  # type: ignore
    # __get__ (proxying descriptor not supported)
    # __set__ (descriptor)
    # __delete__ (descriptor)
    # __set_name__ (descriptor)
    # __objclass__ (descriptor)
    # __slots__ used by proxy itself
    # __dict__ (__getattr__)
    # __weakref__ (__getattr__)
    # __init_subclass__ (proxying metaclass not supported)
    # __prepare__ (metaclass)
    __class__ = _ProxyLookup(
        fallback=lambda self: type(self), is_attr=True
    )  # type: ignore
    __instancecheck__ = _ProxyLookup(lambda self, other: isinstance(other, self))
    __subclasscheck__ = _ProxyLookup(lambda self, other: issubclass(other, self))
    # __class_getitem__ triggered through __getitem__
    __call__ = _ProxyLookup(lambda self, *args, **kwargs: self(*args, **kwargs))
    __len__ = _ProxyLookup(len)
    __length_hint__ = _ProxyLookup(operator.length_hint)
    __getitem__ = _ProxyLookup(operator.getitem)
    __setitem__ = _ProxyLookup(operator.setitem)
    __delitem__ = _ProxyLookup(operator.delitem)
    # __missing__ triggered through __getitem__
    __iter__ = _ProxyLookup(iter)
    __next__ = _ProxyLookup(next)
    __reversed__ = _ProxyLookup(reversed)
    __contains__ = _ProxyLookup(operator.contains)
    __add__ = _ProxyLookup(operator.add)
    __sub__ = _ProxyLookup(operator.sub)
    __mul__ = _ProxyLookup(operator.mul)
    __matmul__ = _ProxyLookup(operator.matmul)
    __truediv__ = _ProxyLookup(operator.truediv)
    __floordiv__ = _ProxyLookup(operator.floordiv)
    __mod__ = _ProxyLookup(operator.mod)
    __divmod__ = _ProxyLookup(divmod)
    __pow__ = _ProxyLookup(pow)
    __lshift__ = _ProxyLookup(operator.lshift)
    __rshift__ = _ProxyLookup(operator.rshift)
    __and__ = _ProxyLookup(operator.and_)
    __xor__ = _ProxyLookup(operator.xor)
    __or__ = _ProxyLookup(operator.or_)
    # Reflected operators reuse the forward ones with swapped arguments.
    __radd__ = _ProxyLookup(_l_to_r_op(operator.add))
    __rsub__ = _ProxyLookup(_l_to_r_op(operator.sub))
    __rmul__ = _ProxyLookup(_l_to_r_op(operator.mul))
    __rmatmul__ = _ProxyLookup(_l_to_r_op(operator.matmul))
    __rtruediv__ = _ProxyLookup(_l_to_r_op(operator.truediv))
    __rfloordiv__ = _ProxyLookup(_l_to_r_op(operator.floordiv))
    __rmod__ = _ProxyLookup(_l_to_r_op(operator.mod))
    __rdivmod__ = _ProxyLookup(_l_to_r_op(divmod))
    __rpow__ = _ProxyLookup(_l_to_r_op(pow))
    __rlshift__ = _ProxyLookup(_l_to_r_op(operator.lshift))
    __rrshift__ = _ProxyLookup(_l_to_r_op(operator.rshift))
    __rand__ = _ProxyLookup(_l_to_r_op(operator.and_))
    __rxor__ = _ProxyLookup(_l_to_r_op(operator.xor))
    __ror__ = _ProxyLookup(_l_to_r_op(operator.or_))
    # In-place operators return the proxy itself (_ProxyIOp).
    __iadd__ = _ProxyIOp(operator.iadd)
    __isub__ = _ProxyIOp(operator.isub)
    __imul__ = _ProxyIOp(operator.imul)
    __imatmul__ = _ProxyIOp(operator.imatmul)
    __itruediv__ = _ProxyIOp(operator.itruediv)
    __ifloordiv__ = _ProxyIOp(operator.ifloordiv)
    __imod__ = _ProxyIOp(operator.imod)
    __ipow__ = _ProxyIOp(operator.ipow)
    __ilshift__ = _ProxyIOp(operator.ilshift)
    __irshift__ = _ProxyIOp(operator.irshift)
    __iand__ = _ProxyIOp(operator.iand)
    __ixor__ = _ProxyIOp(operator.ixor)
    __ior__ = _ProxyIOp(operator.ior)
    __neg__ = _ProxyLookup(operator.neg)
    __pos__ = _ProxyLookup(operator.pos)
    __abs__ = _ProxyLookup(abs)
    __invert__ = _ProxyLookup(operator.invert)
    __complex__ = _ProxyLookup(complex)
    __int__ = _ProxyLookup(int)
    __float__ = _ProxyLookup(float)
    __index__ = _ProxyLookup(operator.index)
    __round__ = _ProxyLookup(round)
    __trunc__ = _ProxyLookup(math.trunc)
    __floor__ = _ProxyLookup(math.floor)
    __ceil__ = _ProxyLookup(math.ceil)
    __enter__ = _ProxyLookup()
    __exit__ = _ProxyLookup()
    __await__ = _ProxyLookup()
    __aiter__ = _ProxyLookup()
    __anext__ = _ProxyLookup()
    __aenter__ = _ProxyLookup()
    __aexit__ = _ProxyLookup()
    __copy__ = _ProxyLookup(copy.copy)
    __deepcopy__ = _ProxyLookup(copy.deepcopy)
    # __getnewargs_ex__ (pickle through proxy not supported)
    # __getnewargs__ (pickle)
    # __getstate__ (pickle)
    # __setstate__ (pickle)
    # __reduce__ (pickle)
    # __reduce_ex__ (pickle)

View file

@ -0,0 +1,80 @@
"""
Application Dispatcher
======================
This middleware creates a single WSGI application that dispatches to
multiple other WSGI applications mounted at different URL paths.
A common example is writing a Single Page Application, where you have a
backend API and a frontend written in JavaScript that does the routing
in the browser rather than requesting different pages from the server.
The frontend is a single HTML and JS file that should be served for any
path besides "/api".
This example dispatches to an API app under "/api", an admin app
under "/admin", and an app that serves frontend files for all other
requests::
app = DispatcherMiddleware(serve_frontend, {
'/api': api_app,
'/admin': admin_app,
})
In production, you might instead handle this at the HTTP server level,
serving files or proxying to application servers based on location. The
API and admin apps would each be deployed with a separate WSGI server,
and the static files would be served directly by the HTTP server.
.. autoclass:: DispatcherMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import annotations
import typing as t
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class DispatcherMiddleware:
    """Combine multiple applications as a single WSGI application.
    Requests are dispatched to an application based on the path it is
    mounted under.

    :param app: The WSGI application to dispatch to if the request
        doesn't match a mounted path.
    :param mounts: Maps path prefixes to applications for dispatching.
    """

    def __init__(
        self,
        app: WSGIApplication,
        mounts: dict[str, WSGIApplication] | None = None,
    ) -> None:
        self.app = app
        self.mounts = mounts or {}

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        # Peel segments off the right end of PATH_INFO until the
        # remaining prefix matches a mount; the removed segments become
        # the dispatched app's PATH_INFO.
        prefix = environ.get("PATH_INFO", "")
        remainder = ""
        matched: WSGIApplication | None = None

        while "/" in prefix:
            if prefix in self.mounts:
                matched = self.mounts[prefix]
                break

            prefix, segment = prefix.rsplit("/", 1)
            remainder = f"/{segment}{remainder}"

        if matched is None:
            # No prefix matched during splitting; try an exact match on
            # what's left, falling back to the default app.
            matched = self.mounts.get(prefix, self.app)

        environ["SCRIPT_NAME"] = environ.get("SCRIPT_NAME", "") + prefix
        environ["PATH_INFO"] = remainder
        return matched(environ, start_response)

View file

@ -0,0 +1,235 @@
"""
Basic HTTP Proxy
================
.. autoclass:: ProxyMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import annotations
import typing as t
from http import client
from urllib.parse import quote
from urllib.parse import urlsplit
from ..datastructures import EnvironHeaders
from ..http import is_hop_by_hop_header
from ..wsgi import get_input_stream
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class ProxyMiddleware:
    """Proxy requests under a path to an external server, routing other
    requests to the app.
    This middleware can only proxy HTTP requests, as HTTP is the only
    protocol handled by the WSGI server. Other protocols, such as
    WebSocket requests, cannot be proxied at this layer. This should
    only be used for development, in production a real proxy server
    should be used.
    The middleware takes a dict mapping a path prefix to a dict
    describing the host to be proxied to::
        app = ProxyMiddleware(app, {
            "/static/": {
                "target": "http://127.0.0.1:5001/",
            }
        })
    Each host has the following options:
    ``target``:
        The target URL to dispatch to. This is required.
    ``remove_prefix``:
        Whether to remove the prefix from the URL before dispatching it
        to the target. The default is ``False``.
    ``host``:
        ``"<auto>"`` (default):
            The host header is automatically rewritten to the URL of the
            target.
        ``None``:
            The host header is unmodified from the client request.
        Any other value:
            The host header is overwritten with the value.
    ``headers``:
        A dictionary of headers to be sent with the request to the
        target. The default is ``{}``.
    ``ssl_context``:
        A :class:`ssl.SSLContext` defining how to verify requests if the
        target is HTTPS. The default is ``None``.
    In the example above, everything under ``"/static/"`` is proxied to
    the server on port 5001. The host header is rewritten to the target,
    and the ``"/static/"`` prefix is removed from the URLs.
    :param app: The WSGI application to wrap.
    :param targets: Proxy target configurations. See description above.
    :param chunk_size: Size of chunks to read from input stream and
        write to target.
    :param timeout: Seconds before an operation to a target fails.
    .. versionadded:: 0.14
    """

    def __init__(
        self,
        app: WSGIApplication,
        targets: t.Mapping[str, dict[str, t.Any]],
        chunk_size: int = 2 << 13,
        timeout: int = 10,
    ) -> None:
        # Fill in the optional per-target keys so the rest of the code
        # can read them unconditionally.
        def _set_defaults(opts: dict[str, t.Any]) -> dict[str, t.Any]:
            opts.setdefault("remove_prefix", False)
            opts.setdefault("host", "<auto>")
            opts.setdefault("headers", {})
            opts.setdefault("ssl_context", None)
            return opts

        self.app = app
        # Normalize every prefix to the canonical "/prefix/" form so the
        # matching in __call__ is uniform regardless of how keys were given.
        self.targets = {
            f"/{k.strip('/')}/": _set_defaults(v) for k, v in targets.items()
        }
        self.chunk_size = chunk_size
        self.timeout = timeout

    def proxy_to(
        self, opts: dict[str, t.Any], path: str, prefix: str
    ) -> WSGIApplication:
        """Return a WSGI app that forwards one matched request to the
        target described by ``opts``.

        :param opts: Normalized target options (see ``_set_defaults``).
        :param path: The request's ``PATH_INFO``.
        :param prefix: The matched, normalized path prefix.
        """
        target = urlsplit(opts["target"])
        # socket can handle unicode host, but header must be ascii
        host = target.hostname.encode("idna").decode("ascii")

        def application(
            environ: WSGIEnvironment, start_response: StartResponse
        ) -> t.Iterable[bytes]:
            # Copy the incoming headers, dropping hop-by-hop headers and
            # Content-Length/Host, which are re-derived below.
            headers = list(EnvironHeaders(environ).items())
            headers[:] = [
                (k, v)
                for k, v in headers
                if not is_hop_by_hop_header(k)
                and k.lower() not in ("content-length", "host")
            ]
            headers.append(("Connection", "close"))
            # Apply the configured Host-header policy.
            if opts["host"] == "<auto>":
                headers.append(("Host", host))
            elif opts["host"] is None:
                headers.append(("Host", environ["HTTP_HOST"]))
            else:
                headers.append(("Host", opts["host"]))
            headers.extend(opts["headers"].items())
            # Build the path sent upstream, optionally stripping the
            # matched prefix, then joining with the target's base path.
            remote_path = path
            if opts["remove_prefix"]:
                remote_path = remote_path[len(prefix) :].lstrip("/")
                remote_path = f"{target.path.rstrip('/')}/{remote_path}"
            # Decide request-body framing: forward a known Content-Length
            # as-is; an empty (but present) value means the length is
            # unknown, so fall back to chunked transfer encoding.
            content_length = environ.get("CONTENT_LENGTH")
            chunked = False
            if content_length not in ("", None):
                headers.append(("Content-Length", content_length))  # type: ignore
            elif content_length is not None:
                headers.append(("Transfer-Encoding", "chunked"))
                chunked = True
            try:
                if target.scheme == "http":
                    con = client.HTTPConnection(
                        host, target.port or 80, timeout=self.timeout
                    )
                elif target.scheme == "https":
                    con = client.HTTPSConnection(
                        host,
                        target.port or 443,
                        timeout=self.timeout,
                        context=opts["ssl_context"],
                    )
                else:
                    raise RuntimeError(
                        "Target scheme must be 'http' or 'https', got"
                        f" {target.scheme!r}."
                    )
                con.connect()
                # safe = https://url.spec.whatwg.org/#url-path-segment-string
                # as well as percent for things that are already quoted
                remote_url = quote(remote_path, safe="!$&'()*+,/:;=@%")
                querystring = environ["QUERY_STRING"]
                if querystring:
                    remote_url = f"{remote_url}?{querystring}"
                con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)
                for k, v in headers:
                    if k.lower() == "connection":
                        v = "close"
                    con.putheader(k, v)
                con.endheaders()
                # Stream the request body upstream, framing each chunk
                # with a hex size line when using chunked encoding.
                stream = get_input_stream(environ)
                while True:
                    data = stream.read(self.chunk_size)
                    if not data:
                        break
                    if chunked:
                        con.send(b"%x\r\n%s\r\n" % (len(data), data))
                    else:
                        con.send(data)
                resp = con.getresponse()
            except OSError:
                # Any socket-level failure talking to the target becomes
                # a 502 Bad Gateway for the client.
                from ..exceptions import BadGateway

                return BadGateway()(environ, start_response)
            # Relay status and headers, again dropping hop-by-hop headers.
            start_response(
                f"{resp.status} {resp.reason}",
                [
                    (k.title(), v)
                    for k, v in resp.getheaders()
                    if not is_hop_by_hop_header(k)
                ],
            )

            # Lazily stream the response body back to the client; a read
            # error mid-stream simply truncates the body.
            def read() -> t.Iterator[bytes]:
                while True:
                    try:
                        data = resp.read(self.chunk_size)
                    except OSError:
                        break
                    if not data:
                        break
                    yield data

            return read()

        return application

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        # Dispatch to the first target whose prefix matches; otherwise
        # fall through to the wrapped application.
        path = environ["PATH_INFO"]
        app = self.app
        for prefix, opts in self.targets.items():
            if path.startswith(prefix):
                app = self.proxy_to(opts, path, prefix)
                break
        return app(environ, start_response)

View file

@ -0,0 +1,420 @@
"""
WSGI Protocol Linter
====================
This module provides a middleware that performs sanity checks on the
behavior of the WSGI server and application. It checks that the
:pep:`3333` WSGI spec is properly implemented. It also warns on some
common HTTP errors such as non-empty responses for 304 status codes.
.. autoclass:: LintMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import annotations
import typing as t
from types import TracebackType
from urllib.parse import urlparse
from warnings import warn
from ..datastructures import Headers
from ..http import is_entity_header
from ..wsgi import FileWrapper
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class WSGIWarning(Warning):
    """Emitted when the WSGI server or application deviates from the
    :pep:`3333` specification.
    """
class HTTPWarning(Warning):
    """Emitted when a response violates HTTP conventions (for example a
    body on a 304 response).
    """
def check_type(context: str, obj: object, need: type = str) -> None:
    """Warn with a :class:`WSGIWarning` if *obj* is not exactly of type
    *need*.

    An exact ``type(...) is`` comparison is used rather than
    ``isinstance`` because WSGI requires native types, not subclasses.

    :param context: Description of the value, used in the warning text.
    :param obj: The object whose type is checked.
    :param need: The exact type the object must have.
    """
    # ``type`` replaces the deprecated ``t.Type`` alias, matching the
    # modern annotation style used elsewhere in this file.
    if type(obj) is not need:
        warn(
            f"{context!r} requires {need.__name__!r}, got {type(obj).__name__!r}.",
            WSGIWarning,
            stacklevel=3,
        )
class InputStream:
    """Wrap ``wsgi.input`` and warn about unsafe or non-portable usage
    before delegating to the real stream.
    """

    def __init__(self, stream: t.IO[bytes]) -> None:
        self._stream = stream

    def read(self, *args: t.Any) -> bytes:
        """Read from the stream, warning on unsafe argument counts."""
        arg_count = len(args)

        if not arg_count:
            warn(
                "WSGI does not guarantee an EOF marker on the input stream, thus making"
                " calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
                " return from this call.",
                WSGIWarning,
                stacklevel=2,
            )
        elif arg_count > 1:
            warn(
                "Too many parameters passed to 'wsgi.input.read()'.",
                WSGIWarning,
                stacklevel=2,
            )

        return self._stream.read(*args)

    def readline(self, *args: t.Any) -> bytes:
        """Read one line, warning about unsafe or unsupported calls."""
        arg_count = len(args)

        if arg_count == 0:
            warn(
                "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
                " 'wsgi.input.read()' instead.",
                WSGIWarning,
                stacklevel=2,
            )
        elif arg_count == 1:
            warn(
                "'wsgi.input.readline()' was called with a size hint. WSGI does not"
                " support this, although it's available on all major servers.",
                WSGIWarning,
                stacklevel=2,
            )
        else:
            raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")

        return self._stream.readline(*args)

    def __iter__(self) -> t.Iterator[bytes]:
        """Iterate the wrapped stream, warning if it is not iterable."""
        try:
            return iter(self._stream)
        except TypeError:
            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
            return iter(())

    def close(self) -> None:
        """Warn: applications must not close the input stream."""
        warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
        self._stream.close()
class ErrorStream:
    """Wrap ``wsgi.errors`` to validate what the application writes to
    it and to warn if the application closes it.
    """

    def __init__(self, stream: t.IO[str]) -> None:
        self._stream = stream

    def write(self, s: str) -> None:
        """Write *s* after checking that it is a native ``str``."""
        check_type("wsgi.error.write()", s, str)
        self._stream.write(s)

    def flush(self) -> None:
        """Flush the wrapped stream."""
        self._stream.flush()

    def writelines(self, seq: t.Iterable[str]) -> None:
        """Write each item of *seq* through the checked :meth:`write`."""
        for item in seq:
            self.write(item)

    def close(self) -> None:
        """Warn: applications must not close the error stream."""
        warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
        self._stream.close()
class GuardedWrite:
    """Wrap the ``write`` callable returned by ``start_response`` so
    that each chunk is type-checked and its size recorded in *chunks*.
    """

    def __init__(self, write: t.Callable[[bytes], object], chunks: list[int]) -> None:
        self._write = write
        self._chunks = chunks

    def __call__(self, s: bytes) -> None:
        """Validate *s* is ``bytes``, forward it, and record its length."""
        check_type("write()", s, bytes)
        self._write(s)
        self._chunks.append(len(s))
class GuardedIterator:
    """Wrap the application's response iterable to validate each chunk
    and, on close, cross-check the response against HTTP rules
    (304/1xx/204 bodies, Content-Length vs. bytes actually sent).
    """

    def __init__(
        self,
        iterator: t.Iterable[bytes],
        headers_set: tuple[int, Headers],
        chunks: list[int],
    ) -> None:
        self._iterator = iterator
        # Bind the iterator's __next__ once so iteration state is shared.
        self._next = iter(iterator).__next__
        self.closed = False
        # Filled in by checking_start_response; empty until the app
        # actually starts the response.
        self.headers_set = headers_set
        # Sizes of all chunks emitted, via iteration or write().
        self.chunks = chunks

    def __iter__(self) -> GuardedIterator:
        return self

    def __next__(self) -> bytes:
        # Iterating after close() is a spec violation worth flagging.
        if self.closed:
            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(
                "The application returned before it started the response.",
                WSGIWarning,
                stacklevel=2,
            )
        check_type("application iterator items", rv, bytes)
        self.chunks.append(len(rv))
        return rv

    def close(self) -> None:
        """Close the wrapped iterator and run the HTTP sanity checks."""
        self.closed = True

        if hasattr(self._iterator, "close"):
            self._iterator.close()

        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get("content-length", type=int)

            if status_code == 304:
                # 304 must not carry entity headers (other than the two
                # allowed here) nor a body.
                for key, _value in headers:
                    key = key.lower()
                    if key not in ("expires", "content-location") and is_entity_header(
                        key
                    ):
                        warn(
                            f"Entity header {key!r} found in 304 response.", HTTPWarning
                        )
                if bytes_sent:
                    warn("304 responses must not have a body.", HTTPWarning)
            elif 100 <= status_code < 200 or status_code == 204:
                # 1xx and 204 responses are body-less by definition.
                if content_length != 0:
                    warn(
                        f"{status_code} responses must have an empty content length.",
                        HTTPWarning,
                    )
                if bytes_sent:
                    warn(f"{status_code} responses must not have a body.", HTTPWarning)
            elif content_length is not None and content_length != bytes_sent:
                warn(
                    "Content-Length and the number of bytes sent to the"
                    " client do not match.",
                    WSGIWarning,
                )

    def __del__(self) -> None:
        # Warn if the app never closed the iterator; swallow any error
        # since raising during garbage collection is not useful.
        if not self.closed:
            try:
                warn(
                    "Iterator was garbage collected before it was closed.", WSGIWarning
                )
            except Exception:
                pass
class LintMiddleware:
    """Warns about common errors in the WSGI and HTTP behavior of the
    server and wrapped application. Some of the issues it checks are:
    - invalid status codes
    - non-bytes sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators
    Error information is emitted using the :mod:`warnings` module.
    :param app: The WSGI application to wrap.
    .. code-block:: python
        from werkzeug.middleware.lint import LintMiddleware
        app = LintMiddleware(app)
    """

    def __init__(self, app: WSGIApplication) -> None:
        self.app = app

    def check_environ(self, environ: WSGIEnvironment) -> None:
        """Warn about missing or malformed keys in the WSGI environ."""
        if type(environ) is not dict:
            warn(
                "WSGI environment is not a standard Python dict.",
                WSGIWarning,
                stacklevel=4,
            )
        # Keys required by PEP 3333 for every request.
        for key in (
            "REQUEST_METHOD",
            "SERVER_NAME",
            "SERVER_PORT",
            "wsgi.version",
            "wsgi.input",
            "wsgi.errors",
            "wsgi.multithread",
            "wsgi.multiprocess",
            "wsgi.run_once",
        ):
            if key not in environ:
                warn(
                    f"Required environment key {key!r} not found",
                    WSGIWarning,
                    stacklevel=3,
                )
        if environ["wsgi.version"] != (1, 0):
            warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)
        # Both path components, when present, must start with a slash.
        script_name = environ.get("SCRIPT_NAME", "")
        path_info = environ.get("PATH_INFO", "")
        if script_name and script_name[0] != "/":
            warn(
                f"'SCRIPT_NAME' does not start with a slash: {script_name!r}",
                WSGIWarning,
                stacklevel=3,
            )
        if path_info and path_info[0] != "/":
            warn(
                f"'PATH_INFO' does not start with a slash: {path_info!r}",
                WSGIWarning,
                stacklevel=3,
            )

    def check_start_response(
        self,
        status: str,
        headers: list[tuple[str, str]],
        exc_info: None | (tuple[type[BaseException], BaseException, TracebackType]),
    ) -> tuple[int, Headers]:
        """Validate the arguments the app passed to ``start_response``.

        :return: The parsed integer status code and the headers wrapped
            in a :class:`Headers` object, for later response checks.
        """
        check_type("status", status, str)
        # Status must look like "NNN Reason": three decimal digits,
        # a space, then an explanation.
        status_code_str = status.split(None, 1)[0]

        if len(status_code_str) != 3 or not status_code_str.isdecimal():
            warn("Status code must be three digits.", WSGIWarning, stacklevel=3)

        if len(status) < 4 or status[3] != " ":
            warn(
                f"Invalid value for status {status!r}. Valid status strings are three"
                " digits, a space and a status explanation.",
                WSGIWarning,
                stacklevel=3,
            )

        status_code = int(status_code_str)

        if status_code < 100:
            warn("Status code < 100 detected.", WSGIWarning, stacklevel=3)

        if type(headers) is not list:
            warn("Header list is not a list.", WSGIWarning, stacklevel=3)

        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn("Header items must be 2-item tuples.", WSGIWarning, stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(
                    "Header keys and values must be strings.", WSGIWarning, stacklevel=3
                )
            # A "Status" header conflicts with how CGI conveys status.
            if name.lower() == "status":
                warn(
                    "The status header is not supported due to"
                    " conflicts with the CGI spec.",
                    WSGIWarning,
                    stacklevel=3,
                )

        if exc_info is not None and not isinstance(exc_info, tuple):
            warn("Invalid value for exc_info.", WSGIWarning, stacklevel=3)

        headers = Headers(headers)
        self.check_headers(headers)

        return status_code, headers

    def check_headers(self, headers: Headers) -> None:
        """Warn about malformed ETag and relative Location headers."""
        etag = headers.get("etag")

        if etag is not None:
            if etag.startswith(("W/", "w/")):
                if etag.startswith("w/"):
                    warn(
                        "Weak etag indicator should be upper case.",
                        HTTPWarning,
                        stacklevel=4,
                    )

                etag = etag[2:]

            # The opaque-tag portion must be wrapped in double quotes.
            if not (etag[:1] == etag[-1:] == '"'):
                warn("Unquoted etag emitted.", HTTPWarning, stacklevel=4)

        location = headers.get("location")

        if location is not None:
            if not urlparse(location).netloc:
                warn(
                    "Absolute URLs required for location header.",
                    HTTPWarning,
                    stacklevel=4,
                )

    def check_iterator(self, app_iter: t.Iterable[bytes]) -> None:
        """Warn if the app returned a bare string instead of an iterable
        of byte chunks (iterating a string yields one char per chunk).
        """
        if isinstance(app_iter, str):
            warn(
                "The application returned a string. The response will send one"
                " character at a time to the client, which will kill performance."
                " Return a list or iterable instead.",
                WSGIWarning,
                stacklevel=3,
            )

    def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Iterable[bytes]:
        """Run the wrapped app with all streams and callables replaced by
        checking wrappers, returning a :class:`GuardedIterator`.
        """
        if len(args) != 2:
            warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)

        if kwargs:
            warn(
                "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
            )

        environ: WSGIEnvironment = args[0]
        start_response: StartResponse = args[1]

        self.check_environ(environ)
        environ["wsgi.input"] = InputStream(environ["wsgi.input"])
        environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])

        # Hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length.
        environ["wsgi.file_wrapper"] = FileWrapper

        # Shared mutable state: filled in by checking_start_response and
        # read by the GuardedIterator/GuardedWrite wrappers.
        headers_set: list[t.Any] = []
        chunks: list[int] = []

        def checking_start_response(
            *args: t.Any, **kwargs: t.Any
        ) -> t.Callable[[bytes], None]:
            # Validate the call signature before delegating to the real
            # start_response.
            if len(args) not in {2, 3}:
                warn(
                    f"Invalid number of arguments: {len(args)}, expected 2 or 3.",
                    WSGIWarning,
                    stacklevel=2,
                )

            if kwargs:
                warn("'start_response' does not take keyword arguments.", WSGIWarning)

            status: str = args[0]
            headers: list[tuple[str, str]] = args[1]
            exc_info: None | (
                tuple[type[BaseException], BaseException, TracebackType]
            ) = (args[2] if len(args) == 3 else None)

            headers_set[:] = self.check_start_response(status, headers, exc_info)

            return GuardedWrite(start_response(status, headers, exc_info), chunks)

        app_iter = self.app(environ, t.cast("StartResponse", checking_start_response))
        self.check_iterator(app_iter)
        return GuardedIterator(
            app_iter, t.cast(t.Tuple[int, Headers], headers_set), chunks
        )

View file

@ -0,0 +1,141 @@
"""
Application Profiler
====================
This module provides a middleware that profiles each request with the
:mod:`cProfile` module. This can help identify bottlenecks in your code
that may be slowing down your application.
.. autoclass:: ProfilerMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import annotations
import os.path
import sys
import time
import typing as t
from pstats import Stats
try:
from cProfile import Profile
except ImportError:
from profile import Profile # type: ignore
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class ProfilerMiddleware:
    """Wrap a WSGI application and profile the execution of each
    request. Responses are buffered so that timings are more exact.

    If ``stream`` is given, :class:`pstats.Stats` are written to it
    after each request. If ``profile_dir`` is given, :mod:`cProfile`
    data files are saved to that directory, one file per request.

    The filename can be customized by passing ``filename_format``. If
    it is a string, it will be formatted using :meth:`str.format` with
    the following fields available:

    - ``{method}`` - The request method; GET, POST, etc.
    - ``{path}`` - The request path or 'root' should one not exist.
    - ``{elapsed}`` - The elapsed time of the request.
    - ``{time}`` - The time of the request.

    If it is a callable, it will be called with the WSGI ``environ``
    dict and should return a filename.

    :param app: The WSGI application to wrap.
    :param stream: Write stats to this stream. Disable with ``None``.
    :param sort_by: A tuple of columns to sort stats by. See
        :meth:`pstats.Stats.sort_stats`.
    :param restrictions: A tuple of restrictions to filter stats by. See
        :meth:`pstats.Stats.print_stats`.
    :param profile_dir: Save profile data files to this directory.
    :param filename_format: Format string for profile data file names,
        or a callable returning a name. See explanation above.

    .. code-block:: python

        from werkzeug.middleware.profiler import ProfilerMiddleware
        app = ProfilerMiddleware(app)

    .. versionchanged:: 0.15
        Stats are written even if ``profile_dir`` is given, and can be
        disable by passing ``stream=None``.

    .. versionadded:: 0.15
        Added ``filename_format``.

    .. versionadded:: 0.9
        Added ``restrictions`` and ``profile_dir``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        stream: t.IO[str] | None = sys.stdout,
        sort_by: t.Iterable[str] = ("time", "calls"),
        restrictions: t.Iterable[str | int | float] = (),
        profile_dir: str | None = None,
        # Annotated as str-or-callable: the docstring documents callable
        # support and __call__ dispatches on callable() below.
        filename_format: str | t.Callable[[WSGIEnvironment], str] = (
            "{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof"
        ),
    ) -> None:
        self._app = app
        self._stream = stream
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir
        self._filename_format = filename_format

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Profile one request and return the fully buffered body."""
        response_body: list[bytes] = []

        def catching_start_response(status, headers, exc_info=None):  # type: ignore
            # Forward the response start to the real server, but collect
            # body chunks written via write() into response_body.
            start_response(status, headers, exc_info)
            return response_body.append

        def runapp() -> None:
            # Run the wrapped app and consume its whole iterable so the
            # profile covers generating the entire response.
            app_iter = self._app(
                environ, t.cast("StartResponse", catching_start_response)
            )
            response_body.extend(app_iter)

            if hasattr(app_iter, "close"):
                app_iter.close()

        profile = Profile()
        start = time.time()
        profile.runcall(runapp)
        body = b"".join(response_body)
        elapsed = time.time() - start

        if self._profile_dir is not None:
            if callable(self._filename_format):
                filename = self._filename_format(environ)
            else:
                filename = self._filename_format.format(
                    method=environ["REQUEST_METHOD"],
                    path=environ["PATH_INFO"].strip("/").replace("/", ".") or "root",
                    elapsed=elapsed * 1000.0,
                    time=time.time(),
                )
            filename = os.path.join(self._profile_dir, filename)
            profile.dump_stats(filename)

        if self._stream is not None:
            stats = Stats(profile, stream=self._stream)
            stats.sort_stats(*self._sort_by)
            print("-" * 80, file=self._stream)
            path_info = environ.get("PATH_INFO", "")
            print(f"PATH: {path_info!r}", file=self._stream)
            stats.print_stats(*self._restrictions)
            print(f"{'-' * 80}\n", file=self._stream)

        return [body]

View file

@ -0,0 +1,182 @@
"""
X-Forwarded-For Proxy Fix
=========================
This module provides a middleware that adjusts the WSGI environ based on
``X-Forwarded-`` headers that proxies in front of an application may
set.
When an application is running behind a proxy server, WSGI may see the
request as coming from that server rather than the real client. Proxies
set various headers to track where the request actually came from.
This middleware should only be used if the application is actually
behind such a proxy, and should be configured with the number of proxies
that are chained in front of it. Not all proxies set all the headers.
Since incoming headers can be faked, you must set how many proxies are
setting each header so the middleware knows what to trust.
.. autoclass:: ProxyFix
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import annotations
import typing as t
from ..http import parse_list_header
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class ProxyFix:
    """Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in
    front of the application may set.
    - ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
    - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
    - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
      ``SERVER_PORT``.
    - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
    - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.
    You must tell the middleware how many proxies set each header so it
    knows what values to trust. It is a security issue to trust values
    that came from the client rather than a proxy.
    The original values of the headers are stored in the WSGI
    environ as ``werkzeug.proxy_fix.orig``, a dict.
    :param app: The WSGI application to wrap.
    :param x_for: Number of values to trust for ``X-Forwarded-For``.
    :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
    :param x_host: Number of values to trust for ``X-Forwarded-Host``.
    :param x_port: Number of values to trust for ``X-Forwarded-Port``.
    :param x_prefix: Number of values to trust for
        ``X-Forwarded-Prefix``.
    .. code-block:: python
        from werkzeug.middleware.proxy_fix import ProxyFix
        # App is behind one proxy that sets the -For and -Host headers.
        app = ProxyFix(app, x_for=1, x_host=1)
    .. versionchanged:: 1.0
        The ``num_proxies`` argument and attribute; the ``get_remote_addr`` method; and
        the environ keys ``orig_remote_addr``, ``orig_wsgi_url_scheme``, and
        ``orig_http_host`` were removed.
    .. versionchanged:: 0.15
        All headers support multiple values. Each header is configured with a separate
        number of trusted proxies.
    .. versionchanged:: 0.15
        Original WSGI environ values are stored in the ``werkzeug.proxy_fix.orig`` dict.
    .. versionchanged:: 0.15
        Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.
    .. versionchanged:: 0.15
        ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
        ``SERVER_NAME`` and ``SERVER_PORT``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        x_for: int = 1,
        x_proto: int = 1,
        x_host: int = 0,
        x_port: int = 0,
        x_prefix: int = 0,
    ) -> None:
        self.app = app
        # Trust counts: how many trailing values of each header come
        # from proxies rather than the (untrusted) client.
        self.x_for = x_for
        self.x_proto = x_proto
        self.x_host = x_host
        self.x_port = x_port
        self.x_prefix = x_prefix

    def _get_real_value(self, trusted: int, value: str | None) -> str | None:
        """Get the real value from a list header based on the configured
        number of trusted proxies.

        :param trusted: Number of values to trust in the header.
        :param value: Comma separated list header value to parse.
        :return: The real value, or ``None`` if there are fewer values
            than the number of trusted proxies.

        .. versionchanged:: 1.0
            Renamed from ``_get_trusted_comma``.

        .. versionadded:: 0.15
        """
        if trusted and value:
            parts = parse_list_header(value)

            if len(parts) >= trusted:
                # Count back from the end: the last ``trusted`` entries
                # were appended by proxies we trust.
                return parts[-trusted]

        return None

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Modify the WSGI environ based on the various ``Forwarded``
        headers before calling the wrapped application. Store the
        original environ values in ``werkzeug.proxy_fix.orig_{key}``.
        """
        get = environ.get

        # Preserve the untrusted originals so the application can still
        # inspect them if needed.
        environ["werkzeug.proxy_fix.orig"] = {
            "REMOTE_ADDR": get("REMOTE_ADDR"),
            "wsgi.url_scheme": get("wsgi.url_scheme"),
            "HTTP_HOST": get("HTTP_HOST"),
            "SERVER_NAME": get("SERVER_NAME"),
            "SERVER_PORT": get("SERVER_PORT"),
            "SCRIPT_NAME": get("SCRIPT_NAME"),
        }

        client_addr = self._get_real_value(self.x_for, get("HTTP_X_FORWARDED_FOR"))

        if client_addr:
            environ["REMOTE_ADDR"] = client_addr

        scheme = self._get_real_value(self.x_proto, get("HTTP_X_FORWARDED_PROTO"))

        if scheme:
            environ["wsgi.url_scheme"] = scheme

        forwarded_host = self._get_real_value(self.x_host, get("HTTP_X_FORWARDED_HOST"))

        if forwarded_host:
            environ["HTTP_HOST"] = environ["SERVER_NAME"] = forwarded_host

            # "]" to check for IPv6 address without port
            if ":" in forwarded_host and not forwarded_host.endswith("]"):
                name, _, port = forwarded_host.rpartition(":")
                environ["SERVER_NAME"] = name
                environ["SERVER_PORT"] = port

        forwarded_port = self._get_real_value(self.x_port, get("HTTP_X_FORWARDED_PORT"))

        if forwarded_port:
            host = environ.get("HTTP_HOST")

            if host:
                # "]" to check for IPv6 address without port
                if ":" in host and not host.endswith("]"):
                    host = host.rpartition(":")[0]

                environ["HTTP_HOST"] = f"{host}:{forwarded_port}"

            environ["SERVER_PORT"] = forwarded_port

        prefix = self._get_real_value(self.x_prefix, get("HTTP_X_FORWARDED_PREFIX"))

        if prefix:
            environ["SCRIPT_NAME"] = prefix

        return self.app(environ, start_response)

View file

@ -0,0 +1,282 @@
"""
Serve Shared Static Files
=========================
.. autoclass:: SharedDataMiddleware
:members: is_allowed
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import annotations
import importlib.util
import mimetypes
import os
import posixpath
import typing as t
from datetime import datetime
from datetime import timezone
from io import BytesIO
from time import time
from zlib import adler32
from ..http import http_date
from ..http import is_resource_modified
from ..security import safe_join
from ..utils import get_content_type
from ..wsgi import get_path_info
from ..wsgi import wrap_file
_TOpener = t.Callable[[], t.Tuple[t.IO[bytes], datetime, int]]
_TLoader = t.Callable[[t.Optional[str]], t.Tuple[t.Optional[str], t.Optional[_TOpener]]]
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class SharedDataMiddleware:
    """A WSGI middleware which provides static content for development
    environments or simple server setups. Its usage is quite simple::
        import os
        from werkzeug.middleware.shared_data import SharedDataMiddleware
        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })
    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``. This is pretty useful during development
    because a standalone media server is not required. Files can also be
    mounted on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.
    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::
        app = SharedDataMiddleware(app, {
            '/static': ('myapplication', 'static')
        })
    This will then serve the ``static`` folder in the `myapplication`
    Python package.
    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.
    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to match the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.
    The middleware will guess the mimetype using the Python `mimetype`
    module. If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.
    :param app: the application to wrap. If you don't want to wrap an
        application you can pass it :exc:`NotFound`.
    :param exports: a list or dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    :param fallback_mimetype: The fallback mimetype for unknown files.
    .. versionchanged:: 1.0
        The default ``fallback_mimetype`` is
        ``application/octet-stream``. If a filename looks like a text
        mimetype, the ``utf-8`` charset is added to it.
    .. versionadded:: 0.6
        Added ``fallback_mimetype``.
    .. versionchanged:: 0.5
        Added ``cache_timeout``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        exports: (
            dict[str, str | tuple[str, str]]
            | t.Iterable[tuple[str, str | tuple[str, str]]]
        ),
        disallow: None = None,
        cache: bool = True,
        cache_timeout: int = 60 * 60 * 12,
        fallback_mimetype: str = "application/octet-stream",
    ) -> None:
        self.app = app
        self.exports: list[tuple[str, _TLoader]] = []
        self.cache = cache
        self.cache_timeout = cache_timeout

        if isinstance(exports, dict):
            exports = exports.items()

        # Build a loader per export: a (package, path) tuple loads from
        # package data, a string loads a single file or a directory tree.
        for key, value in exports:
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, str):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError(f"unknown def {value!r}")

            self.exports.append((key, loader))

        # A ``disallow`` pattern replaces the is_allowed method entirely.
        if disallow is not None:
            from fnmatch import fnmatch

            self.is_allowed = lambda x: not fnmatch(x, disallow)

        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename: str) -> bool:
        """Subclasses can override this method to disallow the access to
        certain files. However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename: str) -> _TOpener:
        # Lazily open the file and report its mtime and size; the open is
        # deferred until the response is actually being built.
        return lambda: (
            open(filename, "rb"),
            datetime.fromtimestamp(os.path.getmtime(filename), tz=timezone.utc),
            int(os.path.getsize(filename)),
        )

    def get_file_loader(self, filename: str) -> _TLoader:
        """Return a loader that always serves the single file *filename*."""
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package: str, package_path: str) -> _TLoader:
        """Return a loader serving files from *package_path* inside the
        importable *package*, using its resource reader.
        """
        # Resources may be in-memory (e.g. zip imports); use import time
        # as their modification time since none is available.
        load_time = datetime.now(timezone.utc)
        spec = importlib.util.find_spec(package)
        reader = spec.loader.get_resource_reader(package)  # type: ignore[union-attr]

        def loader(
            path: str | None,
        ) -> tuple[str | None, _TOpener | None]:
            if path is None:
                return None, None

            # safe_join rejects path traversal; None means disallowed.
            path = safe_join(package_path, path)

            if path is None:
                return None, None

            basename = posixpath.basename(path)

            try:
                resource = reader.open_resource(path)
            except OSError:
                return None, None

            if isinstance(resource, BytesIO):
                # In-memory resource: size comes from the buffer, mtime
                # from import time (see load_time above).
                return (
                    basename,
                    lambda: (resource, load_time, len(resource.getvalue())),
                )

            # Real file on disk: stat it for mtime and size.
            return (
                basename,
                lambda: (
                    resource,
                    datetime.fromtimestamp(
                        os.path.getmtime(resource.name), tz=timezone.utc
                    ),
                    os.path.getsize(resource.name),
                ),
            )

        return loader

    def get_directory_loader(self, directory: str) -> _TLoader:
        """Return a loader serving any file below *directory*."""

        def loader(
            path: str | None,
        ) -> tuple[str | None, _TOpener | None]:
            if path is not None:
                # safe_join rejects path traversal; None means disallowed.
                path = safe_join(directory, path)

                if path is None:
                    return None, None
            else:
                path = directory

            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)

            return None, None

        return loader

    def generate_etag(self, mtime: datetime, file_size: int, real_filename: str) -> str:
        """Build a weak content identifier from mtime, size, and an
        adler32 checksum of the filename bytes.
        """
        real_filename = os.fsencode(real_filename)
        timestamp = mtime.timestamp()
        checksum = adler32(real_filename) & 0xFFFFFFFF
        return f"wzsdm-{timestamp}-{file_size}-{checksum}"

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        path = get_path_info(environ)
        file_loader = None

        # Try each export: first an exact match on the prefix itself,
        # then a match for a file below the (slash-terminated) prefix.
        for search_path, loader in self.exports:
            if search_path == path:
                real_filename, file_loader = loader(None)

                if file_loader is not None:
                    break

            if not search_path.endswith("/"):
                search_path += "/"

            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path) :])

                if file_loader is not None:
                    break

        # No export matched (or the file is disallowed): fall through to
        # the wrapped application.
        if file_loader is None or not self.is_allowed(real_filename):  # type: ignore
            return self.app(environ, start_response)

        guessed_type = mimetypes.guess_type(real_filename)  # type: ignore
        mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8")
        f, mtime, file_size = file_loader()

        headers = [("Date", http_date())]

        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)  # type: ignore
            headers += [
                ("Etag", f'"{etag}"'),
                ("Cache-Control", f"max-age={timeout}, public"),
            ]

            # Conditional request that still matches: answer 304 with no
            # body and close the file we won't send.
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response("304 Not Modified", headers)
                return []

            headers.append(("Expires", http_date(time() + timeout)))
        else:
            headers.append(("Cache-Control", "public"))

        headers.extend(
            (
                ("Content-Type", mime_type),
                ("Content-Length", str(file_size)),
                ("Last-Modified", http_date(mtime)),
            )
        )
        start_response("200 OK", headers)
        # wrap_file uses the server's file_wrapper when available for
        # efficient transmission; it takes ownership of ``f``.
        return wrap_file(environ, f)

View file

@ -0,0 +1,133 @@
"""When it comes to combining multiple controller or view functions
(however you want to call them) you need a dispatcher. A simple way
would be applying regular expression tests on the ``PATH_INFO`` and
calling registered callback functions that return the value then.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here is a simple example that creates a URL map for an application with
two subdomains (www and kb) and some URL rules:
.. code-block:: python
m = Map([
# Static URLs
Rule('/', endpoint='static/index'),
Rule('/about', endpoint='static/about'),
Rule('/help', endpoint='static/help'),
# Knowledge Base
Subdomain('kb', [
Rule('/', endpoint='kb/index'),
Rule('/browse/', endpoint='kb/browse'),
Rule('/browse/<int:id>/', endpoint='kb/browse'),
Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
])
], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The
endpoint in the rules can be anything, for example import paths or
unique identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
.. code-block:: python
c = m.bind('example.com')
c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
c.build("kb/browse", dict())
'http://kb.example.com/browse/'
c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
c.build("static/about")
'/about'
c.build("static/index", force_external=True)
'http://www.example.com/'
c = m.bind('example.com', subdomain='kb')
c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
By default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
.. code-block:: python
c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
.. code-block:: python
c = m.bind('example.com')
c.match("/")
('static/index', {})
c.match("/about")
('static/about', {})
c = m.bind('example.com', '/', 'kb')
c.match("/")
('kb/index', {})
c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a ``NotFound`` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a ``RequestRedirect`` exception. Both are subclasses of
``HTTPException`` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible to the given
method (for example there were only rules for ``GET`` and ``HEAD`` but
routing tried to match a ``POST`` request) a ``MethodNotAllowed``
exception is raised.
"""
from .converters import AnyConverter as AnyConverter
from .converters import BaseConverter as BaseConverter
from .converters import FloatConverter as FloatConverter
from .converters import IntegerConverter as IntegerConverter
from .converters import PathConverter as PathConverter
from .converters import UnicodeConverter as UnicodeConverter
from .converters import UUIDConverter as UUIDConverter
from .converters import ValidationError as ValidationError
from .exceptions import BuildError as BuildError
from .exceptions import NoMatch as NoMatch
from .exceptions import RequestAliasRedirect as RequestAliasRedirect
from .exceptions import RequestPath as RequestPath
from .exceptions import RequestRedirect as RequestRedirect
from .exceptions import RoutingException as RoutingException
from .exceptions import WebsocketMismatch as WebsocketMismatch
from .map import Map as Map
from .map import MapAdapter as MapAdapter
from .matcher import StateMachineMatcher as StateMachineMatcher
from .rules import EndpointPrefix as EndpointPrefix
from .rules import parse_converter_args as parse_converter_args
from .rules import Rule as Rule
from .rules import RuleFactory as RuleFactory
from .rules import RuleTemplate as RuleTemplate
from .rules import RuleTemplateFactory as RuleTemplateFactory
from .rules import Subdomain as Subdomain
from .rules import Submount as Submount

View file

@ -0,0 +1,270 @@
from __future__ import annotations
import re
import typing as t
import uuid
import warnings
from urllib.parse import quote
if t.TYPE_CHECKING:
from .map import Map
class ValidationError(ValueError):
    """Validation error. If a rule converter raises this exception the
    rule does not match the current URL and the next URL is tried.
    """
class BaseConverter:
    """Base class for all converters.

    .. versionchanged:: 2.3
        ``part_isolating`` defaults to ``False`` if ``regex`` contains a ``/``.
    """

    regex = "[^/]+"
    weight = 100
    part_isolating = True

    def __init_subclass__(cls, **kwargs: t.Any) -> None:
        super().__init_subclass__(**kwargs)
        # A subclass that overrides ``regex`` without explicitly picking
        # ``part_isolating`` gets it derived from the pattern: a regex
        # that can contain "/" cannot be matched per path segment.
        overrides_regex = "regex" in cls.__dict__
        picks_isolating = "part_isolating" in cls.__dict__
        if overrides_regex and not picks_isolating:
            cls.part_isolating = "/" not in cls.regex

    def __init__(self, map: Map, *args: t.Any, **kwargs: t.Any) -> None:
        self.map = map

    def to_python(self, value: str) -> t.Any:
        """Convert the matched URL text to a Python value (identity here)."""
        return value

    def to_url(self, value: t.Any) -> str:
        """Convert a Python value into its percent-quoted URL form."""
        if not isinstance(value, (bytes, bytearray)):
            # safe = https://url.spec.whatwg.org/#url-path-segment-string
            return quote(str(value), encoding=self.map.charset, safe="!$&'()*+,/:;=@")
        warnings.warn(
            "Passing bytes as a URL value is deprecated and will not be supported"
            " in Werkzeug 3.0.",
            DeprecationWarning,
            stacklevel=7,
        )
        return quote(value, safe="!$&'()*+,/:;=@")
class UnicodeConverter(BaseConverter):
    """The default converter: accepts any string, but only within a
    single path segment, so the value can not include a slash.

    Example::

        Rule('/pages/<page>'),
        Rule('/<string(length=2):lang_code>')

    :param map: the :class:`Map`.
    :param minlength: the minimum length of the string. Must be greater
        or equal 1.
    :param maxlength: the maximum length of the string.
    :param length: the exact length of the string.
    """

    def __init__(
        self,
        map: Map,
        minlength: int = 1,
        maxlength: int | None = None,
        length: int | None = None,
    ) -> None:
        super().__init__(map)
        if length is not None:
            # An exact length overrides the min/max bounds entirely.
            quantifier = f"{{{int(length)}}}"
        else:
            upper = "" if maxlength is None else str(int(maxlength))
            quantifier = f"{{{int(minlength)},{upper}}}"
        self.regex = f"[^/]{quantifier}"
class AnyConverter(BaseConverter):
    """Matches one of the items provided. Items can either be Python
    identifiers or strings::

        Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')

    :param map: the :class:`Map`.
    :param items: this function accepts the possible items as positional
        arguments.

    .. versionchanged:: 2.2
        Value is validated when building a URL.
    """

    def __init__(self, map: Map, *items: str) -> None:
        super().__init__(map)
        self.items = set(items)
        # Non-capturing alternation over the literal (escaped) items.
        alternatives = "|".join(re.escape(item) for item in items)
        self.regex = f"(?:{alternatives})"

    def to_url(self, value: t.Any) -> str:
        if value not in self.items:
            valid_values = ", ".join(f"'{item}'" for item in sorted(self.items))
            raise ValueError(f"'{value}' is not one of {valid_values}")
        return str(value)
class PathConverter(BaseConverter):
    """Like the default :class:`UnicodeConverter`, but it also matches
    slashes. This is useful for wikis and similar applications::

        Rule('/<path:wikipage>')
        Rule('/<path:wikipage>/edit')

    :param map: the :class:`Map`.
    """

    # Must not start with a slash; non-greedy so trailing rule parts
    # (e.g. "/edit") can still match. ``part_isolating`` is disabled
    # automatically by ``BaseConverter.__init_subclass__`` because the
    # pattern contains "/".
    regex = "[^/].*?"
    weight = 200
class NumberConverter(BaseConverter):
    """Baseclass for `IntegerConverter` and `FloatConverter`.

    :internal:
    """

    weight = 50
    # Callable applied to the matched text; ``int`` here, overridden
    # with ``float`` in FloatConverter.
    num_convert: t.Callable = int

    def __init__(
        self,
        map: Map,
        fixed_digits: int = 0,
        min: int | None = None,
        max: int | None = None,
        signed: bool = False,
    ) -> None:
        if signed:
            # Widen the class-level pattern with an optional minus sign.
            # Must happen before anything reads ``self.regex``.
            self.regex = self.signed_regex
        super().__init__(map)
        self.fixed_digits = fixed_digits
        self.min = min
        self.max = max
        self.signed = signed

    def to_python(self, value: str) -> t.Any:
        """Convert matched text to a number, validating digits and bounds."""
        if self.fixed_digits and len(value) != self.fixed_digits:
            raise ValidationError()
        value = self.num_convert(value)
        if (self.min is not None and value < self.min) or (
            self.max is not None and value > self.max
        ):
            raise ValidationError()
        return value

    def to_url(self, value: t.Any) -> str:
        """Render a number for a URL, zero-padding to ``fixed_digits``."""
        value = str(self.num_convert(value))
        if self.fixed_digits:
            value = value.zfill(self.fixed_digits)
        return value

    @property
    def signed_regex(self) -> str:
        # The class-level regex with an optional leading minus sign.
        return f"-?{self.regex}"
class IntegerConverter(NumberConverter):
    """This converter only accepts integer values::

        Rule("/page/<int:page>")

    By default it only accepts unsigned, positive values. The ``signed``
    parameter will enable signed, negative values. ::

        Rule("/page/<int(signed=True):page>")

    :param map: The :class:`Map`.
    :param fixed_digits: The number of fixed digits in the URL. If you
        set this to ``4`` for example, the rule will only match if the
        URL looks like ``/0001/``. The default is variable length.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """

    # Digits only; ``signed=True`` prepends "-?" via NumberConverter.
    regex = r"\d+"
class FloatConverter(NumberConverter):
    """This converter only accepts floating point values::

        Rule("/probability/<float:probability>")

    By default it only accepts unsigned, positive values. The ``signed``
    parameter will enable signed, negative values. ::

        Rule("/offset/<float(signed=True):offset>")

    :param map: The :class:`Map`.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """

    # Requires an explicit decimal point; plain integers do not match.
    regex = r"\d+\.\d+"
    num_convert = float

    def __init__(
        self,
        map: Map,
        min: float | None = None,
        max: float | None = None,
        signed: bool = False,
    ) -> None:
        # ``fixed_digits`` is intentionally not exposed for floats.
        super().__init__(map, min=min, max=max, signed=signed)  # type: ignore
class UUIDConverter(BaseConverter):
    """This converter only accepts UUID strings::

        Rule('/object/<uuid:identifier>')

    .. versionadded:: 0.10

    :param map: the :class:`Map`.
    """

    # Canonical 8-4-4-4-12 hex-digit groups, either letter case.
    regex = (
        r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
        r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
    )

    def to_python(self, value: str) -> uuid.UUID:
        """Parse the matched text into a :class:`uuid.UUID`."""
        return uuid.UUID(value)

    def to_url(self, value: uuid.UUID) -> str:
        """Render a UUID as its canonical hyphenated string."""
        return str(value)
#: the default converter mapping for the map.
#: "default" and "string" are aliases for the same converter.
DEFAULT_CONVERTERS: t.Mapping[str, type[BaseConverter]] = {
    "default": UnicodeConverter,
    "string": UnicodeConverter,
    "any": AnyConverter,
    "path": PathConverter,
    "int": IntegerConverter,
    "float": FloatConverter,
    "uuid": UUIDConverter,
}

View file

@ -0,0 +1,148 @@
from __future__ import annotations
import difflib
import typing as t
from ..exceptions import BadRequest
from ..exceptions import HTTPException
from ..utils import cached_property
from ..utils import redirect
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIEnvironment
from .map import MapAdapter
from .rules import Rule
from ..wrappers.request import Request
from ..wrappers.response import Response
class RoutingException(Exception):
    """Special exceptions that require the application to redirect,
    notifying about missing urls, etc.

    :internal:
    """
class RequestRedirect(HTTPException, RoutingException):
    """Raised if the map requests a redirect, for example when
    `strict_slashes` is enabled and a URL that requires a trailing
    slash was requested without one.

    The attribute `new_url` contains the absolute destination url.
    """

    # 308 Permanent Redirect preserves the request method.
    code = 308

    def __init__(self, new_url: str) -> None:
        super().__init__(new_url)
        self.new_url = new_url

    def get_response(
        self,
        environ: WSGIEnvironment | Request | None = None,
        scope: dict | None = None,
    ) -> Response:
        # Parameters exist for HTTPException signature compatibility;
        # the redirect response needs neither.
        return redirect(self.new_url, self.code)
class RequestPath(RoutingException):
    """Internal exception carrying a replacement ``path_info``."""

    __slots__ = ("path_info",)

    def __init__(self, path_info: str) -> None:
        super().__init__()
        self.path_info = path_info
class RequestAliasRedirect(RoutingException):  # noqa: B903
    """This rule is an alias and wants to redirect to the canonical URL."""

    def __init__(self, matched_values: t.Mapping[str, t.Any], endpoint: str) -> None:
        super().__init__()
        # Values matched from the URL, to rebuild the canonical URL
        # for the given endpoint.
        self.matched_values = matched_values
        self.endpoint = endpoint
class BuildError(RoutingException, LookupError):
    """Raised if the build system cannot find a URL for an endpoint with the
    values provided.
    """

    def __init__(
        self,
        endpoint: str,
        values: t.Mapping[str, t.Any],
        method: str | None,
        adapter: MapAdapter | None = None,
    ) -> None:
        super().__init__(endpoint, values, method)
        self.endpoint = endpoint
        self.values = values
        self.method = method
        self.adapter = adapter

    @cached_property
    def suggested(self) -> Rule | None:
        # Lazily computed "did you mean" candidate, cached per instance.
        return self.closest_rule(self.adapter)

    def closest_rule(self, adapter: MapAdapter | None) -> Rule | None:
        """Return the rule most similar to the failed endpoint, or ``None``."""

        def _score_rule(rule: Rule) -> float:
            # Endpoint-name similarity dominates the score (weight 0.98);
            # matching arguments and HTTP method each add a small
            # tie-breaking bonus (0.01 apiece).
            return sum(
                [
                    0.98
                    * difflib.SequenceMatcher(
                        None, rule.endpoint, self.endpoint
                    ).ratio(),
                    0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
                    0.01 * bool(rule.methods and self.method in rule.methods),
                ]
            )

        if adapter and adapter.map._rules:
            return max(adapter.map._rules, key=_score_rule)
        return None

    def __str__(self) -> str:
        message = [f"Could not build url for endpoint {self.endpoint!r}"]
        if self.method:
            message.append(f" ({self.method!r})")
        if self.values:
            message.append(f" with values {sorted(self.values)!r}")
        message.append(".")
        if self.suggested:
            if self.endpoint == self.suggested.endpoint:
                # The endpoint exists: the mismatch must be in the HTTP
                # method or in missing values.
                if (
                    self.method
                    and self.suggested.methods is not None
                    and self.method not in self.suggested.methods
                ):
                    message.append(
                        " Did you mean to use methods"
                        f" {sorted(self.suggested.methods)!r}?"
                    )
                missing_values = self.suggested.arguments.union(
                    set(self.suggested.defaults or ())
                ) - set(self.values.keys())
                if missing_values:
                    message.append(
                        f" Did you forget to specify values {sorted(missing_values)!r}?"
                    )
            else:
                message.append(f" Did you mean {self.suggested.endpoint!r} instead?")
        return "".join(message)
class WebsocketMismatch(BadRequest):
    """The only matched rule is either a WebSocket and the request is
    HTTP, or the rule is HTTP and the request is a WebSocket.
    """
class NoMatch(Exception):
    """Internal signal: no rule matched.

    ``have_match_for`` holds the HTTP methods that would have matched,
    and ``websocket_mismatch`` flags a WebSocket/HTTP protocol clash.
    """

    __slots__ = ("have_match_for", "websocket_mismatch")

    def __init__(self, have_match_for: set[str], websocket_mismatch: bool) -> None:
        self.websocket_mismatch = websocket_mismatch
        self.have_match_for = have_match_for

View file

@ -0,0 +1,977 @@
from __future__ import annotations
import typing as t
import warnings
from pprint import pformat
from threading import Lock
from urllib.parse import quote
from urllib.parse import urljoin
from urllib.parse import urlunsplit
from .._internal import _get_environ
from .._internal import _wsgi_decoding_dance
from ..datastructures import ImmutableDict
from ..datastructures import MultiDict
from ..exceptions import BadHost
from ..exceptions import HTTPException
from ..exceptions import MethodNotAllowed
from ..exceptions import NotFound
from ..urls import _urlencode
from ..wsgi import get_host
from .converters import DEFAULT_CONVERTERS
from .exceptions import BuildError
from .exceptions import NoMatch
from .exceptions import RequestAliasRedirect
from .exceptions import RequestPath
from .exceptions import RequestRedirect
from .exceptions import WebsocketMismatch
from .matcher import StateMachineMatcher
from .rules import _simple_rule_re
from .rules import Rule
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
from .converters import BaseConverter
from .rules import RuleFactory
from ..wrappers.request import Request
class Map:
    """The map class stores all the URL rules and some configuration
    parameters. Some of the configuration values are only stored on the
    `Map` instance since those affect all rules, others are just defaults
    and can be overridden for each rule. Note that you have to specify all
    arguments besides the `rules` as keyword arguments!

    :param rules: sequence of url rules for this map.
    :param default_subdomain: The default subdomain for rules without a
        subdomain defined.
    :param charset: charset of the url. defaults to ``"utf-8"``
    :param strict_slashes: If a rule ends with a slash but the matched
        URL does not, redirect to the URL with a trailing slash.
    :param merge_slashes: Merge consecutive slashes when matching or
        building URLs. Matches will redirect to the normalized URL.
        Slashes in variable parts are not merged.
    :param redirect_defaults: This will redirect to the default rule if it
        wasn't visited that way. This helps to create unique URLs.
    :param converters: A dict of converters that adds additional converters
        to the list of converters. If you redefine one converter this will
        override the original one.
    :param sort_parameters: If set to `True` the url parameters are sorted.
        See `url_encode` for more details.
    :param sort_key: The sort key function for `url_encode`.
    :param encoding_errors: the error method to use for decoding
    :param host_matching: if set to `True` it enables the host matching
        feature and disables the subdomain one.  If enabled the `host`
        parameter to rules is used instead of the `subdomain` one.

    .. versionchanged:: 2.3
        The ``charset`` and ``encoding_errors`` parameters are deprecated and will be
        removed in Werkzeug 3.0.

    .. versionchanged:: 1.0
        If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules will match.

    .. versionchanged:: 1.0
        The ``merge_slashes`` parameter was added.

    .. versionchanged:: 0.7
        The ``encoding_errors`` and ``host_matching`` parameters were added.

    .. versionchanged:: 0.5
        The ``sort_parameters`` and ``sort_key`` parameters were added.
    """

    #: A dict of default converters to be used.
    default_converters = ImmutableDict(DEFAULT_CONVERTERS)

    #: The type of lock to use when updating.
    #:
    #: .. versionadded:: 1.0
    lock_class = Lock

    def __init__(
        self,
        rules: t.Iterable[RuleFactory] | None = None,
        default_subdomain: str = "",
        charset: str | None = None,
        strict_slashes: bool = True,
        merge_slashes: bool = True,
        redirect_defaults: bool = True,
        converters: t.Mapping[str, type[BaseConverter]] | None = None,
        sort_parameters: bool = False,
        sort_key: t.Callable[[t.Any], t.Any] | None = None,
        encoding_errors: str | None = None,
        host_matching: bool = False,
    ) -> None:
        self._matcher = StateMachineMatcher(merge_slashes)
        self._rules_by_endpoint: dict[str, list[Rule]] = {}
        # True while the compiled rules are stale; update() clears it.
        self._remap = True
        self._remap_lock = self.lock_class()
        self.default_subdomain = default_subdomain
        # Explicitly passing charset/encoding_errors is deprecated; a
        # None value means "use the default" without warning.
        if charset is not None:
            warnings.warn(
                "The 'charset' parameter is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            charset = "utf-8"
        self.charset = charset
        if encoding_errors is not None:
            warnings.warn(
                "The 'encoding_errors' parameter is deprecated and will be"
                " removed in Werkzeug 3.0.",
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            encoding_errors = "replace"
        self.encoding_errors = encoding_errors
        self.strict_slashes = strict_slashes
        self.merge_slashes = merge_slashes
        self.redirect_defaults = redirect_defaults
        self.host_matching = host_matching
        # Copy the class-level defaults so per-instance additions don't
        # leak into other maps.
        self.converters = self.default_converters.copy()
        if converters:
            self.converters.update(converters)
        self.sort_parameters = sort_parameters
        self.sort_key = sort_key
        for rulefactory in rules or ():
            self.add(rulefactory)

    def is_endpoint_expecting(self, endpoint: str, *arguments: str) -> bool:
        """Iterate over all rules and check if the endpoint expects
        the arguments provided. This is for example useful if you have
        some URLs that expect a language code and others that do not and
        you want to wrap the builder a bit so that the current language
        code is automatically added if not provided but endpoints expect
        it.

        :param endpoint: the endpoint to check.
        :param arguments: this function accepts one or more arguments
            as positional arguments. Each one of them is checked.
        """
        self.update()
        arguments = set(arguments)
        for rule in self._rules_by_endpoint[endpoint]:
            if arguments.issubset(rule.arguments):
                return True
        return False

    @property
    def _rules(self) -> list[Rule]:
        # Flat list of all rules across all endpoints.
        return [rule for rules in self._rules_by_endpoint.values() for rule in rules]

    def iter_rules(self, endpoint: str | None = None) -> t.Iterator[Rule]:
        """Iterate over all rules or the rules of an endpoint.

        :param endpoint: if provided only the rules for that endpoint
            are returned.
        :return: an iterator
        """
        self.update()
        if endpoint is not None:
            return iter(self._rules_by_endpoint[endpoint])
        return iter(self._rules)

    def add(self, rulefactory: RuleFactory) -> None:
        """Add a new rule or factory to the map and bind it. Requires that the
        rule is not bound to another map.

        :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
        """
        for rule in rulefactory.get_rules(self):
            rule.bind(self)
            # build_only rules are never matched, only built.
            if not rule.build_only:
                self._matcher.add(rule)
            self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
        self._remap = True

    def bind(
        self,
        server_name: str,
        script_name: str | None = None,
        subdomain: str | None = None,
        url_scheme: str = "http",
        default_method: str = "GET",
        path_info: str | None = None,
        query_args: t.Mapping[str, t.Any] | str | None = None,
    ) -> MapAdapter:
        """Return a new :class:`MapAdapter` with the details specified to the
        call. Note that `script_name` will default to ``'/'`` if not further
        specified or `None`. The `server_name` at least is a requirement
        because the HTTP RFC requires absolute URLs for redirects and so all
        redirect exceptions raised by Werkzeug will contain the full canonical
        URL.

        If no path_info is passed to :meth:`match` it will use the default path
        info passed to bind. While this doesn't really make sense for
        manual bind calls, it's useful if you bind a map to a WSGI
        environment which already contains the path info.

        `subdomain` will default to the `default_subdomain` for this map if
        not defined. If there is no `default_subdomain` you cannot use the
        subdomain feature.

        .. versionchanged:: 1.0
            If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
            will match.

        .. versionchanged:: 0.15
            ``path_info`` defaults to ``'/'`` if ``None``.

        .. versionchanged:: 0.8
            ``query_args`` can be a string.

        .. versionchanged:: 0.7
            Added ``query_args``.
        """
        server_name = server_name.lower()
        if self.host_matching:
            if subdomain is not None:
                raise RuntimeError("host matching enabled and a subdomain was provided")
        elif subdomain is None:
            subdomain = self.default_subdomain
        if script_name is None:
            script_name = "/"
        if path_info is None:
            path_info = "/"
        # Port isn't part of IDNA, and might push a name over the 63 octet limit.
        server_name, port_sep, port = server_name.partition(":")
        try:
            server_name = server_name.encode("idna").decode("ascii")
        except UnicodeError as e:
            raise BadHost() from e
        return MapAdapter(
            self,
            f"{server_name}{port_sep}{port}",
            script_name,
            subdomain,
            url_scheme,
            path_info,
            default_method,
            query_args,
        )

    def bind_to_environ(
        self,
        environ: WSGIEnvironment | Request,
        server_name: str | None = None,
        subdomain: str | None = None,
    ) -> MapAdapter:
        """Like :meth:`bind` but you can pass it an WSGI environment and it
        will fetch the information from that dictionary. Note that because of
        limitations in the protocol there is no way to get the current
        subdomain and real `server_name` from the environment. If you don't
        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as used `server_name` with disabled subdomain
        feature.

        If `subdomain` is `None` but an environment and a server name is
        provided it will calculate the current subdomain automatically.
        Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
        in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
        subdomain will be ``'staging.dev'``.

        If the object passed as environ has an environ attribute, the value of
        this attribute is used instead. This allows you to pass request
        objects. Additionally `PATH_INFO` added as a default of the
        :class:`MapAdapter` so that you don't have to pass the path info to
        the match method.

        .. versionchanged:: 1.0.0
            If the passed server name specifies port 443, it will match
            if the incoming scheme is ``https`` without a port.

        .. versionchanged:: 1.0.0
            A warning is shown when the passed server name does not
            match the incoming WSGI server name.

        .. versionchanged:: 0.8
            This will no longer raise a ValueError when an unexpected server
            name was passed.

        .. versionchanged:: 0.5
            previously this method accepted a bogus `calculate_subdomain`
            parameter that did not have any effect. It was removed because
            of that.

        :param environ: a WSGI environment.
        :param server_name: an optional server name hint (see above).
        :param subdomain: optionally the current subdomain (see above).
        """
        env = _get_environ(environ)
        wsgi_server_name = get_host(env).lower()
        scheme = env["wsgi.url_scheme"]
        # Detect a WebSocket handshake (Connection: upgrade + Upgrade:
        # websocket) and switch to the corresponding ws/wss scheme.
        upgrade = any(
            v.strip() == "upgrade"
            for v in env.get("HTTP_CONNECTION", "").lower().split(",")
        )
        if upgrade and env.get("HTTP_UPGRADE", "").lower() == "websocket":
            scheme = "wss" if scheme == "https" else "ws"
        if server_name is None:
            server_name = wsgi_server_name
        else:
            server_name = server_name.lower()
            # strip standard port to match get_host()
            if scheme in {"http", "ws"} and server_name.endswith(":80"):
                server_name = server_name[:-3]
            elif scheme in {"https", "wss"} and server_name.endswith(":443"):
                server_name = server_name[:-4]
        if subdomain is None and not self.host_matching:
            cur_server_name = wsgi_server_name.split(".")
            real_server_name = server_name.split(".")
            offset = -len(real_server_name)
            if cur_server_name[offset:] != real_server_name:
                # This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
                # Instead of raising an exception like in Werkzeug 0.7 or
                # earlier we go by an invalid subdomain which will result
                # in a 404 error on matching.
                warnings.warn(
                    f"Current server name {wsgi_server_name!r} doesn't match configured"
                    f" server name {server_name!r}",
                    stacklevel=2,
                )
                subdomain = "<invalid>"
            else:
                # Everything left of the configured server name is the
                # subdomain, e.g. "staging.dev" for staging.dev.example.com.
                subdomain = ".".join(filter(None, cur_server_name[:offset]))

        def _get_wsgi_string(name: str) -> str | None:
            val = env.get(name)
            if val is not None:
                return _wsgi_decoding_dance(val, self.charset)
            return None

        script_name = _get_wsgi_string("SCRIPT_NAME")
        path_info = _get_wsgi_string("PATH_INFO")
        query_args = _get_wsgi_string("QUERY_STRING")
        return Map.bind(
            self,
            server_name,
            script_name,
            subdomain,
            scheme,
            env["REQUEST_METHOD"],
            path_info,
            query_args=query_args,
        )

    def update(self) -> None:
        """Called before matching and building to keep the compiled rules
        in the correct order after things changed.
        """
        if not self._remap:
            return
        with self._remap_lock:
            # Re-check under the lock: another thread may have finished
            # the remap while we were waiting.
            if not self._remap:
                return
            self._matcher.update()
            for rules in self._rules_by_endpoint.values():
                rules.sort(key=lambda x: x.build_compare_key())
            self._remap = False

    def __repr__(self) -> str:
        rules = self.iter_rules()
        return f"{type(self).__name__}({pformat(list(rules))})"
class MapAdapter:
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(
self,
map: Map,
server_name: str,
script_name: str,
subdomain: str | None,
url_scheme: str,
path_info: str,
default_method: str,
query_args: t.Mapping[str, t.Any] | str | None = None,
):
self.map = map
self.server_name = server_name
if not script_name.endswith("/"):
script_name += "/"
self.script_name = script_name
self.subdomain = subdomain
self.url_scheme = url_scheme
self.path_info = path_info
self.default_method = default_method
self.query_args = query_args
self.websocket = self.url_scheme in {"ws", "wss"}
def dispatch(
self,
view_func: t.Callable[[str, t.Mapping[str, t.Any]], WSGIApplication],
path_info: str | None = None,
method: str | None = None,
catch_http_exceptions: bool = False,
) -> WSGIApplication:
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
Here a small example for the dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect as e:
return e
return view_func(endpoint, args)
except HTTPException as e:
if catch_http_exceptions:
return e
raise
@t.overload
def match( # type: ignore
self,
path_info: str | None = None,
method: str | None = None,
return_rule: t.Literal[False] = False,
query_args: t.Mapping[str, t.Any] | str | None = None,
websocket: bool | None = None,
) -> tuple[str, t.Mapping[str, t.Any]]:
...
@t.overload
def match(
self,
path_info: str | None = None,
method: str | None = None,
return_rule: t.Literal[True] = True,
query_args: t.Mapping[str, t.Any] | str | None = None,
websocket: bool | None = None,
) -> tuple[Rule, t.Mapping[str, t.Any]]:
...
def match(
    self,
    path_info: str | None = None,
    method: str | None = None,
    return_rule: bool = False,
    query_args: t.Mapping[str, t.Any] | str | None = None,
    websocket: bool | None = None,
) -> tuple[str | Rule, t.Mapping[str, t.Any]]:
    """The usage is simple: you just pass the match method the current
    path info as well as the method (which defaults to `GET`).  The
    following things can then happen:

    - you receive a `NotFound` exception that indicates that no URL is
      matching.  A `NotFound` exception is also a WSGI application you
      can call to get a default page not found page (happens to be the
      same object as `werkzeug.exceptions.NotFound`)

    - you receive a `MethodNotAllowed` exception that indicates that there
      is a match for this URL but not for the current request method.
      This is useful for RESTful applications.

    - you receive a `RequestRedirect` exception with a `new_url`
      attribute.  This exception is used to notify you about a request
      Werkzeug requests from your WSGI application.  This is for example the
      case if you request ``/foo`` although the correct URL is ``/foo/``
      You can use the `RequestRedirect` instance as response-like object
      similar to all other subclasses of `HTTPException`.

    - you receive a ``WebsocketMismatch`` exception if the only
      match is a WebSocket rule but the bind is an HTTP request, or
      if the match is an HTTP rule but the bind is a WebSocket
      request.

    - you get a tuple in the form ``(endpoint, arguments)`` if there is
      a match (unless `return_rule` is True, in which case you get a tuple
      in the form ``(rule, arguments)``)

    If the path info is not passed to the match method the default path
    info of the map is used (defaults to the root URL if not defined
    explicitly).

    All of the exceptions raised are subclasses of `HTTPException` so they
    can be used as WSGI responses. They will all render generic error or
    redirect pages.

    Here is a small example for matching:

    >>> m = Map([
    ...     Rule('/', endpoint='index'),
    ...     Rule('/downloads/', endpoint='downloads/index'),
    ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
    ... ])
    >>> urls = m.bind("example.com", "/")
    >>> urls.match("/", "GET")
    ('index', {})
    >>> urls.match("/downloads/42")
    ('downloads/show', {'id': 42})

    And here is what happens on redirect and missing URLs:

    >>> urls.match("/downloads")
    Traceback (most recent call last):
      ...
    RequestRedirect: http://example.com/downloads/
    >>> urls.match("/missing")
    Traceback (most recent call last):
      ...
    NotFound: 404 Not Found

    :param path_info: the path info to use for matching.  Overrides the
                      path info specified on binding.
    :param method: the HTTP method used for matching.  Overrides the
                   method specified on binding.
    :param return_rule: return the rule that matched instead of just the
                        endpoint (defaults to `False`).
    :param query_args: optional query arguments that are used for
                       automatic redirects as string or dictionary.  It's
                       currently not possible to use the query arguments
                       for URL matching.
    :param websocket: Match WebSocket instead of HTTP requests. A
        websocket request has a ``ws`` or ``wss``
        :attr:`url_scheme`. This overrides that detection.

    .. versionadded:: 1.0
        Added ``websocket``.

    .. versionchanged:: 0.8
        ``query_args`` can be a string.

    .. versionadded:: 0.7
        Added ``query_args``.

    .. versionadded:: 0.6
        Added ``return_rule``.
    """
    self.map.update()
    if path_info is None:
        path_info = self.path_info
    if query_args is None:
        query_args = self.query_args or {}
    method = (method or self.default_method).upper()

    if websocket is None:
        websocket = self.websocket

    # With host matching the domain part is the full host; otherwise it
    # is the subdomain (if one was bound).
    domain_part = self.server_name

    if not self.map.host_matching and self.subdomain is not None:
        domain_part = self.subdomain

    path_part = f"/{path_info.lstrip('/')}" if path_info else ""

    try:
        result = self.map._matcher.match(domain_part, path_part, method, websocket)
    except RequestPath as e:
        # The matcher reports a canonical path (e.g. trailing slash).
        # safe = https://url.spec.whatwg.org/#url-path-segment-string
        new_path = quote(
            e.path_info, safe="!$&'()*+,/:;=@", encoding=self.map.charset
        )
        raise RequestRedirect(
            self.make_redirect_url(new_path, query_args)
        ) from None
    except RequestAliasRedirect as e:
        # An alias rule matched; redirect to the canonical endpoint URL.
        raise RequestRedirect(
            self.make_alias_redirect_url(
                f"{domain_part}|{path_part}",
                e.endpoint,
                e.matched_values,
                method,
                query_args,
            )
        ) from None
    except NoMatch as e:
        # Prefer 405 with the allowed methods over a plain 404.
        if e.have_match_for:
            raise MethodNotAllowed(valid_methods=list(e.have_match_for)) from None

        if e.websocket_mismatch:
            raise WebsocketMismatch() from None

        raise NotFound() from None
    else:
        rule, rv = result

        if self.map.redirect_defaults:
            redirect_url = self.get_default_redirect(rule, method, rv, query_args)

            if redirect_url is not None:
                raise RequestRedirect(redirect_url)

        if rule.redirect_to is not None:
            if isinstance(rule.redirect_to, str):
                # Fill <placeholders> in the redirect target from the
                # matched values, using each converter's to_url().
                def _handle_match(match: t.Match[str]) -> str:
                    value = rv[match.group(1)]
                    return rule._converters[match.group(1)].to_url(value)

                redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
            else:
                # Callable redirect target: called with the adapter and
                # the matched values.
                redirect_url = rule.redirect_to(self, **rv)

            if self.subdomain:
                netloc = f"{self.subdomain}.{self.server_name}"
            else:
                netloc = self.server_name

            raise RequestRedirect(
                urljoin(
                    f"{self.url_scheme or 'http'}://{netloc}{self.script_name}",
                    redirect_url,
                )
            )

        if return_rule:
            return rule, rv
        else:
            return rule.endpoint, rv
def test(self, path_info: str | None = None, method: str | None = None) -> bool:
    """Check whether a rule would match the given path.

    Works like `match` but returns ``True`` if the URL matches and
    ``False`` if it does not exist.

    :param path_info: the path info to use for matching.  Overrides the
                      path info specified on binding.
    :param method: the HTTP method used for matching.  Overrides the
                   method specified on binding.
    """
    try:
        self.match(path_info, method)
    except RequestRedirect:
        # A redirect means a canonical URL exists, so the path matches.
        return True
    except HTTPException:
        return False
    return True
def allowed_methods(self, path_info: str | None = None) -> t.Iterable[str]:
    """Return the HTTP methods that are valid for the given path.

    .. versionadded:: 0.7
    """
    methods: t.Iterable[str] = []
    try:
        # "--" is never a real HTTP method, so a matching path always
        # raises MethodNotAllowed carrying the valid methods.
        self.match(path_info, method="--")
    except MethodNotAllowed as e:
        methods = e.valid_methods  # type: ignore
    except HTTPException:
        pass
    return methods
def get_host(self, domain_part: str | None) -> str:
"""Figures out the full host name for the given domain part. The
domain part is a subdomain in case host matching is disabled or
a full host name.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return domain_part
if domain_part is None:
subdomain = self.subdomain
else:
subdomain = domain_part
if subdomain:
return f"{subdomain}.{self.server_name}"
else:
return self.server_name
def get_default_redirect(
    self,
    rule: Rule,
    method: str,
    values: t.MutableMapping[str, t.Any],
    query_args: t.Mapping[str, t.Any] | str,
) -> str | None:
    """A helper that returns the URL to redirect to if it finds one.
    This is used for default redirecting only.

    Scans the other rules for *rule*'s endpoint; if a higher-priority
    rule provides defaults covering the matched *values*, returns the
    redirect URL built from that rule.  NOTE: mutates *values* in place
    by merging in the found rule's defaults.

    :internal:
    """
    assert self.map.redirect_defaults
    for r in self.map._rules_by_endpoint[rule.endpoint]:
        # every rule that comes after this one, including ourself
        # has a lower priority for the defaults.  We order the ones
        # with the highest priority up for building.
        if r is rule:
            break
        if r.provides_defaults_for(rule) and r.suitable_for(values, method):
            values.update(r.defaults)  # type: ignore
            domain_part, path = r.build(values)  # type: ignore
            return self.make_redirect_url(path, query_args, domain_part=domain_part)
    return None
def encode_query_args(self, query_args: t.Mapping[str, t.Any] | str) -> str:
if not isinstance(query_args, str):
return _urlencode(query_args, encoding=self.map.charset)
return query_args
def make_redirect_url(
self,
path_info: str,
query_args: t.Mapping[str, t.Any] | str | None = None,
domain_part: str | None = None,
) -> str:
"""Creates a redirect URL.
:internal:
"""
if query_args is None:
query_args = self.query_args
if query_args:
query_str = self.encode_query_args(query_args)
else:
query_str = None
scheme = self.url_scheme or "http"
host = self.get_host(domain_part)
path = "/".join((self.script_name.strip("/"), path_info.lstrip("/")))
return urlunsplit((scheme, host, path, query_str, None))
def make_alias_redirect_url(
    self,
    path: str,
    endpoint: str,
    values: t.Mapping[str, t.Any],
    method: str,
    query_args: t.Mapping[str, t.Any] | str,
) -> str:
    """Internally called to make an alias redirect URL.

    Builds the canonical (non-alias) URL for *endpoint* and asserts it
    differs from the aliased *path* that matched.
    """
    url = self.build(
        endpoint, values, method, append_unknown=False, force_external=True
    )
    if query_args:
        url += f"?{self.encode_query_args(query_args)}"
    # If the built URL equals the matched path the alias points at
    # itself, which would redirect forever.
    assert url != path, "detected invalid alias setting. No canonical URL found"
    return url
def _partial_build(
    self,
    endpoint: str,
    values: t.Mapping[str, t.Any],
    method: str | None,
    append_unknown: bool,
) -> tuple[str, str, bool] | None:
    """Helper for :meth:`build`.  Returns subdomain and path for the
    rule that accepts this endpoint, values and method.

    Returns ``(domain_part, path, websocket)`` or ``None`` if no rule
    for *endpoint* can be built from *values*.

    :internal:
    """
    # in case the method is none, try with the default method first
    if method is None:
        rv = self._partial_build(
            endpoint, values, self.default_method, append_unknown
        )
        if rv is not None:
            return rv
    # Default method did not match or a specific method is passed.
    # Check all for first match with matching host. If no matching
    # host is found, go with first result.
    first_match = None
    for rule in self.map._rules_by_endpoint.get(endpoint, ()):
        if rule.suitable_for(values, method):
            build_rv = rule.build(values, append_unknown)
            if build_rv is not None:
                rv = (build_rv[0], build_rv[1], rule.websocket)
                if self.map.host_matching:
                    # Prefer a rule built for the bound host; remember
                    # the first candidate as a fallback.
                    if rv[0] == self.server_name:
                        return rv
                    elif first_match is None:
                        first_match = rv
                else:
                    return rv
    return first_match
def build(
    self,
    endpoint: str,
    values: t.Mapping[str, t.Any] | None = None,
    method: str | None = None,
    force_external: bool = False,
    append_unknown: bool = True,
    url_scheme: str | None = None,
) -> str:
    """Building URLs works pretty much the other way round.  Instead of
    `match` you call `build` and pass it the endpoint and a dict of
    arguments for the placeholders.

    The `build` function also accepts an argument called `force_external`
    which, if you set it to `True` will force external URLs. Per default
    external URLs (include the server name) will only be used if the
    target URL is on a different subdomain.

    >>> m = Map([
    ...     Rule('/', endpoint='index'),
    ...     Rule('/downloads/', endpoint='downloads/index'),
    ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
    ... ])
    >>> urls = m.bind("example.com", "/")
    >>> urls.build("index", {})
    '/'
    >>> urls.build("downloads/show", {'id': 42})
    '/downloads/42'
    >>> urls.build("downloads/show", {'id': 42}, force_external=True)
    'http://example.com/downloads/42'

    Because URLs cannot contain non ASCII data you will always get
    bytes back.  Non ASCII characters are urlencoded with the
    charset defined on the map instance.

    Additional values are converted to strings and appended to the URL as
    URL querystring parameters:

    >>> urls.build("index", {'q': 'My Searchstring'})
    '/?q=My+Searchstring'

    When processing those additional values, lists are furthermore
    interpreted as multiple values (as per
    :py:class:`werkzeug.datastructures.MultiDict`):

    >>> urls.build("index", {'q': ['a', 'b', 'c']})
    '/?q=a&q=b&q=c'

    Passing a ``MultiDict`` will also add multiple values:

    >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
    '/?p=z&q=a&q=b'

    If a rule does not exist when building a `BuildError` exception is
    raised.

    The build method accepts an argument called `method` which allows you
    to specify the method you want to have an URL built for if you have
    different methods for the same endpoint specified.

    :param endpoint: the endpoint of the URL to build.
    :param values: the values for the URL to build.  Unhandled values are
                   appended to the URL as query parameters.
    :param method: the HTTP method for the rule if there are different
                   URLs for different methods on the same endpoint.
    :param force_external: enforce full canonical external URLs. If the URL
                           scheme is not provided, this will generate
                           a protocol-relative URL.
    :param append_unknown: unknown parameters are appended to the generated
                           URL as query string argument.  Disable this
                           if you want the builder to ignore those.
    :param url_scheme: Scheme to use in place of the bound
        :attr:`url_scheme`.

    .. versionchanged:: 2.0
        Added the ``url_scheme`` parameter.

    .. versionadded:: 0.6
       Added the ``append_unknown`` parameter.
    """
    self.map.update()

    if values:
        if isinstance(values, MultiDict):
            # A MultiDict may hold several values per key: collapse
            # single-item lists to the bare value and drop empty keys.
            values = {
                k: (v[0] if len(v) == 1 else v)
                for k, v in dict.items(values)
                if len(v) != 0
            }
        else:  # plain dict
            # None values are treated as "not provided".
            values = {k: v for k, v in values.items() if v is not None}
    else:
        values = {}

    rv = self._partial_build(endpoint, values, method, append_unknown)
    if rv is None:
        raise BuildError(endpoint, values, method, self)

    domain_part, path, websocket = rv
    host = self.get_host(domain_part)

    if url_scheme is None:
        url_scheme = self.url_scheme

    # Always build WebSocket routes with the scheme (browsers
    # require full URLs). If bound to a WebSocket, ensure that HTTP
    # routes are built with an HTTP scheme.
    secure = url_scheme in {"https", "wss"}

    if websocket:
        force_external = True
        url_scheme = "wss" if secure else "ws"
    elif url_scheme:
        url_scheme = "https" if secure else "http"

    # shortcut this.
    if not force_external and (
        (self.map.host_matching and host == self.server_name)
        or (not self.map.host_matching and domain_part == self.subdomain)
    ):
        # Same host/subdomain: a path-only URL is enough.
        return f"{self.script_name.rstrip('/')}/{path.lstrip('/')}"

    # Empty scheme yields a protocol-relative URL ("//host/...").
    scheme = f"{url_scheme}:" if url_scheme else ""
    return f"{scheme}//{host}{self.script_name[:-1]}/{path.lstrip('/')}"

View file

@ -0,0 +1,202 @@
from __future__ import annotations
import re
import typing as t
from dataclasses import dataclass
from dataclasses import field
from .converters import ValidationError
from .exceptions import NoMatch
from .exceptions import RequestAliasRedirect
from .exceptions import RequestPath
from .rules import Rule
from .rules import RulePart
class SlashRequired(Exception):
    """Internal signal: the path would match if a trailing slash is added."""

    pass
@dataclass
class State:
    """A representation of a rule state.

    This includes the *rules* that correspond to the state and the
    possible *static* and *dynamic* transitions to the next state.
    """

    # Dynamic transitions as (regex rule part, next state), tried after
    # static transitions; kept sorted by part weight via update().
    dynamic: list[tuple[RulePart, State]] = field(default_factory=list)
    # Rules that terminate exactly at this state.
    rules: list[Rule] = field(default_factory=list)
    # Static transitions keyed by the literal path segment.
    static: dict[str, State] = field(default_factory=dict)
class StateMachineMatcher:
    """Matches a (domain, path) pair against the bound rules by walking a
    state machine built from the rules' parts.

    Each ``/``-separated segment is one transition: static segments are
    dict lookups, dynamic segments are regex tests tried in weight order.
    """

    def __init__(self, merge_slashes: bool) -> None:
        # Root state; the very first transition consumes the domain part.
        self._root = State()
        self.merge_slashes = merge_slashes

    def add(self, rule: Rule) -> None:
        """Insert *rule*, creating intermediate states for its parts."""
        state = self._root
        for part in rule._parts:
            if part.static:
                state.static.setdefault(part.content, State())
                state = state.static[part.content]
            else:
                # Reuse an existing dynamic transition for an equal part
                # so equivalent patterns share a state.
                for test_part, new_state in state.dynamic:
                    if test_part == part:
                        state = new_state
                        break
                else:
                    new_state = State()
                    state.dynamic.append((part, new_state))
                    state = new_state
        state.rules.append(rule)

    def update(self) -> None:
        # For every state the dynamic transitions should be sorted by
        # the weight of the transition
        state = self._root

        def _update_state(state: State) -> None:
            state.dynamic.sort(key=lambda entry: entry[0].weight)
            for new_state in state.static.values():
                _update_state(new_state)
            for _, new_state in state.dynamic:
                _update_state(new_state)

        _update_state(state)

    def match(
        self, domain: str, path: str, method: str, websocket: bool
    ) -> tuple[Rule, t.MutableMapping[str, t.Any]]:
        """Return the matching rule and its converted values.

        Raises :exc:`RequestPath` when a slash redirect is needed,
        :exc:`RequestAliasRedirect` for alias rules, and :exc:`NoMatch`
        (carrying allowed methods / websocket info) otherwise.
        """
        # To match to a rule we need to start at the root state and
        # try to follow the transitions until we find a match, or find
        # there is no transition to follow.

        have_match_for = set()
        websocket_mismatch = False

        def _match(
            state: State, parts: list[str], values: list[str]
        ) -> tuple[Rule, list[str]] | None:
            # This function is meant to be called recursively, and will attempt
            # to match the head part to the state's transitions.
            nonlocal have_match_for, websocket_mismatch

            # The base case is when all parts have been matched via
            # transitions. Hence if there is a rule with methods &
            # websocket that work return it and the dynamic values
            # extracted.
            if parts == []:
                for rule in state.rules:
                    if rule.methods is not None and method not in rule.methods:
                        have_match_for.update(rule.methods)
                    elif rule.websocket != websocket:
                        websocket_mismatch = True
                    else:
                        return rule, values

                # Test if there is a match with this path with a
                # trailing slash, if so raise an exception to report
                # that matching is possible with an additional slash
                if "" in state.static:
                    for rule in state.static[""].rules:
                        if websocket == rule.websocket and (
                            rule.methods is None or method in rule.methods
                        ):
                            if rule.strict_slashes:
                                raise SlashRequired()
                            else:
                                return rule, values
                return None

            part = parts[0]
            # To match this part try the static transitions first
            if part in state.static:
                rv = _match(state.static[part], parts[1:], values)
                if rv is not None:
                    return rv
            # No match via the static transitions, so try the dynamic
            # ones.
            for test_part, new_state in state.dynamic:
                target = part
                remaining = parts[1:]
                # A final part indicates a transition that always
                # consumes the remaining parts i.e. transitions to a
                # final state.
                if test_part.final:
                    target = "/".join(parts)
                    remaining = []
                match = re.compile(test_part.content).match(target)
                if match is not None:
                    if test_part.suffixed:
                        # If a part_isolating=False part has a slash suffix, remove the
                        # suffix from the match and check for the slash redirect next.
                        suffix = match.groups()[-1]
                        if suffix == "/":
                            remaining = [""]
                    # Converter capture groups are named "__werkzeug_<n>";
                    # sorting by name keeps them in declaration order.
                    converter_groups = sorted(
                        match.groupdict().items(), key=lambda entry: entry[0]
                    )
                    groups = [
                        value
                        for key, value in converter_groups
                        if key[:11] == "__werkzeug_"
                    ]
                    rv = _match(new_state, remaining, values + groups)
                    if rv is not None:
                        return rv

            # If there is no match and the only part left is a
            # trailing slash ("") consider rules that aren't
            # strict-slashes as these should match if there is a final
            # slash part.
            if parts == [""]:
                for rule in state.rules:
                    if rule.strict_slashes:
                        continue
                    if rule.methods is not None and method not in rule.methods:
                        have_match_for.update(rule.methods)
                    elif rule.websocket != websocket:
                        websocket_mismatch = True
                    else:
                        return rule, values

            return None

        try:
            rv = _match(self._root, [domain, *path.split("/")], [])
        except SlashRequired:
            raise RequestPath(f"{path}/") from None

        if self.merge_slashes and rv is None:
            # Try to match again, but with slashes merged
            path = re.sub("/{2,}?", "/", path)
            try:
                rv = _match(self._root, [domain, *path.split("/")], [])
            except SlashRequired:
                raise RequestPath(f"{path}/") from None
            if rv is None:
                raise NoMatch(have_match_for, websocket_mismatch)
            else:
                # The merged path matches: redirect to the canonical form.
                raise RequestPath(f"{path}")
        elif rv is not None:
            rule, values = rv

            result = {}
            # Convert raw captured strings with each rule converter;
            # converters are keyed in the same order values were captured.
            for name, value in zip(rule._converters.keys(), values):
                try:
                    value = rule._converters[name].to_python(value)
                except ValidationError:
                    raise NoMatch(have_match_for, websocket_mismatch) from None
                result[str(name)] = value
            if rule.defaults:
                result.update(rule.defaults)

            if rule.alias and rule.map.redirect_defaults:
                raise RequestAliasRedirect(result, rule.endpoint)

            return rule, result

        raise NoMatch(have_match_for, websocket_mismatch)

View file

@ -0,0 +1,913 @@
from __future__ import annotations
import ast
import re
import typing as t
from dataclasses import dataclass
from string import Template
from types import CodeType
from urllib.parse import quote
from ..datastructures import iter_multi_items
from ..urls import _urlencode
from .converters import ValidationError
if t.TYPE_CHECKING:
from .converters import BaseConverter
from .map import Map
class Weighting(t.NamedTuple):
    # Sort key used to order dynamic rule parts during matching; tuples
    # compare field-by-field, so earlier fields dominate.
    number_static_weights: int
    static_weights: list[tuple[int, int]]
    number_argument_weights: int
    argument_weights: list[int]
@dataclass
class RulePart:
    """A part of a rule.

    Rules can be represented by parts as delimited by `/` with
    instances of this class representing those parts. The *content* is
    either the raw content if *static* or a regex string to match
    against. The *weight* can be used to order parts when matching.
    """

    # Literal segment text when static, otherwise a regex pattern string.
    content: str
    # True when this part consumes all remaining path segments.
    final: bool
    # True when content is matched by equality rather than regex.
    static: bool
    # True when the (non-isolating) pattern carries a trailing-slash group.
    suffixed: bool
    # Ordering key for dynamic transitions.
    weight: Weighting
_part_re = re.compile(
r"""
(?:
(?P<slash>/) # a slash
|
(?P<static>[^</]+) # static rule data
|
(?:
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<arguments>.*?)\))? # converter arguments
: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
)
)
""",
re.VERBOSE,
)
_simple_rule_re = re.compile(r"<([^>]+)>")
_converter_args_re = re.compile(
r"""
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
[\w\d_.]+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
""",
re.VERBOSE,
)
_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
def _find(value: str, target: str, pos: int) -> int:
"""Find the *target* in *value* after *pos*.
Returns the *value* length if *target* isn't found.
"""
try:
return value.index(target, pos)
except ValueError:
return len(value)
def _pythonize(value: str) -> None | bool | int | float | str:
    """Convert a converter-argument token to a Python value.

    Recognizes the ``None``/``True``/``False`` constants, then integer
    and float literals, then quoted strings; anything else is returned
    as the string itself.
    """
    if value in _PYTHON_CONSTANTS:
        return _PYTHON_CONSTANTS[value]

    for converter in (int, float):
        try:
            return converter(value)  # type: ignore
        except ValueError:
            continue

    # Strip matching surrounding quotes, if present.
    if value[:1] == value[-1:] and value[0] in "\"'":
        return value[1:-1]

    return str(value)
def parse_converter_args(argstr: str) -> tuple[t.Tuple, dict[str, t.Any]]:
    """Parse the argument text of a ``<converter(args):name>`` placeholder
    into positional and keyword arguments.
    """
    positional: list[t.Any] = []
    keyword: dict[str, t.Any] = {}

    # The tokenizer expects every argument to end with a comma, so
    # terminate the string with one before scanning.
    for item in _converter_args_re.finditer(f"{argstr},"):
        raw = item.group("stringval")
        if raw is None:
            raw = item.group("value")

        parsed = _pythonize(raw)
        name = item.group("name")

        if name:
            keyword[name] = parsed
        else:
            positional.append(parsed)

    return tuple(positional), keyword
class RuleFactory:
    """As soon as you have more complex URL setups it's a good idea to use rule
    factories to avoid repetitive tasks.  Some of them are builtin, others can
    be added by subclassing `RuleFactory` and overriding `get_rules`.
    """

    def get_rules(self, map: Map) -> t.Iterable[Rule]:
        """Subclasses of `RuleFactory` have to override this method and return
        an iterable of rules."""
        raise NotImplementedError()
class Subdomain(RuleFactory):
    """All URLs provided by this factory have the subdomain set to a
    specific domain. For example if you want to use the subdomain for
    the current language this can be a good setup::

        url_map = Map([
            Rule('/', endpoint='#select_language'),
            Subdomain('<string(length=2):lang_code>', [
                Rule('/', endpoint='index'),
                Rule('/about', endpoint='about'),
                Rule('/help', endpoint='help')
            ])
        ])

    All the rules except for the ``'#select_language'`` endpoint will now
    listen on a two letter long subdomain that holds the language code
    for the current request.
    """

    def __init__(self, subdomain: str, rules: t.Iterable[RuleFactory]) -> None:
        self.subdomain = subdomain
        self.rules = rules

    def get_rules(self, map: Map) -> t.Iterator[Rule]:
        for factory in self.rules:
            for inner in factory.get_rules(map):
                # Work on an unbound copy so the wrapped rule is untouched.
                copied = inner.empty()
                copied.subdomain = self.subdomain
                yield copied
class Submount(RuleFactory):
    """Like `Subdomain` but prefixes the URL rule with a given string::

        url_map = Map([
            Rule('/', endpoint='index'),
            Submount('/blog', [
                Rule('/', endpoint='blog/index'),
                Rule('/entry/<entry_slug>', endpoint='blog/show')
            ])
        ])

    Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
    """

    def __init__(self, path: str, rules: t.Iterable[RuleFactory]) -> None:
        # Normalize away trailing slashes so concatenation yields one "/".
        self.path = path.rstrip("/")
        self.rules = rules

    def get_rules(self, map: Map) -> t.Iterator[Rule]:
        for factory in self.rules:
            for inner in factory.get_rules(map):
                # Work on an unbound copy so the wrapped rule is untouched.
                copied = inner.empty()
                copied.rule = self.path + copied.rule
                yield copied
class EndpointPrefix(RuleFactory):
    """Prefixes all endpoints (which must be strings for this factory) with
    another string. This can be useful for sub applications::

        url_map = Map([
            Rule('/', endpoint='index'),
            EndpointPrefix('blog/', [Submount('/blog', [
                Rule('/', endpoint='index'),
                Rule('/entry/<entry_slug>', endpoint='show')
            ])])
        ])
    """

    def __init__(self, prefix: str, rules: t.Iterable[RuleFactory]) -> None:
        self.prefix = prefix
        self.rules = rules

    def get_rules(self, map: Map) -> t.Iterator[Rule]:
        for factory in self.rules:
            for inner in factory.get_rules(map):
                # Work on an unbound copy so the wrapped rule is untouched.
                copied = inner.empty()
                copied.endpoint = self.prefix + copied.endpoint
                yield copied
class RuleTemplate:
    """Returns copies of the rules wrapped and expands string templates in
    the endpoint, rule, defaults or subdomain sections.

    Here a small example for such a rule template::

        from werkzeug.routing import Map, Rule, RuleTemplate

        resource = RuleTemplate([
            Rule('/$name/', endpoint='$name.list'),
            Rule('/$name/<int:id>', endpoint='$name.show')
        ])

        url_map = Map([resource(name='user'), resource(name='page')])

    When a rule template is called the keyword arguments are used to
    replace the placeholders in all the string parameters.
    """

    def __init__(self, rules: t.Iterable[Rule]) -> None:
        self.rules = list(rules)

    def __call__(self, *args: t.Any, **kwargs: t.Any) -> RuleTemplateFactory:
        # Accept the same arguments as dict() for the substitution context.
        context = dict(*args, **kwargs)
        return RuleTemplateFactory(self.rules, context)
class RuleTemplateFactory(RuleFactory):
    """A factory that fills in template variables into rules.  Used by
    `RuleTemplate` internally.

    :internal:
    """

    def __init__(
        self, rules: t.Iterable[RuleFactory], context: dict[str, t.Any]
    ) -> None:
        self.rules = rules
        self.context = context

    def get_rules(self, map: Map) -> t.Iterator[Rule]:
        for rulefactory in self.rules:
            for rule in rulefactory.get_rules(map):
                new_defaults = subdomain = None
                if rule.defaults:
                    # Substitute $placeholders in string default values;
                    # non-string values pass through unchanged.
                    new_defaults = {}
                    for key, value in rule.defaults.items():
                        if isinstance(value, str):
                            value = Template(value).substitute(self.context)
                        new_defaults[key] = value
                if rule.subdomain is not None:
                    subdomain = Template(rule.subdomain).substitute(self.context)
                new_endpoint = rule.endpoint
                if isinstance(new_endpoint, str):
                    new_endpoint = Template(new_endpoint).substitute(self.context)
                # A fresh Rule is built from the substituted values; only
                # the positional parameters up to strict_slashes carry over.
                yield Rule(
                    Template(rule.rule).substitute(self.context),
                    new_defaults,
                    subdomain,
                    rule.methods,
                    rule.build_only,
                    new_endpoint,
                    rule.strict_slashes,
                )
def _prefix_names(src: str) -> ast.stmt:
"""ast parse and prefix names with `.` to avoid collision with user vars"""
tree = ast.parse(src).body[0]
if isinstance(tree, ast.Expr):
tree = tree.value # type: ignore
for node in ast.walk(tree):
if isinstance(node, ast.Name):
node.id = f".{node.id}"
return tree
_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
_IF_KWARGS_URL_ENCODE_CODE = """\
if kwargs:
params = self._encode_query_vars(kwargs)
q = "?" if params else ""
else:
q = params = ""
"""
_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE)
_URL_ENCODE_AST_NAMES = (_prefix_names("q"), _prefix_names("params"))
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` they will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`merge_slashes`
Override :attr:`Map.merge_slashes` for this rule.
`build_only`
Set this to True and the rule will never match but will create a URL
that can be build. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data)
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return f'foo/{Foo.get_slug_for_id(id)}'
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
`websocket`
If ``True``, this rule is only matches for WebSocket (``ws://``,
``wss://``) requests. By default, rules will only match for HTTP
requests.
.. versionchanged:: 2.1
Percent-encoded newlines (``%0a``), which are decoded by WSGI
servers, are considered when routing instead of terminating the
match early.
.. versionadded:: 1.0
Added ``websocket``.
.. versionadded:: 1.0
Added ``merge_slashes``.
.. versionadded:: 0.7
Added ``alias`` and ``host``.
.. versionchanged:: 0.6.1
``HEAD`` is added to ``methods`` if ``GET`` is present.
"""
def __init__(
    self,
    string: str,
    defaults: t.Mapping[str, t.Any] | None = None,
    subdomain: str | None = None,
    methods: t.Iterable[str] | None = None,
    build_only: bool = False,
    endpoint: str | None = None,
    strict_slashes: bool | None = None,
    merge_slashes: bool | None = None,
    redirect_to: str | t.Callable[..., str] | None = None,
    alias: bool = False,
    host: str | None = None,
    websocket: bool = False,
) -> None:
    if not string.startswith("/"):
        raise ValueError("urls must start with a leading slash")

    self.rule = string
    # A trailing slash marks a branch URL; anything else is a leaf.
    self.is_leaf = not string.endswith("/")
    self.is_branch = string.endswith("/")

    # Set later by bind(); None until the rule is added to a Map.
    self.map: Map = None  # type: ignore
    self.strict_slashes = strict_slashes
    self.merge_slashes = merge_slashes
    self.subdomain = subdomain
    self.host = host
    self.defaults = defaults
    self.build_only = build_only
    self.alias = alias
    self.websocket = websocket

    if methods is not None:
        if isinstance(methods, str):
            raise TypeError("'methods' should be a list of strings.")

        # Methods are matched case-insensitively; store them uppercased.
        methods = {x.upper() for x in methods}

        if "HEAD" not in methods and "GET" in methods:
            # HTTP requires HEAD support wherever GET is supported.
            methods.add("HEAD")

        if websocket and methods - {"GET", "HEAD", "OPTIONS"}:
            raise ValueError(
                "WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods."
            )

    self.methods = methods
    self.endpoint: str = endpoint  # type: ignore
    self.redirect_to = redirect_to

    # Argument names already satisfied by the defaults mapping.
    if defaults:
        self.arguments = set(map(str, defaults))
    else:
        self.arguments = set()

    # Populated during bind()/compile().
    self._converters: dict[str, BaseConverter] = {}
    self._trace: list[tuple[bool, str]] = []
    self._parts: list[RulePart] = []
def empty(self) -> Rule:
"""
Return an unbound copy of this rule.
This can be useful if want to reuse an already bound URL for another
map. See ``get_empty_kwargs`` to override what keyword arguments are
provided to the new copy.
"""
return type(self)(self.rule, **self.get_empty_kwargs())
def get_empty_kwargs(self) -> t.Mapping[str, t.Any]:
"""
Provides kwargs for instantiating empty copy with empty()
Use this method to provide custom keyword arguments to the subclass of
``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
has custom keyword arguments that are needed at instantiation.
Must return a ``dict`` that will be provided as kwargs to the new
instance of ``Rule``, following the initial ``self.rule`` value which
is always provided as the first, required positional argument.
"""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return dict(
defaults=defaults,
subdomain=self.subdomain,
methods=self.methods,
build_only=self.build_only,
endpoint=self.endpoint,
strict_slashes=self.strict_slashes,
redirect_to=self.redirect_to,
alias=self.alias,
host=self.host,
)
    def get_rules(self, map: Map) -> t.Iterator[Rule]:
        # A plain rule expands to just itself; rule factories yield many.
        yield self
    def refresh(self) -> None:
        """Rebinds and refreshes the URL.  Call this if you modified the
        rule in place.

        :internal:
        """
        self.bind(self.map, rebind=True)
def bind(self, map: Map, rebind: bool = False) -> None:
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError(f"url rule {self!r} already bound to map {self.map!r}")
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.merge_slashes is None:
self.merge_slashes = map.merge_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def get_converter(
self,
variable_name: str,
converter_name: str,
args: t.Tuple,
kwargs: t.Mapping[str, t.Any],
) -> BaseConverter:
"""Looks up the converter for the given parameter.
.. versionadded:: 0.9
"""
if converter_name not in self.map.converters:
raise LookupError(f"the converter {converter_name!r} does not exist")
return self.map.converters[converter_name](self.map, *args, **kwargs)
    def _encode_query_vars(self, query_vars: t.Mapping[str, t.Any]) -> str:
        """Serialize *query_vars* into a URL query string, sorted if the
        map's ``sort_parameters`` option is enabled."""
        items: t.Iterable[tuple[str, str]] = iter_multi_items(query_vars)
        if self.map.sort_parameters:
            items = sorted(items, key=self.map.sort_key)
        return _urlencode(items, encoding=self.map.charset)
    def _parse_rule(self, rule: str) -> t.Iterable[RulePart]:
        """Parse *rule* into a sequence of :class:`RulePart` objects.

        Static text and ``<converter:variable>`` placeholders are
        accumulated into regex ``content``; a new part is emitted at each
        ``/`` unless a non-part-isolating converter has been seen, in
        which case the rest of the rule becomes one final part.

        :internal:
        """
        content = ""
        static = True
        argument_weights = []
        static_weights: list[tuple[int, int]] = []
        final = False
        convertor_number = 0
        pos = 0
        while pos < len(rule):
            match = _part_re.match(rule, pos)
            if match is None:
                raise ValueError(f"malformed url rule: {rule!r}")
            data = match.groupdict()
            if data["static"] is not None:
                # Record position and (negative) length for weighting.
                static_weights.append((len(static_weights), -len(data["static"])))
                self._trace.append((False, data["static"]))
                content += data["static"] if static else re.escape(data["static"])
            if data["variable"] is not None:
                if static:
                    # Switching content to represent regex, hence the need to escape
                    content = re.escape(content)
                    static = False
                c_args, c_kwargs = parse_converter_args(data["arguments"] or "")
                convobj = self.get_converter(
                    data["variable"], data["converter"] or "default", c_args, c_kwargs
                )
                self._converters[data["variable"]] = convobj
                self.arguments.add(data["variable"])
                if not convobj.part_isolating:
                    # Converter may match "/", so the remainder of the
                    # rule has to be matched as a single final part.
                    final = True
                content += f"(?P<__werkzeug_{convertor_number}>{convobj.regex})"
                convertor_number += 1
                argument_weights.append(convobj.weight)
                self._trace.append((True, data["variable"]))
            if data["slash"] is not None:
                self._trace.append((False, "/"))
                if final:
                    content += "/"
                else:
                    # End of a part: emit it and reset the accumulators.
                    if not static:
                        content += r"\Z"
                    weight = Weighting(
                        -len(static_weights),
                        static_weights,
                        -len(argument_weights),
                        argument_weights,
                    )
                    yield RulePart(
                        content=content,
                        final=final,
                        static=static,
                        suffixed=False,
                        weight=weight,
                    )
                    content = ""
                    static = True
                    argument_weights = []
                    static_weights = []
                    final = False
                    convertor_number = 0
            pos = match.end()
        suffixed = False
        if final and content[-1] == "/":
            # If a converter is part_isolating=False (matches slashes) and ends with a
            # slash, augment the regex to support slash redirects.
            suffixed = True
            content = content[:-1] + "(?<!/)(/?)"
        if not static:
            content += r"\Z"
        weight = Weighting(
            -len(static_weights),
            static_weights,
            -len(argument_weights),
            argument_weights,
        )
        yield RulePart(
            content=content,
            final=final,
            static=static,
            suffixed=suffixed,
            weight=weight,
        )
        if suffixed:
            # Extra empty static part so the trailing-slash variant matches.
            yield RulePart(
                content="", final=False, static=True, suffixed=False, weight=weight
            )
    def compile(self) -> None:
        """Compiles the regular expression and stores it."""
        assert self.map is not None, "rule not bound"
        # The domain component is the host when host matching, otherwise
        # the subdomain.
        if self.map.host_matching:
            domain_rule = self.host or ""
        else:
            domain_rule = self.subdomain or ""
        self._parts = []
        self._trace = []
        self._converters = {}
        if domain_rule == "":
            # No domain component: one empty static part.
            self._parts = [
                RulePart(
                    content="",
                    final=False,
                    static=True,
                    suffixed=False,
                    weight=Weighting(0, [], 0, []),
                )
            ]
        else:
            self._parts.extend(self._parse_rule(domain_rule))
        # "|" marks the boundary between domain and path in the trace.
        self._trace.append((False, "|"))
        rule = self.rule
        if self.merge_slashes:
            # NOTE(review): the non-greedy "{2,}?" collapses slash runs
            # pairwise (e.g. "///" -> "//") rather than in one pass —
            # confirm whether the matcher's merge_slashes handling is
            # expected to cover the remainder.
            rule = re.sub("/{2,}?", "/", self.rule)
        self._parts.extend(self._parse_rule(rule))
        # Pre-compile both URL builders: one that appends unknown kwargs
        # as a query string and one that ignores them.
        self._build: t.Callable[..., tuple[str, str]]
        self._build = self._compile_builder(False).__get__(self, None)
        self._build_unknown: t.Callable[..., tuple[str, str]]
        self._build_unknown = self._compile_builder(True).__get__(self, None)
@staticmethod
def _get_func_code(code: CodeType, name: str) -> t.Callable[..., tuple[str, str]]:
globs: dict[str, t.Any] = {}
locs: dict[str, t.Any] = {}
exec(code, globs, locs)
return locs[name] # type: ignore
    def _compile_builder(
        self, append_unknown: bool = True
    ) -> t.Callable[..., tuple[str, str]]:
        """Generate, compile, and return a function that builds the
        ``(domain, url)`` tuple for this rule from keyword arguments.

        The trace recorded by ``_parse_rule`` is translated into an AST
        of joined string parts, constant-folded, and compiled.

        :internal:
        """
        defaults = self.defaults or {}
        dom_ops: list[tuple[bool, str]] = []
        url_ops: list[tuple[bool, str]] = []
        # Collect ops into the domain list until the "|" separator, then
        # into the url list.
        opl = dom_ops
        for is_dynamic, data in self._trace:
            if data == "|" and opl is dom_ops:
                opl = url_ops
                continue
            # this seems like a silly case to ever come up but:
            # if a default is given for a value that appears in the rule,
            # resolve it to a constant ahead of time
            if is_dynamic and data in defaults:
                data = self._converters[data].to_url(defaults[data])
                opl.append((False, data))
            elif not is_dynamic:
                # safe = https://url.spec.whatwg.org/#url-path-segment-string
                opl.append(
                    (
                        False,
                        quote(data, safe="!$&'()*+,/:;=@", encoding=self.map.charset),
                    )
                )
            else:
                opl.append((True, data))
        def _convert(elem: str) -> ast.stmt:
            # Wrap a dynamic element in a call to its converter's to_url.
            ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem))
            ret.args = [ast.Name(str(elem), ast.Load())]  # type: ignore # str for py2
            return ret
        def _parts(ops: list[tuple[bool, str]]) -> list[ast.AST]:
            # Turn the op list into AST nodes, folding adjacent constants.
            parts = [
                _convert(elem) if is_dynamic else ast.Constant(elem)
                for is_dynamic, elem in ops
            ]
            parts = parts or [ast.Constant("")]
            # constant fold
            ret = [parts[0]]
            for p in parts[1:]:
                if isinstance(p, ast.Constant) and isinstance(ret[-1], ast.Constant):
                    ret[-1] = ast.Constant(ret[-1].value + p.value)
                else:
                    ret.append(p)
            return ret
        dom_parts = _parts(dom_ops)
        url_parts = _parts(url_ops)
        if not append_unknown:
            body = []
        else:
            # Append leftover kwargs to the URL as an encoded query string.
            body = [_IF_KWARGS_URL_ENCODE_AST]
            url_parts.extend(_URL_ENCODE_AST_NAMES)
        def _join(parts: list[ast.AST]) -> ast.AST:
            if len(parts) == 1:  # shortcut
                return parts[0]
            return ast.JoinedStr(parts)
        body.append(
            ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load()))
        )
        # Positional args: dynamic elements with no default; keyword args:
        # the default keys (their values were resolved to constants above).
        pargs = [
            elem
            for is_dynamic, elem in dom_ops + url_ops
            if is_dynamic and elem not in defaults
        ]
        kargs = [str(k) for k in defaults]
        func_ast: ast.FunctionDef = _prefix_names("def _(): pass")  # type: ignore
        func_ast.name = f"<builder:{self.rule!r}>"
        # ".self" and ".kwargs" are deliberately invalid identifiers so
        # they can never collide with real rule arguments.
        func_ast.args.args.append(ast.arg(".self", None))
        for arg in pargs + kargs:
            func_ast.args.args.append(ast.arg(arg, None))
        func_ast.args.kwarg = ast.arg(".kwargs", None)
        for _ in kargs:
            func_ast.args.defaults.append(ast.Constant(""))
        func_ast.body = body
        # Use `ast.parse` instead of `ast.Module` for better portability, since the
        # signature of `ast.Module` can change.
        module = ast.parse("")
        module.body = [func_ast]
        # mark everything as on line 1, offset 0
        # less error-prone than `ast.fix_missing_locations`
        # bad line numbers cause an assert to fail in debug builds
        for node in ast.walk(module):
            if "lineno" in node._attributes:
                node.lineno = 1
            if "end_lineno" in node._attributes:
                node.end_lineno = node.lineno
            if "col_offset" in node._attributes:
                node.col_offset = 0
            if "end_col_offset" in node._attributes:
                node.end_col_offset = node.col_offset
        code = compile(module, "<werkzeug routing>", "exec")
        return self._get_func_code(code, func_ast.name)
def build(
self, values: t.Mapping[str, t.Any], append_unknown: bool = True
) -> tuple[str, str] | None:
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reasons `None` is returned.
:internal:
"""
try:
if append_unknown:
return self._build_unknown(**values)
else:
return self._build(**values)
except ValidationError:
return None
def provides_defaults_for(self, rule: Rule) -> bool:
"""Check if this rule has defaults for a given rule.
:internal:
"""
return bool(
not self.build_only
and self.defaults
and self.endpoint == rule.endpoint
and self != rule
and self.arguments == rule.arguments
)
def suitable_for(
self, values: t.Mapping[str, t.Any], method: str | None = None
) -> bool:
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if (
method is not None
and self.methods is not None
and method not in self.methods
):
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
# in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in defaults.items():
if key in values and value != values[key]:
return False
return True
def build_compare_key(self) -> tuple[int, int, int]:
"""The build compare key for sorting.
:internal:
"""
return (1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ()))
    def __eq__(self, other: object) -> bool:
        # Two rules are equal when they are the same type and their parse
        # traces match, i.e. they would match and build identically.
        return isinstance(other, type(self)) and self._trace == other._trace
    # Rules are mutable (they can be rebound), so they are unhashable.
    __hash__ = None  # type: ignore
    def __str__(self) -> str:
        # The string form is the original rule string.
        return self.rule
def __repr__(self) -> str:
if self.map is None:
return f"<{type(self).__name__} (unbound)>"
parts = []
for is_dynamic, data in self._trace:
if is_dynamic:
parts.append(f"<{data}>")
else:
parts.append(data)
parts = "".join(parts).lstrip("|")
methods = f" ({', '.join(self.methods)})" if self.methods is not None else ""
return f"<{type(self).__name__} {parts!r}{methods} -> {self.endpoint}>"

Some files were not shown because too many files have changed in this diff Show more