done
This commit is contained in:
4
lib/python3.11/site-packages/werkzeug/__init__.py
Normal file
4
lib/python3.11/site-packages/werkzeug/__init__.py
Normal file
@ -0,0 +1,4 @@
|
||||
from .serving import run_simple as run_simple
|
||||
from .test import Client as Client
|
||||
from .wrappers import Request as Request
|
||||
from .wrappers import Response as Response
|
||||
211
lib/python3.11/site-packages/werkzeug/_internal.py
Normal file
211
lib/python3.11/site-packages/werkzeug/_internal.py
Normal file
@ -0,0 +1,211 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import typing as t
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
from .wrappers.request import Request
|
||||
|
||||
_logger: logging.Logger | None = None
|
||||
|
||||
|
||||
class _Missing:
    """Sentinel type whose instance marks an omitted value."""

    def __repr__(self) -> str:
        return "no value"

    def __reduce__(self) -> str:
        # Pickle as a reference to the module-level ``_missing`` name.
        return "_missing"
|
||||
|
||||
|
||||
# Shared sentinel instance used as a default-argument marker.
_missing = _Missing()
|
||||
|
||||
|
||||
def _wsgi_decoding_dance(s: str) -> str:
|
||||
return s.encode("latin1").decode(errors="replace")
|
||||
|
||||
|
||||
def _wsgi_encoding_dance(s: str) -> str:
|
||||
return s.encode().decode("latin1")
|
||||
|
||||
|
||||
def _get_environ(obj: WSGIEnvironment | Request) -> WSGIEnvironment:
|
||||
env = getattr(obj, "environ", obj)
|
||||
assert isinstance(
|
||||
env, dict
|
||||
), f"{type(obj).__name__!r} is not a WSGI environment (has to be a dict)"
|
||||
return env
|
||||
|
||||
|
||||
def _has_level_handler(logger: logging.Logger) -> bool:
|
||||
"""Check if there is a handler in the logging chain that will handle
|
||||
the given logger's effective level.
|
||||
"""
|
||||
level = logger.getEffectiveLevel()
|
||||
current = logger
|
||||
|
||||
while current:
|
||||
if any(handler.level <= level for handler in current.handlers):
|
||||
return True
|
||||
|
||||
if not current.propagate:
|
||||
break
|
||||
|
||||
current = current.parent # type: ignore
|
||||
|
||||
return False
|
||||
|
||||
|
||||
class _ColorStreamHandler(logging.StreamHandler):  # type: ignore[type-arg]
    """On Windows, wrap stream with Colorama for ANSI style support."""

    def __init__(self) -> None:
        try:
            import colorama
        except ImportError:
            # colorama not installed: None makes StreamHandler default
            # to sys.stderr.
            stream = None
        else:
            # Wrap stderr so ANSI escape sequences are translated on
            # Windows consoles.
            stream = colorama.AnsiToWin32(sys.stderr)

        super().__init__(stream)
|
||||
|
||||
|
||||
def _log(type: str, message: str, *args: t.Any, **kwargs: t.Any) -> None:
    """Log a message to the 'werkzeug' logger.

    The logger is created the first time it is needed. If there is no
    level set, it is set to :data:`logging.INFO`. If there is no handler
    for the logger's effective level, a :class:`logging.StreamHandler`
    is added.

    :param type: name of the logging method to call, e.g. ``"info"``.
    :param message: message to log; trailing whitespace is stripped.
    """
    global _logger

    if _logger is None:
        _logger = logging.getLogger("werkzeug")

        if _logger.level == logging.NOTSET:
            _logger.setLevel(logging.INFO)

        if not _has_level_handler(_logger):
            _logger.addHandler(_ColorStreamHandler())

    # Dispatch to the named logging method (info/warning/...).
    getattr(_logger, type)(message.rstrip(), *args, **kwargs)
|
||||
|
||||
|
||||
@t.overload
|
||||
def _dt_as_utc(dt: None) -> None: ...
|
||||
|
||||
|
||||
@t.overload
|
||||
def _dt_as_utc(dt: datetime) -> datetime: ...
|
||||
|
||||
|
||||
def _dt_as_utc(dt: datetime | None) -> datetime | None:
|
||||
if dt is None:
|
||||
return dt
|
||||
|
||||
if dt.tzinfo is None:
|
||||
return dt.replace(tzinfo=timezone.utc)
|
||||
elif dt.tzinfo != timezone.utc:
|
||||
return dt.astimezone(timezone.utc)
|
||||
|
||||
return dt
|
||||
|
||||
|
||||
# Type of the value managed by a _DictAccessorProperty subclass.
_TAccessorValue = t.TypeVar("_TAccessorValue")
|
||||
|
||||
|
||||
class _DictAccessorProperty(t.Generic[_TAccessorValue]):
    """Baseclass for `environ_property` and `header_property`."""

    # Subclasses may set this to True to reject writes and deletes.
    read_only = False

    def __init__(
        self,
        name: str,
        default: _TAccessorValue | None = None,
        load_func: t.Callable[[str], _TAccessorValue] | None = None,
        dump_func: t.Callable[[_TAccessorValue], str] | None = None,
        read_only: bool | None = None,
        doc: str | None = None,
    ) -> None:
        self.name = name
        self.default = default
        self.load_func = load_func
        self.dump_func = dump_func
        # Only override the class-level flag when explicitly given.
        if read_only is not None:
            self.read_only = read_only
        self.__doc__ = doc

    def lookup(self, instance: t.Any) -> t.MutableMapping[str, t.Any]:
        # Subclasses return the mapping (environ or headers) that backs
        # this property.
        raise NotImplementedError

    @t.overload
    def __get__(
        self, instance: None, owner: type
    ) -> _DictAccessorProperty[_TAccessorValue]: ...

    @t.overload
    def __get__(self, instance: t.Any, owner: type) -> _TAccessorValue: ...

    def __get__(
        self, instance: t.Any | None, owner: type
    ) -> _TAccessorValue | _DictAccessorProperty[_TAccessorValue]:
        # Accessed on the class itself: return the descriptor.
        if instance is None:
            return self

        storage = self.lookup(instance)

        if self.name not in storage:
            return self.default  # type: ignore

        value = storage[self.name]

        if self.load_func is not None:
            try:
                return self.load_func(value)
            except (ValueError, TypeError):
                # Unparseable stored value falls back to the default.
                return self.default  # type: ignore

        return value  # type: ignore

    def __set__(self, instance: t.Any, value: _TAccessorValue) -> None:
        if self.read_only:
            raise AttributeError("read only property")

        if self.dump_func is not None:
            # Serialize the value before storing it in the mapping.
            self.lookup(instance)[self.name] = self.dump_func(value)
        else:
            self.lookup(instance)[self.name] = value

    def __delete__(self, instance: t.Any) -> None:
        if self.read_only:
            raise AttributeError("read only property")

        # Missing keys are ignored, matching dict.pop with a default.
        self.lookup(instance).pop(self.name, None)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.name}>"
|
||||
|
||||
|
||||
_plain_int_re = re.compile(r"-?\d+", re.ASCII)
|
||||
|
||||
|
||||
def _plain_int(value: str) -> int:
|
||||
"""Parse an int only if it is only ASCII digits and ``-``.
|
||||
|
||||
This disallows ``+``, ``_``, and non-ASCII digits, which are accepted by ``int`` but
|
||||
are not allowed in HTTP header values.
|
||||
|
||||
Any leading or trailing whitespace is stripped
|
||||
"""
|
||||
value = value.strip()
|
||||
if _plain_int_re.fullmatch(value) is None:
|
||||
raise ValueError
|
||||
|
||||
return int(value)
|
||||
471
lib/python3.11/site-packages/werkzeug/_reloader.py
Normal file
471
lib/python3.11/site-packages/werkzeug/_reloader.py
Normal file
@ -0,0 +1,471 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import fnmatch
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import typing as t
|
||||
from itertools import chain
|
||||
from pathlib import PurePath
|
||||
|
||||
from ._internal import _log
|
||||
|
||||
# The various system prefixes where imports are found. Base values are
|
||||
# different when running in a virtualenv. All reloaders will ignore the
|
||||
# base paths (usually the system installation). The stat reloader won't
|
||||
# scan the virtualenv paths, it will only include modules that are
|
||||
# already imported.
|
||||
# The various system prefixes where imports are found. Base values are
# different when running in a virtualenv. All reloaders will ignore the
# base paths (usually the system installation). The stat reloader won't
# scan the virtualenv paths, it will only include modules that are
# already imported.
_ignore_always = tuple({sys.base_prefix, sys.base_exec_prefix})
prefix = {*_ignore_always, sys.prefix, sys.exec_prefix}

if hasattr(sys, "real_prefix"):
    # virtualenv < 20
    prefix.add(sys.real_prefix)

_stat_ignore_scan = tuple(prefix)
# ``prefix`` was only needed to build the tuple above.
del prefix
# Directory names that never contain watchable source files.
_ignore_common_dirs = {
    "__pycache__",
    ".git",
    ".hg",
    ".tox",
    ".nox",
    ".pytest_cache",
    ".mypy_cache",
}
|
||||
|
||||
|
||||
def _iter_module_paths() -> t.Iterator[str]:
    """Find the filesystem paths associated with imported modules."""
    # List is in case the value is modified by the app while updating.
    for module in list(sys.modules.values()):
        name = getattr(module, "__file__", None)

        # Skip modules with no file and anything under the system prefixes.
        if name is None or name.startswith(_ignore_always):
            continue

        while not os.path.isfile(name):
            # Zip file, find the base file without the module path.
            old = name
            name = os.path.dirname(name)

            if name == old:  # skip if it was all directories somehow
                break
        else:
            # ``while``'s ``else`` only runs when the loop exits without
            # ``break``, i.e. ``name`` now points at an existing file.
            yield name
|
||||
|
||||
|
||||
def _remove_by_pattern(paths: set[str], exclude_patterns: set[str]) -> None:
|
||||
for pattern in exclude_patterns:
|
||||
paths.difference_update(fnmatch.filter(paths, pattern))
|
||||
|
||||
|
||||
def _find_stat_paths(
    extra_files: set[str], exclude_patterns: set[str]
) -> t.Iterable[str]:
    """Find paths for the stat reloader to watch. Returns imported
    module files, Python files under non-system paths. Extra files and
    Python files under extra directories can also be scanned.

    System paths have to be excluded for efficiency. Non-system paths,
    such as a project root or ``sys.path.insert``, should be the paths
    of interest to the user anyway.
    """
    paths = set()

    for path in chain(list(sys.path), extra_files):
        path = os.path.abspath(path)

        if os.path.isfile(path):
            # zip file on sys.path, or extra file
            paths.add(path)
            continue

        # Seed the parent entry so the pruning check below never KeyErrors
        # on the top-level walk root.
        parent_has_py = {os.path.dirname(path): True}

        for root, dirs, files in os.walk(path):
            # Optimizations: ignore system prefixes, __pycache__ will
            # have a py or pyc module at the import path, ignore some
            # common known dirs such as version control and tool caches.
            if (
                root.startswith(_stat_ignore_scan)
                or os.path.basename(root) in _ignore_common_dirs
            ):
                # Clearing ``dirs`` in place stops os.walk from descending.
                dirs.clear()
                continue

            has_py = False

            for name in files:
                if name.endswith((".py", ".pyc")):
                    has_py = True
                    paths.add(os.path.join(root, name))

            # Optimization: stop scanning a directory if neither it nor
            # its parent contained Python files.
            if not (has_py or parent_has_py[os.path.dirname(root)]):
                dirs.clear()
                continue

            parent_has_py[root] = has_py

    # Also watch files of modules that are already imported, wherever
    # they live.
    paths.update(_iter_module_paths())
    _remove_by_pattern(paths, exclude_patterns)
    return paths
|
||||
|
||||
|
||||
def _find_watchdog_paths(
    extra_files: set[str], exclude_patterns: set[str]
) -> t.Iterable[str]:
    """Find paths for the watchdog reloader to watch. Looks at the same
    sources as the stat reloader, but watches everything under
    directories instead of individual files.
    """
    dirs = set()

    for name in chain(list(sys.path), extra_files):
        name = os.path.abspath(name)

        if os.path.isfile(name):
            # Watchdog watches directories; watch the containing one.
            name = os.path.dirname(name)

        dirs.add(name)

    for name in _iter_module_paths():
        dirs.add(os.path.dirname(name))

    _remove_by_pattern(dirs, exclude_patterns)
    # Collapse nested directories so each tree is only scheduled once.
    return _find_common_roots(dirs)
|
||||
|
||||
|
||||
def _find_common_roots(paths: t.Iterable[str]) -> t.Iterable[str]:
|
||||
root: dict[str, dict[str, t.Any]] = {}
|
||||
|
||||
for chunks in sorted((PurePath(x).parts for x in paths), key=len, reverse=True):
|
||||
node = root
|
||||
|
||||
for chunk in chunks:
|
||||
node = node.setdefault(chunk, {})
|
||||
|
||||
node.clear()
|
||||
|
||||
rv = set()
|
||||
|
||||
def _walk(node: t.Mapping[str, dict[str, t.Any]], path: tuple[str, ...]) -> None:
|
||||
for prefix, child in node.items():
|
||||
_walk(child, path + (prefix,))
|
||||
|
||||
# If there are no more nodes, and a path has been accumulated, add it.
|
||||
# Path may be empty if the "" entry is in sys.path.
|
||||
if not node and path:
|
||||
rv.add(os.path.join(*path))
|
||||
|
||||
_walk(root, ())
|
||||
return rv
|
||||
|
||||
|
||||
def _get_args_for_reloading() -> list[str]:
    """Determine how the script was executed, and return the args needed
    to execute it again in a new process.
    """
    if sys.version_info >= (3, 10):
        # sys.orig_argv, added in Python 3.10, contains the exact args used to invoke
        # Python. Still replace argv[0] with sys.executable for accuracy.
        return [sys.executable, *sys.orig_argv[1:]]

    rv = [sys.executable]
    py_script = sys.argv[0]
    args = sys.argv[1:]
    # Need to look at main module to determine how it was executed.
    __main__ = sys.modules["__main__"]

    # The value of __package__ indicates how Python was called. It may
    # not exist if a setuptools script is installed as an egg. It may be
    # set incorrectly for entry points created with pip on Windows.
    if getattr(__main__, "__package__", None) is None or (
        os.name == "nt"
        and __main__.__package__ == ""
        and not os.path.exists(py_script)
        and os.path.exists(f"{py_script}.exe")
    ):
        # Executed a file, like "python app.py".
        py_script = os.path.abspath(py_script)

        if os.name == "nt":
            # Windows entry points have ".exe" extension and should be
            # called directly.
            if not os.path.exists(py_script) and os.path.exists(f"{py_script}.exe"):
                py_script += ".exe"

            if (
                os.path.splitext(sys.executable)[1] == ".exe"
                and os.path.splitext(py_script)[1] == ".exe"
            ):
                # Both are .exe: invoke the script directly rather than
                # through the interpreter.
                rv.pop(0)

        rv.append(py_script)
    else:
        # Executed a module, like "python -m werkzeug.serving".
        if os.path.isfile(py_script):
            # Rewritten by Python from "-m script" to "/path/to/script.py".
            py_module = t.cast(str, __main__.__package__)
            name = os.path.splitext(os.path.basename(py_script))[0]

            if name != "__main__":
                py_module += f".{name}"
        else:
            # Incorrectly rewritten by pydevd debugger from "-m script" to "script".
            py_module = py_script

        rv.extend(("-m", py_module.lstrip(".")))

    rv.extend(args)
    return rv
|
||||
|
||||
|
||||
class ReloaderLoop:
    """Base class for file-watching reload loops.

    Subclasses set :attr:`name` and implement :meth:`run_step`. The
    parent process uses :meth:`restart_with_reloader` to respawn the app
    whenever a child exits with code 3, the reload signal.
    """

    name = ""

    def __init__(
        self,
        extra_files: t.Iterable[str] | None = None,
        exclude_patterns: t.Iterable[str] | None = None,
        interval: int | float = 1,
    ) -> None:
        # Normalize extra files to absolute paths for stable comparisons.
        self.extra_files: set[str] = {os.path.abspath(x) for x in extra_files or ()}
        self.exclude_patterns: set[str] = set(exclude_patterns or ())
        self.interval = interval

    def __enter__(self) -> ReloaderLoop:
        """Do any setup, then run one step of the watch to populate the
        initial filesystem state.
        """
        self.run_step()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):  # type: ignore
        """Clean up any resources associated with the reloader."""
        pass

    def run(self) -> None:
        """Continually run the watch step, sleeping for the configured
        interval after each step.
        """
        while True:
            self.run_step()
            time.sleep(self.interval)

    def run_step(self) -> None:
        """Run one step for watching the filesystem. Called once to set
        up initial state, then repeatedly to update it.
        """
        pass

    def restart_with_reloader(self) -> int:
        """Spawn a new Python interpreter with the same arguments as the
        current one, but running the reloader thread.
        """
        while True:
            _log("info", f" * Restarting with {self.name}")
            args = _get_args_for_reloading()
            # The child sees this variable and runs the app instead of
            # respawning another reloader parent.
            new_environ = os.environ.copy()
            new_environ["WERKZEUG_RUN_MAIN"] = "true"
            exit_code = subprocess.call(args, env=new_environ, close_fds=False)

            # Exit code 3 means "reload"; anything else ends the loop and
            # is propagated to the caller.
            if exit_code != 3:
                return exit_code

    def trigger_reload(self, filename: str) -> None:
        # Exit with the reload code so the parent loop respawns us.
        self.log_reload(filename)
        sys.exit(3)

    def log_reload(self, filename: str | bytes) -> None:
        filename = os.path.abspath(filename)
        _log("info", f" * Detected change in {filename!r}, reloading")
|
||||
|
||||
|
||||
class StatReloaderLoop(ReloaderLoop):
    """Reloader that polls ``os.stat`` mtimes of every watched file."""

    name = "stat"

    def __enter__(self) -> ReloaderLoop:
        # Maps filename -> last seen modification time.
        self.mtimes: dict[str, float] = {}
        return super().__enter__()

    def run_step(self) -> None:
        for name in _find_stat_paths(self.extra_files, self.exclude_patterns):
            try:
                mtime = os.stat(name).st_mtime
            except OSError:
                # File disappeared between discovery and stat; skip it.
                continue

            old_time = self.mtimes.get(name)

            if old_time is None:
                # First sighting: record a baseline, don't reload.
                self.mtimes[name] = mtime
                continue

            if mtime > old_time:
                # trigger_reload exits the process with the reload code.
                self.trigger_reload(name)
|
||||
|
||||
|
||||
class WatchdogReloaderLoop(ReloaderLoop):
    """Reloader backed by the optional ``watchdog`` package, which
    delivers OS filesystem events instead of polling."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        from watchdog.events import EVENT_TYPE_CLOSED
        from watchdog.events import EVENT_TYPE_CREATED
        from watchdog.events import EVENT_TYPE_DELETED
        from watchdog.events import EVENT_TYPE_MODIFIED
        from watchdog.events import EVENT_TYPE_MOVED
        from watchdog.events import FileModifiedEvent
        from watchdog.events import PatternMatchingEventHandler
        from watchdog.observers import Observer

        super().__init__(*args, **kwargs)
        # Bound early so the nested handler class can close over it.
        trigger_reload = self.trigger_reload

        class EventHandler(PatternMatchingEventHandler):
            def on_any_event(self, event: FileModifiedEvent):  # type: ignore
                if event.event_type not in {
                    EVENT_TYPE_CLOSED,
                    EVENT_TYPE_CREATED,
                    EVENT_TYPE_DELETED,
                    EVENT_TYPE_MODIFIED,
                    EVENT_TYPE_MOVED,
                }:
                    # skip events that don't involve changes to the file
                    return

                trigger_reload(event.src_path)

        # Derive a display name from the platform-specific observer class,
        # e.g. "inotifyobserver" -> "inotify".
        reloader_name = Observer.__name__.lower()  # type: ignore[attr-defined]

        if reloader_name.endswith("observer"):
            reloader_name = reloader_name[:-8]

        self.name = f"watchdog ({reloader_name})"
        self.observer = Observer()
        # Extra patterns can be non-Python files, match them in addition
        # to all Python files in default and extra directories. Ignore
        # __pycache__ since a change there will always have a change to
        # the source file (or initial pyc file) as well. Ignore Git and
        # Mercurial internal changes.
        extra_patterns = [p for p in self.extra_files if not os.path.isdir(p)]
        self.event_handler = EventHandler(
            patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
            ignore_patterns=[
                *[f"*/{d}/*" for d in _ignore_common_dirs],
                *self.exclude_patterns,
            ],
        )
        self.should_reload = False

    def trigger_reload(self, filename: str | bytes) -> None:
        # This is called inside an event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def __enter__(self) -> ReloaderLoop:
        # Maps watched directory -> watchdog watch handle (or None).
        self.watches: dict[str, t.Any] = {}
        self.observer.start()
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):  # type: ignore
        self.observer.stop()
        self.observer.join()

    def run(self) -> None:
        # Poll the flag set by trigger_reload; the actual file watching
        # happens on the observer's own threads.
        while not self.should_reload:
            self.run_step()
            time.sleep(self.interval)

        sys.exit(3)

    def run_step(self) -> None:
        to_delete = set(self.watches)

        for path in _find_watchdog_paths(self.extra_files, self.exclude_patterns):
            if path not in self.watches:
                try:
                    self.watches[path] = self.observer.schedule(
                        self.event_handler, path, recursive=True
                    )
                except OSError:
                    # Mark this path as seen anyway. We don't want the
                    # same error message showing again in the next
                    # iteration.
                    self.watches[path] = None

            to_delete.discard(path)

        # Unschedule watches for paths that no longer exist.
        for path in to_delete:
            watch = self.watches.pop(path, None)

            if watch is not None:
                self.observer.unschedule(watch)
|
||||
|
||||
|
||||
# Registry of available reloader backends, selected by name.
reloader_loops: dict[str, type[ReloaderLoop]] = {
    "stat": StatReloaderLoop,
    "watchdog": WatchdogReloaderLoop,
}

try:
    __import__("watchdog.observers")
except ImportError:
    # watchdog not installed: "auto" falls back to polling.
    reloader_loops["auto"] = reloader_loops["stat"]
else:
    reloader_loops["auto"] = reloader_loops["watchdog"]
|
||||
|
||||
|
||||
def ensure_echo_on() -> None:
    """Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after a reload."""
    # tcgetattr will fail if stdin isn't a tty
    if sys.stdin is None or not sys.stdin.isatty():
        return

    try:
        import termios
    except ImportError:
        # Not a POSIX platform (e.g. Windows): nothing to restore.
        return

    attributes = termios.tcgetattr(sys.stdin)

    # Index 3 is the lflag word; re-enable the ECHO bit if it was cleared.
    if not attributes[3] & termios.ECHO:
        attributes[3] |= termios.ECHO
        termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
|
||||
|
||||
|
||||
def run_with_reloader(
    main_func: t.Callable[[], None],
    extra_files: t.Iterable[str] | None = None,
    exclude_patterns: t.Iterable[str] | None = None,
    interval: int | float = 1,
    reloader_type: str = "auto",
) -> None:
    """Run the given function in an independent Python interpreter."""
    import signal

    # Exit cleanly on SIGTERM instead of being killed outright.
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    reloader = reloader_loops[reloader_type](
        extra_files=extra_files, exclude_patterns=exclude_patterns, interval=interval
    )

    try:
        if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
            # Child process: run the app in a daemon thread while this
            # thread runs the watch loop.
            ensure_echo_on()
            t = threading.Thread(target=main_func, args=())
            t.daemon = True

            # Enter the reloader to set up initial state, then start
            # the app thread and reloader update loop.
            with reloader:
                t.start()
                reloader.run()
        else:
            # Parent process: keep respawning the child until it exits
            # with a code other than 3, then propagate that exit code.
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass
|
||||
@ -0,0 +1,64 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
|
||||
from .accept import Accept as Accept
|
||||
from .accept import CharsetAccept as CharsetAccept
|
||||
from .accept import LanguageAccept as LanguageAccept
|
||||
from .accept import MIMEAccept as MIMEAccept
|
||||
from .auth import Authorization as Authorization
|
||||
from .auth import WWWAuthenticate as WWWAuthenticate
|
||||
from .cache_control import RequestCacheControl as RequestCacheControl
|
||||
from .cache_control import ResponseCacheControl as ResponseCacheControl
|
||||
from .csp import ContentSecurityPolicy as ContentSecurityPolicy
|
||||
from .etag import ETags as ETags
|
||||
from .file_storage import FileMultiDict as FileMultiDict
|
||||
from .file_storage import FileStorage as FileStorage
|
||||
from .headers import EnvironHeaders as EnvironHeaders
|
||||
from .headers import Headers as Headers
|
||||
from .mixins import ImmutableDictMixin as ImmutableDictMixin
|
||||
from .mixins import ImmutableHeadersMixin as ImmutableHeadersMixin
|
||||
from .mixins import ImmutableListMixin as ImmutableListMixin
|
||||
from .mixins import ImmutableMultiDictMixin as ImmutableMultiDictMixin
|
||||
from .mixins import UpdateDictMixin as UpdateDictMixin
|
||||
from .range import ContentRange as ContentRange
|
||||
from .range import IfRange as IfRange
|
||||
from .range import Range as Range
|
||||
from .structures import CallbackDict as CallbackDict
|
||||
from .structures import CombinedMultiDict as CombinedMultiDict
|
||||
from .structures import HeaderSet as HeaderSet
|
||||
from .structures import ImmutableDict as ImmutableDict
|
||||
from .structures import ImmutableList as ImmutableList
|
||||
from .structures import ImmutableMultiDict as ImmutableMultiDict
|
||||
from .structures import ImmutableTypeConversionDict as ImmutableTypeConversionDict
|
||||
from .structures import iter_multi_items as iter_multi_items
|
||||
from .structures import MultiDict as MultiDict
|
||||
from .structures import TypeConversionDict as TypeConversionDict
|
||||
|
||||
|
||||
def __getattr__(name: str) -> t.Any:
|
||||
import warnings
|
||||
|
||||
if name == "OrderedMultiDict":
|
||||
from .structures import _OrderedMultiDict
|
||||
|
||||
warnings.warn(
|
||||
"'OrderedMultiDict' is deprecated and will be removed in Werkzeug"
|
||||
" 3.2. Use 'MultiDict' instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return _OrderedMultiDict
|
||||
|
||||
if name == "ImmutableOrderedMultiDict":
|
||||
from .structures import _ImmutableOrderedMultiDict
|
||||
|
||||
warnings.warn(
|
||||
"'OrderedMultiDict' is deprecated and will be removed in Werkzeug"
|
||||
" 3.2. Use 'ImmutableMultiDict' instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return _ImmutableOrderedMultiDict
|
||||
|
||||
raise AttributeError(name)
|
||||
350
lib/python3.11/site-packages/werkzeug/datastructures/accept.py
Normal file
350
lib/python3.11/site-packages/werkzeug/datastructures/accept.py
Normal file
@ -0,0 +1,350 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import codecs
|
||||
import collections.abc as cabc
|
||||
import re
|
||||
import typing as t
|
||||
|
||||
from .structures import ImmutableList
|
||||
|
||||
|
||||
class Accept(ImmutableList[tuple[str, float]]):
|
||||
"""An :class:`Accept` object is just a list subclass for lists of
|
||||
``(value, quality)`` tuples. It is automatically sorted by specificity
|
||||
and quality.
|
||||
|
||||
All :class:`Accept` objects work similar to a list but provide extra
|
||||
functionality for working with the data. Containment checks are
|
||||
normalized to the rules of that header:
|
||||
|
||||
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
|
||||
>>> a.best
|
||||
'ISO-8859-1'
|
||||
>>> 'iso-8859-1' in a
|
||||
True
|
||||
>>> 'UTF8' in a
|
||||
True
|
||||
>>> 'utf7' in a
|
||||
False
|
||||
|
||||
To get the quality for an item you can use normal item lookup:
|
||||
|
||||
>>> print a['utf-8']
|
||||
0.7
|
||||
>>> a['utf7']
|
||||
0
|
||||
|
||||
.. versionchanged:: 0.5
|
||||
:class:`Accept` objects are forced immutable now.
|
||||
|
||||
.. versionchanged:: 1.0.0
|
||||
:class:`Accept` internal values are no longer ordered
|
||||
alphabetically for equal quality tags. Instead the initial
|
||||
order is preserved.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, values: Accept | cabc.Iterable[tuple[str, float]] | None = ()
|
||||
) -> None:
|
||||
if values is None:
|
||||
super().__init__()
|
||||
self.provided = False
|
||||
elif isinstance(values, Accept):
|
||||
self.provided = values.provided
|
||||
super().__init__(values)
|
||||
else:
|
||||
self.provided = True
|
||||
values = sorted(
|
||||
values, key=lambda x: (self._specificity(x[0]), x[1]), reverse=True
|
||||
)
|
||||
super().__init__(values)
|
||||
|
||||
def _specificity(self, value: str) -> tuple[bool, ...]:
|
||||
"""Returns a tuple describing the value's specificity."""
|
||||
return (value != "*",)
|
||||
|
||||
def _value_matches(self, value: str, item: str) -> bool:
|
||||
"""Check if a value matches a given accept item."""
|
||||
return item == "*" or item.lower() == value.lower()
|
||||
|
||||
@t.overload
|
||||
def __getitem__(self, key: str) -> float: ...
|
||||
@t.overload
|
||||
def __getitem__(self, key: t.SupportsIndex) -> tuple[str, float]: ...
|
||||
@t.overload
|
||||
def __getitem__(self, key: slice) -> list[tuple[str, float]]: ...
|
||||
def __getitem__(
|
||||
self, key: str | t.SupportsIndex | slice
|
||||
) -> float | tuple[str, float] | list[tuple[str, float]]:
|
||||
"""Besides index lookup (getting item n) you can also pass it a string
|
||||
to get the quality for the item. If the item is not in the list, the
|
||||
returned quality is ``0``.
|
||||
"""
|
||||
if isinstance(key, str):
|
||||
return self.quality(key)
|
||||
return list.__getitem__(self, key)
|
||||
|
||||
def quality(self, key: str) -> float:
|
||||
"""Returns the quality of the key.
|
||||
|
||||
.. versionadded:: 0.6
|
||||
In previous versions you had to use the item-lookup syntax
|
||||
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
|
||||
"""
|
||||
for item, quality in self:
|
||||
if self._value_matches(key, item):
|
||||
return quality
|
||||
return 0
|
||||
|
||||
def __contains__(self, value: str) -> bool: # type: ignore[override]
|
||||
for item, _quality in self:
|
||||
if self._value_matches(value, item):
|
||||
return True
|
||||
return False
|
||||
|
||||
def __repr__(self) -> str:
|
||||
pairs_str = ", ".join(f"({x!r}, {y})" for x, y in self)
|
||||
return f"{type(self).__name__}([{pairs_str}])"
|
||||
|
||||
def index(self, key: str | tuple[str, float]) -> int: # type: ignore[override]
|
||||
"""Get the position of an entry or raise :exc:`ValueError`.
|
||||
|
||||
:param key: The key to be looked up.
|
||||
|
||||
.. versionchanged:: 0.5
|
||||
This used to raise :exc:`IndexError`, which was inconsistent
|
||||
with the list API.
|
||||
"""
|
||||
if isinstance(key, str):
|
||||
for idx, (item, _quality) in enumerate(self):
|
||||
if self._value_matches(key, item):
|
||||
return idx
|
||||
raise ValueError(key)
|
||||
return list.index(self, key)
|
||||
|
||||
def find(self, key: str | tuple[str, float]) -> int:
|
||||
"""Get the position of an entry or return -1.
|
||||
|
||||
:param key: The key to be looked up.
|
||||
"""
|
||||
try:
|
||||
return self.index(key)
|
||||
except ValueError:
|
||||
return -1
|
||||
|
||||
def values(self) -> cabc.Iterator[str]:
|
||||
"""Iterate over all values."""
|
||||
for item in self:
|
||||
yield item[0]
|
||||
|
||||
def to_header(self) -> str:
|
||||
"""Convert the header set into an HTTP header string."""
|
||||
result = []
|
||||
for value, quality in self:
|
||||
if quality != 1:
|
||||
value = f"{value};q={quality}"
|
||||
result.append(value)
|
||||
return ",".join(result)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.to_header()
|
||||
|
||||
def _best_single_match(self, match: str) -> tuple[str, float] | None:
|
||||
for client_item, quality in self:
|
||||
if self._value_matches(match, client_item):
|
||||
# self is sorted by specificity descending, we can exit
|
||||
return client_item, quality
|
||||
return None
|
||||
|
||||
@t.overload
|
||||
def best_match(self, matches: cabc.Iterable[str]) -> str | None: ...
|
||||
@t.overload
|
||||
def best_match(self, matches: cabc.Iterable[str], default: str = ...) -> str: ...
|
||||
def best_match(
|
||||
self, matches: cabc.Iterable[str], default: str | None = None
|
||||
) -> str | None:
|
||||
"""Returns the best match from a list of possible matches based
|
||||
on the specificity and quality of the client. If two items have the
|
||||
same quality and specificity, the one is returned that comes first.
|
||||
|
||||
:param matches: a list of matches to check for
|
||||
:param default: the value that is returned if none match
|
||||
"""
|
||||
result = default
|
||||
best_quality: float = -1
|
||||
best_specificity: tuple[float, ...] = (-1,)
|
||||
for server_item in matches:
|
||||
match = self._best_single_match(server_item)
|
||||
if not match:
|
||||
continue
|
||||
client_item, quality = match
|
||||
specificity = self._specificity(client_item)
|
||||
if quality <= 0 or quality < best_quality:
|
||||
continue
|
||||
# better quality or same quality but more specific => better match
|
||||
if quality > best_quality or specificity > best_specificity:
|
||||
result = server_item
|
||||
best_quality = quality
|
||||
best_specificity = specificity
|
||||
return result
|
||||
|
||||
@property
def best(self) -> str | None:
    """The client's most preferred value, or ``None`` if empty."""
    return self[0][0] if self else None
|
||||
|
||||
|
||||
_mime_split_re = re.compile(r"/|(?:\s*;\s*)")
|
||||
|
||||
|
||||
def _normalize_mime(value: str) -> list[str]:
|
||||
return _mime_split_re.split(value.lower())
|
||||
|
||||
|
||||
class MIMEAccept(Accept):
    """Like :class:`Accept` but with special methods and behavior for
    mimetypes.
    """

    def _specificity(self, value: str) -> tuple[bool, ...]:
        # A concrete segment ("text") is more specific than a wildcard ("*").
        return tuple(segment != "*" for segment in _mime_split_re.split(value))

    def _value_matches(self, value: str, item: str) -> bool:
        # ``item`` comes from the client; an invalid mimetype simply
        # cannot match anything.
        if "/" not in item:
            return False

        # ``value`` comes from the application, so surface mistakes loudly.
        if "/" not in value:
            raise ValueError(f"invalid mimetype {value!r}")

        # Split the match value into type, subtype, and sorted parameters.
        value_type, value_subtype, *value_rest = _normalize_mime(value)
        value_params = sorted(value_rest)

        # "*/*" is the only valid application value starting with "*".
        if value_type == "*" and value_subtype != "*":
            raise ValueError(f"invalid mimetype {value!r}")

        # Split the accept item the same way.
        item_type, item_subtype, *item_rest = _normalize_mime(item)
        item_params = sorted(item_rest)

        # "*/not-*" from the client is invalid and never matches.
        if item_type == "*" and item_subtype != "*":
            return False

        # Either side being a full wildcard matches everything.
        if (item_type == item_subtype == "*") or (
            value_type == value_subtype == "*"
        ):
            return True

        return item_type == value_type and (
            item_subtype == "*"
            or value_subtype == "*"
            or (item_subtype == value_subtype and item_params == value_params)
        )

    @property
    def accept_html(self) -> bool:
        """True if this object accepts HTML."""
        return "text/html" in self or self.accept_xhtml  # type: ignore[comparison-overlap]

    @property
    def accept_xhtml(self) -> bool:
        """True if this object accepts XHTML."""
        return "application/xhtml+xml" in self or "application/xml" in self  # type: ignore[comparison-overlap]

    @property
    def accept_json(self) -> bool:
        """True if this object accepts JSON."""
        return "application/json" in self  # type: ignore[comparison-overlap]
|
||||
|
||||
|
||||
_locale_delim_re = re.compile(r"[_-]")
|
||||
|
||||
|
||||
def _normalize_lang(value: str) -> list[str]:
|
||||
"""Process a language tag for matching."""
|
||||
return _locale_delim_re.split(value.lower())
|
||||
|
||||
|
||||
class LanguageAccept(Accept):
    """Like :class:`Accept` but with normalization for language tags."""

    def _value_matches(self, value: str, item: str) -> bool:
        return item == "*" or _normalize_lang(value) == _normalize_lang(item)

    @t.overload
    def best_match(self, matches: cabc.Iterable[str]) -> str | None: ...
    @t.overload
    def best_match(self, matches: cabc.Iterable[str], default: str = ...) -> str: ...
    def best_match(
        self, matches: cabc.Iterable[str], default: str | None = None
    ) -> str | None:
        """Given a list of supported values, finds the best match from
        the list of accepted values.

        Language tags are normalized for the purpose of matching, but
        are returned unchanged.

        If no exact match is found, this will fall back to matching
        the first subtag (primary language only), first with the
        accepted values then with the match values. This partial is not
        applied to any other language subtags.

        The default is returned if no exact or fallback match is found.

        :param matches: A list of supported languages to find a match.
        :param default: The value that is returned if none match.
        """
        # Exact pass: a client accepting "en-US" matches an offered "en-US".
        exact = super().best_match(matches)

        if exact is not None:
            return exact

        # Truncate accepted values to their primary tag: a client
        # accepting "en-US" also matches an offered "en". re.split is
        # used to handle both 2- and 3-letter primary codes.
        primary_accept = Accept(
            [(_locale_delim_re.split(tag, 1)[0], quality) for tag, quality in self]
        )
        partial = primary_accept.best_match(matches)

        if partial is not None:
            return partial

        # Truncate the offered values instead: a client accepting "en"
        # also matches an offered "en-US".
        truncated_matches = [_locale_delim_re.split(tag, 1)[0] for tag in matches]
        primary = super().best_match(truncated_matches)

        # Map the matched primary tag back to the first original offered
        # value that starts with it, so callers get their own spelling.
        if primary is not None:
            return next(tag for tag in matches if tag.startswith(primary))

        return default
|
||||
|
||||
|
||||
class CharsetAccept(Accept):
    """Like :class:`Accept` but with normalization for charsets."""

    def _value_matches(self, value: str, item: str) -> bool:
        def canonical(name: str) -> str:
            # Resolve codec aliases ("latin1" -> "iso8859-1"); unknown
            # charsets fall back to a plain lowercase comparison.
            try:
                return codecs.lookup(name).name
            except LookupError:
                return name.lower()

        return item == "*" or canonical(value) == canonical(item)
|
||||
317
lib/python3.11/site-packages/werkzeug/datastructures/auth.py
Normal file
317
lib/python3.11/site-packages/werkzeug/datastructures/auth.py
Normal file
@ -0,0 +1,317 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import binascii
|
||||
import collections.abc as cabc
|
||||
import typing as t
|
||||
|
||||
from ..http import dump_header
|
||||
from ..http import parse_dict_header
|
||||
from ..http import quote_header_value
|
||||
from .structures import CallbackDict
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
import typing_extensions as te
|
||||
|
||||
|
||||
class Authorization:
    """Represents the parts of an ``Authorization`` request header.

    :attr:`.Request.authorization` returns an instance if the header is set.

    An instance can be used with the test :class:`.Client` request methods'
    ``auth`` parameter to send the header in test requests.

    Depending on the auth scheme, either :attr:`parameters` or :attr:`token`
    will be set. The ``Basic`` scheme's token is decoded into the
    ``username`` and ``password`` parameters.

    For convenience, ``auth["key"]`` and ``auth.key`` both access the key in
    the :attr:`parameters` dict, along with ``auth.get("key")`` and
    ``"key" in auth``.

    .. versionchanged:: 2.3
        The ``token`` parameter and attribute was added to support auth
        schemes that use a token instead of parameters, such as ``Bearer``.

    .. versionchanged:: 2.3
        The object is no longer a ``dict``.

    .. versionchanged:: 0.5
        The object is an immutable dict.
    """

    def __init__(
        self,
        auth_type: str,
        data: dict[str, str | None] | None = None,
        token: str | None = None,
    ) -> None:
        self.type = auth_type
        """The authorization scheme, like ``basic``, ``digest``, or ``bearer``."""

        self.parameters = data if data is not None else {}
        """A dict of parameters parsed from the header. Either this or
        :attr:`token` will have a value for a given scheme.
        """

        self.token = token
        """A token parsed from the header. Either this or :attr:`parameters`
        will have a value for a given scheme.

        .. versionadded:: 2.3
        """

    def __getattr__(self, name: str) -> str | None:
        return self.parameters.get(name)

    def __getitem__(self, name: str) -> str | None:
        return self.parameters.get(name)

    def get(self, key: str, default: str | None = None) -> str | None:
        return self.parameters.get(key, default)

    def __contains__(self, key: str) -> bool:
        return key in self.parameters

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Authorization):
            return NotImplemented

        return (
            self.type == other.type
            and self.token == other.token
            and self.parameters == other.parameters
        )

    @classmethod
    def from_header(cls, value: str | None) -> te.Self | None:
        """Parse an ``Authorization`` header value and return an instance, or
        ``None`` if the value is empty.

        :param value: The header value to parse.

        .. versionadded:: 2.3
        """
        if not value:
            return None

        scheme, _, rest = value.partition(" ")
        scheme = scheme.lower()
        rest = rest.strip()

        if scheme == "basic":
            try:
                decoded = base64.b64decode(rest).decode()
            except (binascii.Error, UnicodeError):
                return None

            username, _, password = decoded.partition(":")
            return cls(scheme, {"username": username, "password": password})

        # A "=" anywhere but the very end means key=value parameters; a
        # trailing "=" alone is just token padding (e.g. base64).
        if "=" in rest.rstrip("="):
            return cls(scheme, parse_dict_header(rest), None)

        return cls(scheme, None, rest)

    def to_header(self) -> str:
        """Produce an ``Authorization`` header value representing this data.

        .. versionadded:: 2.0
        """
        if self.type == "basic":
            credentials = f"{self.username}:{self.password}".encode()
            return f"Basic {base64.b64encode(credentials).decode('ascii')}"

        if self.token is not None:
            return f"{self.type.title()} {self.token}"

        return f"{self.type.title()} {dump_header(self.parameters)}"

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.to_header()}>"
|
||||
|
||||
|
||||
class WWWAuthenticate:
    """Represents the parts of a ``WWW-Authenticate`` response header.

    Set :attr:`.Response.www_authenticate` to an instance of list of instances to set
    values for this header in the response. Modifying this instance will modify the
    header value.

    Depending on the auth scheme, either :attr:`parameters` or :attr:`token` should be
    set. The ``Basic`` scheme will encode ``username`` and ``password`` parameters to a
    token.

    For convenience, ``auth["key"]`` and ``auth.key`` both act on the :attr:`parameters`
    dict, and can be used to get, set, or delete parameters. ``auth.get("key")`` and
    ``"key" in auth`` are also provided.

    .. versionchanged:: 2.3
        The ``token`` parameter and attribute was added to support auth schemes that use
        a token instead of parameters, such as ``Bearer``.

    .. versionchanged:: 2.3
        The object is no longer a ``dict``.

    .. versionchanged:: 2.3
        The ``on_update`` parameter was removed.
    """

    def __init__(
        self,
        auth_type: str,
        values: dict[str, str | None] | None = None,
        token: str | None = None,
    ):
        self._type = auth_type.lower()
        # Wrap parameters in a CallbackDict so that mutating the dict
        # re-renders the header via ``_on_update``.
        self._parameters: dict[str, str | None] = CallbackDict(
            values, lambda _: self._trigger_on_update()
        )
        self._token = token
        self._on_update: cabc.Callable[[WWWAuthenticate], None] | None = None

    def _trigger_on_update(self) -> None:
        # Notify the owning headers object (if any) that the value changed.
        if self._on_update is not None:
            self._on_update(self)

    @property
    def type(self) -> str:
        """The authorization scheme, like ``basic``, ``digest``, or ``bearer``."""
        return self._type

    @type.setter
    def type(self, value: str) -> None:
        self._type = value
        self._trigger_on_update()

    @property
    def parameters(self) -> dict[str, str | None]:
        """A dict of parameters for the header. Only one of this or :attr:`token` should
        have a value for a given scheme.
        """
        return self._parameters

    @parameters.setter
    def parameters(self, value: dict[str, str]) -> None:
        self._parameters = CallbackDict(value, lambda _: self._trigger_on_update())
        self._trigger_on_update()

    @property
    def token(self) -> str | None:
        """A token for the header. Only one of this or :attr:`parameters` should have a
        value for a given scheme.

        .. versionadded:: 2.3
        """
        # NOTE: the docstring above previously described ``parameters`` (a
        # copy-paste error) and the real doc sat on the setter, where
        # ``property`` ignores it. The doc belongs on the getter.
        return self._token

    @token.setter
    def token(self, value: str | None) -> None:
        self._token = value
        self._trigger_on_update()

    def __getitem__(self, key: str) -> str | None:
        return self.parameters.get(key)

    def __setitem__(self, key: str, value: str | None) -> None:
        if value is None:
            # Setting a parameter to None removes it entirely.
            if key in self.parameters:
                del self.parameters[key]
        else:
            self.parameters[key] = value

        self._trigger_on_update()

    def __delitem__(self, key: str) -> None:
        if key in self.parameters:
            del self.parameters[key]
            self._trigger_on_update()

    def __getattr__(self, name: str) -> str | None:
        return self[name]

    def __setattr__(self, name: str, value: str | None) -> None:
        # Internal slots go through normal attribute assignment; anything
        # else is treated as a header parameter.
        if name in {"_type", "_parameters", "_token", "_on_update"}:
            super().__setattr__(name, value)
        else:
            self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]

    def __contains__(self, key: str) -> bool:
        return key in self.parameters

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, WWWAuthenticate):
            return NotImplemented

        return (
            other.type == self.type
            and other.token == self.token
            and other.parameters == self.parameters
        )

    def get(self, key: str, default: str | None = None) -> str | None:
        return self.parameters.get(key, default)

    @classmethod
    def from_header(cls, value: str | None) -> te.Self | None:
        """Parse a ``WWW-Authenticate`` header value and return an instance, or ``None``
        if the value is empty.

        :param value: The header value to parse.

        .. versionadded:: 2.3
        """
        if not value:
            return None

        scheme, _, rest = value.partition(" ")
        scheme = scheme.lower()
        rest = rest.strip()

        if "=" in rest.rstrip("="):
            # = that is not trailing, this is parameters.
            return cls(scheme, parse_dict_header(rest), None)

        # No = or only trailing =, this is a token.
        return cls(scheme, None, rest)

    def to_header(self) -> str:
        """Produce a ``WWW-Authenticate`` header value representing this data."""
        if self.token is not None:
            return f"{self.type.title()} {self.token}"

        if self.type == "digest":
            items = []

            for key, value in self.parameters.items():
                # These digest fields must always be quoted strings; other
                # fields may use token form when possible.
                if key in {"realm", "domain", "nonce", "opaque", "qop"}:
                    value = quote_header_value(value, allow_token=False)
                else:
                    value = quote_header_value(value)

                items.append(f"{key}={value}")

            return f"Digest {', '.join(items)}"

        return f"{self.type.title()} {dump_header(self.parameters)}"

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.to_header()}>"
|
||||
@ -0,0 +1,273 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
import typing as t
|
||||
from inspect import cleandoc
|
||||
|
||||
from .mixins import ImmutableDictMixin
|
||||
from .structures import CallbackDict
|
||||
|
||||
|
||||
def cache_control_property(
    key: str, empty: t.Any, type: type[t.Any] | None, *, doc: str | None = None
) -> t.Any:
    """Return a new property object for a cache header. Useful if you
    want to add support for a cache extension in a subclass.

    :param key: The attribute name present in the parsed cache-control header dict.
    :param empty: The value to use if the key is present without a value.
    :param type: The type to convert the string value to instead of a string. If
        conversion raises a ``ValueError``, the returned value is ``None``.
    :param doc: The docstring for the property. If not given, it is generated
        based on the other params.

    .. versionchanged:: 3.1
        Added the ``doc`` param.

    .. versionchanged:: 2.0
        Renamed from ``cache_property``.
    """
    if doc is None:
        # Build a readable description from the key, type, and empty value.
        if type is bool:
            detail = "A ``bool``, either present or not."
        else:
            pieces = ["A ``str``," if type is None else f"A ``{type.__name__}``,"]

            if empty is not None:
                pieces.append(f"``{empty!r}`` if present with no value,")

            pieces.append("or ``None`` if not present.")
            detail = " ".join(pieces)

        doc = f"The ``{key}`` attribute. {detail}"

    return property(
        lambda self: self._get_cache_value(key, empty, type),
        lambda self, value: self._set_cache_value(key, value, type),
        lambda self: self._del_cache_value(key),
        doc=cleandoc(doc),
    )
|
||||
|
||||
|
||||
class _CacheControl(CallbackDict[str, t.Optional[str]]):
    """Subclass of a dict that stores values for a Cache-Control header. It
    has accessors for all the cache-control directives specified in RFC 2616.
    The class does not differentiate between request and response directives.

    Because the cache-control directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`CacheControl` object again you can convert
    the object into a string or call the :meth:`to_header` method. If you plan
    to subclass it and add your own items have a look at the sourcecode for
    that class.

    .. versionchanged:: 3.1
        Dict values are always ``str | None``. Setting properties will
        convert the value to a string. Setting a non-bool property to
        ``False`` is equivalent to setting it to ``None``. Getting typed
        properties will return ``None`` if conversion raises
        ``ValueError``, rather than the string.

    .. versionchanged:: 2.1
        Setting int properties such as ``max_age`` will convert the
        value to an int.

    .. versionchanged:: 0.4
        Setting ``no_cache`` or ``private`` to ``True`` will set the
        implicit value ``"*"``.
    """

    no_store: bool = cache_control_property("no-store", None, bool)
    max_age: int | None = cache_control_property("max-age", None, int)
    no_transform: bool = cache_control_property("no-transform", None, bool)
    stale_if_error: int | None = cache_control_property("stale-if-error", None, int)

    def __init__(
        self,
        values: cabc.Mapping[str, t.Any] | cabc.Iterable[tuple[str, t.Any]] | None = (),
        on_update: cabc.Callable[[_CacheControl], None] | None = None,
    ):
        super().__init__(values, on_update)
        # Distinguish "header absent" (values is None) from "header
        # present but empty".
        self.provided = values is not None

    def _get_cache_value(
        self, key: str, empty: t.Any, type: type[t.Any] | None
    ) -> t.Any:
        """Used internally by the accessor properties."""
        # Boolean directives are present-or-absent, never valued.
        if type is bool:
            return key in self

        if key not in self:
            return None

        value = self[key]

        if value is None:
            # Directive present without a value, e.g. bare "no-cache".
            return empty

        if type is None:
            return value

        try:
            return type(value)
        except ValueError:
            # Unparseable value is treated as absent rather than crashing.
            return None

    def _set_cache_value(
        self, key: str, value: t.Any, type: type[t.Any] | None
    ) -> None:
        """Used internally by the accessor properties."""
        if type is bool:
            # Truthy -> present with no value; falsy -> removed.
            if value:
                self[key] = None
            else:
                self.pop(key, None)

            return

        if value is None or value is False:
            self.pop(key, None)
        elif value is True:
            self[key] = None
        else:
            self[key] = str(value if type is None else type(value))

    def _del_cache_value(self, key: str) -> None:
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self) -> str:
        """Convert the stored values into a cache control header."""
        return http.dump_header(self)

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        kv_str = " ".join(f"{k}={v!r}" for k, v in sorted(self.items()))
        return f"<{type(self).__name__} {kv_str}>"

    cache_property = staticmethod(cache_control_property)
|
||||
|
||||
|
||||
class RequestCacheControl(ImmutableDictMixin[str, t.Optional[str]], _CacheControl):  # type: ignore[misc]
    """A cache control for requests. This is immutable and gives access
    to all the request-relevant cache control headers.

    To get a header of the :class:`RequestCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method. If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionchanged:: 3.1
        Dict values are always ``str | None``. Setting properties will
        convert the value to a string. Setting a non-bool property to
        ``False`` is equivalent to setting it to ``None``. Getting typed
        properties will return ``None`` if conversion raises
        ``ValueError``, rather than the string.

    .. versionchanged:: 3.1
        ``max_age`` is ``None`` if present without a value, rather
        than ``-1``.

    .. versionchanged:: 3.1
        ``no_cache`` is a boolean, it is ``True`` instead of ``"*"``
        when present.

    .. versionchanged:: 3.1
        ``max_stale`` is ``True`` if present without a value, rather
        than ``"*"``.

    .. versionchanged:: 3.1
        ``no_transform`` is a boolean. Previously it was mistakenly
        always ``None``.

    .. versionchanged:: 3.1
        ``min_fresh`` is ``None`` if present without a value, rather
        than ``"*"``.

    .. versionchanged:: 2.1
        Setting int properties such as ``max_age`` will convert the
        value to an int.

    .. versionadded:: 0.5
        Response-only properties are not present on this request class.
    """

    # Request-only directives, in addition to those on _CacheControl.
    no_cache: bool = cache_control_property("no-cache", None, bool)
    max_stale: int | t.Literal[True] | None = cache_control_property("max-stale", True, int)
    min_fresh: int | None = cache_control_property("min-fresh", None, int)
    only_if_cached: bool = cache_control_property("only-if-cached", None, bool)
|
||||
|
||||
|
||||
class ResponseCacheControl(_CacheControl):
    """A cache control for responses. Unlike :class:`RequestCacheControl`
    this is mutable and gives access to response-relevant cache control
    headers.

    To get a header of the :class:`ResponseCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method. If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionchanged:: 3.1
        Dict values are always ``str | None``. Setting properties will
        convert the value to a string. Setting a non-bool property to
        ``False`` is equivalent to setting it to ``None``. Getting typed
        properties will return ``None`` if conversion raises
        ``ValueError``, rather than the string.

    .. versionchanged:: 3.1
        ``no_cache`` is ``True`` if present without a value, rather than
        ``"*"``.

    .. versionchanged:: 3.1
        ``private`` is ``True`` if present without a value, rather than
        ``"*"``.

    .. versionchanged:: 3.1
        ``no_transform`` is a boolean. Previously it was mistakenly
        always ``None``.

    .. versionchanged:: 3.1
        Added the ``must_understand``, ``stale_while_revalidate``, and
        ``stale_if_error`` properties.

    .. versionchanged:: 2.1.1
        ``s_maxage`` converts the value to an int.

    .. versionchanged:: 2.1
        Setting int properties such as ``max_age`` will convert the
        value to an int.

    .. versionadded:: 0.5
        Request-only properties are not present on this response class.
    """

    # Response-only directives, in addition to those on _CacheControl.
    no_cache: str | t.Literal[True] | None = cache_control_property("no-cache", True, None)
    public: bool = cache_control_property("public", None, bool)
    private: str | t.Literal[True] | None = cache_control_property("private", True, None)
    must_revalidate: bool = cache_control_property("must-revalidate", None, bool)
    proxy_revalidate: bool = cache_control_property("proxy-revalidate", None, bool)
    s_maxage: int | None = cache_control_property("s-maxage", None, int)
    immutable: bool = cache_control_property("immutable", None, bool)
    must_understand: bool = cache_control_property("must-understand", None, bool)
    stale_while_revalidate: int | None = cache_control_property(
        "stale-while-revalidate", None, int
    )
|
||||
|
||||
|
||||
# circular dependencies
|
||||
from .. import http
|
||||
100
lib/python3.11/site-packages/werkzeug/datastructures/csp.py
Normal file
100
lib/python3.11/site-packages/werkzeug/datastructures/csp.py
Normal file
@ -0,0 +1,100 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
import typing as t
|
||||
|
||||
from .structures import CallbackDict
|
||||
|
||||
|
||||
def csp_property(key: str) -> t.Any:
    """Return a new property object for a content security policy header.
    Useful if you want to add support for a csp extension in a
    subclass.
    """
    return property(
        fget=lambda self: self._get_value(key),
        fset=lambda self, value: self._set_value(key, value),
        fdel=lambda self: self._del_value(key),
        doc=f"accessor for {key!r}",
    )
|
||||
|
||||
|
||||
class ContentSecurityPolicy(CallbackDict[str, str]):
    """Subclass of a dict that stores values for a Content Security Policy
    header. It has accessors for all the level 3 policies.

    Because the csp directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`ContentSecurityPolicy` object again
    you can convert the object into a string or call the
    :meth:`to_header` method. If you plan to subclass it and add your
    own items have a look at the sourcecode for that class.

    .. versionadded:: 1.0.0
        Support for Content Security Policy headers was added.

    """

    base_uri: str | None = csp_property("base-uri")
    child_src: str | None = csp_property("child-src")
    connect_src: str | None = csp_property("connect-src")
    default_src: str | None = csp_property("default-src")
    font_src: str | None = csp_property("font-src")
    form_action: str | None = csp_property("form-action")
    frame_ancestors: str | None = csp_property("frame-ancestors")
    frame_src: str | None = csp_property("frame-src")
    img_src: str | None = csp_property("img-src")
    manifest_src: str | None = csp_property("manifest-src")
    media_src: str | None = csp_property("media-src")
    navigate_to: str | None = csp_property("navigate-to")
    object_src: str | None = csp_property("object-src")
    prefetch_src: str | None = csp_property("prefetch-src")
    plugin_types: str | None = csp_property("plugin-types")
    report_to: str | None = csp_property("report-to")
    report_uri: str | None = csp_property("report-uri")
    sandbox: str | None = csp_property("sandbox")
    script_src: str | None = csp_property("script-src")
    script_src_attr: str | None = csp_property("script-src-attr")
    script_src_elem: str | None = csp_property("script-src-elem")
    style_src: str | None = csp_property("style-src")
    style_src_attr: str | None = csp_property("style-src-attr")
    style_src_elem: str | None = csp_property("style-src-elem")
    worker_src: str | None = csp_property("worker-src")

    def __init__(
        self,
        values: cabc.Mapping[str, str] | cabc.Iterable[tuple[str, str]] | None = (),
        on_update: cabc.Callable[[ContentSecurityPolicy], None] | None = None,
    ) -> None:
        super().__init__(values, on_update)
        # Distinguish "header absent" (values is None) from "header
        # present but empty".
        self.provided = values is not None

    def _get_value(self, key: str) -> str | None:
        """Used internally by the accessor properties."""
        return self.get(key)

    def _set_value(self, key: str, value: str | None) -> None:
        """Used internally by the accessor properties."""
        # Setting a directive to None removes it from the policy.
        if value is None:
            self.pop(key, None)
        else:
            self[key] = value

    def _del_value(self, key: str) -> None:
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self) -> str:
        """Convert the stored values into a Content Security Policy header."""
        from ..http import dump_csp_header

        return dump_csp_header(self)

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        kv_str = " ".join(f"{k}={v!r}" for k, v in sorted(self.items()))
        return f"<{type(self).__name__} {kv_str}>"
|
||||
106
lib/python3.11/site-packages/werkzeug/datastructures/etag.py
Normal file
106
lib/python3.11/site-packages/werkzeug/datastructures/etag.py
Normal file
@ -0,0 +1,106 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
|
||||
|
||||
class ETags(cabc.Collection[str]):
|
||||
"""A set that can be used to check if one etag is present in a collection
|
||||
of etags.
|
||||
"""
|
||||
|
||||
def __init__(
    self,
    strong_etags: cabc.Iterable[str] | None = None,
    weak_etags: cabc.Iterable[str] | None = None,
    star_tag: bool = False,
):
    # "*" matches everything, so concrete strong tags become redundant.
    if star_tag or not strong_etags:
        self._strong = frozenset()
    else:
        self._strong = frozenset(strong_etags)

    self._weak = frozenset(weak_etags or ())
    self.star_tag = star_tag
|
||||
|
||||
def as_set(self, include_weak: bool = False) -> set[str]:
    """Convert the `ETags` object into a python set. Per default all the
    weak etags are not part of this set."""
    result = set(self._strong)

    if include_weak:
        result |= self._weak

    return result
|
||||
|
||||
def is_weak(self, etag: str) -> bool:
|
||||
"""Check if an etag is weak."""
|
||||
return etag in self._weak
|
||||
|
||||
def is_strong(self, etag: str) -> bool:
|
||||
"""Check if an etag is strong."""
|
||||
return etag in self._strong
|
||||
|
||||
def contains_weak(self, etag: str) -> bool:
|
||||
"""Check if an etag is part of the set including weak and strong tags."""
|
||||
return self.is_weak(etag) or self.contains(etag)
|
||||
|
||||
def contains(self, etag: str) -> bool:
|
||||
"""Check if an etag is part of the set ignoring weak tags.
|
||||
It is also possible to use the ``in`` operator.
|
||||
"""
|
||||
if self.star_tag:
|
||||
return True
|
||||
return self.is_strong(etag)
|
||||
|
||||
def contains_raw(self, etag: str) -> bool:
|
||||
"""When passed a quoted tag it will check if this tag is part of the
|
||||
set. If the tag is weak it is checked against weak and strong tags,
|
||||
otherwise strong only."""
|
||||
from ..http import unquote_etag
|
||||
|
||||
etag, weak = unquote_etag(etag)
|
||||
if weak:
|
||||
return self.contains_weak(etag)
|
||||
return self.contains(etag)
|
||||
|
||||
def to_header(self) -> str:
|
||||
"""Convert the etags set into a HTTP header string."""
|
||||
if self.star_tag:
|
||||
return "*"
|
||||
return ", ".join(
|
||||
[f'"{x}"' for x in self._strong] + [f'W/"{x}"' for x in self._weak]
|
||||
)
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
etag: str | None = None,
|
||||
data: bytes | None = None,
|
||||
include_weak: bool = False,
|
||||
) -> bool:
|
||||
if etag is None:
|
||||
if data is None:
|
||||
raise TypeError("'data' is required when 'etag' is not given.")
|
||||
|
||||
from ..http import generate_etag
|
||||
|
||||
etag = generate_etag(data)
|
||||
if include_weak:
|
||||
if etag in self._weak:
|
||||
return True
|
||||
return etag in self._strong
|
||||
|
||||
def __bool__(self) -> bool:
|
||||
return bool(self.star_tag or self._strong or self._weak)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.to_header()
|
||||
|
||||
def __len__(self) -> int:
|
||||
return len(self._strong)
|
||||
|
||||
def __iter__(self) -> cabc.Iterator[str]:
|
||||
return iter(self._strong)
|
||||
|
||||
def __contains__(self, etag: str) -> bool: # type: ignore[override]
|
||||
return self.contains(etag)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<{type(self).__name__} {str(self)!r}>"
|
||||
@ -0,0 +1,209 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
import mimetypes
|
||||
import os
|
||||
import typing as t
|
||||
from io import BytesIO
|
||||
from os import fsdecode
|
||||
from os import fspath
|
||||
|
||||
from .._internal import _plain_int
|
||||
from .headers import Headers
|
||||
from .structures import MultiDict
|
||||
|
||||
|
||||
class FileStorage:
    """The :class:`FileStorage` class is a thin wrapper over incoming files.
    It is used by the request object to represent uploaded files.  All the
    attributes of the wrapper stream are proxied by the file storage so
    it's possible to do ``storage.read()`` instead of the long form
    ``storage.stream.read()``.
    """

    def __init__(
        self,
        stream: t.IO[bytes] | None = None,
        filename: str | None = None,
        name: str | None = None,
        content_type: str | None = None,
        content_length: int | None = None,
        headers: Headers | None = None,
    ):
        # ``name`` is the form field name, not the file name.
        self.name = name
        # Fall back to an empty in-memory stream so read() always works.
        self.stream = stream or BytesIO()

        # If no filename is provided, attempt to get the filename from
        # the stream object. Python names special streams like
        # ``<stderr>`` with angular brackets, skip these streams.
        if filename is None:
            filename = getattr(stream, "name", None)

            # A stream ``name`` may be bytes (e.g. a bytes path);
            # fsdecode normalizes it to str.
            if filename is not None:
                filename = fsdecode(filename)

            if filename and filename[0] == "<" and filename[-1] == ">":
                filename = None
        else:
            filename = fsdecode(filename)

        self.filename = filename

        if headers is None:
            headers = Headers()
        self.headers = headers
        # Explicit keyword arguments override/augment the header set.
        if content_type is not None:
            headers["Content-Type"] = content_type
        if content_length is not None:
            headers["Content-Length"] = str(content_length)

    def _parse_content_type(self) -> None:
        # Parse the Content-Type header once and cache the result.
        # ``http`` is imported at the bottom of the module to break a
        # circular import.
        if not hasattr(self, "_parsed_content_type"):
            self._parsed_content_type = http.parse_options_header(self.content_type)

    @property
    def content_type(self) -> str | None:
        """The content-type sent in the header. Usually not available"""
        return self.headers.get("content-type")

    @property
    def content_length(self) -> int:
        """The content-length sent in the header. Usually not available"""
        if "content-length" in self.headers:
            try:
                return _plain_int(self.headers["content-length"])
            except ValueError:
                # A malformed Content-Length is treated as unknown.
                pass

        # Unknown or invalid length reads as 0.
        return 0

    @property
    def mimetype(self) -> str:
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase.  For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self) -> dict[str, str]:
        """The mimetype parameters as dict.  For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    def save(
        self, dst: str | os.PathLike[str] | t.IO[bytes], buffer_size: int = 16384
    ) -> None:
        """Save the file to a destination path or file object.  If the
        destination is a file object you have to close it yourself after the
        call.  The buffer size is the number of bytes held in memory during
        the copy process.  It defaults to 16KB.

        For secure file saving also have a look at :func:`secure_filename`.

        :param dst: a filename, :class:`os.PathLike`, or open file
            object to write to.
        :param buffer_size: Passed as the ``length`` parameter of
            :func:`shutil.copyfileobj`.

        .. versionchanged:: 1.0
            Supports :mod:`pathlib`.
        """
        from shutil import copyfileobj

        close_dst = False

        # Path-like objects become plain string paths first.
        if hasattr(dst, "__fspath__"):
            dst = fspath(dst)

        if isinstance(dst, str):
            # We opened the file ourselves, so we must close it;
            # caller-provided file objects are left open.
            dst = open(dst, "wb")
            close_dst = True

        try:
            copyfileobj(self.stream, dst, buffer_size)
        finally:
            if close_dst:
                dst.close()

    def close(self) -> None:
        """Close the underlying file if possible."""
        try:
            self.stream.close()
        except Exception:
            # Best effort: never raise while cleaning up the stream.
            pass

    def __bool__(self) -> bool:
        # Truthy only when an actual upload (a filename) is present.
        return bool(self.filename)

    def __getattr__(self, name: str) -> t.Any:
        # Proxy unknown attributes to the wrapped stream.
        try:
            return getattr(self.stream, name)
        except AttributeError:
            # SpooledTemporaryFile on Python < 3.11 doesn't implement IOBase,
            # get the attribute from its backing file instead.
            if hasattr(self.stream, "_file"):
                return getattr(self.stream._file, name)
            raise

    def __iter__(self) -> cabc.Iterator[bytes]:
        return iter(self.stream)

    def __repr__(self) -> str:
        return f"<{type(self).__name__}: {self.filename!r} ({self.content_type!r})>"
|
||||
|
||||
|
||||
class FileMultiDict(MultiDict[str, FileStorage]):
    """A special :class:`MultiDict` with a convenience method for adding
    uploaded files. This is used for :class:`EnvironBuilder` and generally
    useful for unittesting.

    .. versionadded:: 0.5
    """

    def add_file(
        self,
        name: str,
        file: str | os.PathLike[str] | t.IO[bytes] | FileStorage,
        filename: str | None = None,
        content_type: str | None = None,
    ) -> None:
        """Adds a new file to the dict. `file` can be a file name or
        a :class:`file`-like or a :class:`FileStorage` object.

        :param name: the name of the field.
        :param file: a filename or :class:`file`-like object
        :param filename: an optional filename
        :param content_type: an optional content type
        """
        # Already-wrapped files are stored unchanged.
        if isinstance(file, FileStorage):
            self.add(name, file)
            return

        if isinstance(file, (str, os.PathLike)):
            # Paths are opened in binary mode; the path doubles as the
            # default filename.
            if filename is None:
                filename = os.fspath(file)

            stream: t.IO[bytes] = open(file, "rb")
        else:
            stream = file  # type: ignore[assignment]

        # Guess a content type from the filename when none was given.
        if content_type is None and filename:
            content_type = (
                mimetypes.guess_type(filename)[0] or "application/octet-stream"
            )

        self.add(name, FileStorage(stream, filename, name, content_type))
|
||||
|
||||
|
||||
# circular dependencies
|
||||
from .. import http
|
||||
662
lib/python3.11/site-packages/werkzeug/datastructures/headers.py
Normal file
662
lib/python3.11/site-packages/werkzeug/datastructures/headers.py
Normal file
@ -0,0 +1,662 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
import re
|
||||
import typing as t
|
||||
|
||||
from .._internal import _missing
|
||||
from ..exceptions import BadRequestKeyError
|
||||
from .mixins import ImmutableHeadersMixin
|
||||
from .structures import iter_multi_items
|
||||
from .structures import MultiDict
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
import typing_extensions as te
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
T = t.TypeVar("T")
|
||||
|
||||
|
||||
class Headers:
|
||||
"""An object that stores some headers. It has a dict-like interface,
|
||||
but is ordered, can store the same key multiple times, and iterating
|
||||
yields ``(key, value)`` pairs instead of only keys.
|
||||
|
||||
This data structure is useful if you want a nicer way to handle WSGI
|
||||
headers which are stored as tuples in a list.
|
||||
|
||||
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
|
||||
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
|
||||
and will render a page for a ``400 BAD REQUEST`` if caught in a
|
||||
catch-all for HTTP exceptions.
|
||||
|
||||
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
|
||||
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
|
||||
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
|
||||
a :class:`KeyError`.
|
||||
|
||||
To create a new ``Headers`` object, pass it a list, dict, or
|
||||
other ``Headers`` object with default values. These values are
|
||||
validated the same way values added later are.
|
||||
|
||||
:param defaults: The list of default values for the :class:`Headers`.
|
||||
|
||||
.. versionchanged:: 3.1
|
||||
Implement ``|`` and ``|=`` operators.
|
||||
|
||||
.. versionchanged:: 2.1.0
|
||||
Default values are validated the same as values added later.
|
||||
|
||||
.. versionchanged:: 0.9
|
||||
This data structure now stores unicode values similar to how the
|
||||
multi dicts do it. The main difference is that bytes can be set as
|
||||
well which will automatically be latin1 decoded.
|
||||
|
||||
.. versionchanged:: 0.9
|
||||
The :meth:`linked` function was removed without replacement as it
|
||||
was an API that does not support the changes to the encoding model.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
defaults: (
|
||||
Headers
|
||||
| MultiDict[str, t.Any]
|
||||
| cabc.Mapping[str, t.Any | list[t.Any] | tuple[t.Any, ...] | set[t.Any]]
|
||||
| cabc.Iterable[tuple[str, t.Any]]
|
||||
| None
|
||||
) = None,
|
||||
) -> None:
|
||||
self._list: list[tuple[str, str]] = []
|
||||
|
||||
if defaults is not None:
|
||||
self.extend(defaults)
|
||||
|
||||
@t.overload
|
||||
def __getitem__(self, key: str) -> str: ...
|
||||
@t.overload
|
||||
def __getitem__(self, key: int) -> tuple[str, str]: ...
|
||||
@t.overload
|
||||
def __getitem__(self, key: slice) -> te.Self: ...
|
||||
def __getitem__(self, key: str | int | slice) -> str | tuple[str, str] | te.Self:
|
||||
if isinstance(key, str):
|
||||
return self._get_key(key)
|
||||
|
||||
if isinstance(key, int):
|
||||
return self._list[key]
|
||||
|
||||
return self.__class__(self._list[key])
|
||||
|
||||
def _get_key(self, key: str) -> str:
|
||||
ikey = key.lower()
|
||||
|
||||
for k, v in self._list:
|
||||
if k.lower() == ikey:
|
||||
return v
|
||||
|
||||
raise BadRequestKeyError(key)
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if other.__class__ is not self.__class__:
|
||||
return NotImplemented
|
||||
|
||||
def lowered(item: tuple[str, ...]) -> tuple[str, ...]:
|
||||
return item[0].lower(), *item[1:]
|
||||
|
||||
return set(map(lowered, other._list)) == set(map(lowered, self._list)) # type: ignore[attr-defined]
|
||||
|
||||
__hash__ = None # type: ignore[assignment]
|
||||
|
||||
@t.overload
|
||||
def get(self, key: str) -> str | None: ...
|
||||
@t.overload
|
||||
def get(self, key: str, default: str) -> str: ...
|
||||
@t.overload
|
||||
def get(self, key: str, default: T) -> str | T: ...
|
||||
@t.overload
|
||||
def get(self, key: str, type: cabc.Callable[[str], T]) -> T | None: ...
|
||||
@t.overload
|
||||
def get(self, key: str, default: T, type: cabc.Callable[[str], T]) -> T: ...
|
||||
def get( # type: ignore[misc]
|
||||
self,
|
||||
key: str,
|
||||
default: str | T | None = None,
|
||||
type: cabc.Callable[[str], T] | None = None,
|
||||
) -> str | T | None:
|
||||
"""Return the default value if the requested data doesn't exist.
|
||||
If `type` is provided and is a callable it should convert the value,
|
||||
return it or raise a :exc:`ValueError` if that is not possible. In
|
||||
this case the function will return the default as if the value was not
|
||||
found:
|
||||
|
||||
>>> d = Headers([('Content-Length', '42')])
|
||||
>>> d.get('Content-Length', type=int)
|
||||
42
|
||||
|
||||
:param key: The key to be looked up.
|
||||
:param default: The default value to be returned if the key can't
|
||||
be looked up. If not further specified `None` is
|
||||
returned.
|
||||
:param type: A callable that is used to cast the value in the
|
||||
:class:`Headers`. If a :exc:`ValueError` is raised
|
||||
by this callable the default value is returned.
|
||||
|
||||
.. versionchanged:: 3.0
|
||||
The ``as_bytes`` parameter was removed.
|
||||
|
||||
.. versionchanged:: 0.9
|
||||
The ``as_bytes`` parameter was added.
|
||||
"""
|
||||
try:
|
||||
rv = self._get_key(key)
|
||||
except KeyError:
|
||||
return default
|
||||
|
||||
if type is None:
|
||||
return rv
|
||||
|
||||
try:
|
||||
return type(rv)
|
||||
except ValueError:
|
||||
return default
|
||||
|
||||
@t.overload
|
||||
def getlist(self, key: str) -> list[str]: ...
|
||||
@t.overload
|
||||
def getlist(self, key: str, type: cabc.Callable[[str], T]) -> list[T]: ...
|
||||
def getlist(
|
||||
self, key: str, type: cabc.Callable[[str], T] | None = None
|
||||
) -> list[str] | list[T]:
|
||||
"""Return the list of items for a given key. If that key is not in the
|
||||
:class:`Headers`, the return value will be an empty list. Just like
|
||||
:meth:`get`, :meth:`getlist` accepts a `type` parameter. All items will
|
||||
be converted with the callable defined there.
|
||||
|
||||
:param key: The key to be looked up.
|
||||
:param type: A callable that is used to cast the value in the
|
||||
:class:`Headers`. If a :exc:`ValueError` is raised
|
||||
by this callable the value will be removed from the list.
|
||||
:return: a :class:`list` of all the values for the key.
|
||||
|
||||
.. versionchanged:: 3.0
|
||||
The ``as_bytes`` parameter was removed.
|
||||
|
||||
.. versionchanged:: 0.9
|
||||
The ``as_bytes`` parameter was added.
|
||||
"""
|
||||
ikey = key.lower()
|
||||
|
||||
if type is not None:
|
||||
result = []
|
||||
|
||||
for k, v in self:
|
||||
if k.lower() == ikey:
|
||||
try:
|
||||
result.append(type(v))
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
return result
|
||||
|
||||
return [v for k, v in self if k.lower() == ikey]
|
||||
|
||||
def get_all(self, name: str) -> list[str]:
|
||||
"""Return a list of all the values for the named field.
|
||||
|
||||
This method is compatible with the :mod:`wsgiref`
|
||||
:meth:`~wsgiref.headers.Headers.get_all` method.
|
||||
"""
|
||||
return self.getlist(name)
|
||||
|
||||
def items(self, lower: bool = False) -> t.Iterable[tuple[str, str]]:
|
||||
for key, value in self:
|
||||
if lower:
|
||||
key = key.lower()
|
||||
yield key, value
|
||||
|
||||
def keys(self, lower: bool = False) -> t.Iterable[str]:
|
||||
for key, _ in self.items(lower):
|
||||
yield key
|
||||
|
||||
def values(self) -> t.Iterable[str]:
|
||||
for _, value in self.items():
|
||||
yield value
|
||||
|
||||
def extend(
|
||||
self,
|
||||
arg: (
|
||||
Headers
|
||||
| MultiDict[str, t.Any]
|
||||
| cabc.Mapping[str, t.Any | list[t.Any] | tuple[t.Any, ...] | set[t.Any]]
|
||||
| cabc.Iterable[tuple[str, t.Any]]
|
||||
| None
|
||||
) = None,
|
||||
/,
|
||||
**kwargs: str,
|
||||
) -> None:
|
||||
"""Extend headers in this object with items from another object
|
||||
containing header items as well as keyword arguments.
|
||||
|
||||
To replace existing keys instead of extending, use
|
||||
:meth:`update` instead.
|
||||
|
||||
If provided, the first argument can be another :class:`Headers`
|
||||
object, a :class:`MultiDict`, :class:`dict`, or iterable of
|
||||
pairs.
|
||||
|
||||
.. versionchanged:: 1.0
|
||||
Support :class:`MultiDict`. Allow passing ``kwargs``.
|
||||
"""
|
||||
if arg is not None:
|
||||
for key, value in iter_multi_items(arg):
|
||||
self.add(key, value)
|
||||
|
||||
for key, value in iter_multi_items(kwargs):
|
||||
self.add(key, value)
|
||||
|
||||
def __delitem__(self, key: str | int | slice) -> None:
|
||||
if isinstance(key, str):
|
||||
self._del_key(key)
|
||||
return
|
||||
|
||||
del self._list[key]
|
||||
|
||||
def _del_key(self, key: str) -> None:
|
||||
key = key.lower()
|
||||
new = []
|
||||
|
||||
for k, v in self._list:
|
||||
if k.lower() != key:
|
||||
new.append((k, v))
|
||||
|
||||
self._list[:] = new
|
||||
|
||||
def remove(self, key: str) -> None:
|
||||
"""Remove a key.
|
||||
|
||||
:param key: The key to be removed.
|
||||
"""
|
||||
return self._del_key(key)
|
||||
|
||||
@t.overload
|
||||
def pop(self) -> tuple[str, str]: ...
|
||||
@t.overload
|
||||
def pop(self, key: str) -> str: ...
|
||||
@t.overload
|
||||
def pop(self, key: int | None = ...) -> tuple[str, str]: ...
|
||||
@t.overload
|
||||
def pop(self, key: str, default: str) -> str: ...
|
||||
@t.overload
|
||||
def pop(self, key: str, default: T) -> str | T: ...
|
||||
def pop(
|
||||
self,
|
||||
key: str | int | None = None,
|
||||
default: str | T = _missing, # type: ignore[assignment]
|
||||
) -> str | tuple[str, str] | T:
|
||||
"""Removes and returns a key or index.
|
||||
|
||||
:param key: The key to be popped. If this is an integer the item at
|
||||
that position is removed, if it's a string the value for
|
||||
that key is. If the key is omitted or `None` the last
|
||||
item is removed.
|
||||
:return: an item.
|
||||
"""
|
||||
if key is None:
|
||||
return self._list.pop()
|
||||
|
||||
if isinstance(key, int):
|
||||
return self._list.pop(key)
|
||||
|
||||
try:
|
||||
rv = self._get_key(key)
|
||||
except KeyError:
|
||||
if default is not _missing:
|
||||
return default
|
||||
|
||||
raise
|
||||
|
||||
self.remove(key)
|
||||
return rv
|
||||
|
||||
def popitem(self) -> tuple[str, str]:
|
||||
"""Removes a key or index and returns a (key, value) item."""
|
||||
return self._list.pop()
|
||||
|
||||
def __contains__(self, key: str) -> bool:
|
||||
"""Check if a key is present."""
|
||||
try:
|
||||
self._get_key(key)
|
||||
except KeyError:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def __iter__(self) -> t.Iterator[tuple[str, str]]:
|
||||
"""Yield ``(key, value)`` tuples."""
|
||||
return iter(self._list)
|
||||
|
||||
def __len__(self) -> int:
|
||||
return len(self._list)
|
||||
|
||||
def add(self, key: str, value: t.Any, /, **kwargs: t.Any) -> None:
|
||||
"""Add a new header tuple to the list.
|
||||
|
||||
Keyword arguments can specify additional parameters for the header
|
||||
value, with underscores converted to dashes::
|
||||
|
||||
>>> d = Headers()
|
||||
>>> d.add('Content-Type', 'text/plain')
|
||||
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
|
||||
|
||||
The keyword argument dumping uses :func:`dump_options_header`
|
||||
behind the scenes.
|
||||
|
||||
.. versionchanged:: 0.4.1
|
||||
keyword arguments were added for :mod:`wsgiref` compatibility.
|
||||
"""
|
||||
if kwargs:
|
||||
value = _options_header_vkw(value, kwargs)
|
||||
|
||||
value_str = _str_header_value(value)
|
||||
self._list.append((key, value_str))
|
||||
|
||||
def add_header(self, key: str, value: t.Any, /, **kwargs: t.Any) -> None:
|
||||
"""Add a new header tuple to the list.
|
||||
|
||||
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
|
||||
:meth:`~wsgiref.headers.Headers.add_header` method.
|
||||
"""
|
||||
self.add(key, value, **kwargs)
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clears all headers."""
|
||||
self._list.clear()
|
||||
|
||||
def set(self, key: str, value: t.Any, /, **kwargs: t.Any) -> None:
|
||||
"""Remove all header tuples for `key` and add a new one. The newly
|
||||
added key either appears at the end of the list if there was no
|
||||
entry or replaces the first one.
|
||||
|
||||
Keyword arguments can specify additional parameters for the header
|
||||
value, with underscores converted to dashes. See :meth:`add` for
|
||||
more information.
|
||||
|
||||
.. versionchanged:: 0.6.1
|
||||
:meth:`set` now accepts the same arguments as :meth:`add`.
|
||||
|
||||
:param key: The key to be inserted.
|
||||
:param value: The value to be inserted.
|
||||
"""
|
||||
if kwargs:
|
||||
value = _options_header_vkw(value, kwargs)
|
||||
|
||||
value_str = _str_header_value(value)
|
||||
|
||||
if not self._list:
|
||||
self._list.append((key, value_str))
|
||||
return
|
||||
|
||||
iter_list = iter(self._list)
|
||||
ikey = key.lower()
|
||||
|
||||
for idx, (old_key, _) in enumerate(iter_list):
|
||||
if old_key.lower() == ikey:
|
||||
# replace first occurrence
|
||||
self._list[idx] = (key, value_str)
|
||||
break
|
||||
else:
|
||||
# no existing occurrences
|
||||
self._list.append((key, value_str))
|
||||
return
|
||||
|
||||
# remove remaining occurrences
|
||||
self._list[idx + 1 :] = [t for t in iter_list if t[0].lower() != ikey]
|
||||
|
||||
def setlist(self, key: str, values: cabc.Iterable[t.Any]) -> None:
|
||||
"""Remove any existing values for a header and add new ones.
|
||||
|
||||
:param key: The header key to set.
|
||||
:param values: An iterable of values to set for the key.
|
||||
|
||||
.. versionadded:: 1.0
|
||||
"""
|
||||
if values:
|
||||
values_iter = iter(values)
|
||||
self.set(key, next(values_iter))
|
||||
|
||||
for value in values_iter:
|
||||
self.add(key, value)
|
||||
else:
|
||||
self.remove(key)
|
||||
|
||||
def setdefault(self, key: str, default: t.Any) -> str:
|
||||
"""Return the first value for the key if it is in the headers,
|
||||
otherwise set the header to the value given by ``default`` and
|
||||
return that.
|
||||
|
||||
:param key: The header key to get.
|
||||
:param default: The value to set for the key if it is not in the
|
||||
headers.
|
||||
"""
|
||||
try:
|
||||
return self._get_key(key)
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
self.set(key, default)
|
||||
return self._get_key(key)
|
||||
|
||||
def setlistdefault(self, key: str, default: cabc.Iterable[t.Any]) -> list[str]:
|
||||
"""Return the list of values for the key if it is in the
|
||||
headers, otherwise set the header to the list of values given
|
||||
by ``default`` and return that.
|
||||
|
||||
Unlike :meth:`MultiDict.setlistdefault`, modifying the returned
|
||||
list will not affect the headers.
|
||||
|
||||
:param key: The header key to get.
|
||||
:param default: An iterable of values to set for the key if it
|
||||
is not in the headers.
|
||||
|
||||
.. versionadded:: 1.0
|
||||
"""
|
||||
if key not in self:
|
||||
self.setlist(key, default)
|
||||
|
||||
return self.getlist(key)
|
||||
|
||||
@t.overload
|
||||
def __setitem__(self, key: str, value: t.Any) -> None: ...
|
||||
@t.overload
|
||||
def __setitem__(self, key: int, value: tuple[str, t.Any]) -> None: ...
|
||||
@t.overload
|
||||
def __setitem__(
|
||||
self, key: slice, value: cabc.Iterable[tuple[str, t.Any]]
|
||||
) -> None: ...
|
||||
def __setitem__(
|
||||
self,
|
||||
key: str | int | slice,
|
||||
value: t.Any | tuple[str, t.Any] | cabc.Iterable[tuple[str, t.Any]],
|
||||
) -> None:
|
||||
"""Like :meth:`set` but also supports index/slice based setting."""
|
||||
if isinstance(key, str):
|
||||
self.set(key, value)
|
||||
elif isinstance(key, int):
|
||||
self._list[key] = value[0], _str_header_value(value[1]) # type: ignore[index]
|
||||
else:
|
||||
self._list[key] = [(k, _str_header_value(v)) for k, v in value] # type: ignore[misc]
|
||||
|
||||
def update(
|
||||
self,
|
||||
arg: (
|
||||
Headers
|
||||
| MultiDict[str, t.Any]
|
||||
| cabc.Mapping[
|
||||
str, t.Any | list[t.Any] | tuple[t.Any, ...] | cabc.Set[t.Any]
|
||||
]
|
||||
| cabc.Iterable[tuple[str, t.Any]]
|
||||
| None
|
||||
) = None,
|
||||
/,
|
||||
**kwargs: t.Any | list[t.Any] | tuple[t.Any, ...] | cabc.Set[t.Any],
|
||||
) -> None:
|
||||
"""Replace headers in this object with items from another
|
||||
headers object and keyword arguments.
|
||||
|
||||
To extend existing keys instead of replacing, use :meth:`extend`
|
||||
instead.
|
||||
|
||||
If provided, the first argument can be another :class:`Headers`
|
||||
object, a :class:`MultiDict`, :class:`dict`, or iterable of
|
||||
pairs.
|
||||
|
||||
.. versionadded:: 1.0
|
||||
"""
|
||||
if arg is not None:
|
||||
if isinstance(arg, (Headers, MultiDict)):
|
||||
for key in arg.keys():
|
||||
self.setlist(key, arg.getlist(key))
|
||||
elif isinstance(arg, cabc.Mapping):
|
||||
for key, value in arg.items():
|
||||
if isinstance(value, (list, tuple, set)):
|
||||
self.setlist(key, value)
|
||||
else:
|
||||
self.set(key, value)
|
||||
else:
|
||||
for key, value in arg:
|
||||
self.set(key, value)
|
||||
|
||||
for key, value in kwargs.items():
|
||||
if isinstance(value, (list, tuple, set)):
|
||||
self.setlist(key, value)
|
||||
else:
|
||||
self.set(key, value)
|
||||
|
||||
def __or__(
|
||||
self,
|
||||
other: cabc.Mapping[
|
||||
str, t.Any | list[t.Any] | tuple[t.Any, ...] | cabc.Set[t.Any]
|
||||
],
|
||||
) -> te.Self:
|
||||
if not isinstance(other, cabc.Mapping):
|
||||
return NotImplemented
|
||||
|
||||
rv = self.copy()
|
||||
rv.update(other)
|
||||
return rv
|
||||
|
||||
def __ior__(
|
||||
self,
|
||||
other: (
|
||||
cabc.Mapping[str, t.Any | list[t.Any] | tuple[t.Any, ...] | cabc.Set[t.Any]]
|
||||
| cabc.Iterable[tuple[str, t.Any]]
|
||||
),
|
||||
) -> te.Self:
|
||||
if not isinstance(other, (cabc.Mapping, cabc.Iterable)):
|
||||
return NotImplemented
|
||||
|
||||
self.update(other)
|
||||
return self
|
||||
|
||||
def to_wsgi_list(self) -> list[tuple[str, str]]:
|
||||
"""Convert the headers into a list suitable for WSGI.
|
||||
|
||||
:return: list
|
||||
"""
|
||||
return list(self)
|
||||
|
||||
def copy(self) -> te.Self:
|
||||
return self.__class__(self._list)
|
||||
|
||||
def __copy__(self) -> te.Self:
|
||||
return self.copy()
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Returns formatted headers suitable for HTTP transmission."""
|
||||
strs = []
|
||||
for key, value in self.to_wsgi_list():
|
||||
strs.append(f"{key}: {value}")
|
||||
strs.append("\r\n")
|
||||
return "\r\n".join(strs)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"{type(self).__name__}({list(self)!r})"
|
||||
|
||||
|
||||
def _options_header_vkw(value: str, kw: dict[str, t.Any]) -> str:
|
||||
return http.dump_options_header(
|
||||
value, {k.replace("_", "-"): v for k, v in kw.items()}
|
||||
)
|
||||
|
||||
|
||||
_newline_re = re.compile(r"[\r\n]")
|
||||
|
||||
|
||||
def _str_header_value(value: t.Any) -> str:
|
||||
if not isinstance(value, str):
|
||||
value = str(value)
|
||||
|
||||
if _newline_re.search(value) is not None:
|
||||
raise ValueError("Header values must not contain newline characters.")
|
||||
|
||||
return value # type: ignore[no-any-return]
|
||||
|
||||
|
||||
class EnvironHeaders(ImmutableHeadersMixin, Headers):  # type: ignore[misc]
    """Read only version of the headers from a WSGI environment.  This
    provides the same interface as `Headers` and is constructed from
    a WSGI environment.
    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
    HTTP exceptions.
    """

    def __init__(self, environ: WSGIEnvironment) -> None:
        super().__init__()
        # Headers are read from this environ dict on demand; nothing is
        # copied into the base class's ``_list``.
        self.environ = environ

    def __eq__(self, other: object) -> bool:
        # Equal only when wrapping the very same environ dict (identity,
        # not content).
        if not isinstance(other, EnvironHeaders):
            return NotImplemented

        return self.environ is other.environ

    # Equality is identity-based and the environ is mutable, so hashing
    # is disabled.
    __hash__ = None  # type: ignore[assignment]

    def __getitem__(self, key: str) -> str:  # type: ignore[override]
        # Only string lookups are supported; the base class's int/slice
        # access is intentionally unavailable here.
        return self._get_key(key)

    def _get_key(self, key: str) -> str:
        if not isinstance(key, str):
            raise BadRequestKeyError(key)

        # Translate the header name into WSGI's environ convention:
        # uppercase, dashes replaced by underscores.
        key = key.upper().replace("-", "_")

        # Per the WSGI spec these two appear without the HTTP_ prefix.
        if key in {"CONTENT_TYPE", "CONTENT_LENGTH"}:
            return self.environ[key]  # type: ignore[no-any-return]

        return self.environ[f"HTTP_{key}"]  # type: ignore[no-any-return]

    def __len__(self) -> int:
        # Counted by iterating, since headers are derived from environ.
        return sum(1 for _ in self)

    def __iter__(self) -> cabc.Iterator[tuple[str, str]]:
        for key, value in self.environ.items():
            if key.startswith("HTTP_") and key not in {
                "HTTP_CONTENT_TYPE",
                "HTTP_CONTENT_LENGTH",
            }:
                # Strip the HTTP_ prefix and restore header-style casing.
                yield key[5:].replace("_", "-").title(), value
            elif key in {"CONTENT_TYPE", "CONTENT_LENGTH"} and value:
                yield key.replace("_", "-").title(), value

    def copy(self) -> t.NoReturn:
        raise TypeError(f"cannot create {type(self).__name__!r} copies")

    def __or__(self, other: t.Any) -> t.NoReturn:
        raise TypeError(f"cannot create {type(self).__name__!r} copies")
|
||||
|
||||
|
||||
# circular dependencies
|
||||
from .. import http
|
||||
317
lib/python3.11/site-packages/werkzeug/datastructures/mixins.py
Normal file
317
lib/python3.11/site-packages/werkzeug/datastructures/mixins.py
Normal file
@ -0,0 +1,317 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
import typing as t
|
||||
from functools import update_wrapper
|
||||
from itertools import repeat
|
||||
|
||||
from .._internal import _missing
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
import typing_extensions as te
|
||||
|
||||
K = t.TypeVar("K")
|
||||
V = t.TypeVar("V")
|
||||
T = t.TypeVar("T")
|
||||
F = t.TypeVar("F", bound=cabc.Callable[..., t.Any])
|
||||
|
||||
|
||||
def _immutable_error(self: t.Any) -> t.NoReturn:
|
||||
raise TypeError(f"{type(self).__name__!r} objects are immutable")
|
||||
|
||||
|
||||
class ImmutableListMixin:
    """Makes a :class:`list` immutable.

    Hashing is supported (as a tuple of the items); every mutating list
    operation raises ``TypeError``.

    .. versionadded:: 0.5

    :private:
    """

    # Lazily computed hash; filled in on the first __hash__ call.
    _hash_cache: int | None = None

    def __hash__(self) -> int:
        """Hash like a tuple of the items, caching the result."""
        cached = self._hash_cache
        if cached is None:
            cached = self._hash_cache = hash(tuple(self))  # type: ignore[arg-type]
        return cached

    def __reduce_ex__(self, protocol: t.SupportsIndex) -> t.Any:
        """Pickle as ``(class, (items,))`` so instances round-trip."""
        return type(self), (list(self),)  # type: ignore[call-overload]

    def __delitem__(self, key: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def __iadd__(self, other: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def __imul__(self, other: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def __setitem__(self, key: t.Any, value: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def append(self, item: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def remove(self, item: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def extend(self, iterable: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def insert(self, pos: t.Any, value: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def pop(self, index: t.Any = -1) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def reverse(self: t.Any) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")

    def sort(self, key: t.Any = None, reverse: t.Any = False) -> t.NoReturn:
        raise TypeError(f"{type(self).__name__!r} objects are immutable")
|
||||
|
||||
|
||||
class ImmutableDictMixin(t.Generic[K, V]):
    """Makes a :class:`dict` immutable.

    Hashing is supported (over the item pairs); every mutating dict
    operation raises ``TypeError``.

    .. versionchanged:: 3.1
        Disallow ``|=`` operator.

    .. versionadded:: 0.5

    :private:
    """

    # Lazily computed hash; filled in on the first __hash__ call.
    _hash_cache: int | None = None

    @classmethod
    @t.overload
    def fromkeys(
        cls, keys: cabc.Iterable[K], value: None
    ) -> ImmutableDictMixin[K, t.Any | None]: ...
    @classmethod
    @t.overload
    def fromkeys(cls, keys: cabc.Iterable[K], value: V) -> ImmutableDictMixin[K, V]: ...
    @classmethod
    def fromkeys(
        cls, keys: cabc.Iterable[K], value: V | None = None
    ) -> ImmutableDictMixin[K, t.Any | None] | ImmutableDictMixin[K, V]:
        """Create an instance mapping every key in *keys* to *value*.

        Builds via ``__new__`` + ``__init__`` rather than ``dict.fromkeys``
        so the (immutable) subclass initializes itself from the pairs.
        """
        instance = super().__new__(cls)
        instance.__init__(zip(keys, repeat(value)))  # type: ignore[misc]
        return instance

    def __reduce_ex__(self, protocol: t.SupportsIndex) -> t.Any:
        # Pickle as ``(class, (dict,))`` so instances round-trip.
        return type(self), (dict(self),)  # type: ignore[call-overload]

    def _iter_hashitems(self) -> t.Iterable[t.Any]:
        # Hook for subclasses to choose what __hash__ is computed over.
        return self.items()  # type: ignore[attr-defined,no-any-return]

    def __hash__(self) -> int:
        """Hash over the (unordered) item pairs, cached after first use."""
        if self._hash_cache is not None:
            return self._hash_cache
        rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
        return rv

    def setdefault(self, key: t.Any, default: t.Any = None) -> t.NoReturn:
        _immutable_error(self)

    def update(self, arg: t.Any, /, **kwargs: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def __ior__(self, other: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def pop(self, key: t.Any, default: t.Any = None) -> t.NoReturn:
        _immutable_error(self)

    def popitem(self) -> t.NoReturn:
        _immutable_error(self)

    def __setitem__(self, key: t.Any, value: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def __delitem__(self, key: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def clear(self) -> t.NoReturn:
        _immutable_error(self)
|
||||
|
||||
|
||||
class ImmutableMultiDictMixin(ImmutableDictMixin[K, V]):
    """Makes a :class:`MultiDict` immutable.

    .. versionadded:: 0.5

    :private:
    """

    def __reduce_ex__(self, protocol: t.SupportsIndex) -> t.Any:
        # Pickle the multi items so duplicate keys survive a round-trip.
        return type(self), (list(self.items(multi=True)),)  # type: ignore[attr-defined]

    def _iter_hashitems(self) -> t.Iterable[t.Any]:
        # Hash over every value per key, not just the first one.
        return self.items(multi=True)  # type: ignore[attr-defined,no-any-return]

    def add(self, key: t.Any, value: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def popitemlist(self) -> t.NoReturn:
        _immutable_error(self)

    def poplist(self, key: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def setlist(self, key: t.Any, new_list: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def setlistdefault(self, key: t.Any, default_list: t.Any = None) -> t.NoReturn:
        _immutable_error(self)
|
||||
|
||||
|
||||
class ImmutableHeadersMixin:
    """Makes a :class:`Headers` immutable. We do not mark them as
    hashable though since the only usecase for this datastructure
    in Werkzeug is a view on a mutable structure.

    .. versionchanged:: 3.1
        Disallow ``|=`` operator.

    .. versionadded:: 0.5

    :private:
    """

    # Every mutating method of Headers is overridden to raise TypeError.

    def __delitem__(self, key: t.Any, **kwargs: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def __setitem__(self, key: t.Any, value: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def set(self, key: t.Any, value: t.Any, /, **kwargs: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def setlist(self, key: t.Any, values: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def add(self, key: t.Any, value: t.Any, /, **kwargs: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def add_header(self, key: t.Any, value: t.Any, /, **kwargs: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def remove(self, key: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def extend(self, arg: t.Any, /, **kwargs: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def update(self, arg: t.Any, /, **kwargs: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def __ior__(self, other: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def insert(self, pos: t.Any, value: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def pop(self, key: t.Any = None, default: t.Any = _missing) -> t.NoReturn:
        _immutable_error(self)

    def popitem(self) -> t.NoReturn:
        _immutable_error(self)

    def setdefault(self, key: t.Any, default: t.Any) -> t.NoReturn:
        _immutable_error(self)

    def setlistdefault(self, key: t.Any, default: t.Any) -> t.NoReturn:
        _immutable_error(self)
|
||||
|
||||
|
||||
def _always_update(f: F) -> F:
    """Decorator: after *f* runs, call ``self.on_update(self)`` if it is set.

    Used by :class:`UpdateDictMixin` for mutators that always count as a
    modification (unlike ``setdefault``/``pop``, which check first).
    """

    def wrapper(
        self: UpdateDictMixin[t.Any, t.Any], /, *args: t.Any, **kwargs: t.Any
    ) -> t.Any:
        rv = f(self, *args, **kwargs)

        if self.on_update is not None:
            self.on_update(self)

        return rv

    return update_wrapper(wrapper, f)  # type: ignore[return-value]
|
||||
|
||||
|
||||
class UpdateDictMixin(dict[K, V]):
    """Makes dicts call `self.on_update` on modifications.

    .. versionchanged:: 3.1
        Implement ``|=`` operator.

    .. versionadded:: 0.5

    :private:
    """

    # Callback invoked with ``self`` after each mutation; ``None`` disables it.
    on_update: cabc.Callable[[te.Self], None] | None = None

    def setdefault(self: te.Self, key: K, default: V | None = None) -> V:
        """Like :meth:`dict.setdefault`, firing ``on_update`` only when the
        key was actually inserted.
        """
        modified = key not in self
        rv = super().setdefault(key, default)  # type: ignore[arg-type]
        if modified and self.on_update is not None:
            self.on_update(self)
        return rv

    @t.overload
    def pop(self: te.Self, key: K) -> V: ...
    @t.overload
    def pop(self: te.Self, key: K, default: V) -> V: ...
    @t.overload
    def pop(self: te.Self, key: K, default: T) -> T: ...
    def pop(
        self: te.Self,
        key: K,
        # Sentinel distinguishes "no default given" from ``default=None``.
        default: V | T = _missing,  # type: ignore[assignment]
    ) -> V | T:
        """Like :meth:`dict.pop`, firing ``on_update`` only when the key
        was actually removed.
        """
        modified = key in self
        if default is _missing:
            rv = super().pop(key)
        else:
            rv = super().pop(key, default)  # type: ignore[arg-type]
        if modified and self.on_update is not None:
            self.on_update(self)
        return rv

    @_always_update
    def __setitem__(self, key: K, value: V) -> None:
        super().__setitem__(key, value)

    @_always_update
    def __delitem__(self, key: K) -> None:
        super().__delitem__(key)

    @_always_update
    def clear(self) -> None:
        super().clear()

    @_always_update
    def popitem(self) -> tuple[K, V]:
        return super().popitem()

    @_always_update
    def update(  # type: ignore[override]
        self,
        arg: cabc.Mapping[K, V] | cabc.Iterable[tuple[K, V]] | None = None,
        /,
        **kwargs: V,
    ) -> None:
        if arg is None:
            super().update(**kwargs)
        else:
            super().update(arg, **kwargs)

    @_always_update
    def __ior__(  # type: ignore[override]
        self, other: cabc.Mapping[K, V] | cabc.Iterable[tuple[K, V]]
    ) -> te.Self:
        return super().__ior__(other)
|
||||
214
lib/python3.11/site-packages/werkzeug/datastructures/range.py
Normal file
214
lib/python3.11/site-packages/werkzeug/datastructures/range.py
Normal file
@ -0,0 +1,214 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
import typing as t
|
||||
from datetime import datetime
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
import typing_extensions as te
|
||||
|
||||
T = t.TypeVar("T")
|
||||
|
||||
|
||||
class IfRange:
    """Parsed representation of the ``If-Range`` header. Carries at most
    one of an etag or a date — never both.

    .. versionadded:: 0.7
    """

    def __init__(self, etag: str | None = None, date: datetime | None = None):
        #: The etag parsed and unquoted. Ranges always operate on strong
        #: etags so the weakness information is not necessary.
        self.etag = etag
        #: The date in parsed format or `None`.
        self.date = date

    def to_header(self) -> str:
        """Serialize back into an HTTP header value."""
        # A date takes precedence over an etag if both happen to be set.
        if self.date is not None:
            return http.http_date(self.date)

        return http.quote_etag(self.etag) if self.etag is not None else ""

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {str(self)!r}>"
|
||||
|
||||
|
||||
class Range:
    """Represents a ``Range`` header. All methods only support only
    bytes as the unit. Stores a list of ranges if given, but the methods
    only work if only one range is provided.

    :raise ValueError: If the ranges provided are invalid.

    .. versionchanged:: 0.15
        The ranges passed in are validated.

    .. versionadded:: 0.7
    """

    def __init__(
        self, units: str, ranges: cabc.Sequence[tuple[int, int | None]]
    ) -> None:
        #: The units of this range. Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges

        # Reject malformed pairs up front: a missing start, or a bounded
        # range that is empty or has a negative start.
        for begin, stop in ranges:
            if begin is None or (stop is not None and (begin < 0 or begin >= stop)):
                raise ValueError(f"{(begin, stop)} is not a valid range.")

    def range_for_length(self, length: int | None) -> tuple[int, int] | None:
        """If the range is for bytes, the length is not None and there is
        exactly one range and it is satisfiable it returns a ``(start, stop)``
        tuple, otherwise `None`.
        """
        # Only a single byte range against a known length can be resolved.
        if length is None or self.units != "bytes" or len(self.ranges) != 1:
            return None

        begin, stop = self.ranges[0]

        if stop is None:
            stop = length

        if begin < 0:
            # Negative start counts back from the end of the resource.
            begin += length

        if http.is_byte_range_valid(begin, stop, length):
            return begin, min(stop, length)

        return None

    def make_content_range(self, length: int | None) -> ContentRange | None:
        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
        from the current range and given content length.
        """
        resolved = self.range_for_length(length)

        if resolved is None:
            return None

        return ContentRange(self.units, resolved[0], resolved[1], length)

    def to_header(self) -> str:
        """Converts the object back into an HTTP header."""
        parts = []

        for begin, stop in self.ranges:
            if stop is not None:
                parts.append(f"{begin}-{stop - 1}")
            elif begin < 0:
                parts.append(str(begin))
            else:
                parts.append(f"{begin}-")

        return f"{self.units}={','.join(parts)}"

    def to_content_range_header(self, length: int | None) -> str | None:
        """Converts the object into `Content-Range` HTTP header,
        based on given length
        """
        resolved = self.range_for_length(length)

        if resolved is None:
            return None

        return f"{self.units} {resolved[0]}-{resolved[1] - 1}/{length}"

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {str(self)!r}>"
|
||||
|
||||
|
||||
class _CallbackProperty(t.Generic[T]):
    """Data descriptor that stores its value in the instance ``__dict__``
    under a leading-underscore key and invokes ``instance.on_update``
    after every assignment.
    """

    def __set_name__(self, owner: type[ContentRange], name: str) -> None:
        # Store under "_<name>" so the descriptor itself stays visible
        # on the class.
        self.attr = f"_{name}"

    @t.overload
    def __get__(self, instance: None, owner: None) -> te.Self: ...
    @t.overload
    def __get__(self, instance: ContentRange, owner: type[ContentRange]) -> T: ...
    def __get__(
        self, instance: ContentRange | None, owner: type[ContentRange] | None
    ) -> te.Self | T:
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self

        return instance.__dict__[self.attr]  # type: ignore[no-any-return]

    def __set__(self, instance: ContentRange, value: T) -> None:
        instance.__dict__[self.attr] = value

        # Notify listeners about the change.
        if instance.on_update is not None:
            instance.on_update(instance)
|
||||
|
||||
|
||||
class ContentRange:
    """Represents the content range header.

    .. versionadded:: 0.7
    """

    def __init__(
        self,
        units: str | None,
        start: int | None,
        stop: int | None,
        length: int | None = None,
        on_update: cabc.Callable[[ContentRange], None] | None = None,
    ) -> None:
        self.on_update = on_update
        self.set(start, stop, length, units)

    #: The units to use, usually "bytes"
    units: str | None = _CallbackProperty()  # type: ignore[assignment]
    #: The start point of the range or `None`.
    start: int | None = _CallbackProperty()  # type: ignore[assignment]
    #: The stop point of the range (non-inclusive) or `None`. Can only be
    #: `None` if also start is `None`.
    stop: int | None = _CallbackProperty()  # type: ignore[assignment]
    #: The length of the range or `None`.
    length: int | None = _CallbackProperty()  # type: ignore[assignment]

    def set(
        self,
        start: int | None,
        stop: int | None,
        length: int | None = None,
        units: str | None = "bytes",
    ) -> None:
        """Simple method to update the ranges."""
        assert http.is_byte_range_valid(start, stop, length), "Bad range provided"
        # Write the underscore slots directly so on_update fires only once,
        # at the end, instead of once per attribute assignment.
        self._units: str | None = units
        self._start: int | None = start
        self._stop: int | None = stop
        self._length: int | None = length
        if self.on_update is not None:
            self.on_update(self)

    def unset(self) -> None:
        """Sets the units to `None` which indicates that the header should
        no longer be used.
        """
        self.set(None, None, units=None)

    def to_header(self) -> str:
        """Render the header value, or an empty string when unset."""
        if self._units is None:
            return ""

        if self._length is None:
            # Unknown total length is rendered as "*".
            length: str | int = "*"
        else:
            length = self._length

        if self._start is None:
            # Unsatisfied range: "<units> */<length>".
            return f"{self._units} */{length}"

        return f"{self._units} {self._start}-{self._stop - 1}/{length}"  # type: ignore[operator]

    def __bool__(self) -> bool:
        # Truthy while the header is in use (units are set).
        return self._units is not None

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {str(self)!r}>"
|
||||
|
||||
|
||||
# circular dependencies
|
||||
from .. import http
|
||||
1239
lib/python3.11/site-packages/werkzeug/datastructures/structures.py
Normal file
1239
lib/python3.11/site-packages/werkzeug/datastructures/structures.py
Normal file
File diff suppressed because it is too large
Load Diff
565
lib/python3.11/site-packages/werkzeug/debug/__init__.py
Normal file
565
lib/python3.11/site-packages/werkzeug/debug/__init__.py
Normal file
@ -0,0 +1,565 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import getpass
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import pkgutil
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import typing as t
|
||||
import uuid
|
||||
from contextlib import ExitStack
|
||||
from io import BytesIO
|
||||
from itertools import chain
|
||||
from multiprocessing import Value
|
||||
from os.path import basename
|
||||
from os.path import join
|
||||
from zlib import adler32
|
||||
|
||||
from .._internal import _log
|
||||
from ..exceptions import NotFound
|
||||
from ..exceptions import SecurityError
|
||||
from ..http import parse_cookie
|
||||
from ..sansio.utils import host_is_trusted
|
||||
from ..security import gen_salt
|
||||
from ..utils import send_file
|
||||
from ..wrappers.request import Request
|
||||
from ..wrappers.response import Response
|
||||
from .console import Console
|
||||
from .tbtools import DebugFrameSummary
|
||||
from .tbtools import DebugTraceback
|
||||
from .tbtools import render_console_html
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
# A week
|
||||
PIN_TIME = 60 * 60 * 24 * 7
|
||||
|
||||
|
||||
def hash_pin(pin: str) -> str:
    """Return a short, stable digest of *pin* for storage in the auth cookie."""
    salted = f"{pin} added salt".encode("utf-8", "replace")
    return hashlib.sha1(salted).hexdigest()[:12]
|
||||
|
||||
|
||||
_machine_id: str | bytes | None = None
|
||||
|
||||
|
||||
def get_machine_id() -> str | bytes | None:
    """Return a semi-stable identifier for this machine, or ``None`` when
    no source is available. The result is cached for the process lifetime.
    """
    global _machine_id

    if _machine_id is not None:
        return _machine_id

    def _generate() -> str | bytes | None:
        linux = b""

        # machine-id is stable across boots, boot_id is not.
        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
            try:
                with open(filename, "rb") as f:
                    value = f.readline().strip()
            except OSError:
                continue

            if value:
                linux += value
                break

        # Containers share the same machine id, add some cgroup
        # information. This is used outside containers too but should be
        # relatively stable across boots.
        try:
            with open("/proc/self/cgroup", "rb") as f:
                linux += f.readline().strip().rpartition(b"/")[2]
        except OSError:
            pass

        if linux:
            return linux

        # On OS X, use ioreg to get the computer's serial number.
        try:
            # subprocess may not be available, e.g. Google App Engine
            # https://github.com/pallets/werkzeug/issues/925
            from subprocess import PIPE
            from subprocess import Popen

            dump = Popen(
                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
            ).communicate()[0]
            match = re.search(b'"serial-number" = <([^>]+)', dump)

            if match is not None:
                return match.group(1)
        except (OSError, ImportError):
            pass

        # On Windows, use winreg to get the machine guid.
        if sys.platform == "win32":
            import winreg

            try:
                with winreg.OpenKey(
                    winreg.HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Microsoft\\Cryptography",
                    0,
                    winreg.KEY_READ | winreg.KEY_WOW64_64KEY,
                ) as rk:
                    guid: str | bytes
                    guid_type: int
                    guid, guid_type = winreg.QueryValueEx(rk, "MachineGuid")

                    if guid_type == winreg.REG_SZ:
                        return guid.encode()

                    return guid
            except OSError:
                pass

        # No usable source on this platform.
        return None

    _machine_id = _generate()
    return _machine_id
|
||||
|
||||
|
||||
class _ConsoleFrame:
    """Helper class so that we can reuse the frame console code for the
    standalone console.
    """

    def __init__(self, namespace: dict[str, t.Any]):
        # Console evaluating code in *namespace*; id 0 marks the
        # standalone console frame (see DebuggedApplication.display_console).
        self.console = Console(namespace)
        self.id = 0

    def eval(self, code: str) -> t.Any:
        """Evaluate *code* in this frame's console and return the result."""
        return self.console.eval(code)
|
||||
|
||||
|
||||
def get_pin_and_cookie_name(
    app: WSGIApplication,
) -> tuple[str, str] | tuple[None, None]:
    """Given an application object this returns a semi-stable 9 digit pin
    code and a random key. The hope is that this is stable between
    restarts to not make debugging particularly frustrating. If the pin
    was forcefully disabled this returns `None`.

    Second item in the resulting tuple is the cookie name for remembering.
    """
    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
    rv = None
    num = None

    # Pin was explicitly disabled
    if pin == "off":
        return None, None

    # Pin was provided explicitly
    if pin is not None and pin.replace("-", "").isdecimal():
        # If there are separators in the pin, return it directly
        if "-" in pin:
            rv = pin
        else:
            num = pin

    modname = getattr(app, "__module__", t.cast(object, app).__class__.__module__)
    username: str | None

    try:
        # getuser imports the pwd module, which does not exist in Google
        # App Engine. It may also raise a KeyError if the UID does not
        # have a username, such as in Docker.
        username = getpass.getuser()
    # Python >= 3.13 only raises OSError
    except (ImportError, KeyError, OSError):
        username = None

    mod = sys.modules.get(modname)

    # This information only exists to make the cookie unique on the
    # computer, not as a security feature.
    probably_public_bits = [
        username,
        modname,
        getattr(app, "__name__", type(app).__name__),
        getattr(mod, "__file__", None),
    ]

    # This information is here to make it harder for an attacker to
    # guess the cookie name. They are unlikely to be contained anywhere
    # within the unauthenticated debug page.
    private_bits = [str(uuid.getnode()), get_machine_id()]

    h = hashlib.sha1()
    for bit in chain(probably_public_bits, private_bits):
        if not bit:
            continue
        if isinstance(bit, str):
            bit = bit.encode()
        h.update(bit)
    h.update(b"cookiesalt")

    cookie_name = f"__wzd{h.hexdigest()[:20]}"

    # If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate out 9 digits
    if num is None:
        h.update(b"pinsalt")
        num = f"{int(h.hexdigest(), 16):09d}"[:9]

    # Format the pincode in groups of digits for easier remembering if
    # we don't have a result yet.
    if rv is None:
        for group_size in 5, 4, 3:
            if len(num) % group_size == 0:
                rv = "-".join(
                    num[x : x + group_size].rjust(group_size, "0")
                    for x in range(0, len(num), group_size)
                )
                break
        else:
            # No group size divides evenly; use the digits as-is.
            rv = num

    return rv, cookie_name
|
||||
|
||||
|
||||
class DebuggedApplication:
|
||||
"""Enables debugging support for a given application::
|
||||
|
||||
from werkzeug.debug import DebuggedApplication
|
||||
from myapp import app
|
||||
app = DebuggedApplication(app, evalex=True)
|
||||
|
||||
The ``evalex`` argument allows evaluating expressions in any frame
|
||||
of a traceback. This works by preserving each frame with its local
|
||||
state. Some state, such as context globals, cannot be restored with
|
||||
the frame by default. When ``evalex`` is enabled,
|
||||
``environ["werkzeug.debug.preserve_context"]`` will be a callable
|
||||
that takes a context manager, and can be called multiple times.
|
||||
Each context manager will be entered before evaluating code in the
|
||||
frame, then exited again, so they can perform setup and cleanup for
|
||||
each call.
|
||||
|
||||
:param app: the WSGI application to run debugged.
|
||||
:param evalex: enable exception evaluation feature (interactive
|
||||
debugging). This requires a non-forking server.
|
||||
:param request_key: The key that points to the request object in this
|
||||
environment. This parameter is ignored in current
|
||||
versions.
|
||||
:param console_path: the URL for a general purpose console.
|
||||
:param console_init_func: the function that is executed before starting
|
||||
the general purpose console. The return value
|
||||
is used as initial namespace.
|
||||
:param show_hidden_frames: by default hidden traceback frames are skipped.
|
||||
You can show them by setting this parameter
|
||||
to `True`.
|
||||
:param pin_security: can be used to disable the pin based security system.
|
||||
:param pin_logging: enables the logging of the pin system.
|
||||
|
||||
.. versionchanged:: 2.2
|
||||
Added the ``werkzeug.debug.preserve_context`` environ key.
|
||||
"""
|
||||
|
||||
_pin: str
|
||||
_pin_cookie: str
|
||||
|
||||
    def __init__(
        self,
        app: WSGIApplication,
        evalex: bool = False,
        request_key: str = "werkzeug.request",
        console_path: str = "/console",
        console_init_func: t.Callable[[], dict[str, t.Any]] | None = None,
        show_hidden_frames: bool = False,
        pin_security: bool = True,
        pin_logging: bool = True,
    ) -> None:
        """Set up the debugging middleware; see the class docstring for the
        meaning of each parameter.
        """
        # Normalize any falsy value to None.
        if not console_init_func:
            console_init_func = None
        self.app = app
        self.evalex = evalex
        # Preserved traceback frames keyed by id(), plus the context
        # managers to re-enter when evaluating code in them.
        self.frames: dict[int, DebugFrameSummary | _ConsoleFrame] = {}
        self.frame_contexts: dict[int, list[t.ContextManager[None]]] = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # Random secret embedded in the debugger HTML (see debug_application).
        self.secret = gen_salt(20)
        # Shared unsigned-byte counter of failed PIN attempts; used by
        # _fail_pin_auth to throttle brute forcing.
        self._failed_pin_auth = Value("B")

        self.pin_logging = pin_logging
        if pin_security:
            # Print out the pin for the debugger on standard out.
            if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
                _log("warning", " * Debugger is active!")
                if self.pin is None:
                    _log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
                else:
                    _log("info", " * Debugger PIN: %s", self.pin)
        else:
            self.pin = None

        self.trusted_hosts: list[str] = [".localhost", "127.0.0.1"]
        """List of domains to allow requests to the debugger from. A leading dot
        allows all subdomains. This only allows ``".localhost"`` domains by
        default.

        .. versionadded:: 3.0.3
        """
|
||||
|
||||
    @property
    def pin(self) -> str | None:
        """The debugger PIN, computed lazily on first access, or ``None``
        when PIN security is disabled.
        """
        if not hasattr(self, "_pin"):
            # Pin and cookie name are derived together; cache both.
            pin_cookie = get_pin_and_cookie_name(self.app)
            self._pin, self._pin_cookie = pin_cookie  # type: ignore
        return self._pin

    @pin.setter
    def pin(self, value: str) -> None:
        self._pin = value
|
||||
|
||||
    @property
    def pin_cookie_name(self) -> str:
        """The name of the pin cookie."""
        if not hasattr(self, "_pin_cookie"):
            # Pin and cookie name are derived together; cache both.
            pin_cookie = get_pin_and_cookie_name(self.app)
            self._pin, self._pin_cookie = pin_cookie  # type: ignore
        return self._pin_cookie
|
||||
|
||||
    def debug_application(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterator[bytes]:
        """Run the application and conserve the traceback frames."""
        contexts: list[t.ContextManager[t.Any]] = []

        if self.evalex:
            # Let the app register context managers to re-enter when
            # evaluating code in preserved frames (see execute_command).
            environ["werkzeug.debug.preserve_context"] = contexts.append

        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            yield from app_iter
            if hasattr(app_iter, "close"):
                app_iter.close()
        except Exception as e:
            if hasattr(app_iter, "close"):
                app_iter.close()  # type: ignore

            # Build the debugger traceback and keep every frame (and its
            # registered contexts) so the console can evaluate in them.
            tb = DebugTraceback(e, skip=1, hide=not self.show_hidden_frames)

            for frame in tb.all_frames:
                self.frames[id(frame)] = frame
                self.frame_contexts[id(frame)] = contexts

            is_trusted = bool(self.check_pin_trust(environ))
            html = tb.render_debugger_html(
                evalex=self.evalex and self.check_host_trust(environ),
                secret=self.secret,
                evalex_trusted=is_trusted,
            )
            response = Response(html, status=500, mimetype="text/html")

            try:
                yield from response(environ, start_response)
            except Exception:
                # if we end up here there has been output but an error
                # occurred. in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ["wsgi.errors"].write(
                    "Debugging middleware caught exception in streamed "
                    "response at a point where response headers were already "
                    "sent.\n"
                )

            environ["wsgi.errors"].write("".join(tb.render_traceback_text()))
|
||||
|
||||
    def execute_command(
        self,
        request: Request,
        command: str,
        frame: DebugFrameSummary | _ConsoleFrame,
    ) -> Response:
        """Execute a command in a console."""
        if not self.check_host_trust(request.environ):
            return SecurityError()  # type: ignore[return-value]

        contexts = self.frame_contexts.get(id(frame), [])

        with ExitStack() as exit_stack:
            # Re-enter the context managers preserved for this frame so the
            # evaluation sees the same context state as the failed request.
            for cm in contexts:
                exit_stack.enter_context(cm)

            return Response(frame.eval(command), mimetype="text/html")
|
||||
|
||||
    def display_console(self, request: Request) -> Response:
        """Display a standalone shell."""
        if not self.check_host_trust(request.environ):
            return SecurityError()  # type: ignore[return-value]

        # Lazily create the standalone console frame under the reserved id 0.
        if 0 not in self.frames:
            if self.console_init_func is None:
                ns = {}
            else:
                ns = dict(self.console_init_func())
            ns.setdefault("app", self.app)
            self.frames[0] = _ConsoleFrame(ns)
        is_trusted = bool(self.check_pin_trust(request.environ))
        return Response(
            render_console_html(secret=self.secret, evalex_trusted=is_trusted),
            mimetype="text/html",
        )
|
||||
|
||||
    def get_resource(self, request: Request, filename: str) -> Response:
        """Return a static resource from the shared folder."""
        # basename() strips any directory parts, preventing path traversal
        # out of the shared folder.
        path = join("shared", basename(filename))

        try:
            data = pkgutil.get_data(__package__, path)
        except OSError:
            return NotFound()  # type: ignore[return-value]
        else:
            if data is None:
                return NotFound()  # type: ignore[return-value]

            # Cheap content-based etag for client-side caching.
            etag = str(adler32(data) & 0xFFFFFFFF)
            return send_file(
                BytesIO(data), request.environ, download_name=filename, etag=etag
            )
|
||||
|
||||
def check_pin_trust(self, environ: WSGIEnvironment) -> bool | None:
    """Checks if the request passed the pin test. This returns `True` if the
    request is trusted on a pin/cookie basis and returns `False` if not.
    Additionally if the cookie's stored pin hash is wrong it will return
    `None` so that appropriate action can be taken.
    """
    # With no pin configured, every request is trusted.
    if self.pin is None:
        return True
    val = parse_cookie(environ).get(self.pin_cookie_name)
    # Cookie value format is "<issue timestamp>|<pin hash>".
    if not val or "|" not in val:
        return False
    ts_str, pin_hash = val.split("|", 1)

    try:
        ts = int(ts_str)
    except ValueError:
        return False

    # Stored hash no longer matches the current pin: the pin changed
    # since the cookie was issued -> signal the caller with None.
    if pin_hash != hash_pin(self.pin):
        return None
    # The cookie is only trusted for PIN_TIME seconds after issue.
    return (time.time() - PIN_TIME) < ts
|
||||
|
||||
def check_host_trust(self, environ: WSGIEnvironment) -> bool:
    """Return True when the request's Host header is in the trusted list."""
    host = environ.get("HTTP_HOST")
    return host_is_trusted(host, self.trusted_hosts)
|
||||
|
||||
def _fail_pin_auth(self) -> None:
    """Record one failed pin attempt and throttle further guesses."""
    with self._failed_pin_auth.get_lock():
        count = self._failed_pin_auth.value
        self._failed_pin_auth.value = count + 1

    # Back off harder once several attempts have already been burned.
    delay = 5.0 if count > 5 else 0.5
    time.sleep(delay)
|
||||
|
||||
def pin_auth(self, request: Request) -> Response:
    """Authenticates with the pin.

    Returns a JSON response ``{"auth": bool, "exhausted": bool}`` and, on
    success, sets the trust cookie that :meth:`check_pin_trust` validates.
    """
    if not self.check_host_trust(request.environ):
        return SecurityError()  # type: ignore[return-value]

    exhausted = False
    auth = False
    trust = self.check_pin_trust(request.environ)
    pin = t.cast(str, self.pin)

    # If the trust return value is `None` it means that the cookie is
    # set but the stored pin hash value is bad. This means that the
    # pin was changed. In this case we count a bad auth and unset the
    # cookie. This way it becomes harder to guess the cookie name
    # instead of the pin as we still count up failures.
    bad_cookie = False
    if trust is None:
        self._fail_pin_auth()
        bad_cookie = True

    # If we're trusted, we're authenticated.
    elif trust:
        auth = True

    # If we failed too many times, then we're locked out.
    elif self._failed_pin_auth.value > 10:
        exhausted = True

    # Otherwise go through pin based authentication
    else:
        entered_pin = request.args["pin"]

        # Dashes are only a display convenience; compare without them.
        if entered_pin.strip().replace("-", "") == pin.replace("-", ""):
            self._failed_pin_auth.value = 0
            auth = True
        else:
            self._fail_pin_auth()

    rv = Response(
        json.dumps({"auth": auth, "exhausted": exhausted}),
        mimetype="application/json",
    )
    if auth:
        # Issue the trust cookie: "<timestamp>|<pin hash>".
        rv.set_cookie(
            self.pin_cookie_name,
            f"{int(time.time())}|{hash_pin(pin)}",
            httponly=True,
            samesite="Strict",
            secure=request.is_secure,
        )
    elif bad_cookie:
        rv.delete_cookie(self.pin_cookie_name)
    return rv
|
||||
|
||||
def log_pin_request(self, request: Request) -> Response:
    """Write the debugger pin to the server log when pin logging is on."""
    if not self.check_host_trust(request.environ):
        return SecurityError()  # type: ignore[return-value]

    should_log = self.pin_logging and self.pin is not None
    if should_log:
        _log(
            "info", " * To enable the debugger you need to enter the security pin:"
        )
        _log("info", " * Debugger pin code: %s", self.pin)
    return Response("")
|
||||
|
||||
def __call__(
    self, environ: WSGIEnvironment, start_response: StartResponse
) -> t.Iterable[bytes]:
    """Dispatch the requests."""
    # important: don't ever access a function here that reads the incoming
    # form data! Otherwise the application won't have access to that data
    # any more!
    request = Request(environ)
    # Default: run the wrapped application under the debugger.
    response = self.debug_application
    if request.args.get("__debugger__") == "yes":
        cmd = request.args.get("cmd")
        arg = request.args.get("f")
        secret = request.args.get("s")
        frame = self.frames.get(request.args.get("frm", type=int))  # type: ignore
        if cmd == "resource" and arg:
            # Static asset; no secret required.
            response = self.get_resource(request, arg)  # type: ignore
        elif cmd == "pinauth" and secret == self.secret:
            response = self.pin_auth(request)  # type: ignore
        elif cmd == "printpin" and secret == self.secret:
            response = self.log_pin_request(request)  # type: ignore
        elif (
            self.evalex
            and cmd is not None
            and frame is not None
            and self.secret == secret
            and self.check_pin_trust(environ)
        ):
            # Any other cmd is Python source to execute in the given
            # frame; requires evalex, the page secret, and pin trust.
            response = self.execute_command(request, cmd, frame)  # type: ignore
        elif (
            self.evalex
            and self.console_path is not None
            and request.path == self.console_path
        ):
            response = self.display_console(request)  # type: ignore
    return response(environ, start_response)
|
||||
219
lib/python3.11/site-packages/werkzeug/debug/console.py
Normal file
219
lib/python3.11/site-packages/werkzeug/debug/console.py
Normal file
@ -0,0 +1,219 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import code
|
||||
import sys
|
||||
import typing as t
|
||||
from contextvars import ContextVar
|
||||
from types import CodeType
|
||||
|
||||
from markupsafe import escape
|
||||
|
||||
from .repr import debug_repr
|
||||
from .repr import dump
|
||||
from .repr import helper
|
||||
|
||||
_stream: ContextVar[HTMLStringO] = ContextVar("werkzeug.debug.console.stream")
|
||||
_ipy: ContextVar[_InteractiveConsole] = ContextVar("werkzeug.debug.console.ipy")
|
||||
|
||||
|
||||
class HTMLStringO:
    """A write-only text stream whose ``write`` HTML-escapes its input.

    Used as the stdout replacement while running code in the debugger
    console; ``_write`` appends raw (pre-rendered) HTML without escaping.
    """

    def __init__(self) -> None:
        self._buffer: list[str] = []

    def isatty(self) -> bool:
        return False

    def close(self) -> None:
        pass

    def flush(self) -> None:
        pass

    def seek(self, n: int, mode: int = 0) -> None:
        pass

    def readline(self) -> str:
        # Pop the oldest buffered chunk, or "" when nothing is buffered.
        if not self._buffer:
            return ""
        return self._buffer.pop(0)

    def reset(self) -> str:
        """Drain the buffer and return everything written so far."""
        out = "".join(self._buffer)
        self._buffer.clear()
        return out

    def _write(self, x: str) -> None:
        # Raw append, no escaping; callers pass already-rendered HTML.
        self._buffer.append(x)

    def write(self, x: str) -> None:
        self._write(escape(x))

    def writelines(self, x: t.Iterable[str]) -> None:
        self._write(escape("".join(x)))
|
||||
|
||||
|
||||
class ThreadedStream:
    """Thread-local wrapper for sys.stdout for the interactive console."""

    @staticmethod
    def push() -> None:
        """Install the proxy (once, globally) and bind a fresh capture
        buffer to the current context."""
        if not isinstance(sys.stdout, ThreadedStream):
            sys.stdout = t.cast(t.TextIO, ThreadedStream())

        _stream.set(HTMLStringO())

    @staticmethod
    def fetch() -> str:
        """Return and clear the output captured for the current context;
        "" when no console context is active."""
        try:
            stream = _stream.get()
        except LookupError:
            return ""

        return stream.reset()

    @staticmethod
    def displayhook(obj: object) -> None:
        """``sys.displayhook`` replacement rendering reprs as HTML."""
        try:
            stream = _stream.get()
        except LookupError:
            # Not inside a console context: defer to the original hook.
            return _displayhook(obj)  # type: ignore

        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            # Mirror the interactive interpreter's "_" convention.
            _ipy.get().locals["_"] = obj
            stream._write(debug_repr(obj))

    def __setattr__(self, name: str, value: t.Any) -> None:
        # The proxy instance is shared globally; forbid mutation.
        raise AttributeError(f"read only attribute {name}")

    def __dir__(self) -> list[str]:
        return dir(sys.__stdout__)

    def __getattribute__(self, name: str) -> t.Any:
        # Forward every attribute access to the context's capture stream,
        # or to the real stdout when no console context is active.
        try:
            stream = _stream.get()
        except LookupError:
            stream = sys.__stdout__  # type: ignore[assignment]

        return getattr(stream, name)

    def __repr__(self) -> str:
        return repr(sys.__stdout__)
|
||||
|
||||
|
||||
# Install ThreadedStream.displayhook globally, keeping a reference to the
# original hook so it can still be called outside console contexts.
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
|
||||
|
||||
|
||||
class _ConsoleLoader:
|
||||
def __init__(self) -> None:
|
||||
self._storage: dict[int, str] = {}
|
||||
|
||||
def register(self, code: CodeType, source: str) -> None:
|
||||
self._storage[id(code)] = source
|
||||
# register code objects of wrapped functions too.
|
||||
for var in code.co_consts:
|
||||
if isinstance(var, CodeType):
|
||||
self._storage[id(var)] = source
|
||||
|
||||
def get_source_by_code(self, code: CodeType) -> str | None:
|
||||
try:
|
||||
return self._storage[id(code)]
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
|
||||
class _InteractiveConsole(code.InteractiveInterpreter):
    """Interpreter driving one debugger console; renders output as HTML."""

    # Merged global/local namespace the console executes in.
    locals: dict[str, t.Any]

    def __init__(self, globals: dict[str, t.Any], locals: dict[str, t.Any]) -> None:
        self.loader = _ConsoleLoader()
        # Merge globals and locals into one namespace and expose the
        # console helpers; __loader__ lets tracebacks find console source.
        locals = {
            **globals,
            **locals,
            "dump": dump,
            "help": helper,
            "__loader__": self.loader,
        }
        super().__init__(locals)
        original_compile = self.compile

        def compile(source: str, filename: str, symbol: str) -> CodeType | None:
            # Wrap the base compiler so every compiled code object is
            # registered with the loader for later source lookup.
            code = original_compile(source, filename, symbol)

            if code is not None:
                self.loader.register(code, source)

            return code

        self.compile = compile  # type: ignore[assignment]
        # True while a multi-line statement is still being entered.
        self.more = False
        # Accumulated lines of the pending multi-line statement.
        self.buffer: list[str] = []

    def runsource(self, source: str, **kwargs: t.Any) -> str:  # type: ignore
        """Run one line of input; return the echoed prompt + HTML output."""
        source = f"{source.rstrip()}\n"
        ThreadedStream.push()
        prompt = "... " if self.more else ">>> "
        try:
            # Re-run the whole buffered statement plus the new line.
            source_to_eval = "".join(self.buffer + [source])
            if super().runsource(source_to_eval, "<debugger>", "single"):
                # Input is syntactically incomplete: keep buffering.
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            output = ThreadedStream.fetch()
        return f"{prompt}{escape(source)}{output}"

    def runcode(self, code: CodeType) -> None:
        try:
            exec(code, self.locals)
        except Exception:
            self.showtraceback()

    def showtraceback(self) -> None:
        # Imported lazily to avoid a circular import at module load time.
        from .tbtools import DebugTraceback

        exc = t.cast(BaseException, sys.exc_info()[1])
        te = DebugTraceback(exc, skip=1)
        sys.stdout._write(te.render_traceback_html())  # type: ignore

    def showsyntaxerror(self, filename: str | None = None) -> None:
        from .tbtools import DebugTraceback

        exc = t.cast(BaseException, sys.exc_info()[1])
        # skip=4 hides the interpreter machinery frames from the output.
        te = DebugTraceback(exc, skip=4)
        sys.stdout._write(te.render_traceback_html())  # type: ignore

    def write(self, data: str) -> None:
        sys.stdout.write(data)
|
||||
|
||||
|
||||
class Console:
    """An interactive console.

    Thin wrapper that binds an :class:`_InteractiveConsole` to the
    current context for each :meth:`eval` call.
    """

    def __init__(
        self,
        globals: dict[str, t.Any] | None = None,
        locals: dict[str, t.Any] | None = None,
    ) -> None:
        self._ipy = _InteractiveConsole(globals or {}, locals or {})

    def eval(self, code: str) -> str:
        """Run *code* in this console and return the rendered HTML output."""
        _ipy.set(self._ipy)
        saved_stdout = sys.stdout
        try:
            return self._ipy.runsource(code)
        finally:
            # runsource swaps in the ThreadedStream proxy; restore stdout.
            sys.stdout = saved_stdout
|
||||
282
lib/python3.11/site-packages/werkzeug/debug/repr.py
Normal file
282
lib/python3.11/site-packages/werkzeug/debug/repr.py
Normal file
@ -0,0 +1,282 @@
|
||||
"""Object representations for debugging purposes. Unlike the default
|
||||
repr, these expose more information and produce HTML instead of ASCII.
|
||||
|
||||
Together with the CSS and JavaScript of the debugger this gives a
|
||||
colorful and more compact output.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import codecs
|
||||
import re
|
||||
import sys
|
||||
import typing as t
|
||||
from collections import deque
|
||||
from traceback import format_exception_only
|
||||
|
||||
from markupsafe import escape
|
||||
|
||||
missing = object()
|
||||
_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
|
||||
RegexType = type(_paragraph_re)
|
||||
|
||||
HELP_HTML = """\
|
||||
<div class=box>
|
||||
<h3>%(title)s</h3>
|
||||
<pre class=help>%(text)s</pre>
|
||||
</div>\
|
||||
"""
|
||||
OBJECT_DUMP_HTML = """\
|
||||
<div class=box>
|
||||
<h3>%(title)s</h3>
|
||||
%(repr)s
|
||||
<table>%(items)s</table>
|
||||
</div>\
|
||||
"""
|
||||
|
||||
|
||||
def debug_repr(obj: object) -> str:
    """Creates a debug repr of an object as HTML string."""
    generator = DebugReprGenerator()
    return generator.repr(obj)
|
||||
|
||||
|
||||
def dump(obj: object = missing) -> None:
    """Print the object details to ``stdout._write`` (for the interactive
    console of the web debugger).

    With no argument, dumps the caller's local variables instead.
    """
    gen = DebugReprGenerator()
    if obj is missing:
        # sys._getframe(1) is the caller's frame.
        rendered = gen.dump_locals(sys._getframe(1).f_locals)
    else:
        rendered = gen.dump_object(obj)
    sys.stdout._write(rendered)  # type: ignore
|
||||
|
||||
|
||||
class _Helper:
|
||||
"""Displays an HTML version of the normal help, for the interactive
|
||||
debugger only because it requires a patched sys.stdout.
|
||||
"""
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "Type help(object) for help about object."
|
||||
|
||||
def __call__(self, topic: t.Any | None = None) -> None:
|
||||
if topic is None:
|
||||
sys.stdout._write(f"<span class=help>{self!r}</span>") # type: ignore
|
||||
return
|
||||
import pydoc
|
||||
|
||||
pydoc.help(topic)
|
||||
rv = sys.stdout.reset() # type: ignore
|
||||
paragraphs = _paragraph_re.split(rv)
|
||||
if len(paragraphs) > 1:
|
||||
title = paragraphs[0]
|
||||
text = "\n\n".join(paragraphs[1:])
|
||||
else:
|
||||
title = "Help"
|
||||
text = paragraphs[0]
|
||||
sys.stdout._write(HELP_HTML % {"title": title, "text": text}) # type: ignore
|
||||
|
||||
|
||||
# Singleton bound as "help" in the console namespace; its repr and call
# both render HTML instead of plain text.
helper = _Helper()
|
||||
|
||||
|
||||
def _add_subclass_info(inner: str, obj: object, base: type | tuple[type, ...]) -> str:
|
||||
if isinstance(base, tuple):
|
||||
for cls in base:
|
||||
if type(obj) is cls:
|
||||
return inner
|
||||
elif type(obj) is base:
|
||||
return inner
|
||||
module = ""
|
||||
if obj.__class__.__module__ not in ("__builtin__", "exceptions"):
|
||||
module = f'<span class="module">{obj.__class__.__module__}.</span>'
|
||||
return f"{module}{type(obj).__name__}({inner})"
|
||||
|
||||
|
||||
def _sequence_repr_maker(
    left: str, right: str, base: type, limit: int = 8
) -> t.Callable[[DebugReprGenerator, t.Iterable[t.Any], bool], str]:
    """Build a repr method for a sequence type delimited by *left*/*right*.

    Items beyond *limit* are rendered inside a collapsible "extended" span.
    """

    def proxy(self: DebugReprGenerator, obj: t.Iterable[t.Any], recursive: bool) -> str:
        if recursive:
            # Cycle detected by the caller: elide the contents.
            return _add_subclass_info(f"{left}...{right}", obj, base)
        parts = [left]
        extended = False
        for idx, item in enumerate(obj):
            if idx:
                parts.append(", ")
            if idx == limit:
                parts.append('<span class="extended">')
                extended = True
            parts.append(self.repr(item))
        if extended:
            parts.append("</span>")
        parts.append(right)
        return _add_subclass_info("".join(parts), obj, base)

    return proxy
|
||||
|
||||
|
||||
class DebugReprGenerator:
    """Renders HTML reprs for arbitrary objects.

    Keeps a stack of objects currently being rendered so self-referential
    structures are detected and elided instead of recursing forever.
    """

    def __init__(self) -> None:
        # Objects currently being repr'd; used for cycle detection.
        self._stack: list[t.Any] = []

    list_repr = _sequence_repr_maker("[", "]", list)
    tuple_repr = _sequence_repr_maker("(", ")", tuple)
    set_repr = _sequence_repr_maker("set([", "])", set)
    frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
    deque_repr = _sequence_repr_maker(
        '<span class="module">collections.</span>deque([', "])", deque
    )

    def regex_repr(self, obj: t.Pattern[t.AnyStr]) -> str:
        """Render a compiled pattern as an ``re.compile(r'...')`` literal."""
        pattern = repr(obj.pattern)
        # Undo repr's escaping so the pattern reads like a raw literal.
        pattern = codecs.decode(pattern, "unicode-escape", "ignore")
        pattern = f"r{pattern}"
        return f're.compile(<span class="string regex">{pattern}</span>)'

    def string_repr(self, obj: str | bytes, limit: int = 70) -> str:
        """Render a str/bytes repr, collapsing anything past *limit*."""
        buf = ['<span class="string">']
        r = repr(obj)

        # shorten the repr when the hidden part would be at least 3 chars
        if len(r) - limit > 2:
            buf.extend(
                (
                    escape(r[:limit]),
                    '<span class="extended">',
                    escape(r[limit:]),
                    "</span>",
                )
            )
        else:
            buf.append(escape(r))

        buf.append("</span>")
        out = "".join(buf)

        # if the repr looks like a standard string, add subclass info if needed
        if r[0] in "'\"" or (r[0] == "b" and r[1] in "'\""):
            return _add_subclass_info(out, obj, (bytes, str))

        # otherwise, assume the repr distinguishes the subclass already
        return out

    def dict_repr(
        self,
        d: dict[int, None] | dict[str, int] | dict[str | int, int],
        recursive: bool,
        limit: int = 5,
    ) -> str:
        """Render a dict; entries past *limit* go into an extended span."""
        if recursive:
            return _add_subclass_info("{...}", d, dict)
        buf = ["{"]
        have_extended_section = False
        for idx, (key, value) in enumerate(d.items()):
            if idx:
                buf.append(", ")
            if idx == limit - 1:
                buf.append('<span class="extended">')
                have_extended_section = True
            buf.append(
                f'<span class="pair"><span class="key">{self.repr(key)}</span>:'
                f' <span class="value">{self.repr(value)}</span></span>'
            )
        if have_extended_section:
            buf.append("</span>")
        buf.append("}")
        return _add_subclass_info("".join(buf), d, dict)

    def object_repr(self, obj: t.Any) -> str:
        """Fallback: the plain repr, escaped."""
        r = repr(obj)
        return f'<span class="object">{escape(r)}</span>'

    def dispatch_repr(self, obj: t.Any, recursive: bool) -> str:
        """Pick the specialized renderer for *obj*'s exact kind.

        Order matters: str/bytes before generic sequences, set before
        frozenset, dict before deque.
        """
        if obj is helper:
            return f'<span class="help">{helper!r}</span>'
        if isinstance(obj, (int, float, complex)):
            return f'<span class="number">{obj!r}</span>'
        if isinstance(obj, str) or isinstance(obj, bytes):
            return self.string_repr(obj)
        if isinstance(obj, RegexType):
            return self.regex_repr(obj)
        if isinstance(obj, list):
            return self.list_repr(obj, recursive)
        if isinstance(obj, tuple):
            return self.tuple_repr(obj, recursive)
        if isinstance(obj, set):
            return self.set_repr(obj, recursive)
        if isinstance(obj, frozenset):
            return self.frozenset_repr(obj, recursive)
        if isinstance(obj, dict):
            return self.dict_repr(obj, recursive)
        if isinstance(obj, deque):
            return self.deque_repr(obj, recursive)
        return self.object_repr(obj)

    def fallback_repr(self) -> str:
        """Rendered when the object's own repr raised an exception."""
        try:
            info = "".join(format_exception_only(*sys.exc_info()[:2]))
        except Exception:
            info = "?"
        return (
            '<span class="brokenrepr">'
            f"<broken repr ({escape(info.strip())})></span>"
        )

    def repr(self, obj: object) -> str:
        """Render *obj*, detecting cycles via the identity stack."""
        recursive = False
        for item in self._stack:
            if item is obj:
                recursive = True
                break
        self._stack.append(obj)
        try:
            try:
                return self.dispatch_repr(obj, recursive)
            except Exception:
                return self.fallback_repr()
        finally:
            self._stack.pop()

    def dump_object(self, obj: object) -> str:
        """Render a titled dump box: dict contents, or attribute details."""
        repr = None
        items: list[tuple[str, str]] | None = None

        # Dicts with all-string keys are shown as key/value contents.
        if isinstance(obj, dict):
            title = "Contents of"
            items = []
            for key, value in obj.items():
                if not isinstance(key, str):
                    items = None
                    break
                items.append((key, self.repr(value)))
        # Everything else (or non-str-keyed dicts): dump attributes.
        if items is None:
            items = []
            repr = self.repr(obj)
            for key in dir(obj):
                try:
                    items.append((key, self.repr(getattr(obj, key))))
                except Exception:
                    pass
            title = "Details for"
        # object.__repr__ gives "<module.Cls at 0x...>"; strip the angles.
        title += f" {object.__repr__(obj)[1:-1]}"
        return self.render_object_dump(items, title, repr)

    def dump_locals(self, d: dict[str, t.Any]) -> str:
        """Render a frame's local variables as a dump box."""
        items = [(key, self.repr(value)) for key, value in d.items()]
        return self.render_object_dump(items, "Local variables in frame")

    def render_object_dump(
        self, items: list[tuple[str, str]], title: str, repr: str | None = None
    ) -> str:
        """Fill the OBJECT_DUMP_HTML template; values are already HTML."""
        html_items = []
        for key, value in items:
            html_items.append(f"<tr><th>{escape(key)}<td><pre class=repr>{value}</pre>")
        if not html_items:
            html_items.append("<tr><td><em>Nothing</em>")
        return OBJECT_DUMP_HTML % {
            "title": escape(title),
            "repr": f"<pre class=repr>{repr if repr else ''}</pre>",
            "items": "\n".join(html_items),
        }
|
||||
@ -0,0 +1,6 @@
|
||||
Silk icon set 1.3 by Mark James <mjames@gmail.com>
|
||||
|
||||
http://www.famfamfam.com/lab/icons/silk/
|
||||
|
||||
License: [CC-BY-2.5](https://creativecommons.org/licenses/by/2.5/)
|
||||
or [CC-BY-3.0](https://creativecommons.org/licenses/by/3.0/)
|
||||
BIN
lib/python3.11/site-packages/werkzeug/debug/shared/console.png
Normal file
BIN
lib/python3.11/site-packages/werkzeug/debug/shared/console.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 507 B |
344
lib/python3.11/site-packages/werkzeug/debug/shared/debugger.js
Normal file
344
lib/python3.11/site-packages/werkzeug/debug/shared/debugger.js
Normal file
@ -0,0 +1,344 @@
|
||||
// Entry point: wire up the whole debugger UI once the DOM is parsed.
docReady(() => {
  if (!EVALEX_TRUSTED) {
    initPinBox();
  }
  // if we are in console mode, show the console.
  if (CONSOLE_MODE && EVALEX) {
    createInteractiveConsole();
  }

  const frames = document.querySelectorAll("div.traceback div.frame");
  if (EVALEX) {
    addConsoleIconToFrames(frames);
  }
  // Clicking the summary scrolls the full traceback into view.
  addEventListenersToElements(document.querySelectorAll("div.detail"), "click", () =>
    document.querySelector("div.traceback").scrollIntoView(false)
  );
  addToggleFrameTraceback(frames);
  addToggleTraceTypesOnClick(document.querySelectorAll("h2.traceback"));
  addInfoPrompt(document.querySelectorAll("span.nojavascript"));
  wrapPlainTraceback();
});
|
||||
|
||||
function addToggleFrameTraceback(frames) {
  // Clicking a frame expands/collapses its source context.
  frames.forEach((frame) => {
    frame.addEventListener("click", () => {
      const firstPre = frame.getElementsByTagName("pre")[0];
      firstPre.parentElement.classList.toggle("expanded");
    });
  });
}
|
||||
|
||||
|
||||
function wrapPlainTraceback() {
  // Swap the plaintext <textarea> for a read-only <pre> with the same text.
  const plainTraceback = document.querySelector("div.plain textarea");
  const pre = document.createElement("pre");
  pre.appendChild(document.createTextNode(plainTraceback.textContent));
  plainTraceback.replaceWith(pre);
}
|
||||
|
||||
function makeDebugURL(args) {
  // Build a debugger request URL, always attaching the per-page secret.
  const params = new URLSearchParams(args);
  params.set("s", SECRET);
  return `?__debugger__=yes&${params}`;
}
|
||||
|
||||
// Wire up the pin prompt form: verify the entered pin server-side and
// unlock the console on success.
function initPinBox() {
  document.querySelector(".pin-prompt form").addEventListener(
    "submit",
    function (event) {
      event.preventDefault();
      // Disable the submit button while the request is in flight.
      const btn = this.btn;
      btn.disabled = true;

      fetch(
        makeDebugURL({cmd: "pinauth", pin: this.pin.value})
      )
        .then((res) => res.json())
        .then(({auth, exhausted}) => {
          if (auth) {
            // Trust cookie is now set; hide the prompt.
            EVALEX_TRUSTED = true;
            fadeOut(document.getElementsByClassName("pin-prompt")[0]);
          } else {
            alert(
              `Error: ${
                exhausted
                  ? "too many attempts. Restart server to retry."
                  : "incorrect pin"
              }`
            );
          }
        })
        .catch((err) => {
          alert("Error: Could not verify PIN. Network error?");
          console.error(err);
        })
        .finally(() => (btn.disabled = false));
    },
    false
  );
}
|
||||
|
||||
function promptForPin() {
  if (EVALEX_TRUSTED) {
    return;
  }
  // Ask the server to print the pin to its log, then show the prompt.
  fetch(makeDebugURL({cmd: "printpin"}));
  const pinPrompt = document.getElementsByClassName("pin-prompt")[0];
  fadeIn(pinPrompt);
  document.querySelector('.pin-prompt input[name="pin"]').focus();
}
|
||||
|
||||
/**
 * Helper function for shell initialization
 */
function openShell(consoleNode, target, frameID) {
  promptForPin();
  // Reuse the console already built for this frame, just toggling it.
  if (consoleNode) {
    slideToggle(consoleNode);
    return consoleNode;
  }
  // Command history; the final entry is the in-progress input slot.
  let historyPos = 0;
  const history = [""];
  const consoleElement = createConsole();
  const output = createConsoleOutput();
  const form = createConsoleInputForm();
  const command = createConsoleInput();

  target.parentNode.appendChild(consoleElement);
  consoleElement.append(output);
  consoleElement.append(form);
  form.append(command);
  command.focus();
  slideToggle(consoleElement);

  form.addEventListener("submit", (e) => {
    handleConsoleSubmit(e, command, frameID).then((consoleOutput) => {
      output.append(consoleOutput);
      command.focus();
      consoleElement.scrollTo(0, consoleElement.scrollHeight);
      // Insert the submitted command just before the in-progress slot.
      const old = history.pop();
      history.push(command.value);
      if (typeof old !== "undefined") {
        history.push(old);
      }
      historyPos = history.length - 1;
      command.value = "";
    });
  });

  command.addEventListener("keydown", (e) => {
    if (e.key === "l" && e.ctrlKey) {
      // Ctrl+L clears the scrollback, like a terminal.
      output.innerText = "--- screen cleared ---";
    } else if (e.key === "ArrowUp" || e.key === "ArrowDown") {
      // Handle up arrow and down arrow.
      if (e.key === "ArrowUp" && historyPos > 0) {
        e.preventDefault();
        historyPos--;
      } else if (e.key === "ArrowDown" && historyPos < history.length - 1) {
        historyPos++;
      }
      command.value = history[historyPos];
    }
    return false;
  });

  return consoleElement;
}
|
||||
|
||||
function addEventListenersToElements(elements, event, listener) {
  // Attach the same listener to every element in the collection.
  for (const el of elements) {
    el.addEventListener(event, listener);
  }
}
|
||||
|
||||
/**
 * Add extra info
 */
function addInfoPrompt(elements) {
  // Replace each "no javascript" notice with usage instructions; the
  // console hints are only included when code execution is enabled.
  for (let i = 0; i < elements.length; i++) {
    elements[i].innerHTML =
      "<p>To switch between the interactive traceback and the plaintext " +
      'one, you can click on the "Traceback" headline. From the text ' +
      "traceback you can also create a paste of it. " +
      (!EVALEX
        ? ""
        : "For code execution mouse-over the frame you want to debug and " +
          "click on the console icon on the right side." +
          "<p>You can execute arbitrary Python code in the stack frames and " +
          "there are some extra helpers available for introspection:" +
          "<ul><li><code>dump()</code> shows all variables in the frame" +
          "<li><code>dump(obj)</code> dumps all that's known about the object</ul>");
    elements[i].classList.remove("nojavascript");
  }
}
|
||||
|
||||
function addConsoleIconToFrames(frames) {
  // Attach a clickable console icon to every <pre> in each frame.
  for (let i = 0; i < frames.length; i++) {
    // Shared across the icons of one frame so the shell is built once.
    let consoleNode = null;
    const target = frames[i];
    // The frame id is the element id with its first 6 characters dropped.
    const frameID = frames[i].id.substring(6);
    const pres = target.getElementsByTagName("pre");

    for (let j = 0; j < pres.length; j++) {
      const img = createIconForConsole();
      img.addEventListener("click", (e) => {
        e.stopPropagation();
        consoleNode = openShell(consoleNode, target, frameID);
        return false;
      });
      pres[j].append(img);
    }
  }
}
|
||||
|
||||
function slideToggle(target) {
  // Visibility is driven entirely by the "active" CSS class.
  target.classList.toggle("active");
}
|
||||
|
||||
/**
 * toggle traceback types on click.
 */
function addToggleTraceTypesOnClick(elements) {
  for (let i = 0; i < elements.length; i++) {
    elements[i].addEventListener("click", () => {
      // Swap which of the two traceback views is visible.
      document.querySelector("div.traceback").classList.toggle("hidden");
      document.querySelector("div.plain").classList.toggle("hidden");
    });
    elements[i].style.cursor = "pointer";
    // Hide the plain view initially. NOTE(review): this toggles once per
    // element — appears to assume a single h2.traceback; verify if the
    // page can ever contain more than one.
    document.querySelector("div.plain").classList.toggle("hidden");
  }
}
|
||||
|
||||
function createConsole() {
  // The console is a <pre> shown immediately (has "active" from the start).
  const node = document.createElement("pre");
  node.classList.add("console", "active");
  return node;
}
|
||||
|
||||
function createConsoleOutput() {
  // Holds the scrollback of the interactive console.
  const node = document.createElement("div");
  node.classList.add("output");
  node.innerHTML = "[console ready]";
  return node;
}
|
||||
|
||||
function createConsoleInputForm() {
  // The ">>> " prompt; the text input is appended by the caller.
  const node = document.createElement("form");
  node.innerHTML = ">>> ";
  return node;
}
|
||||
|
||||
function createConsoleInput() {
  // Plain text input with all browser "assistance" switched off.
  const input = document.createElement("input");
  input.type = "text";
  input.setAttribute("autocomplete", "off");
  input.setAttribute("spellcheck", false);
  input.setAttribute("autocapitalize", "off");
  input.setAttribute("autocorrect", "off");
  return input;
}
|
||||
|
||||
function createIconForConsole() {
  // The icon image is served by the debugger's own resource endpoint.
  const icon = document.createElement("img");
  icon.setAttribute("src", makeDebugURL({cmd: "resource", f: "console.png"}));
  icon.setAttribute("title", "Open an interactive python shell in this frame");
  return icon;
}
|
||||
|
||||
// Toggle link appended after truncated ("extended") output spans.
// NOTE(review): the innerHTML literal may have been entity-decoded by a
// tooling pass (possibly originally "&nbsp;&nbsp;") — verify upstream.
function createExpansionButtonForConsole() {
  const expansionButton = document.createElement("a");
  expansionButton.setAttribute("href", "#");
  expansionButton.setAttribute("class", "toggle");
  expansionButton.innerHTML = " ";
  return expansionButton;
}
|
||||
|
||||
function createInteractiveConsole() {
  // Clear the placeholder content, then open the module-level shell
  // (frame id 0).
  const target = document.querySelector("div.console div.inner");
  while (target.firstChild) {
    target.firstChild.remove();
  }
  openShell(null, target, 0);
}
|
||||
|
||||
// Submit one console command to the server and resolve with a DOM node
// containing the rendered HTML output.
function handleConsoleSubmit(e, command, frameID) {
  // Prevent page from refreshing.
  e.preventDefault();

  return new Promise((resolve) => {
    // Any cmd value that isn't a built-in debugger command is treated by
    // the server as Python source to run in the given frame.
    fetch(makeDebugURL({cmd: command.value, frm: frameID}))
      .then((res) => {
        return res.text();
      })
      .then((data) => {
        const tmp = document.createElement("div");
        tmp.innerHTML = data;
        resolve(tmp);

        // Handle expandable span for long list outputs.
        // Example to test: list(range(13))
        let wrapperAdded = false;
        const wrapperSpan = document.createElement("span");
        const expansionButton = createExpansionButtonForConsole();

        tmp.querySelectorAll("span.extended").forEach((spanToWrap) => {
          const parentDiv = spanToWrap.parentNode;
          // All extended spans share one wrapper + one toggle button.
          if (!wrapperAdded) {
            parentDiv.insertBefore(wrapperSpan, spanToWrap);
            wrapperAdded = true;
          }
          parentDiv.removeChild(spanToWrap);
          wrapperSpan.append(spanToWrap);
          spanToWrap.hidden = true;

          expansionButton.addEventListener("click", (event) => {
            event.preventDefault();
            spanToWrap.hidden = !spanToWrap.hidden;
            expansionButton.classList.toggle("open");
            return false;
          });
        });

        // Add expansion button at end of wrapper.
        if (wrapperAdded) {
          wrapperSpan.append(expansionButton);
        }
      })
      .catch((err) => {
        console.error(err);
      });
    return false;
  });
}
|
||||
|
||||
/**
 * Fade an element out by stepping its opacity down each animation
 * frame, then hide it entirely once fully transparent.
 */
function fadeOut(element) {
  element.style.opacity = 1;

  (function step() {
    // style.opacity is a string; "-=" coerces it to a number first.
    element.style.opacity -= 0.1;
    if (element.style.opacity >= 0) {
      requestAnimationFrame(step);
    } else {
      element.style.display = "none";
    }
  })();
}
|
||||
|
||||
/**
 * Fade an element in by stepping its opacity up each animation frame.
 * `display` defaults to "block" when not provided.
 */
function fadeIn(element, display) {
  element.style.opacity = 0;
  element.style.display = display || "block";

  (function step() {
    const next = parseFloat(element.style.opacity) + 0.1;
    if (next <= 1) {
      element.style.opacity = next;
      requestAnimationFrame(step);
    }
  })();
}
|
||||
|
||||
/**
 * Run `fn` once the DOM is ready. If the document has already finished
 * parsing, defer the call with a short timeout instead of waiting for
 * a DOMContentLoaded event that will never fire again.
 */
function docReady(fn) {
  const state = document.readyState;
  if (state === "complete" || state === "interactive") {
    setTimeout(fn, 1);
  } else {
    document.addEventListener("DOMContentLoaded", fn);
  }
}
|
||||
BIN
lib/python3.11/site-packages/werkzeug/debug/shared/less.png
Normal file
BIN
lib/python3.11/site-packages/werkzeug/debug/shared/less.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 191 B |
BIN
lib/python3.11/site-packages/werkzeug/debug/shared/more.png
Normal file
BIN
lib/python3.11/site-packages/werkzeug/debug/shared/more.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 200 B |
150
lib/python3.11/site-packages/werkzeug/debug/shared/style.css
Normal file
150
lib/python3.11/site-packages/werkzeug/debug/shared/style.css
Normal file
@ -0,0 +1,150 @@
|
||||
body, input { font-family: sans-serif; color: #000; text-align: center;
|
||||
margin: 1em; padding: 0; font-size: 15px; }
|
||||
h1, h2, h3 { font-weight: normal; }
|
||||
|
||||
input { background-color: #fff; margin: 0; text-align: left;
|
||||
outline: none !important; }
|
||||
input[type="submit"] { padding: 3px 6px; }
|
||||
a { color: #11557C; }
|
||||
a:hover { color: #177199; }
|
||||
pre, code,
|
||||
textarea { font-family: monospace; font-size: 14px; }
|
||||
|
||||
div.debugger { text-align: left; padding: 12px; margin: auto;
|
||||
background-color: white; }
|
||||
h1 { font-size: 36px; margin: 0 0 0.3em 0; }
|
||||
div.detail { cursor: pointer; }
|
||||
div.detail p { margin: 0 0 8px 13px; font-size: 14px; white-space: pre-wrap;
|
||||
font-family: monospace; }
|
||||
div.explanation { margin: 20px 13px; font-size: 15px; color: #555; }
|
||||
div.footer { font-size: 13px; text-align: right; margin: 30px 0;
|
||||
color: #86989B; }
|
||||
|
||||
h2 { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 9px;
|
||||
background-color: #11557C; color: white; }
|
||||
h2 em, h3 em { font-style: normal; color: #A5D6D9; font-weight: normal; }
|
||||
|
||||
div.traceback, div.plain { border: 1px solid #ddd; margin: 0 0 1em 0; padding: 10px; }
|
||||
div.plain p { margin: 0; }
|
||||
div.plain textarea,
|
||||
div.plain pre { margin: 10px 0 0 0; padding: 4px;
|
||||
background-color: #E8EFF0; border: 1px solid #D3E7E9; }
|
||||
div.plain textarea { width: 99%; height: 300px; }
|
||||
div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
|
||||
div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
|
||||
div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
|
||||
div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
|
||||
background-color: #E8EFF0; border: 1px solid #D3E7E9; }
|
||||
div.traceback .library .current { background: white; color: #555; }
|
||||
div.traceback .expanded .current { background: #E8EFF0; color: black; }
|
||||
div.traceback pre:hover { background-color: #DDECEE; color: black; cursor: pointer; }
|
||||
div.traceback div.source.expanded pre + pre { border-top: none; }
|
||||
|
||||
div.traceback span.ws { display: none; }
|
||||
div.traceback pre.before, div.traceback pre.after { display: none; background: white; }
|
||||
div.traceback div.source.expanded pre.before,
|
||||
div.traceback div.source.expanded pre.after {
|
||||
display: block;
|
||||
}
|
||||
|
||||
div.traceback div.source.expanded span.ws {
|
||||
display: inline;
|
||||
}
|
||||
|
||||
div.traceback blockquote { margin: 1em 0 0 0; padding: 0; white-space: pre-line; }
|
||||
div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
|
||||
div.traceback img:hover { background-color: #ddd; cursor: pointer;
|
||||
border-color: #BFDDE0; }
|
||||
div.traceback pre:hover img { display: block; }
|
||||
div.traceback cite.filename { font-style: normal; color: #3B666B; }
|
||||
|
||||
pre.console { border: 1px solid #ccc; background: white!important;
|
||||
color: black; padding: 5px!important;
|
||||
margin: 3px 0 0 0!important; cursor: default!important;
|
||||
max-height: 400px; overflow: auto; }
|
||||
pre.console form { color: #555; }
|
||||
pre.console input { background-color: transparent; color: #555;
|
||||
width: 90%; font-family: monospace; font-size: 14px;
|
||||
border: none!important; }
|
||||
|
||||
span.string { color: #30799B; }
|
||||
span.number { color: #9C1A1C; }
|
||||
span.help { color: #3A7734; }
|
||||
span.object { color: #485F6E; }
|
||||
span.extended { opacity: 0.5; }
|
||||
span.extended:hover { opacity: 1; }
|
||||
a.toggle { text-decoration: none; background-repeat: no-repeat;
|
||||
background-position: center center;
|
||||
background-image: url(?__debugger__=yes&cmd=resource&f=more.png); }
|
||||
a.toggle:hover { background-color: #444; }
|
||||
a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }
|
||||
|
||||
pre.console div.traceback,
|
||||
pre.console div.box { margin: 5px 10px; white-space: normal;
|
||||
border: 1px solid #11557C; padding: 10px;
|
||||
font-family: sans-serif; }
|
||||
pre.console div.box h3,
|
||||
pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
|
||||
background: #11557C; color: white; }
|
||||
|
||||
pre.console div.traceback pre:hover { cursor: default; background: #E8EFF0; }
|
||||
pre.console div.traceback pre.syntaxerror { background: inherit; border: none;
|
||||
margin: 20px -10px -10px -10px;
|
||||
padding: 10px; border-top: 1px solid #BFDDE0;
|
||||
background: #E8EFF0; }
|
||||
pre.console div.noframe-traceback pre.syntaxerror { margin-top: -10px; border: none; }
|
||||
|
||||
pre.console div.box pre.repr { padding: 0; margin: 0; background-color: white; border: none; }
|
||||
pre.console div.box table { margin-top: 6px; }
|
||||
pre.console div.box pre { border: none; }
|
||||
pre.console div.box pre.help { background-color: white; }
|
||||
pre.console div.box pre.help:hover { cursor: default; }
|
||||
pre.console table tr { vertical-align: top; }
|
||||
div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }
|
||||
|
||||
div.traceback pre, div.console pre {
|
||||
white-space: pre-wrap; /* css-3 should we be so lucky... */
|
||||
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
|
||||
white-space: -pre-wrap; /* Opera 4-6 ?? */
|
||||
white-space: -o-pre-wrap; /* Opera 7 ?? */
|
||||
word-wrap: break-word; /* Internet Explorer 5.5+ */
|
||||
_white-space: pre; /* IE only hack to re-specify in
|
||||
addition to word-wrap */
|
||||
}
|
||||
|
||||
|
||||
div.pin-prompt {
|
||||
position: absolute;
|
||||
display: none;
|
||||
top: 0;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
background: rgba(255, 255, 255, 0.8);
|
||||
}
|
||||
|
||||
div.pin-prompt .inner {
|
||||
background: #eee;
|
||||
padding: 10px 50px;
|
||||
width: 350px;
|
||||
margin: 10% auto 0 auto;
|
||||
border: 1px solid #ccc;
|
||||
border-radius: 2px;
|
||||
}
|
||||
|
||||
div.exc-divider {
|
||||
margin: 0.7em 0 0 -1em;
|
||||
padding: 0.5em;
|
||||
background: #11557C;
|
||||
color: #ddd;
|
||||
border: 1px solid #ddd;
|
||||
}
|
||||
|
||||
.console.active {
|
||||
max-height: 0!important;
|
||||
display: none;
|
||||
}
|
||||
|
||||
.hidden {
|
||||
display: none;
|
||||
}
|
||||
450
lib/python3.11/site-packages/werkzeug/debug/tbtools.py
Normal file
450
lib/python3.11/site-packages/werkzeug/debug/tbtools.py
Normal file
@ -0,0 +1,450 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
import linecache
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import sysconfig
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from markupsafe import escape
|
||||
|
||||
from ..utils import cached_property
|
||||
from .console import Console
|
||||
|
||||
HEADER = """\
|
||||
<!doctype html>
|
||||
<html lang=en>
|
||||
<head>
|
||||
<title>%(title)s // Werkzeug Debugger</title>
|
||||
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css">
|
||||
<link rel="shortcut icon"
|
||||
href="?__debugger__=yes&cmd=resource&f=console.png">
|
||||
<script src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
|
||||
<script>
|
||||
var CONSOLE_MODE = %(console)s,
|
||||
EVALEX = %(evalex)s,
|
||||
EVALEX_TRUSTED = %(evalex_trusted)s,
|
||||
SECRET = "%(secret)s";
|
||||
</script>
|
||||
</head>
|
||||
<body style="background-color: #fff">
|
||||
<div class="debugger">
|
||||
"""
|
||||
|
||||
FOOTER = """\
|
||||
<div class="footer">
|
||||
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
|
||||
friendly Werkzeug powered traceback interpreter.
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="pin-prompt">
|
||||
<div class="inner">
|
||||
<h3>Console Locked</h3>
|
||||
<p>
|
||||
The console is locked and needs to be unlocked by entering the PIN.
|
||||
You can find the PIN printed out on the standard output of your
|
||||
shell that runs the server.
|
||||
<form>
|
||||
<p>PIN:
|
||||
<input type=text name=pin size=14>
|
||||
<input type=submit name=btn value="Confirm Pin">
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
PAGE_HTML = (
|
||||
HEADER
|
||||
+ """\
|
||||
<h1>%(exception_type)s</h1>
|
||||
<div class="detail">
|
||||
<p class="errormsg">%(exception)s</p>
|
||||
</div>
|
||||
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
|
||||
%(summary)s
|
||||
<div class="plain">
|
||||
<p>
|
||||
This is the Copy/Paste friendly version of the traceback.
|
||||
</p>
|
||||
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
|
||||
</div>
|
||||
<div class="explanation">
|
||||
The debugger caught an exception in your WSGI application. You can now
|
||||
look at the traceback which led to the error. <span class="nojavascript">
|
||||
If you enable JavaScript you can also use additional features such as code
|
||||
execution (if the evalex feature is enabled), automatic pasting of the
|
||||
exceptions and much more.</span>
|
||||
</div>
|
||||
"""
|
||||
+ FOOTER
|
||||
+ """
|
||||
<!--
|
||||
|
||||
%(plaintext_cs)s
|
||||
|
||||
-->
|
||||
"""
|
||||
)
|
||||
|
||||
CONSOLE_HTML = (
|
||||
HEADER
|
||||
+ """\
|
||||
<h1>Interactive Console</h1>
|
||||
<div class="explanation">
|
||||
In this console you can execute Python expressions in the context of the
|
||||
application. The initial namespace was created by the debugger automatically.
|
||||
</div>
|
||||
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
|
||||
"""
|
||||
+ FOOTER
|
||||
)
|
||||
|
||||
SUMMARY_HTML = """\
|
||||
<div class="%(classes)s">
|
||||
%(title)s
|
||||
<ul>%(frames)s</ul>
|
||||
%(description)s
|
||||
</div>
|
||||
"""
|
||||
|
||||
FRAME_HTML = """\
|
||||
<div class="frame" id="frame-%(id)d">
|
||||
<h4>File <cite class="filename">"%(filename)s"</cite>,
|
||||
line <em class="line">%(lineno)s</em>,
|
||||
in <code class="function">%(function_name)s</code></h4>
|
||||
<div class="source %(library)s">%(lines)s</div>
|
||||
</div>
|
||||
"""
|
||||
|
||||
|
||||
def _process_traceback(
    exc: BaseException,
    te: traceback.TracebackException | None = None,
    *,
    skip: int = 0,
    hide: bool = True,
) -> traceback.TracebackException:
    """Build a :class:`traceback.TracebackException` whose stack entries
    are :class:`DebugFrameSummary` objects carrying the live frame
    namespaces, recursing into ``__cause__``/``__context__`` chains.

    :param exc: The exception being processed.
    :param te: An existing ``TracebackException`` to augment; created
        from ``exc`` when not given.
    :param skip: Drop this many frames from the top of the stack.
    :param hide: Honor ``__traceback_hide__`` frame-local markers.
    """
    if te is None:
        te = traceback.TracebackException.from_exception(exc, lookup_lines=False)

    # Get the frames the same way StackSummary.extract did, in order
    # to match each frame with the FrameSummary to augment.
    frame_gen = traceback.walk_tb(exc.__traceback__)
    limit = getattr(sys, "tracebacklimit", None)

    if limit is not None:
        if limit < 0:
            limit = 0

        frame_gen = itertools.islice(frame_gen, limit)

    if skip:
        # Skip both the raw frames and the matching summaries so the
        # zip below stays aligned.
        frame_gen = itertools.islice(frame_gen, skip, None)
        del te.stack[:skip]

    new_stack: list[DebugFrameSummary] = []
    hidden = False

    # Match each frame with the FrameSummary that was generated.
    # Hide frames using Paste's __traceback_hide__ rules. Replace
    # all visible FrameSummary with DebugFrameSummary.
    for (f, _), fs in zip(frame_gen, te.stack):
        if hide:
            hide_value = f.f_locals.get("__traceback_hide__", False)

            if hide_value in {"before", "before_and_this"}:
                # Discard everything collected so far.
                new_stack = []
                hidden = False

                if hide_value == "before_and_this":
                    continue
            elif hide_value in {"reset", "reset_and_this"}:
                hidden = False

                if hide_value == "reset_and_this":
                    continue
            elif hide_value in {"after", "after_and_this"}:
                # Hide this and all subsequent frames until a reset.
                hidden = True

                if hide_value == "after_and_this":
                    continue
            elif hide_value or hidden:
                continue

        frame_args: dict[str, t.Any] = {
            "filename": fs.filename,
            "lineno": fs.lineno,
            "name": fs.name,
            "locals": f.f_locals,
            "globals": f.f_globals,
        }

        # Column information exists on FrameSummary starting in 3.11.
        if sys.version_info >= (3, 11):
            frame_args["colno"] = fs.colno
            frame_args["end_colno"] = fs.end_colno

        new_stack.append(DebugFrameSummary(**frame_args))

    # The codeop module is used to compile code from the interactive
    # debugger. Hide any codeop frames from the bottom of the traceback.
    while new_stack:
        module = new_stack[0].global_ns.get("__name__")

        if module is None:
            module = new_stack[0].local_ns.get("__name__")

        if module == "codeop":
            del new_stack[0]
        else:
            break

    # Replace the stack in place so the TracebackException keeps its
    # other state (message, syntax error details, ...).
    te.stack[:] = new_stack

    if te.__context__:
        context_exc = t.cast(BaseException, exc.__context__)
        te.__context__ = _process_traceback(context_exc, te.__context__, hide=hide)

    if te.__cause__:
        cause_exc = t.cast(BaseException, exc.__cause__)
        te.__cause__ = _process_traceback(cause_exc, te.__cause__, hide=hide)

    return te
|
||||
|
||||
|
||||
class DebugTraceback:
    """Wrap an exception's traceback for rendering by the debugger,
    either as plain text or as interactive HTML.
    """

    # The ``_cache_*`` slots back the ``cached_property`` descriptors below.
    __slots__ = ("_te", "_cache_all_tracebacks", "_cache_all_frames")

    def __init__(
        self,
        exc: BaseException,
        te: traceback.TracebackException | None = None,
        *,
        skip: int = 0,
        hide: bool = True,
    ) -> None:
        self._te = _process_traceback(exc, te, skip=skip, hide=hide)

    def __str__(self) -> str:
        return f"<{type(self).__name__} {self._te}>"

    @cached_property
    def all_tracebacks(
        self,
    ) -> list[tuple[str | None, traceback.TracebackException]]:
        """All chained tracebacks, starting from the one passed in and
        following ``__cause__``/``__context__``, each paired with the
        message describing how it chains (``None`` when unchained).
        """
        out = []
        current = self._te

        while current is not None:
            if current.__cause__ is not None:
                chained_msg = (
                    "The above exception was the direct cause of the"
                    " following exception"
                )
                chained_exc = current.__cause__
            elif current.__context__ is not None and not current.__suppress_context__:
                chained_msg = (
                    "During handling of the above exception, another"
                    " exception occurred"
                )
                chained_exc = current.__context__
            else:
                chained_msg = None
                chained_exc = None

            out.append((chained_msg, current))
            current = chained_exc

        return out

    @cached_property
    def all_frames(self) -> list[DebugFrameSummary]:
        """Every frame from every chained traceback, flattened."""
        return [
            f  # type: ignore[misc]
            for _, te in self.all_tracebacks
            for f in te.stack
        ]

    def render_traceback_text(self) -> str:
        """Render the traceback as plain text, interpreter style."""
        return "".join(self._te.format())

    def render_traceback_html(self, include_title: bool = True) -> str:
        """Render the traceback frames as an HTML summary block."""
        library_frames = [f.is_library for f in self.all_frames]
        # Only mark library frames when the traceback mixes library and
        # non-library frames; otherwise the distinction adds nothing.
        mark_library = 0 < sum(library_frames) < len(library_frames)
        rows = []

        if not library_frames:
            classes = "traceback noframe-traceback"
        else:
            classes = "traceback"

        # Render deepest traceback first (reversed chain order).
        for msg, current in reversed(self.all_tracebacks):
            row_parts = []

            if msg is not None:
                row_parts.append(f'<li><div class="exc-divider">{msg}:</div>')

            for frame in current.stack:
                frame = t.cast(DebugFrameSummary, frame)
                info = f' title="{escape(frame.info)}"' if frame.info else ""
                row_parts.append(f"<li{info}>{frame.render_html(mark_library)}")

            rows.append("\n".join(row_parts))

        # ``exc_type`` is replaced by ``exc_type_str`` on Python 3.13+.
        if sys.version_info < (3, 13):
            exc_type_str = self._te.exc_type.__name__
        else:
            exc_type_str = self._te.exc_type_str

        is_syntax_error = exc_type_str == "SyntaxError"

        if include_title:
            if is_syntax_error:
                title = "Syntax Error"
            else:
                title = "Traceback <em>(most recent call last)</em>:"
        else:
            title = ""

        exc_full = escape("".join(self._te.format_exception_only()))

        if is_syntax_error:
            description = f"<pre class=syntaxerror>{exc_full}</pre>"
        else:
            description = f"<blockquote>{exc_full}</blockquote>"

        return SUMMARY_HTML % {
            "classes": classes,
            "title": f"<h3>{title}</h3>",
            "frames": "\n".join(rows),
            "description": description,
        }

    def render_debugger_html(
        self, evalex: bool, secret: str, evalex_trusted: bool
    ) -> str:
        """Render the full interactive debugger page for this traceback.

        :param evalex: Whether code evaluation in frames is enabled.
        :param secret: Per-process secret embedded in the page for the
            JavaScript side to authenticate debug requests.
        :param evalex_trusted: Whether evaluation is allowed without PIN.
        """
        exc_lines = list(self._te.format_exception_only())
        plaintext = "".join(self._te.format())

        # Same 3.13 compatibility dance as render_traceback_html.
        if sys.version_info < (3, 13):
            exc_type_str = self._te.exc_type.__name__
        else:
            exc_type_str = self._te.exc_type_str

        return PAGE_HTML % {
            "evalex": "true" if evalex else "false",
            "evalex_trusted": "true" if evalex_trusted else "false",
            "console": "false",
            "title": escape(exc_lines[0]),
            "exception": escape("".join(exc_lines)),
            "exception_type": escape(exc_type_str),
            "summary": self.render_traceback_html(include_title=False),
            "plaintext": escape(plaintext),
            # Collapse runs of dashes so the plaintext copy can live
            # inside an HTML comment without terminating it.
            "plaintext_cs": re.sub("-{2,}", "-", plaintext),
            "secret": secret,
        }
|
||||
|
||||
|
||||
class DebugFrameSummary(traceback.FrameSummary):
    """A :class:`traceback.FrameSummary` that can evaluate code in the
    frame's namespace.
    """

    __slots__ = (
        "local_ns",
        "global_ns",
        "_cache_info",
        "_cache_is_library",
        "_cache_console",
    )

    def __init__(
        self,
        *,
        locals: dict[str, t.Any],
        globals: dict[str, t.Any],
        **kwargs: t.Any,
    ) -> None:
        # Pass locals=None so FrameSummary does not stringify the
        # namespace; keep the live dicts for console evaluation instead.
        super().__init__(locals=None, **kwargs)
        self.local_ns = locals
        self.global_ns = globals

    @cached_property
    def info(self) -> str | None:
        """Value of the frame-local ``__traceback_info__``, if set."""
        return self.local_ns.get("__traceback_info__")

    @cached_property
    def is_library(self) -> bool:
        """Whether the frame's file lives under one of the interpreter's
        install paths (``sysconfig.get_paths()``), i.e. library rather
        than application code.
        """
        return any(
            self.filename.startswith((path, os.path.realpath(path)))
            for path in sysconfig.get_paths().values()
        )

    @cached_property
    def console(self) -> Console:
        """An interactive console bound to this frame's namespaces."""
        return Console(self.global_ns, self.local_ns)

    def eval(self, code: str) -> t.Any:
        """Evaluate ``code`` in this frame's console and return the result."""
        return self.console.eval(code)

    def render_html(self, mark_library: bool) -> str:
        """Render this frame as an HTML block with surrounding source
        context and, when column info is available, a ``^^^`` marker
        under the failing expression.
        """
        context = 5  # source lines shown before and after the current line
        lines = linecache.getlines(self.filename)
        line_idx = self.lineno - 1  # type: ignore[operator]
        start_idx = max(0, line_idx - context)
        stop_idx = min(len(lines), line_idx + context + 1)
        rendered_lines = []

        def render_line(line: str, cls: str) -> None:
            # Render one source line; leading whitespace is wrapped in a
            # span so CSS can collapse it for unexpanded frames.
            line = line.expandtabs().rstrip()
            stripped_line = line.strip()
            prefix = len(line) - len(stripped_line)
            # colno/end_colno only exist on Python 3.11+ summaries.
            colno = getattr(self, "colno", 0)
            end_colno = getattr(self, "end_colno", 0)

            if cls == "current" and colno and end_colno:
                arrow = (
                    f'\n<span class="ws">{" " * prefix}</span>'
                    f'{" " * (colno - prefix)}{"^" * (end_colno - colno)}'
                )
            else:
                arrow = ""

            rendered_lines.append(
                f'<pre class="line {cls}"><span class="ws">{" " * prefix}</span>'
                f"{escape(stripped_line) if stripped_line else ' '}"
                f"{arrow if arrow else ''}</pre>"
            )

        # linecache returns [] when the source is unavailable.
        if lines:
            for line in lines[start_idx:line_idx]:
                render_line(line, "before")

            render_line(lines[line_idx], "current")

            for line in lines[line_idx + 1 : stop_idx]:
                render_line(line, "after")

        return FRAME_HTML % {
            # id(self) keys the frame for the JS side's debug requests.
            "id": id(self),
            "filename": escape(self.filename),
            "lineno": self.lineno,
            "function_name": escape(self.name),
            "lines": "\n".join(rendered_lines),
            "library": "library" if mark_library and self.is_library else "",
        }
|
||||
|
||||
|
||||
def render_console_html(secret: str, evalex_trusted: bool) -> str:
    """Render the standalone interactive console page.

    :param secret: Per-process secret embedded in the page so the
        JavaScript side can authenticate its debug requests.
    :param evalex_trusted: Whether evaluation is allowed without a PIN.
    """
    values = {
        "evalex": "true",
        "evalex_trusted": "true" if evalex_trusted else "false",
        "console": "true",
        "title": "Console",
        "secret": secret,
    }
    return CONSOLE_HTML % values
|
||||
894
lib/python3.11/site-packages/werkzeug/exceptions.py
Normal file
894
lib/python3.11/site-packages/werkzeug/exceptions.py
Normal file
@ -0,0 +1,894 @@
|
||||
"""Implements a number of Python exceptions which can be raised from within
|
||||
a view to trigger a standard HTTP non-200 response.
|
||||
|
||||
Usage Example
|
||||
-------------
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from werkzeug.wrappers.request import Request
|
||||
from werkzeug.exceptions import HTTPException, NotFound
|
||||
|
||||
def view(request):
|
||||
raise NotFound()
|
||||
|
||||
@Request.application
|
||||
def application(request):
|
||||
try:
|
||||
return view(request)
|
||||
except HTTPException as e:
|
||||
return e
|
||||
|
||||
As you can see from this example those exceptions are callable WSGI
|
||||
applications. However, they are not Werkzeug response objects. You
|
||||
can get a response object by calling ``get_response()`` on an HTTP
|
||||
exception.
|
||||
|
||||
Keep in mind that you may have to pass an environ (WSGI) or scope
|
||||
(ASGI) to ``get_response()`` because some errors fetch additional
|
||||
information relating to the request.
|
||||
|
||||
If you want to hook in a different exception page to say, a 404 status
|
||||
code, you can add a second except for a specific subclass of an error:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@Request.application
|
||||
def application(request):
|
||||
try:
|
||||
return view(request)
|
||||
except NotFound as e:
|
||||
return not_found(request)
|
||||
except HTTPException as e:
|
||||
return e
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
from datetime import datetime
|
||||
|
||||
from markupsafe import escape
|
||||
from markupsafe import Markup
|
||||
|
||||
from ._internal import _get_environ
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
from .datastructures import WWWAuthenticate
|
||||
from .sansio.response import Response
|
||||
from .wrappers.request import Request as WSGIRequest
|
||||
from .wrappers.response import Response as WSGIResponse
|
||||
|
||||
|
||||
class HTTPException(Exception):
    """The base class for all HTTP exceptions. This exception can be called as a WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.

    .. versionchanged:: 2.1
        Removed the ``wrap`` class method.
    """

    #: HTTP status code; ``None`` on this base class, set by subclasses.
    code: int | None = None
    #: Default body text; subclasses override it, instances may set their own.
    description: str | None = None

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        super().__init__()
        # Only shadow the class-level description when one is given.
        if description is not None:
            self.description = description
        self.response = response

    @property
    def name(self) -> str:
        """The status name."""
        from .http import HTTP_STATUS_CODES

        return HTTP_STATUS_CODES.get(self.code, "Unknown Error")  # type: ignore

    def get_description(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> str:
        """Get the description."""
        if self.description is None:
            description = ""
        else:
            description = self.description

        # Escape first, then turn newlines into literal <br> markup.
        description = escape(description).replace("\n", Markup("<br>"))
        return f"<p>{description}</p>"

    def get_body(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> str:
        """Get the HTML body."""
        # NOTE(review): ``scope`` is not forwarded to get_description
        # here — looks intentional for WSGI bodies, but confirm.
        return (
            "<!doctype html>\n"
            "<html lang=en>\n"
            f"<title>{self.code} {escape(self.name)}</title>\n"
            f"<h1>{escape(self.name)}</h1>\n"
            f"{self.get_description(environ)}\n"
        )

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        """Get a list of headers."""
        return [("Content-Type", "text/html; charset=utf-8")]

    def get_response(
        self,
        environ: WSGIEnvironment | WSGIRequest | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> Response:
        """Get a response object. If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request. This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        from .wrappers.response import Response as WSGIResponse  # noqa: F811

        if self.response is not None:
            return self.response
        if environ is not None:
            # Accept either a raw environ dict or a Request object.
            environ = _get_environ(environ)
        headers = self.get_headers(environ, scope)
        return WSGIResponse(self.get_body(environ, scope), self.code, headers)

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = t.cast("WSGIResponse", self.get_response(environ))
        return response(environ, start_response)

    def __str__(self) -> str:
        code = self.code if self.code is not None else "???"
        return f"{code} {self.name}: {self.description}"

    def __repr__(self) -> str:
        code = self.code if self.code is not None else "???"
        return f"<{type(self).__name__} '{code}: {self.name}'>"
|
||||
|
||||
|
||||
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application the application
    or server cannot handle.
    """

    code = 400
    description = (
        "The browser (or proxy) sent a request that this server could "
        "not understand."
    )
|
||||
|
||||
|
||||
class BadRequestKeyError(BadRequest, KeyError):
    """An exception that is used to signal both a :exc:`KeyError` and a
    :exc:`BadRequest`. Used by many of the datastructures.
    """

    # Backing storage for the ``description`` property defined below.
    _description = BadRequest.description
    #: Show the KeyError along with the HTTP error message in the
    #: response. This should be disabled in production, but can be
    #: useful in a debug mode.
    show_exception = False

    def __init__(self, arg: object | None = None, *args: t.Any, **kwargs: t.Any):
        super().__init__(*args, **kwargs)

        # Initialize KeyError explicitly so KeyError.__str__ shows the
        # missing key (or nothing when no key was given).
        if arg is None:
            KeyError.__init__(self)
        else:
            KeyError.__init__(self, arg)

    @property  # type: ignore
    def description(self) -> str:
        """The HTTP description, with the KeyError detail appended when
        ``show_exception`` is enabled.
        """
        if self.show_exception:
            return (
                f"{self._description}\n"
                f"{KeyError.__name__}: {KeyError.__str__(self)}"
            )

        return self._description

    @description.setter
    def description(self, value: str) -> None:
        self._description = value
|
||||
|
||||
|
||||
class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client. Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server. Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """
|
||||
|
||||
|
||||
class SecurityError(BadRequest):
    """Raised if something triggers a security error. This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """

    # No extra behavior: a distinct type so callers can catch security
    # problems separately from plain bad requests.
|
||||
|
||||
|
||||
class BadHost(BadRequest):
    """Raised if the submitted host is badly formatted.

    .. versionadded:: 0.11.2
    """

    # No extra behavior: a distinct type for host-validation failures.
|
||||
|
||||
|
||||
class Unauthorized(HTTPException):
    """*401* ``Unauthorized``

    Raise if the user is not authorized to access a resource.

    The ``www_authenticate`` argument should be used to set the
    ``WWW-Authenticate`` header. This is used for HTTP basic auth and
    other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
    to create correctly formatted values. Strictly speaking a 401
    response is invalid if it doesn't provide at least one value for
    this header, although real clients typically don't care.

    :param description: Override the default message used for the body
        of the response.
    :param www_authenticate: A single value, or list of values, for the
        WWW-Authenticate header(s).

    .. versionchanged:: 2.0
        Serialize multiple ``www_authenticate`` items into multiple
        ``WWW-Authenticate`` headers, rather than joining them
        into a single value, for better interoperability.

    .. versionchanged:: 0.15.3
        If the ``www_authenticate`` argument is not set, the
        ``WWW-Authenticate`` header is not set.

    .. versionchanged:: 0.15.3
        The ``response`` argument was restored.

    .. versionchanged:: 0.15.1
        ``description`` was moved back as the first argument, restoring
        its previous position.

    .. versionchanged:: 0.15.0
        ``www_authenticate`` was added as the first argument, ahead of
        ``description``.
    """

    code = 401
    description = (
        "The server could not verify that you are authorized to access"
        " the URL requested. You either supplied the wrong credentials"
        " (e.g. a bad password), or your browser doesn't understand"
        " how to supply the credentials required."
    )

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        www_authenticate: None | (WWWAuthenticate | t.Iterable[WWWAuthenticate]) = None,
    ) -> None:
        super().__init__(description, response)

        from .datastructures import WWWAuthenticate

        # Normalize a single value to a tuple so get_headers can
        # always iterate it.
        if isinstance(www_authenticate, WWWAuthenticate):
            www_authenticate = (www_authenticate,)

        self.www_authenticate = www_authenticate

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        headers = super().get_headers(environ, scope)
        # Emit one WWW-Authenticate header per value (see 2.0 changelog
        # note in the class docstring).
        if self.www_authenticate:
            headers.extend(("WWW-Authenticate", str(x)) for x in self.www_authenticate)
        return headers
|
||||
|
||||
|
||||
class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user was authenticated but doesn't have permission for
    the requested resource.
    """

    code = 403
    description = (
        "You don't have the permission to access the requested resource. It is"
        " either read-protected or not readable by the server."
    )


class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """

    code = 404
    description = (
        "The requested URL was not found on the server. If you entered the URL"
        " manually please check your spelling and try again."
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise if the server used a method the resource does not handle. For
    example `POST` if the resource is view only. Especially useful for REST.

    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """

    code = 405
    description = "The method is not allowed for the requested URL."

    def __init__(
        self,
        valid_methods: t.Iterable[str] | None = None,
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        """:param valid_methods: The HTTP methods the resource does
        support, used to build the ``Allow`` response header.
        """
        super().__init__(description=description, response=response)
        self.valid_methods = valid_methods

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        headers = super().get_headers(environ, scope)

        # Advertise the supported methods via ``Allow``, as RFC 9110
        # requires for 405 responses.
        if self.valid_methods:
            allow = ", ".join(self.valid_methods)
            headers.append(("Allow", allow))

        return headers
class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """

    code = 406
    description = (
        "The resource identified by the request is only capable of generating"
        " response entities which have content characteristics not acceptable"
        " according to the accept headers sent in the request."
    )


class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raise to signalize a timeout.
    """

    code = 408
    description = (
        "The server closed the network connection because the browser didn't"
        " finish the request within the specified time."
    )


class Conflict(HTTPException):
    """*409* `Conflict`

    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.

    .. versionadded:: 0.7
    """

    code = 409
    description = (
        "A conflict happened while processing the request. The resource"
        " might have been modified while the request was being processed."
    )


class Gone(HTTPException):
    """*410* `Gone`

    Raise if a resource existed previously and went away without new location.
    """

    code = 410
    description = (
        "The requested URL is no longer available on this server and there"
        " is no forwarding address. If you followed a link from a foreign"
        " page, please contact the author of this page."
    )


class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """

    code = 411
    description = (
        "A request with this method requires a valid"
        " <code>Content-Length</code> header."
    )


class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """

    code = 412
    description = (
        "The precondition on the request for the URL failed positive evaluation."
    )


class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    The status code one should return if the data submitted exceeded a given
    limit.
    """

    code = 413
    description = "The data value transmitted exceeds the capacity limit."


class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413* but for too long URLs.
    """

    code = 414
    description = (
        "The length of the requested URL exceeds the capacity limit"
        " for this server. The request cannot be processed."
    )


class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """

    code = 415
    description = (
        "The server does not support the media type transmitted in the request."
    )
class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    The client asked for an invalid part of the file.

    .. versionadded:: 0.7
    """

    code = 416
    description = "The server cannot provide the requested range."

    def __init__(
        self,
        length: int | None = None,
        units: str = "bytes",
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        """:param length: Total length of the resource; used to build an
        unsatisfied-range ``Content-Range`` header when given.
        :param units: The range unit, ``bytes`` by default.
        """
        super().__init__(description=description, response=response)
        self.length = length
        self.units = units

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        headers = super().get_headers(environ, scope)

        # ``<units> */<length>`` is the unsatisfied-range form of
        # Content-Range (RFC 9110 section 14.4).
        if self.length is not None:
            content_range = f"{self.units} */{self.length}"
            headers.append(("Content-Range", content_range))

        return headers
class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    The server cannot meet the requirements of the Expect request-header.

    .. versionadded:: 0.7
    """

    code = 417
    description = "The server could not meet the requirements of the Expect header"


class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.

    .. versionadded:: 0.7
    """

    code = 418
    description = "This server is a teapot, not a coffee machine"


class MisdirectedRequest(HTTPException):
    """421 Misdirected Request

    Indicates that the request was directed to a server that is not able to
    produce a response.

    .. versionadded:: 3.1
    """

    code = 421
    description = "The server is not able to produce a response."


class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`

    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """

    code = 422
    description = (
        "The request was well-formed but was unable to be followed"
        " due to semantic errors."
    )


class Locked(HTTPException):
    """*423* `Locked`

    Used if the resource that is being accessed is locked.
    """

    code = 423
    description = "The resource that is being accessed is locked."


class FailedDependency(HTTPException):
    """*424* `Failed Dependency`

    Used if the method could not be performed on the resource
    because the requested action depended on another action and that action failed.
    """

    code = 424
    description = (
        "The method could not be performed on the resource because the requested"
        " action depended on another action and that action"
        " failed."
    )


class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`

    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or "If-Unmodified-
    Since") with the proper value retained from a recent GET request, the
    server ensures that each client has at least seen the previous revision of
    the resource.
    """

    code = 428
    description = (
        "This request is required to be conditional;"
        ' try using "If-Match" or "If-Unmodified-Since".'
    )
class _RetryAfter(HTTPException):
    """Mixin base adding an optional ``retry_after`` parameter which
    sets the ``Retry-After`` header. May be an :class:`int` number of
    seconds or a :class:`~datetime.datetime`.
    """

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        retry_after: datetime | int | None = None,
    ) -> None:
        super().__init__(description, response)
        self.retry_after = retry_after

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        headers = super().get_headers(environ, scope)

        # Truthiness check: None (unset) and 0 both skip the header.
        if self.retry_after:
            if isinstance(self.retry_after, datetime):
                # Imported lazily to avoid a circular import.
                from .http import http_date

                headers.append(("Retry-After", http_date(self.retry_after)))
            else:
                headers.append(("Retry-After", str(self.retry_after)))

        return headers
class TooManyRequests(_RetryAfter):
    """*429* `Too Many Requests`

    The server is limiting the rate at which this user receives
    responses, and this request exceeds that rate. (The server may use
    any convenient method to identify users and their request rates).
    The server may include a "Retry-After" header to indicate how long
    the user should wait before retrying.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    code = 429
    description = "This user has exceeded an allotted request count. Try again later."


class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`

    The server refuses to process the request because the header fields are too
    large. One or more individual fields may be too large, or the set of all
    headers is too large.
    """

    code = 431
    description = "One or more header fields exceeds the maximum size."


class UnavailableForLegalReasons(HTTPException):
    """*451* `Unavailable For Legal Reasons`

    This status code indicates that the server is denying access to the
    resource as a consequence of a legal demand.
    """

    code = 451
    description = "Unavailable for legal reasons."
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raise if an internal server error occurred. This is a good fallback if an
    unknown error occurred in the dispatcher.

    .. versionchanged:: 1.0.0
        Added the :attr:`original_exception` attribute.
    """

    code = 500
    description = (
        "The server encountered an internal error and was unable to complete"
        " your request. Either the server is overloaded or there is an error"
        " in the application."
    )

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        original_exception: BaseException | None = None,
    ) -> None:
        # The original exception that caused this 500 error. Can be used
        # by frameworks to provide context when handling unexpected
        # errors. Assigned before super().__init__ so it is available
        # even if base initialization inspects the instance.
        self.original_exception = original_exception
        super().__init__(description=description, response=response)
class NotImplemented(HTTPException):  # noqa: A001 (name mirrors the HTTP reason phrase)
    """*501* `Not Implemented`

    Raise if the application does not support the action requested by the
    browser.
    """

    code = 501
    description = "The server does not support the action requested by the browser."


class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """

    code = 502
    description = (
        "The proxy server received an invalid response from an upstream server."
    )


class ServiceUnavailable(_RetryAfter):
    """*503* `Service Unavailable`

    Status code you should return if a service is temporarily
    unavailable.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    code = 503
    description = (
        "The server is temporarily unable to service your request due to"
        " maintenance downtime or capacity problems. Please try again later."
    )


class GatewayTimeout(HTTPException):
    """*504* `Gateway Timeout`

    Status code you should return if a connection to an upstream server
    times out.
    """

    code = 504
    description = "The connection to an upstream server timed out."


class HTTPVersionNotSupported(HTTPException):
    """*505* `HTTP Version Not Supported`

    The server does not support the HTTP protocol version used in the request.
    """

    code = 505
    description = (
        "The server does not support the HTTP protocol version used in the request."
    )
#: Mapping of HTTP status code -> default exception class, populated
#: below by scanning this module's globals.
default_exceptions: dict[int, type[HTTPException]] = {}


def _find_exceptions() -> None:
    """Register every ``HTTPException`` subclass in this module under
    its status code in :data:`default_exceptions`.
    """
    for obj in globals().values():
        try:
            if not issubclass(obj, HTTPException):
                continue
        except TypeError:
            # Not a class at all; issubclass rejects it.
            continue

        if obj.code is None:
            continue

        registered = default_exceptions.get(obj.code, None)

        # Don't replace an existing entry with one of its own
        # subclasses; the first (more general) class wins.
        if registered is not None and issubclass(obj, registered):
            continue

        default_exceptions[obj.code] = obj


_find_exceptions()
del _find_exceptions
class Aborter:
    """When passed a dict of code -> exception items it can be used as
    callable that raises exceptions. If the first argument to the
    callable is an integer it will be looked up in the mapping, if it's
    a WSGI application it will be raised in a proxy exception.

    The rest of the arguments are forwarded to the exception constructor.
    """

    def __init__(
        self,
        mapping: dict[int, type[HTTPException]] | None = None,
        extra: dict[int, type[HTTPException]] | None = None,
    ) -> None:
        # Copy so later mutation of this instance's mapping never leaks
        # into the module-level default_exceptions dict.
        base = default_exceptions if mapping is None else mapping
        self.mapping = dict(base)

        if extra is not None:
            self.mapping.update(extra)

    def __call__(
        self, code: int | Response, *args: t.Any, **kwargs: t.Any
    ) -> t.NoReturn:
        """Raise the exception registered for ``code``, or wrap a
        ``Response`` in a plain :class:`HTTPException` proxy.
        """
        # Imported lazily to avoid a circular import at module load.
        from .sansio.response import Response

        if isinstance(code, Response):
            raise HTTPException(response=code)

        if code not in self.mapping:
            raise LookupError(f"no exception for {code!r}")

        raise self.mapping[code](*args, **kwargs)
def abort(status: int | Response, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
    """Raises an :py:exc:`HTTPException` for the given status code or WSGI
    application.

    If a status code is given, it will be looked up in the list of
    exceptions and will raise that exception. If passed a WSGI application,
    it will wrap it in a proxy WSGI exception and raise that::

        abort(404)  # 404 Not Found
        abort(Response('Hello World'))

    """
    _aborter(status, *args, **kwargs)


#: Shared module-level Aborter backing :func:`abort`.
_aborter: Aborter = Aborter()
430
lib/python3.11/site-packages/werkzeug/formparser.py
Normal file
430
lib/python3.11/site-packages/werkzeug/formparser.py
Normal file
@ -0,0 +1,430 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
from io import BytesIO
|
||||
from urllib.parse import parse_qsl
|
||||
|
||||
from ._internal import _plain_int
|
||||
from .datastructures import FileStorage
|
||||
from .datastructures import Headers
|
||||
from .datastructures import MultiDict
|
||||
from .exceptions import RequestEntityTooLarge
|
||||
from .http import parse_options_header
|
||||
from .sansio.multipart import Data
|
||||
from .sansio.multipart import Epilogue
|
||||
from .sansio.multipart import Field
|
||||
from .sansio.multipart import File
|
||||
from .sansio.multipart import MultipartDecoder
|
||||
from .sansio.multipart import NeedData
|
||||
from .wsgi import get_content_length
|
||||
from .wsgi import get_input_stream
|
||||
|
||||
# SpooledTemporaryFile is unavailable on some platforms. Fall back to a
# plain TemporaryFile there and signal the fallback with ``None`` so
# default_stream_factory can branch on it.
try:
    from tempfile import SpooledTemporaryFile
except ImportError:
    from tempfile import TemporaryFile

    SpooledTemporaryFile = None  # type: ignore

if t.TYPE_CHECKING:
    import typing as te

    from _typeshed.wsgi import WSGIEnvironment

    # (stream, form fields, uploaded files) — the common parse result.
    t_parse_result = tuple[
        t.IO[bytes], MultiDict[str, str], MultiDict[str, FileStorage]
    ]

    class TStreamFactory(te.Protocol):
        """Structural type for factories producing writable byte streams."""

        def __call__(
            self,
            total_content_length: int | None,
            content_type: str | None,
            filename: str | None,
            content_length: int | None = None,
        ) -> t.IO[bytes]: ...


F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def default_stream_factory(
    total_content_length: int | None,
    content_type: str | None,
    filename: str | None,
    content_length: int | None = None,
) -> t.IO[bytes]:
    """Return a writable byte stream for buffering an uploaded part.

    Prefers :class:`~tempfile.SpooledTemporaryFile` (memory up to 500 KiB,
    then disk). On platforms without it, large or unknown-size bodies go
    straight to a :class:`~tempfile.TemporaryFile`; small known-size
    bodies stay in a :class:`~io.BytesIO`.
    """
    max_size = 1024 * 500

    if SpooledTemporaryFile is not None:
        return t.cast(t.IO[bytes], SpooledTemporaryFile(max_size=max_size, mode="rb+"))

    if total_content_length is None or total_content_length > max_size:
        return t.cast(t.IO[bytes], TemporaryFile("rb+"))

    return BytesIO()
def parse_form_data(
    environ: WSGIEnvironment,
    stream_factory: TStreamFactory | None = None,
    max_form_memory_size: int | None = None,
    max_content_length: int | None = None,
    cls: type[MultiDict[str, t.Any]] | None = None,
    silent: bool = True,
    *,
    max_form_parts: int | None = None,
) -> t_parse_result:
    """Parse the form data in the environ and return it as tuple in the form
    ``(stream, form, files)``. You should only call this method if the
    transport method is `POST`, `PUT`, or `PATCH`.

    If the mimetype of the data transmitted is `multipart/form-data` the
    files multidict will be filled with `FileStorage` objects. If the
    mimetype is unknown the input stream is wrapped and returned as first
    argument, else the stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`Response._get_file_stream`.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                       or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :param max_form_parts: The maximum number of multipart parts to be parsed. If this
        is exceeded, a :exc:`~exceptions.RequestEntityTooLarge` exception is raised.
    :return: A tuple in the form ``(stream, form, files)``.

    .. versionchanged:: 3.0
        The ``charset`` and ``errors`` parameters were removed.

    .. versionchanged:: 2.3
        Added the ``max_form_parts`` parameter.

    .. versionadded:: 0.5.1
        Added the ``silent`` parameter.

    .. versionadded:: 0.5
        Added the ``max_form_memory_size``, ``max_content_length``, and ``cls``
        parameters.
    """
    # Delegate all of the work to a throwaway FormDataParser.
    parser = FormDataParser(
        stream_factory=stream_factory,
        max_form_memory_size=max_form_memory_size,
        max_content_length=max_content_length,
        max_form_parts=max_form_parts,
        silent=silent,
        cls=cls,
    )
    return parser.parse_from_environ(environ)
class FormDataParser:
    """This class implements parsing of form data for Werkzeug. By itself
    it can parse multipart and url encoded form data. It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`Response._get_file_stream`.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                       or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :param max_form_parts: The maximum number of multipart parts to be parsed. If this
        is exceeded, a :exc:`~exceptions.RequestEntityTooLarge` exception is raised.

    .. versionchanged:: 3.0
        The ``charset`` and ``errors`` parameters were removed.

    .. versionchanged:: 3.0
        The ``parse_functions`` attribute and ``get_parse_func`` methods were removed.

    .. versionchanged:: 2.2.3
        Added the ``max_form_parts`` parameter.

    .. versionadded:: 0.8
    """

    def __init__(
        self,
        stream_factory: TStreamFactory | None = None,
        max_form_memory_size: int | None = None,
        max_content_length: int | None = None,
        cls: type[MultiDict[str, t.Any]] | None = None,
        silent: bool = True,
        *,
        max_form_parts: int | None = None,
    ) -> None:
        self.stream_factory = (
            default_stream_factory if stream_factory is None else stream_factory
        )
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        self.max_form_parts = max_form_parts
        self.cls = (
            t.cast("type[MultiDict[str, t.Any]]", MultiDict) if cls is None else cls
        )
        self.silent = silent

    def parse_from_environ(self, environ: WSGIEnvironment) -> t_parse_result:
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        stream = get_input_stream(environ, max_content_length=self.max_content_length)
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(environ.get("CONTENT_TYPE"))
        return self.parse(
            stream,
            content_length=content_length,
            mimetype=mimetype,
            options=options,
        )

    def parse(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str] | None = None,
    ) -> t_parse_result:
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.

        .. versionchanged:: 3.0
            The invalid ``application/x-url-encoded`` content type is not
            treated as ``application/x-www-form-urlencoded``.
        """
        # Dispatch on the two supported mimetypes; anything else is
        # returned untouched with empty form/files multidicts.
        if mimetype == "multipart/form-data":
            parse_func = self._parse_multipart
        elif mimetype == "application/x-www-form-urlencoded":
            parse_func = self._parse_urlencoded
        else:
            return stream, self.cls(), self.cls()

        try:
            return parse_func(stream, mimetype, content_length, options or {})
        except ValueError:
            if not self.silent:
                raise

        # Silent mode: swallow parse errors and act like unknown content.
        return stream, self.cls(), self.cls()

    def _parse_multipart(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str],
    ) -> t_parse_result:
        """Parse a ``multipart/form-data`` body via :class:`MultiPartParser`."""
        boundary = options.get("boundary", "").encode("ascii")

        if not boundary:
            raise ValueError("Missing boundary")

        parser = MultiPartParser(
            stream_factory=self.stream_factory,
            max_form_memory_size=self.max_form_memory_size,
            max_form_parts=self.max_form_parts,
            cls=self.cls,
        )
        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    def _parse_urlencoded(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str],
    ) -> t_parse_result:
        """Parse an ``application/x-www-form-urlencoded`` body."""
        limit = self.max_form_memory_size

        # Reject bodies whose declared length already exceeds the
        # in-memory limit, before reading anything.
        if limit is not None and content_length is not None and content_length > limit:
            raise RequestEntityTooLarge()

        items = parse_qsl(
            stream.read().decode(),
            keep_blank_values=True,
            errors="werkzeug.url_quote",
        )
        return stream, self.cls(items), self.cls()
class MultiPartParser:
    """Parse a ``multipart/form-data`` request body into form fields and
    uploaded files.

    Non-file field values are accumulated in memory (bounded by
    ``max_form_memory_size``), while file parts are streamed to a
    container produced by ``stream_factory``.
    """

    def __init__(
        self,
        stream_factory: TStreamFactory | None = None,
        max_form_memory_size: int | None = None,
        cls: type[MultiDict[str, t.Any]] | None = None,
        buffer_size: int = 64 * 1024,
        max_form_parts: int | None = None,
    ) -> None:
        """
        :param stream_factory: Callable returning a writable binary
            stream for each file part. Defaults to
            ``default_stream_factory``.
        :param max_form_memory_size: Maximum number of bytes a non-file
            field may accumulate before ``RequestEntityTooLarge`` is
            raised. ``None`` disables the check.
        :param cls: ``MultiDict`` subclass used for the returned
            containers.
        :param buffer_size: Number of bytes read from the stream per
            iteration while parsing.
        :param max_form_parts: Maximum number of parts accepted by the
            decoder. ``None`` disables the check.
        """
        self.max_form_memory_size = max_form_memory_size
        self.max_form_parts = max_form_parts

        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory

        if cls is None:
            cls = t.cast("type[MultiDict[str, t.Any]]", MultiDict)

        self.cls = cls
        self.buffer_size = buffer_size

    def fail(self, message: str) -> te.NoReturn:
        """Raise a :exc:`ValueError` with *message*; never returns."""
        raise ValueError(message)

    def get_part_charset(self, headers: Headers) -> str:
        """Return the charset used to decode the current part's data."""
        # Figure out input charset for current part
        content_type = headers.get("content-type")

        if content_type:
            parameters = parse_options_header(content_type)[1]
            ct_charset = parameters.get("charset", "").lower()

            # A safe list of encodings. Modern clients should only send ASCII or UTF-8.
            # This list will not be extended further.
            if ct_charset in {"ascii", "us-ascii", "utf-8", "iso-8859-1"}:
                return ct_charset

        # Missing or unrecognized charset: fall back to UTF-8.
        return "utf-8"

    def start_file_streaming(
        self, event: File, total_content_length: int | None
    ) -> t.IO[bytes]:
        """Create the writable stream a file part will be written to.

        The per-part ``content-length`` header may be absent or invalid;
        in that case ``0`` is passed to the stream factory.
        """
        content_type = event.headers.get("content-type")

        try:
            content_length = _plain_int(event.headers["content-length"])
        except (KeyError, ValueError):
            content_length = 0

        container = self.stream_factory(
            total_content_length=total_content_length,
            filename=event.filename,
            content_type=content_type,
            content_length=content_length,
        )
        return container

    def parse(
        self, stream: t.IO[bytes], boundary: bytes, content_length: int | None
    ) -> tuple[MultiDict[str, str], MultiDict[str, FileStorage]]:
        """Parse *stream* and return ``(fields, files)`` multidicts.

        :param stream: Binary stream containing the multipart body.
        :param boundary: The multipart boundary from the content type.
        :param content_length: Total request content length, forwarded
            to the stream factory; may be ``None``.
        :raises RequestEntityTooLarge: If a non-file field exceeds
            ``max_form_memory_size``.
        """
        current_part: Field | File
        # Bytes accumulated so far for the current non-file field;
        # ``None`` while the current part is a file (file parts are not
        # size-checked here).
        field_size: int | None = None
        container: t.IO[bytes] | list[bytes]
        _write: t.Callable[[bytes], t.Any]

        parser = MultipartDecoder(
            boundary,
            max_form_memory_size=self.max_form_memory_size,
            max_parts=self.max_form_parts,
        )

        fields = []
        files = []

        for data in _chunk_iter(stream.read, self.buffer_size):
            parser.receive_data(data)
            event = parser.next_event()
            # Drain every event made available by this chunk of input.
            while not isinstance(event, (Epilogue, NeedData)):
                if isinstance(event, Field):
                    current_part = event
                    field_size = 0
                    container = []
                    _write = container.append
                elif isinstance(event, File):
                    current_part = event
                    field_size = None
                    container = self.start_file_streaming(event, content_length)
                    _write = container.write
                elif isinstance(event, Data):
                    if self.max_form_memory_size is not None and field_size is not None:
                        # Ensure that accumulated data events do not exceed limit.
                        # Also checked within single event in MultipartDecoder.
                        field_size += len(event.data)

                        if field_size > self.max_form_memory_size:
                            raise RequestEntityTooLarge()

                    _write(event.data)
                    if not event.more_data:
                        # The part is complete: finalize it as either a
                        # decoded field value or a FileStorage.
                        if isinstance(current_part, Field):
                            value = b"".join(container).decode(
                                self.get_part_charset(current_part.headers), "replace"
                            )
                            fields.append((current_part.name, value))
                        else:
                            container = t.cast(t.IO[bytes], container)
                            container.seek(0)
                            files.append(
                                (
                                    current_part.name,
                                    FileStorage(
                                        container,
                                        current_part.filename,
                                        current_part.name,
                                        headers=current_part.headers,
                                    ),
                                )
                            )

                event = parser.next_event()

        return self.cls(fields), self.cls(files)
|
||||
|
||||
|
||||
def _chunk_iter(read: t.Callable[[int], bytes], size: int) -> t.Iterator[bytes | None]:
|
||||
"""Read data in chunks for multipart/form-data parsing. Stop if no data is read.
|
||||
Yield ``None`` at the end to signal end of parsing.
|
||||
"""
|
||||
while True:
|
||||
data = read(size)
|
||||
|
||||
if not data:
|
||||
break
|
||||
|
||||
yield data
|
||||
|
||||
yield None
|
||||
1405
lib/python3.11/site-packages/werkzeug/http.py
Normal file
1405
lib/python3.11/site-packages/werkzeug/http.py
Normal file
File diff suppressed because it is too large
Load Diff
653
lib/python3.11/site-packages/werkzeug/local.py
Normal file
653
lib/python3.11/site-packages/werkzeug/local.py
Normal file
@ -0,0 +1,653 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import math
|
||||
import operator
|
||||
import typing as t
|
||||
from contextvars import ContextVar
|
||||
from functools import partial
|
||||
from functools import update_wrapper
|
||||
from operator import attrgetter
|
||||
|
||||
from .wsgi import ClosingIterator
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
T = t.TypeVar("T")
|
||||
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
|
||||
|
||||
|
||||
def release_local(local: Local | LocalStack[t.Any]) -> None:
    """Clear the data stored for the current context in a
    :class:`Local` or :class:`LocalStack`, without going through a
    :class:`LocalManager`.

    This should not be needed for modern use cases, and may be removed
    in the future.

    .. versionadded:: 0.6.1
    """
    releaser = local.__release_local__
    releaser()
|
||||
|
||||
|
||||
class Local:
    """Create a namespace of context-local data. This wraps a
    :class:`ContextVar` containing a :class:`dict` value.

    This may incur a performance penalty compared to using individual
    context vars, as it has to copy data to avoid mutating the dict
    between nested contexts.

    :param context_var: The :class:`~contextvars.ContextVar` to use as
        storage for this local. If not given, one will be created.
        Context vars not created at the global scope may interfere with
        garbage collection.

    .. versionchanged:: 2.0
        Uses ``ContextVar`` instead of a custom storage implementation.
    """

    __slots__ = ("__storage",)

    def __init__(self, context_var: ContextVar[dict[str, t.Any]] | None = None) -> None:
        if context_var is None:
            # A ContextVar not created at global scope interferes with
            # Python's garbage collection. However, a local only makes
            # sense defined at the global scope as well, in which case
            # the GC issue doesn't seem relevant.
            context_var = ContextVar(f"werkzeug.Local<{id(self)}>.storage")

        # Use object.__setattr__ with the mangled slot name to bypass
        # this class's own __setattr__, which writes into the storage
        # dict instead of onto the instance.
        object.__setattr__(self, "_Local__storage", context_var)

    def __iter__(self) -> t.Iterator[tuple[str, t.Any]]:
        # Iterate over the (name, value) pairs currently stored for
        # this context.
        return iter(self.__storage.get({}).items())

    def __call__(
        self, name: str, *, unbound_message: str | None = None
    ) -> LocalProxy[t.Any]:
        """Create a :class:`LocalProxy` that access an attribute on this
        local namespace.

        :param name: Proxy this attribute.
        :param unbound_message: The error message that the proxy will
            show if the attribute isn't set.
        """
        return LocalProxy(self, name, unbound_message=unbound_message)

    def __release_local__(self) -> None:
        # Reset the storage for the current context to an empty dict.
        self.__storage.set({})

    def __getattr__(self, name: str) -> t.Any:
        values = self.__storage.get({})

        if name in values:
            return values[name]

        raise AttributeError(name)

    def __setattr__(self, name: str, value: t.Any) -> None:
        # Copy-on-write: never mutate the stored dict in place, so a
        # nested context sharing the same dict is not affected.
        values = self.__storage.get({}).copy()
        values[name] = value
        self.__storage.set(values)

    def __delattr__(self, name: str) -> None:
        values = self.__storage.get({})

        if name in values:
            # Copy-on-write, as in __setattr__.
            values = values.copy()
            del values[name]
            self.__storage.set(values)
        else:
            raise AttributeError(name)
|
||||
|
||||
|
||||
class LocalStack(t.Generic[T]):
    """Create a stack of context-local data. This wraps a
    :class:`ContextVar` containing a :class:`list` value.

    This may incur a performance penalty compared to using individual
    context vars, as it has to copy data to avoid mutating the list
    between nested contexts.

    :param context_var: The :class:`~contextvars.ContextVar` to use as
        storage for this local. If not given, one will be created.
        Context vars not created at the global scope may interfere with
        garbage collection.

    .. versionchanged:: 2.0
        Uses ``ContextVar`` instead of a custom storage implementation.

    .. versionadded:: 0.6.1
    """

    __slots__ = ("_storage",)

    def __init__(self, context_var: ContextVar[list[T]] | None = None) -> None:
        if context_var is None:
            # A ContextVar not created at global scope interferes with
            # Python's garbage collection. However, a local only makes
            # sense defined at the global scope as well, in which case
            # the GC issue doesn't seem relevant.
            context_var = ContextVar(f"werkzeug.LocalStack<{id(self)}>.storage")

        self._storage = context_var

    def __release_local__(self) -> None:
        # Reset the stack for the current context to empty.
        self._storage.set([])

    def push(self, obj: T) -> list[T]:
        """Add a new item to the top of the stack."""
        # Copy-on-write: append to a copy so a nested context sharing
        # the same list is not affected.
        stack = self._storage.get([]).copy()
        stack.append(obj)
        self._storage.set(stack)
        return stack

    def pop(self) -> T | None:
        """Remove the top item from the stack and return it. If the
        stack is empty, return ``None``.
        """
        stack = self._storage.get([])

        if len(stack) == 0:
            return None

        rv = stack[-1]
        # Store a slice rather than popping in place (copy-on-write).
        self._storage.set(stack[:-1])
        return rv

    @property
    def top(self) -> T | None:
        """The topmost item on the stack. If the stack is empty,
        `None` is returned.
        """
        stack = self._storage.get([])

        if len(stack) == 0:
            return None

        return stack[-1]

    def __call__(
        self, name: str | None = None, *, unbound_message: str | None = None
    ) -> LocalProxy[t.Any]:
        """Create a :class:`LocalProxy` that accesses the top of this
        local stack.

        :param name: If given, the proxy access this attribute of the
            top item, rather than the item itself.
        :param unbound_message: The error message that the proxy will
            show if the stack is empty.
        """
        return LocalProxy(self, name, unbound_message=unbound_message)
|
||||
|
||||
|
||||
class LocalManager:
    """Manage releasing the data for the current context in one or more
    :class:`Local` and :class:`LocalStack` objects.

    This should not be needed for modern use cases, and may be removed
    in the future.

    :param locals: A local or list of locals to manage.

    .. versionchanged:: 2.1
        The ``ident_func`` was removed.

    .. versionchanged:: 0.7
        The ``ident_func`` parameter was added.

    .. versionchanged:: 0.6.1
        The :func:`release_local` function can be used instead of a
        manager.
    """

    __slots__ = ("locals",)

    def __init__(
        self,
        locals: None
        | (Local | LocalStack[t.Any] | t.Iterable[Local | LocalStack[t.Any]]) = None,
    ) -> None:
        if locals is None:
            self.locals = []
        elif isinstance(locals, Local):
            # A single Local; wrap it in a list.
            self.locals = [locals]
        else:
            # Otherwise assumed to be an iterable of locals.
            self.locals = list(locals)  # type: ignore[arg-type]

    def cleanup(self) -> None:
        """Release the data in the locals for this context. Call this at
        the end of each request or use :meth:`make_middleware`.
        """
        for local in self.locals:
            release_local(local)

    def make_middleware(self, app: WSGIApplication) -> WSGIApplication:
        """Wrap a WSGI application so that local data is released
        automatically after the response has been sent for a request.
        """

        def application(
            environ: WSGIEnvironment, start_response: StartResponse
        ) -> t.Iterable[bytes]:
            # ClosingIterator invokes self.cleanup once the response
            # iterable is closed by the server.
            return ClosingIterator(app(environ, start_response), self.cleanup)

        return application

    def middleware(self, func: WSGIApplication) -> WSGIApplication:
        """Like :meth:`make_middleware` but used as a decorator on the
        WSGI application function.

        .. code-block:: python

            @manager.middleware
            def application(environ, start_response):
                ...
        """
        return update_wrapper(self.make_middleware(func), func)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} storages: {len(self.locals)}>"
|
||||
|
||||
|
||||
class _ProxyLookup:
    """Descriptor that handles proxied attribute lookup for
    :class:`LocalProxy`.

    :param f: The built-in function this attribute is accessed through.
        Instead of looking up the special method, the function call
        is redone on the object.
    :param fallback: Return this function if the proxy is unbound
        instead of raising a :exc:`RuntimeError`.
    :param is_attr: This proxied name is an attribute, not a function.
        Call the fallback immediately to get the value.
    :param class_value: Value to return when accessed from the
        ``LocalProxy`` class directly. Used for ``__doc__`` so building
        docs still works.
    """

    __slots__ = ("bind_f", "fallback", "is_attr", "class_value", "name")

    def __init__(
        self,
        f: t.Callable[..., t.Any] | None = None,
        fallback: t.Callable[[LocalProxy[t.Any]], t.Any] | None = None,
        class_value: t.Any | None = None,
        is_attr: bool = False,
    ) -> None:
        bind_f: t.Callable[[LocalProxy[t.Any], t.Any], t.Callable[..., t.Any]] | None

        if hasattr(f, "__get__"):
            # A Python function, can be turned into a bound method.

            def bind_f(
                instance: LocalProxy[t.Any], obj: t.Any
            ) -> t.Callable[..., t.Any]:
                return f.__get__(obj, type(obj))  # type: ignore

        elif f is not None:
            # A C function, use partial to bind the first argument.

            def bind_f(
                instance: LocalProxy[t.Any], obj: t.Any
            ) -> t.Callable[..., t.Any]:
                return partial(f, obj)

        else:
            # Use getattr, which will produce a bound method.
            bind_f = None

        self.bind_f = bind_f
        self.fallback = fallback
        self.class_value = class_value
        self.is_attr = is_attr

    def __set_name__(self, owner: LocalProxy[t.Any], name: str) -> None:
        # Record the attribute name this descriptor is assigned to, so
        # unbound lookups can fall back to getattr with the same name.
        self.name = name

    def __get__(self, instance: LocalProxy[t.Any], owner: type | None = None) -> t.Any:
        if instance is None:
            # Accessed on the class itself, not an instance.
            if self.class_value is not None:
                return self.class_value

            return self

        try:
            obj = instance._get_current_object()
        except RuntimeError:
            # The proxy is unbound; use the fallback if one was given.
            if self.fallback is None:
                raise

            fallback = self.fallback.__get__(instance, owner)

            if self.is_attr:
                # __class__ and __doc__ are attributes, not methods.
                # Call the fallback to get the value.
                return fallback()

            return fallback

        if self.bind_f is not None:
            return self.bind_f(instance, obj)

        return getattr(obj, self.name)

    def __repr__(self) -> str:
        return f"proxy {self.name}"

    def __call__(
        self, instance: LocalProxy[t.Any], *args: t.Any, **kwargs: t.Any
    ) -> t.Any:
        """Support calling unbound methods from the class. For example,
        this happens with ``copy.copy``, which does
        ``type(x).__copy__(x)``. ``type(x)`` can't be proxied, so it
        returns the proxy type and descriptor.
        """
        return self.__get__(instance, type(instance))(*args, **kwargs)
|
||||
|
||||
|
||||
class _ProxyIOp(_ProxyLookup):
    """Look up an augmented assignment method on a proxied object. The
    method is wrapped to return the proxy instead of the object.
    """

    __slots__ = ()

    def __init__(
        self,
        f: t.Callable[..., t.Any] | None = None,
        fallback: t.Callable[[LocalProxy[t.Any]], t.Any] | None = None,
    ) -> None:
        super().__init__(f, fallback)

        def bind_f(instance: LocalProxy[t.Any], obj: t.Any) -> t.Callable[..., t.Any]:
            def i_op(self: t.Any, other: t.Any) -> LocalProxy[t.Any]:
                # Apply the in-place op to the real object, but return
                # the proxy so augmented assignment rebinds the name to
                # the proxy rather than the unwrapped object.
                f(self, other)  # type: ignore
                return instance

            return i_op.__get__(obj, type(obj))  # type: ignore

        # Replace the bind_f chosen by the base __init__ with the
        # proxy-returning wrapper above.
        self.bind_f = bind_f
|
||||
|
||||
|
||||
def _l_to_r_op(op: F) -> F:
    """Swap the argument order to turn an l-op into an r-op."""

    def _flipped(obj: t.Any, other: t.Any) -> t.Any:
        # Reversed operand order: the proxied object is the right-hand
        # operand of the original operator.
        return op(other, obj)

    return t.cast(F, _flipped)
|
||||
|
||||
|
||||
def _identity(o: T) -> T:
    """Return *o* unchanged; used by :class:`LocalProxy` when no
    attribute name is being proxied."""
    return o
|
||||
|
||||
|
||||
class LocalProxy(t.Generic[T]):
    """A proxy to the object bound to a context-local object. All
    operations on the proxy are forwarded to the bound object. If no
    object is bound, a ``RuntimeError`` is raised.

    :param local: The context-local object that provides the proxied
        object.
    :param name: Proxy this attribute from the proxied object.
    :param unbound_message: The error message to show if the
        context-local object is unbound.

    Proxy a :class:`~contextvars.ContextVar` to make it easier to
    access. Pass a name to proxy that attribute.

    .. code-block:: python

        _request_var = ContextVar("request")
        request = LocalProxy(_request_var)
        session = LocalProxy(_request_var, "session")

    Proxy an attribute on a :class:`Local` namespace by calling the
    local with the attribute name:

    .. code-block:: python

        data = Local()
        user = data("user")

    Proxy the top item on a :class:`LocalStack` by calling the local.
    Pass a name to proxy that attribute.

    .. code-block::

        app_stack = LocalStack()
        current_app = app_stack()
        g = app_stack("g")

    Pass a function to proxy the return value from that function. This
    was previously used to access attributes of local objects before
    that was supported directly.

    .. code-block:: python

        session = LocalProxy(lambda: request.session)

    ``__repr__`` and ``__class__`` are proxied, so ``repr(x)`` and
    ``isinstance(x, cls)`` will look like the proxied object. Use
    ``issubclass(type(x), LocalProxy)`` to check if an object is a
    proxy.

    .. code-block:: python

        repr(user)  # <User admin>
        isinstance(user, User)  # True
        issubclass(type(user), LocalProxy)  # True

    .. versionchanged:: 2.2.2
        ``__wrapped__`` is set when wrapping an object, not only when
        wrapping a function, to prevent doctest from failing.

    .. versionchanged:: 2.2
        Can proxy a ``ContextVar`` or ``LocalStack`` directly.

    .. versionchanged:: 2.2
        The ``name`` parameter can be used with any proxied object, not
        only ``Local``.

    .. versionchanged:: 2.2
        Added the ``unbound_message`` parameter.

    .. versionchanged:: 2.0
        Updated proxied attributes and methods to reflect the current
        data model.

    .. versionchanged:: 0.6.1
        The class can be instantiated with a callable.
    """

    __slots__ = ("__wrapped", "_get_current_object")

    _get_current_object: t.Callable[[], T]
    """Return the current object this proxy is bound to. If the proxy is
    unbound, this raises a ``RuntimeError``.

    This should be used if you need to pass the object to something that
    doesn't understand the proxy. It can also be useful for performance
    if you are accessing the object multiple times in a function, rather
    than going through the proxy multiple times.
    """

    def __init__(
        self,
        local: ContextVar[T] | Local | LocalStack[T] | t.Callable[[], T],
        name: str | None = None,
        *,
        unbound_message: str | None = None,
    ) -> None:
        # get_name extracts the proxied attribute from the bound
        # object, or returns the object itself when no name was given.
        if name is None:
            get_name = _identity
        else:
            get_name = attrgetter(name)  # type: ignore[assignment]

        if unbound_message is None:
            unbound_message = "object is not bound"

        if isinstance(local, Local):
            if name is None:
                raise TypeError("'name' is required when proxying a 'Local' object.")

            def _get_current_object() -> T:
                try:
                    return get_name(local)  # type: ignore[return-value]
                except AttributeError:
                    raise RuntimeError(unbound_message) from None

        elif isinstance(local, LocalStack):

            def _get_current_object() -> T:
                obj = local.top

                if obj is None:
                    raise RuntimeError(unbound_message)

                return get_name(obj)

        elif isinstance(local, ContextVar):

            def _get_current_object() -> T:
                try:
                    obj = local.get()
                except LookupError:
                    raise RuntimeError(unbound_message) from None

                return get_name(obj)

        elif callable(local):

            def _get_current_object() -> T:
                return get_name(local())

        else:
            raise TypeError(f"Don't know how to proxy '{type(local)}'.")

        # Use object.__setattr__ to bypass the proxied __setattr__
        # below, which would forward the assignment to the bound object.
        object.__setattr__(self, "_LocalProxy__wrapped", local)
        object.__setattr__(self, "_get_current_object", _get_current_object)

    __doc__ = _ProxyLookup(  # type: ignore[assignment]
        class_value=__doc__, fallback=lambda self: type(self).__doc__, is_attr=True
    )
    __wrapped__ = _ProxyLookup(
        fallback=lambda self: self._LocalProxy__wrapped,  # type: ignore[attr-defined]
        is_attr=True,
    )
    # __del__ should only delete the proxy
    __repr__ = _ProxyLookup(  # type: ignore[assignment]
        repr, fallback=lambda self: f"<{type(self).__name__} unbound>"
    )
    __str__ = _ProxyLookup(str)  # type: ignore[assignment]
    __bytes__ = _ProxyLookup(bytes)
    __format__ = _ProxyLookup()  # type: ignore[assignment]
    __lt__ = _ProxyLookup(operator.lt)
    __le__ = _ProxyLookup(operator.le)
    __eq__ = _ProxyLookup(operator.eq)  # type: ignore[assignment]
    __ne__ = _ProxyLookup(operator.ne)  # type: ignore[assignment]
    __gt__ = _ProxyLookup(operator.gt)
    __ge__ = _ProxyLookup(operator.ge)
    __hash__ = _ProxyLookup(hash)  # type: ignore[assignment]
    __bool__ = _ProxyLookup(bool, fallback=lambda self: False)
    __getattr__ = _ProxyLookup(getattr)
    # __getattribute__ triggered through __getattr__
    __setattr__ = _ProxyLookup(setattr)  # type: ignore[assignment]
    __delattr__ = _ProxyLookup(delattr)  # type: ignore[assignment]
    __dir__ = _ProxyLookup(dir, fallback=lambda self: [])  # type: ignore[assignment]
    # __get__ (proxying descriptor not supported)
    # __set__ (descriptor)
    # __delete__ (descriptor)
    # __set_name__ (descriptor)
    # __objclass__ (descriptor)
    # __slots__ used by proxy itself
    # __dict__ (__getattr__)
    # __weakref__ (__getattr__)
    # __init_subclass__ (proxying metaclass not supported)
    # __prepare__ (metaclass)
    __class__ = _ProxyLookup(fallback=lambda self: type(self), is_attr=True)  # type: ignore[assignment]
    __instancecheck__ = _ProxyLookup(lambda self, other: isinstance(other, self))
    __subclasscheck__ = _ProxyLookup(lambda self, other: issubclass(other, self))
    # __class_getitem__ triggered through __getitem__
    __call__ = _ProxyLookup(lambda self, *args, **kwargs: self(*args, **kwargs))
    __len__ = _ProxyLookup(len)
    __length_hint__ = _ProxyLookup(operator.length_hint)
    __getitem__ = _ProxyLookup(operator.getitem)
    __setitem__ = _ProxyLookup(operator.setitem)
    __delitem__ = _ProxyLookup(operator.delitem)
    # __missing__ triggered through __getitem__
    __iter__ = _ProxyLookup(iter)
    __next__ = _ProxyLookup(next)
    __reversed__ = _ProxyLookup(reversed)
    __contains__ = _ProxyLookup(operator.contains)
    __add__ = _ProxyLookup(operator.add)
    __sub__ = _ProxyLookup(operator.sub)
    __mul__ = _ProxyLookup(operator.mul)
    __matmul__ = _ProxyLookup(operator.matmul)
    __truediv__ = _ProxyLookup(operator.truediv)
    __floordiv__ = _ProxyLookup(operator.floordiv)
    __mod__ = _ProxyLookup(operator.mod)
    __divmod__ = _ProxyLookup(divmod)
    __pow__ = _ProxyLookup(pow)
    __lshift__ = _ProxyLookup(operator.lshift)
    __rshift__ = _ProxyLookup(operator.rshift)
    __and__ = _ProxyLookup(operator.and_)
    __xor__ = _ProxyLookup(operator.xor)
    __or__ = _ProxyLookup(operator.or_)
    # Reflected binary ops reuse the l-op with swapped operands.
    __radd__ = _ProxyLookup(_l_to_r_op(operator.add))
    __rsub__ = _ProxyLookup(_l_to_r_op(operator.sub))
    __rmul__ = _ProxyLookup(_l_to_r_op(operator.mul))
    __rmatmul__ = _ProxyLookup(_l_to_r_op(operator.matmul))
    __rtruediv__ = _ProxyLookup(_l_to_r_op(operator.truediv))
    __rfloordiv__ = _ProxyLookup(_l_to_r_op(operator.floordiv))
    __rmod__ = _ProxyLookup(_l_to_r_op(operator.mod))
    __rdivmod__ = _ProxyLookup(_l_to_r_op(divmod))
    __rpow__ = _ProxyLookup(_l_to_r_op(pow))
    __rlshift__ = _ProxyLookup(_l_to_r_op(operator.lshift))
    __rrshift__ = _ProxyLookup(_l_to_r_op(operator.rshift))
    __rand__ = _ProxyLookup(_l_to_r_op(operator.and_))
    __rxor__ = _ProxyLookup(_l_to_r_op(operator.xor))
    __ror__ = _ProxyLookup(_l_to_r_op(operator.or_))
    # In-place ops return the proxy itself (see _ProxyIOp).
    __iadd__ = _ProxyIOp(operator.iadd)
    __isub__ = _ProxyIOp(operator.isub)
    __imul__ = _ProxyIOp(operator.imul)
    __imatmul__ = _ProxyIOp(operator.imatmul)
    __itruediv__ = _ProxyIOp(operator.itruediv)
    __ifloordiv__ = _ProxyIOp(operator.ifloordiv)
    __imod__ = _ProxyIOp(operator.imod)
    __ipow__ = _ProxyIOp(operator.ipow)
    __ilshift__ = _ProxyIOp(operator.ilshift)
    __irshift__ = _ProxyIOp(operator.irshift)
    __iand__ = _ProxyIOp(operator.iand)
    __ixor__ = _ProxyIOp(operator.ixor)
    __ior__ = _ProxyIOp(operator.ior)
    __neg__ = _ProxyLookup(operator.neg)
    __pos__ = _ProxyLookup(operator.pos)
    __abs__ = _ProxyLookup(abs)
    __invert__ = _ProxyLookup(operator.invert)
    __complex__ = _ProxyLookup(complex)
    __int__ = _ProxyLookup(int)
    __float__ = _ProxyLookup(float)
    __index__ = _ProxyLookup(operator.index)
    __round__ = _ProxyLookup(round)
    __trunc__ = _ProxyLookup(math.trunc)
    __floor__ = _ProxyLookup(math.floor)
    __ceil__ = _ProxyLookup(math.ceil)
    # Context manager and async protocols are looked up by name on the
    # bound object (no builtin wrapper exists for them).
    __enter__ = _ProxyLookup()
    __exit__ = _ProxyLookup()
    __await__ = _ProxyLookup()
    __aiter__ = _ProxyLookup()
    __anext__ = _ProxyLookup()
    __aenter__ = _ProxyLookup()
    __aexit__ = _ProxyLookup()
    __copy__ = _ProxyLookup(copy.copy)
    __deepcopy__ = _ProxyLookup(copy.deepcopy)
    # __getnewargs_ex__ (pickle through proxy not supported)
    # __getnewargs__ (pickle)
    # __getstate__ (pickle)
    # __setstate__ (pickle)
    # __reduce__ (pickle)
    # __reduce_ex__ (pickle)
|
||||
@ -0,0 +1,81 @@
|
||||
"""
|
||||
Application Dispatcher
|
||||
======================
|
||||
|
||||
This middleware creates a single WSGI application that dispatches to
|
||||
multiple other WSGI applications mounted at different URL paths.
|
||||
|
||||
A common example is writing a Single Page Application, where you have a
|
||||
backend API and a frontend written in JavaScript that does the routing
|
||||
in the browser rather than requesting different pages from the server.
|
||||
The frontend is a single HTML and JS file that should be served for any
|
||||
path besides "/api".
|
||||
|
||||
This example dispatches to an API app under "/api", an admin app
|
||||
under "/admin", and an app that serves frontend files for all other
|
||||
requests::
|
||||
|
||||
app = DispatcherMiddleware(serve_frontend, {
|
||||
'/api': api_app,
|
||||
'/admin': admin_app,
|
||||
})
|
||||
|
||||
In production, you might instead handle this at the HTTP server level,
|
||||
serving files or proxying to application servers based on location. The
|
||||
API and admin apps would each be deployed with a separate WSGI server,
|
||||
and the static files would be served directly by the HTTP server.
|
||||
|
||||
.. autoclass:: DispatcherMiddleware
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
|
||||
class DispatcherMiddleware:
    """Combine multiple applications as a single WSGI application.
    Requests are dispatched to an application based on the path it is
    mounted under.

    :param app: The WSGI application to dispatch to if the request
        doesn't match a mounted path.
    :param mounts: Maps path prefixes to applications for dispatching.
    """

    def __init__(
        self,
        app: WSGIApplication,
        mounts: dict[str, WSGIApplication] | None = None,
    ) -> None:
        self.app = app
        self.mounts = mounts or {}

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        # Find the longest mounted prefix matching the request path by
        # stripping one trailing segment at a time.
        script = environ.get("PATH_INFO", "")
        path_info = ""

        while "/" in script:
            if script in self.mounts:
                app = self.mounts[script]
                break

            script, last_item = script.rsplit("/", 1)
            path_info = f"/{last_item}{path_info}"
        else:
            # Loop exhausted without a break: no prefix matched, so
            # dispatch to the app mounted at the remainder (usually "")
            # or fall back to the default app.
            app = self.mounts.get(script, self.app)

        # Shift the matched prefix from PATH_INFO onto SCRIPT_NAME, as
        # expected for applications mounted below a path.
        original_script_name = environ.get("SCRIPT_NAME", "")
        environ["SCRIPT_NAME"] = original_script_name + script
        environ["PATH_INFO"] = path_info
        return app(environ, start_response)
|
||||
236
lib/python3.11/site-packages/werkzeug/middleware/http_proxy.py
Normal file
236
lib/python3.11/site-packages/werkzeug/middleware/http_proxy.py
Normal file
@ -0,0 +1,236 @@
|
||||
"""
|
||||
Basic HTTP Proxy
|
||||
================
|
||||
|
||||
.. autoclass:: ProxyMiddleware
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
from http import client
|
||||
from urllib.parse import quote
|
||||
from urllib.parse import urlsplit
|
||||
|
||||
from ..datastructures import EnvironHeaders
|
||||
from ..http import is_hop_by_hop_header
|
||||
from ..wsgi import get_input_stream
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
|
||||
class ProxyMiddleware:
|
||||
"""Proxy requests under a path to an external server, routing other
|
||||
requests to the app.
|
||||
|
||||
This middleware can only proxy HTTP requests, as HTTP is the only
|
||||
protocol handled by the WSGI server. Other protocols, such as
|
||||
WebSocket requests, cannot be proxied at this layer. This should
|
||||
only be used for development, in production a real proxy server
|
||||
should be used.
|
||||
|
||||
The middleware takes a dict mapping a path prefix to a dict
|
||||
describing the host to be proxied to::
|
||||
|
||||
app = ProxyMiddleware(app, {
|
||||
"/static/": {
|
||||
"target": "http://127.0.0.1:5001/",
|
||||
}
|
||||
})
|
||||
|
||||
Each host has the following options:
|
||||
|
||||
``target``:
|
||||
The target URL to dispatch to. This is required.
|
||||
``remove_prefix``:
|
||||
Whether to remove the prefix from the URL before dispatching it
|
||||
to the target. The default is ``False``.
|
||||
``host``:
|
||||
``"<auto>"`` (default):
|
||||
The host header is automatically rewritten to the URL of the
|
||||
target.
|
||||
``None``:
|
||||
The host header is unmodified from the client request.
|
||||
Any other value:
|
||||
The host header is overwritten with the value.
|
||||
``headers``:
|
||||
A dictionary of headers to be sent with the request to the
|
||||
target. The default is ``{}``.
|
||||
``ssl_context``:
|
||||
A :class:`ssl.SSLContext` defining how to verify requests if the
|
||||
target is HTTPS. The default is ``None``.
|
||||
|
||||
In the example above, everything under ``"/static/"`` is proxied to
|
||||
the server on port 5001. The host header is rewritten to the target,
|
||||
and the ``"/static/"`` prefix is removed from the URLs.
|
||||
|
||||
:param app: The WSGI application to wrap.
|
||||
:param targets: Proxy target configurations. See description above.
|
||||
:param chunk_size: Size of chunks to read from input stream and
|
||||
write to target.
|
||||
:param timeout: Seconds before an operation to a target fails.
|
||||
|
||||
.. versionadded:: 0.14
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
app: WSGIApplication,
|
||||
targets: t.Mapping[str, dict[str, t.Any]],
|
||||
chunk_size: int = 2 << 13,
|
||||
timeout: int = 10,
|
||||
) -> None:
|
||||
def _set_defaults(opts: dict[str, t.Any]) -> dict[str, t.Any]:
|
||||
opts.setdefault("remove_prefix", False)
|
||||
opts.setdefault("host", "<auto>")
|
||||
opts.setdefault("headers", {})
|
||||
opts.setdefault("ssl_context", None)
|
||||
return opts
|
||||
|
||||
self.app = app
|
||||
self.targets = {
|
||||
f"/{k.strip('/')}/": _set_defaults(v) for k, v in targets.items()
|
||||
}
|
||||
self.chunk_size = chunk_size
|
||||
self.timeout = timeout
|
||||
|
||||
    def proxy_to(
        self, opts: dict[str, t.Any], path: str, prefix: str
    ) -> WSGIApplication:
        """Return a WSGI application that forwards the request to the
        target configured for the matched prefix and relays the
        response back to the client.

        :param opts: Option dict for the matched prefix, already filled
            with defaults by ``__init__``.
        :param path: The full ``PATH_INFO`` of the incoming request.
        :param prefix: The matched prefix, stripped from the path when
            ``remove_prefix`` is enabled.
        """
        target = urlsplit(opts["target"])
        # socket can handle unicode host, but header must be ascii
        host = target.hostname.encode("idna").decode("ascii")

        def application(
            environ: WSGIEnvironment, start_response: StartResponse
        ) -> t.Iterable[bytes]:
            # Copy the incoming headers, dropping hop-by-hop headers and
            # the ones this proxy computes itself (Content-Length, Host).
            headers = list(EnvironHeaders(environ).items())
            headers[:] = [
                (k, v)
                for k, v in headers
                if not is_hop_by_hop_header(k)
                and k.lower() not in ("content-length", "host")
            ]
            headers.append(("Connection", "close"))

            if opts["host"] == "<auto>":
                headers.append(("Host", host))
            elif opts["host"] is None:
                headers.append(("Host", environ["HTTP_HOST"]))
            else:
                headers.append(("Host", opts["host"]))

            headers.extend(opts["headers"].items())
            remote_path = path

            if opts["remove_prefix"]:
                remote_path = remote_path[len(prefix) :].lstrip("/")
                remote_path = f"{target.path.rstrip('/')}/{remote_path}"

            content_length = environ.get("CONTENT_LENGTH")
            chunked = False

            # Forward the body with the client's Content-Length when one
            # was given; an empty CONTENT_LENGTH means a streaming body,
            # so fall back to chunked transfer encoding.
            if content_length not in ("", None):
                headers.append(("Content-Length", content_length))  # type: ignore
            elif content_length is not None:
                headers.append(("Transfer-Encoding", "chunked"))
                chunked = True

            try:
                if target.scheme == "http":
                    con = client.HTTPConnection(
                        host, target.port or 80, timeout=self.timeout
                    )
                elif target.scheme == "https":
                    con = client.HTTPSConnection(
                        host,
                        target.port or 443,
                        timeout=self.timeout,
                        context=opts["ssl_context"],
                    )
                else:
                    raise RuntimeError(
                        "Target scheme must be 'http' or 'https', got"
                        f" {target.scheme!r}."
                    )

                con.connect()
                # safe = https://url.spec.whatwg.org/#url-path-segment-string
                # as well as percent for things that are already quoted
                remote_url = quote(remote_path, safe="!$&'()*+,/:;=@%")
                querystring = environ["QUERY_STRING"]

                if querystring:
                    remote_url = f"{remote_url}?{querystring}"

                con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)

                for k, v in headers:
                    if k.lower() == "connection":
                        v = "close"

                    con.putheader(k, v)

                con.endheaders()
                stream = get_input_stream(environ)

                # Relay the request body in chunks, wrapping each chunk
                # in the chunked-encoding framing when needed.
                while True:
                    data = stream.read(self.chunk_size)

                    if not data:
                        break

                    if chunked:
                        con.send(b"%x\r\n%s\r\n" % (len(data), data))
                    else:
                        con.send(data)

                resp = con.getresponse()
            except OSError:
                # Any socket-level failure talking to the target becomes
                # a 502 for the client.
                from ..exceptions import BadGateway

                return BadGateway()(environ, start_response)

            start_response(
                f"{resp.status} {resp.reason}",
                [
                    (k.title(), v)
                    for k, v in resp.getheaders()
                    if not is_hop_by_hop_header(k)
                ],
            )

            def read() -> t.Iterator[bytes]:
                # Stream the target's response body; stop quietly on a
                # connection error since the status was already sent.
                while True:
                    try:
                        data = resp.read(self.chunk_size)
                    except OSError:
                        break

                    if not data:
                        break

                    yield data

            return read()

        return application
|
||||
|
||||
def __call__(
|
||||
self, environ: WSGIEnvironment, start_response: StartResponse
|
||||
) -> t.Iterable[bytes]:
|
||||
path = environ["PATH_INFO"]
|
||||
app = self.app
|
||||
|
||||
for prefix, opts in self.targets.items():
|
||||
if path.startswith(prefix):
|
||||
app = self.proxy_to(opts, path, prefix)
|
||||
break
|
||||
|
||||
return app(environ, start_response)
|
||||
439
lib/python3.11/site-packages/werkzeug/middleware/lint.py
Normal file
439
lib/python3.11/site-packages/werkzeug/middleware/lint.py
Normal file
@ -0,0 +1,439 @@
|
||||
"""
|
||||
WSGI Protocol Linter
|
||||
====================
|
||||
|
||||
This module provides a middleware that performs sanity checks on the
|
||||
behavior of the WSGI server and application. It checks that the
|
||||
:pep:`3333` WSGI spec is properly implemented. It also warns on some
|
||||
common HTTP errors such as non-empty responses for 304 status codes.
|
||||
|
||||
.. autoclass:: LintMiddleware
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
from types import TracebackType
|
||||
from urllib.parse import urlparse
|
||||
from warnings import warn
|
||||
|
||||
from ..datastructures import Headers
|
||||
from ..http import is_entity_header
|
||||
from ..wsgi import FileWrapper
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
|
||||
class WSGIWarning(Warning):
    """Emitted when the server or application violates WSGI behavior."""
|
||||
|
||||
|
||||
class HTTPWarning(Warning):
    """Emitted for questionable HTTP usage, such as a body on a 304."""
|
||||
|
||||
|
||||
def check_type(context: str, obj: object, need: type = str) -> None:
    """Warn with :class:`WSGIWarning` unless *obj* is exactly of type *need*.

    An exact ``type`` comparison is used (not ``isinstance``) because the
    WSGI spec requires native types, not subclasses.
    """
    actual = type(obj)

    if actual is need:
        return

    warn(
        f"{context!r} requires {need.__name__!r}, got {actual.__name__!r}.",
        WSGIWarning,
        stacklevel=3,
    )
|
||||
|
||||
|
||||
class InputStream:
    """Wrap ``wsgi.input`` and warn about usage patterns that are not
    safe under the WSGI spec, delegating the actual I/O to the wrapped
    stream unchanged.
    """

    def __init__(self, stream: t.IO[bytes]) -> None:
        self._wrapped = stream

    def read(self, *args: t.Any) -> bytes:
        arg_count = len(args)

        # Reading without a size may never return on a conforming server;
        # more than one argument is simply not part of the spec.
        if arg_count == 0:
            warn(
                "WSGI does not guarantee an EOF marker on the input stream, thus making"
                " calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
                " return from this call.",
                WSGIWarning,
                stacklevel=2,
            )
        elif arg_count != 1:
            warn(
                "Too many parameters passed to 'wsgi.input.read()'.",
                WSGIWarning,
                stacklevel=2,
            )

        return self._wrapped.read(*args)

    def readline(self, *args: t.Any) -> bytes:
        arg_count = len(args)

        if arg_count == 0:
            warn(
                "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
                " 'wsgi.input.read()' instead.",
                WSGIWarning,
                stacklevel=2,
            )
        elif arg_count == 1:
            # A size hint works on major servers but is not in the spec.
            warn(
                "'wsgi.input.readline()' was called with a size hint. WSGI does not"
                " support this, although it's available on all major servers.",
                WSGIWarning,
                stacklevel=2,
            )
        else:
            raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")

        return self._wrapped.readline(*args)

    def __iter__(self) -> t.Iterator[bytes]:
        try:
            return iter(self._wrapped)
        except TypeError:
            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
            return iter(())

    def close(self) -> None:
        # The application must never close the server-owned input stream.
        warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
        self._wrapped.close()
|
||||
|
||||
|
||||
class ErrorStream:
    """Wrap ``wsgi.errors`` and check that only native strings are
    written to it, delegating the actual I/O to the wrapped stream.
    """

    def __init__(self, stream: t.IO[str]) -> None:
        self._wrapped = stream

    def write(self, s: str) -> None:
        check_type("wsgi.error.write()", s, str)
        self._wrapped.write(s)

    def writelines(self, seq: t.Iterable[str]) -> None:
        # Route through write() so each line gets type-checked too.
        for line in seq:
            self.write(line)

    def flush(self) -> None:
        self._wrapped.flush()

    def close(self) -> None:
        # The application must never close the server-owned error stream.
        warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
        self._wrapped.close()
|
||||
|
||||
|
||||
class GuardedWrite:
    """Wrap the ``write()`` callable returned by ``start_response`` so
    that argument types are checked and the size of every chunk sent is
    recorded in the shared ``chunks`` list.
    """

    def __init__(self, write: t.Callable[[bytes], object], chunks: list[int]) -> None:
        self._write, self._chunks = write, chunks

    def __call__(self, s: bytes) -> None:
        check_type("write()", s, bytes)
        self._write(s)
        self._chunks.append(len(s))
|
||||
|
||||
|
||||
class GuardedIterator:
    """Wrap the application's response iterable to track correct usage.

    Records the size of every yielded chunk and, when :meth:`close` is
    called, cross-checks the collected chunks against the status code
    and ``Content-Length`` header captured by ``start_response``.
    """

    def __init__(
        self,
        iterator: t.Iterable[bytes],
        headers_set: tuple[int, Headers],
        chunks: list[int],
    ) -> None:
        self._iterator = iterator
        # Bind the bound-method once; also raises early if the app
        # returned something that is not iterable.
        self._next = iter(iterator).__next__
        self.closed = False
        # ``headers_set`` is filled in lazily by the start_response
        # wrapper; it is empty until the app calls start_response.
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self) -> GuardedIterator:
        return self

    def __next__(self) -> bytes:
        if self.closed:
            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)

        rv = self._next()

        if not self.headers_set:
            warn(
                "The application returned before it started the response.",
                WSGIWarning,
                stacklevel=2,
            )

        check_type("application iterator items", rv, bytes)
        self.chunks.append(len(rv))
        return rv

    def close(self) -> None:
        """Close the wrapped iterator and emit HTTP/WSGI warnings for
        body/Content-Length mismatches now that the response is complete.
        """
        self.closed = True

        if hasattr(self._iterator, "close"):
            self._iterator.close()

        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get("content-length", type=int)

            if status_code == 304:
                # A 304 must carry no entity headers (except the two
                # exemptions below) and no body.
                for key, _value in headers:
                    key = key.lower()
                    if key not in ("expires", "content-location") and is_entity_header(
                        key
                    ):
                        warn(
                            f"Entity header {key!r} found in 304 response.",
                            HTTPWarning,
                            stacklevel=2,
                        )
                if bytes_sent:
                    warn(
                        "304 responses must not have a body.",
                        HTTPWarning,
                        stacklevel=2,
                    )
            elif 100 <= status_code < 200 or status_code == 204:
                # 1xx and 204 responses are defined to be bodiless.
                if content_length != 0:
                    warn(
                        f"{status_code} responses must have an empty content length.",
                        HTTPWarning,
                        stacklevel=2,
                    )
                if bytes_sent:
                    warn(
                        f"{status_code} responses must not have a body.",
                        HTTPWarning,
                        stacklevel=2,
                    )
            elif content_length is not None and content_length != bytes_sent:
                warn(
                    "Content-Length and the number of bytes sent to the"
                    " client do not match.",
                    WSGIWarning,
                    stacklevel=2,
                )

    def __del__(self) -> None:
        # Best-effort warning; warn() can fail during interpreter
        # shutdown, so swallow any error here.
        if not self.closed:
            try:
                warn(
                    "Iterator was garbage collected before it was closed.",
                    WSGIWarning,
                    stacklevel=2,
                )
            except Exception:
                pass
|
||||
|
||||
|
||||
class LintMiddleware:
    """Warns about common errors in the WSGI and HTTP behavior of the
    server and wrapped application. Some of the issues it checks are:

    - invalid status codes
    - non-bytes sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators

    Error information is emitted using the :mod:`warnings` module.

    :param app: The WSGI application to wrap.

    .. code-block:: python

        from werkzeug.middleware.lint import LintMiddleware
        app = LintMiddleware(app)
    """

    def __init__(self, app: WSGIApplication) -> None:
        self.app = app

    def check_environ(self, environ: WSGIEnvironment) -> None:
        """Warn about a non-dict environ, missing required keys, and
        malformed ``SCRIPT_NAME`` / ``PATH_INFO`` values.
        """
        if type(environ) is not dict:  # noqa: E721
            warn(
                "WSGI environment is not a standard Python dict.",
                WSGIWarning,
                stacklevel=4,
            )
        for key in (
            "REQUEST_METHOD",
            "SERVER_NAME",
            "SERVER_PORT",
            "wsgi.version",
            "wsgi.input",
            "wsgi.errors",
            "wsgi.multithread",
            "wsgi.multiprocess",
            "wsgi.run_once",
        ):
            if key not in environ:
                warn(
                    f"Required environment key {key!r} not found",
                    WSGIWarning,
                    stacklevel=3,
                )
        if environ["wsgi.version"] != (1, 0):
            warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)

        script_name = environ.get("SCRIPT_NAME", "")
        path_info = environ.get("PATH_INFO", "")

        if script_name and script_name[0] != "/":
            warn(
                f"'SCRIPT_NAME' does not start with a slash: {script_name!r}",
                WSGIWarning,
                stacklevel=3,
            )

        if path_info and path_info[0] != "/":
            warn(
                f"'PATH_INFO' does not start with a slash: {path_info!r}",
                WSGIWarning,
                stacklevel=3,
            )

    def check_start_response(
        self,
        status: str,
        headers: list[tuple[str, str]],
        exc_info: None | (tuple[type[BaseException], BaseException, TracebackType]),
    ) -> tuple[int, Headers]:
        """Validate the arguments passed to ``start_response`` and
        return the parsed status code together with a
        :class:`~werkzeug.datastructures.Headers` view of the headers.
        """
        check_type("status", status, str)
        status_code_str = status.split(None, 1)[0]

        if len(status_code_str) != 3 or not status_code_str.isdecimal():
            warn("Status code must be three digits.", WSGIWarning, stacklevel=3)

        if len(status) < 4 or status[3] != " ":
            warn(
                f"Invalid value for status {status!r}. Valid status strings are three"
                " digits, a space and a status explanation.",
                WSGIWarning,
                stacklevel=3,
            )

        status_code = int(status_code_str)

        if status_code < 100:
            warn("Status code < 100 detected.", WSGIWarning, stacklevel=3)

        if type(headers) is not list:  # noqa: E721
            warn("Header list is not a list.", WSGIWarning, stacklevel=3)

        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn("Header items must be 2-item tuples.", WSGIWarning, stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:  # noqa: E721
                warn(
                    "Header keys and values must be strings.", WSGIWarning, stacklevel=3
                )
            if name.lower() == "status":
                warn(
                    "The status header is not supported due to"
                    " conflicts with the CGI spec.",
                    WSGIWarning,
                    stacklevel=3,
                )

        if exc_info is not None and not isinstance(exc_info, tuple):
            warn("Invalid value for exc_info.", WSGIWarning, stacklevel=3)

        headers_obj = Headers(headers)
        self.check_headers(headers_obj)

        return status_code, headers_obj

    def check_headers(self, headers: Headers) -> None:
        """Warn about malformed ``ETag`` values and relative URLs in the
        ``Location`` header.
        """
        etag = headers.get("etag")

        if etag is not None:
            if etag.startswith(("W/", "w/")):
                if etag.startswith("w/"):
                    warn(
                        "Weak etag indicator should be upper case.",
                        HTTPWarning,
                        stacklevel=4,
                    )

                etag = etag[2:]

            if not (etag[:1] == etag[-1:] == '"'):
                warn("Unquoted etag emitted.", HTTPWarning, stacklevel=4)

        location = headers.get("location")

        if location is not None:
            if not urlparse(location).netloc:
                warn(
                    "Absolute URLs required for location header.",
                    HTTPWarning,
                    stacklevel=4,
                )

    def check_iterator(self, app_iter: t.Iterable[bytes]) -> None:
        """Warn if the application returned a string, which WSGI would
        iterate one character at a time.
        """
        if isinstance(app_iter, str):
            warn(
                "The application returned a string. The response will send one"
                " character at a time to the client, which will kill performance."
                " Return a list or iterable instead.",
                WSGIWarning,
                stacklevel=3,
            )

    def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Iterable[bytes]:
        """Run the wrapped application with every WSGI interaction point
        (environ streams, ``start_response``, ``write()``, the response
        iterable) replaced by a checking wrapper.
        """
        if len(args) != 2:
            warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)

        if kwargs:
            warn(
                "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
            )

        environ: WSGIEnvironment = args[0]
        start_response: StartResponse = args[1]

        self.check_environ(environ)
        environ["wsgi.input"] = InputStream(environ["wsgi.input"])
        environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])

        # Hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length.
        environ["wsgi.file_wrapper"] = FileWrapper

        # Shared state filled in by checking_start_response and read
        # back by the GuardedIterator when the response is closed.
        headers_set: list[t.Any] = []
        chunks: list[int] = []

        def checking_start_response(
            *args: t.Any, **kwargs: t.Any
        ) -> t.Callable[[bytes], None]:
            if len(args) not in {2, 3}:
                warn(
                    f"Invalid number of arguments: {len(args)}, expected 2 or 3.",
                    WSGIWarning,
                    stacklevel=2,
                )

            if kwargs:
                warn(
                    "'start_response' does not take keyword arguments.",
                    WSGIWarning,
                    stacklevel=2,
                )

            status: str = args[0]
            headers: list[tuple[str, str]] = args[1]
            exc_info: (
                None | (tuple[type[BaseException], BaseException, TracebackType])
            ) = args[2] if len(args) == 3 else None

            headers_set[:] = self.check_start_response(status, headers, exc_info)
            return GuardedWrite(start_response(status, headers, exc_info), chunks)

        app_iter = self.app(environ, t.cast("StartResponse", checking_start_response))
        self.check_iterator(app_iter)
        return GuardedIterator(
            app_iter, t.cast(tuple[int, Headers], headers_set), chunks
        )
|
||||
155
lib/python3.11/site-packages/werkzeug/middleware/profiler.py
Normal file
155
lib/python3.11/site-packages/werkzeug/middleware/profiler.py
Normal file
@ -0,0 +1,155 @@
|
||||
"""
|
||||
Application Profiler
|
||||
====================
|
||||
|
||||
This module provides a middleware that profiles each request with the
|
||||
:mod:`cProfile` module. This can help identify bottlenecks in your code
|
||||
that may be slowing down your application.
|
||||
|
||||
.. autoclass:: ProfilerMiddleware
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os.path
|
||||
import sys
|
||||
import time
|
||||
import typing as t
|
||||
from pstats import Stats
|
||||
|
||||
try:
|
||||
from cProfile import Profile
|
||||
except ImportError:
|
||||
from profile import Profile # type: ignore
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
|
||||
class ProfilerMiddleware:
    """Wrap a WSGI application and profile the execution of each
    request. Responses are buffered so that timings are more exact.

    If ``stream`` is given, :class:`pstats.Stats` are written to it
    after each request. If ``profile_dir`` is given, :mod:`cProfile`
    data files are saved to that directory, one file per request.

    The filename can be customized by passing ``filename_format``. If
    it is a string, it will be formatted using :meth:`str.format` with
    the following fields available:

    - ``{method}`` - The request method; GET, POST, etc.
    - ``{path}`` - The request path or 'root' should one not exist.
    - ``{elapsed}`` - The elapsed time of the request in milliseconds.
    - ``{time}`` - The time of the request.

    If it is a callable, it will be called with the WSGI ``environ`` and
    be expected to return a filename string. The ``environ`` dictionary
    will also have the ``"werkzeug.profiler"`` key populated with a
    dictionary containing the following fields (more may be added in the
    future):

    - ``{elapsed}`` - The elapsed time of the request in milliseconds.
    - ``{time}`` - The time of the request.

    :param app: The WSGI application to wrap.
    :param stream: Write stats to this stream. Disable with ``None``.
    :param sort_by: A tuple of columns to sort stats by. See
        :meth:`pstats.Stats.sort_stats`.
    :param restrictions: A tuple of restrictions to filter stats by. See
        :meth:`pstats.Stats.print_stats`.
    :param profile_dir: Save profile data files to this directory.
    :param filename_format: Format string for profile data file names,
        or a callable returning a name. See explanation above.

    .. code-block:: python

        from werkzeug.middleware.profiler import ProfilerMiddleware
        app = ProfilerMiddleware(app)

    .. versionchanged:: 3.0
        Added the ``"werkzeug.profiler"`` key to the ``filename_format(environ)``
        parameter with the ``elapsed`` and ``time`` fields.

    .. versionchanged:: 0.15
        Stats are written even if ``profile_dir`` is given, and can be
        disabled by passing ``stream=None``.

    .. versionadded:: 0.15
        Added ``filename_format``.

    .. versionadded:: 0.9
        Added ``restrictions`` and ``profile_dir``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        stream: t.IO[str] | None = sys.stdout,
        sort_by: t.Iterable[str] = ("time", "calls"),
        restrictions: t.Iterable[str | int | float] = (),
        profile_dir: str | None = None,
        filename_format: str = "{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof",
    ) -> None:
        self._app = app
        self._stream = stream
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir
        self._filename_format = filename_format

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Profile one request and return the fully buffered response body."""
        response_body: list[bytes] = []

        def catching_start_response(status, headers, exc_info=None):  # type: ignore
            # Pass through to the real start_response, but collect body
            # chunks written via the legacy write() callable too.
            start_response(status, headers, exc_info)
            return response_body.append

        def runapp() -> None:
            # Run the wrapped app and drain its iterator so the whole
            # request is captured inside the profiled call.
            app_iter = self._app(
                environ, t.cast("StartResponse", catching_start_response)
            )
            response_body.extend(app_iter)

            if hasattr(app_iter, "close"):
                app_iter.close()

        profile = Profile()
        start = time.time()
        profile.runcall(runapp)
        body = b"".join(response_body)
        elapsed = time.time() - start

        if self._profile_dir is not None:
            if callable(self._filename_format):
                # Expose timing info to the callable via the environ.
                environ["werkzeug.profiler"] = {
                    "elapsed": elapsed * 1000.0,
                    "time": time.time(),
                }
                filename = self._filename_format(environ)
            else:
                filename = self._filename_format.format(
                    method=environ["REQUEST_METHOD"],
                    path=environ["PATH_INFO"].strip("/").replace("/", ".") or "root",
                    elapsed=elapsed * 1000.0,
                    time=time.time(),
                )
            filename = os.path.join(self._profile_dir, filename)
            profile.dump_stats(filename)

        if self._stream is not None:
            stats = Stats(profile, stream=self._stream)
            stats.sort_stats(*self._sort_by)
            print("-" * 80, file=self._stream)
            path_info = environ.get("PATH_INFO", "")
            print(f"PATH: {path_info!r}", file=self._stream)
            stats.print_stats(*self._restrictions)
            print(f"{'-' * 80}\n", file=self._stream)

        return [body]
|
||||
183
lib/python3.11/site-packages/werkzeug/middleware/proxy_fix.py
Normal file
183
lib/python3.11/site-packages/werkzeug/middleware/proxy_fix.py
Normal file
@ -0,0 +1,183 @@
|
||||
"""
|
||||
X-Forwarded-For Proxy Fix
|
||||
=========================
|
||||
|
||||
This module provides a middleware that adjusts the WSGI environ based on
|
||||
``X-Forwarded-`` headers that proxies in front of an application may
|
||||
set.
|
||||
|
||||
When an application is running behind a proxy server, WSGI may see the
|
||||
request as coming from that server rather than the real client. Proxies
|
||||
set various headers to track where the request actually came from.
|
||||
|
||||
This middleware should only be used if the application is actually
|
||||
behind such a proxy, and should be configured with the number of proxies
|
||||
that are chained in front of it. Not all proxies set all the headers.
|
||||
Since incoming headers can be faked, you must set how many proxies are
|
||||
setting each header so the middleware knows what to trust.
|
||||
|
||||
.. autoclass:: ProxyFix
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
|
||||
from ..http import parse_list_header
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
|
||||
class ProxyFix:
    """Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in
    front of the application may set.

    - ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
    - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
    - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
      ``SERVER_PORT``.
    - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
    - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.

    You must tell the middleware how many proxies set each header so it
    knows what values to trust. It is a security issue to trust values
    that came from the client rather than a proxy.

    The original values of the headers are stored in the WSGI
    environ as ``werkzeug.proxy_fix.orig``, a dict.

    :param app: The WSGI application to wrap.
    :param x_for: Number of values to trust for ``X-Forwarded-For``.
    :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
    :param x_host: Number of values to trust for ``X-Forwarded-Host``.
    :param x_port: Number of values to trust for ``X-Forwarded-Port``.
    :param x_prefix: Number of values to trust for
        ``X-Forwarded-Prefix``.

    .. code-block:: python

        from werkzeug.middleware.proxy_fix import ProxyFix
        # App is behind one proxy that sets the -For and -Host headers.
        app = ProxyFix(app, x_for=1, x_host=1)

    .. versionchanged:: 1.0
        The ``num_proxies`` argument and attribute; the ``get_remote_addr`` method; and
        the environ keys ``orig_remote_addr``, ``orig_wsgi_url_scheme``, and
        ``orig_http_host`` were removed.

    .. versionchanged:: 0.15
        All headers support multiple values. Each header is configured with a separate
        number of trusted proxies.

    .. versionchanged:: 0.15
        Original WSGI environ values are stored in the ``werkzeug.proxy_fix.orig`` dict.

    .. versionchanged:: 0.15
        Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.

    .. versionchanged:: 0.15
        ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
        ``SERVER_NAME`` and ``SERVER_PORT``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        x_for: int = 1,
        x_proto: int = 1,
        x_host: int = 0,
        x_port: int = 0,
        x_prefix: int = 0,
    ) -> None:
        self.app = app
        # Each count is the number of trailing header values to trust;
        # 0 disables that header entirely.
        self.x_for = x_for
        self.x_proto = x_proto
        self.x_host = x_host
        self.x_port = x_port
        self.x_prefix = x_prefix

    def _get_real_value(self, trusted: int, value: str | None) -> str | None:
        """Get the real value from a list header based on the configured
        number of trusted proxies.

        :param trusted: Number of values to trust in the header.
        :param value: Comma separated list header value to parse.
        :return: The real value, or ``None`` if there are fewer values
            than the number of trusted proxies.

        .. versionchanged:: 1.0
            Renamed from ``_get_trusted_comma``.

        .. versionadded:: 0.15
        """
        if not (trusted and value):
            return None
        values = parse_list_header(value)
        # Take the trusted-th value from the end: each proxy appends to
        # the list, so earlier entries may be client-supplied.
        if len(values) >= trusted:
            return values[-trusted]
        return None

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Modify the WSGI environ based on the various ``Forwarded``
        headers before calling the wrapped application. Store the
        original environ values in ``werkzeug.proxy_fix.orig_{key}``.
        """
        environ_get = environ.get
        orig_remote_addr = environ_get("REMOTE_ADDR")
        orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
        orig_http_host = environ_get("HTTP_HOST")
        environ.update(
            {
                "werkzeug.proxy_fix.orig": {
                    "REMOTE_ADDR": orig_remote_addr,
                    "wsgi.url_scheme": orig_wsgi_url_scheme,
                    "HTTP_HOST": orig_http_host,
                    "SERVER_NAME": environ_get("SERVER_NAME"),
                    "SERVER_PORT": environ_get("SERVER_PORT"),
                    "SCRIPT_NAME": environ_get("SCRIPT_NAME"),
                }
            }
        )

        x_for = self._get_real_value(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
        if x_for:
            environ["REMOTE_ADDR"] = x_for

        x_proto = self._get_real_value(
            self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
        )
        if x_proto:
            environ["wsgi.url_scheme"] = x_proto

        x_host = self._get_real_value(self.x_host, environ_get("HTTP_X_FORWARDED_HOST"))
        if x_host:
            environ["HTTP_HOST"] = environ["SERVER_NAME"] = x_host
            # "]" to check for IPv6 address without port
            if ":" in x_host and not x_host.endswith("]"):
                environ["SERVER_NAME"], environ["SERVER_PORT"] = x_host.rsplit(":", 1)

        # Port is handled after host so an explicit forwarded port wins
        # over any port embedded in the forwarded host.
        x_port = self._get_real_value(self.x_port, environ_get("HTTP_X_FORWARDED_PORT"))
        if x_port:
            host = environ.get("HTTP_HOST")
            if host:
                # "]" to check for IPv6 address without port
                if ":" in host and not host.endswith("]"):
                    host = host.rsplit(":", 1)[0]
                environ["HTTP_HOST"] = f"{host}:{x_port}"
            environ["SERVER_PORT"] = x_port

        x_prefix = self._get_real_value(
            self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
        )
        if x_prefix:
            environ["SCRIPT_NAME"] = x_prefix

        return self.app(environ, start_response)
|
||||
283
lib/python3.11/site-packages/werkzeug/middleware/shared_data.py
Normal file
283
lib/python3.11/site-packages/werkzeug/middleware/shared_data.py
Normal file
@ -0,0 +1,283 @@
|
||||
"""
|
||||
Serve Shared Static Files
|
||||
=========================
|
||||
|
||||
.. autoclass:: SharedDataMiddleware
|
||||
:members: is_allowed
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
import importlib.util
|
||||
import mimetypes
|
||||
import os
|
||||
import posixpath
|
||||
import typing as t
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
from io import BytesIO
|
||||
from time import time
|
||||
from zlib import adler32
|
||||
|
||||
from ..http import http_date
|
||||
from ..http import is_resource_modified
|
||||
from ..security import safe_join
|
||||
from ..utils import get_content_type
|
||||
from ..wsgi import get_path_info
|
||||
from ..wsgi import wrap_file
|
||||
|
||||
_TOpener = t.Callable[[], tuple[t.IO[bytes], datetime, int]]
|
||||
_TLoader = t.Callable[[t.Optional[str]], tuple[t.Optional[str], t.Optional[_TOpener]]]
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
|
||||
class SharedDataMiddleware:
    """A WSGI middleware which provides static content for development
    environments or simple server setups. Its usage is quite simple::

        import os
        from werkzeug.middleware.shared_data import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``. This is pretty useful during development
    because a standalone media server is not required. Files can also be
    mounted on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/static': ('myapplication', 'static')
        })

    This will then serve the ``static`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to match the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module. If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    :param app: the application to wrap. If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a list or dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    :param fallback_mimetype: The fallback mimetype for unknown files.

    .. versionchanged:: 1.0
        The default ``fallback_mimetype`` is
        ``application/octet-stream``. If a filename looks like a text
        mimetype, the ``utf-8`` charset is added to it.

    .. versionadded:: 0.6
        Added ``fallback_mimetype``.

    .. versionchanged:: 0.5
        Added ``cache_timeout``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        exports: (
            cabc.Mapping[str, str | tuple[str, str]]
            | t.Iterable[tuple[str, str | tuple[str, str]]]
        ),
        disallow: None = None,
        cache: bool = True,
        cache_timeout: int = 60 * 60 * 12,
        fallback_mimetype: str = "application/octet-stream",
    ) -> None:
        self.app = app
        self.exports: list[tuple[str, _TLoader]] = []
        self.cache = cache
        self.cache_timeout = cache_timeout

        if isinstance(exports, cabc.Mapping):
            exports = exports.items()

        # Build one loader per export: a (package, path) tuple serves
        # package data, a string serves a single file or a directory tree.
        for key, value in exports:
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, str):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError(f"unknown def {value!r}")

            self.exports.append((key, loader))

        if disallow is not None:
            from fnmatch import fnmatch

            # NOTE(review): ``disallow`` is used here as a single fnmatch
            # pattern even though the class docstring calls it "a list of
            # rules" — confirm intended usage with callers.
            self.is_allowed = lambda x: not fnmatch(x, disallow)

        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename: str) -> bool:
        """Subclasses can override this method to disallow the access to
        certain files. However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename: str) -> _TOpener:
        # Deferred open: the lambda yields (file object, mtime as an
        # aware-UTC datetime, file size in bytes) at call time.
        return lambda: (
            open(filename, "rb"),
            datetime.fromtimestamp(os.path.getmtime(filename), tz=timezone.utc),
            int(os.path.getsize(filename)),
        )

    def get_file_loader(self, filename: str) -> _TLoader:
        """Return a loader that always serves the one given file,
        regardless of the requested sub-path."""
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package: str, package_path: str) -> _TLoader:
        """Return a loader serving files through the package's resource
        reader (works for regular and zip-imported packages)."""
        # Fallback timestamp for in-memory resources that have no mtime.
        load_time = datetime.now(timezone.utc)
        spec = importlib.util.find_spec(package)
        reader = spec.loader.get_resource_reader(package)  # type: ignore[union-attr]

        def loader(
            path: str | None,
        ) -> tuple[str | None, _TOpener | None]:
            if path is None:
                return None, None

            # safe_join rejects paths escaping the export root (e.g. "..").
            path = safe_join(package_path, path)

            if path is None:
                return None, None

            basename = posixpath.basename(path)

            try:
                resource = reader.open_resource(path)
            except OSError:
                return None, None

            if isinstance(resource, BytesIO):
                # In-memory resource (e.g. zip import): no file mtime
                # exists, so use the time the loader was created.
                return (
                    basename,
                    lambda: (resource, load_time, len(resource.getvalue())),
                )

            return (
                basename,
                lambda: (
                    resource,
                    datetime.fromtimestamp(
                        os.path.getmtime(resource.name), tz=timezone.utc
                    ),
                    os.path.getsize(resource.name),
                ),
            )

        return loader

    def get_directory_loader(self, directory: str) -> _TLoader:
        """Return a loader that serves files found below ``directory``."""

        def loader(
            path: str | None,
        ) -> tuple[str | None, _TOpener | None]:
            if path is not None:
                # safe_join returns None for paths escaping the directory.
                path = safe_join(directory, path)

                if path is None:
                    return None, None
            else:
                path = directory

            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)

            return None, None

        return loader

    def generate_etag(self, mtime: datetime, file_size: int, real_filename: str) -> str:
        """Build an ETag from the file's mtime, size and name checksum."""
        fn_str = os.fsencode(real_filename)
        timestamp = mtime.timestamp()
        checksum = adler32(fn_str) & 0xFFFFFFFF
        return f"wzsdm-{timestamp}-{file_size}-{checksum}"

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        path = get_path_info(environ)
        file_loader = None

        # Find the first export whose mount point matches the request path,
        # either exactly or as a prefix (with a trailing slash enforced).
        for search_path, loader in self.exports:
            if search_path == path:
                real_filename, file_loader = loader(None)

                if file_loader is not None:
                    break

            if not search_path.endswith("/"):
                search_path += "/"

            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path) :])

                if file_loader is not None:
                    break

        # No export matched, or the file is disallowed: fall through to
        # the wrapped application.
        if file_loader is None or not self.is_allowed(real_filename):  # type: ignore
            return self.app(environ, start_response)

        guessed_type = mimetypes.guess_type(real_filename)  # type: ignore
        mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8")
        f, mtime, file_size = file_loader()

        headers = [("Date", http_date())]

        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)  # type: ignore
            headers += [
                ("Etag", f'"{etag}"'),
                ("Cache-Control", f"max-age={timeout}, public"),
            ]

            # Conditional request: answer 304 and close the unused file.
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response("304 Not Modified", headers)
                return []

            headers.append(("Expires", http_date(time() + timeout)))
        else:
            headers.append(("Cache-Control", "public"))

        headers.extend(
            (
                ("Content-Type", mime_type),
                ("Content-Length", str(file_size)),
                ("Last-Modified", http_date(mtime)),
            )
        )
        start_response("200 OK", headers)
        return wrap_file(environ, f)
|
||||
0
lib/python3.11/site-packages/werkzeug/py.typed
Normal file
0
lib/python3.11/site-packages/werkzeug/py.typed
Normal file
134
lib/python3.11/site-packages/werkzeug/routing/__init__.py
Normal file
134
lib/python3.11/site-packages/werkzeug/routing/__init__.py
Normal file
@ -0,0 +1,134 @@
|
||||
"""When it comes to combining multiple controller or view functions
|
||||
(however you want to call them) you need a dispatcher. A simple way
|
||||
would be applying regular expression tests on the ``PATH_INFO`` and
|
||||
calling registered callback functions that return the value then.
|
||||
|
||||
This module implements a much more powerful system than simple regular
|
||||
expression matching because it can also convert values in the URLs and
|
||||
build URLs.
|
||||
|
||||
Here a simple example that creates a URL map for an application with
|
||||
two subdomains (www and kb) and some URL rules:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
m = Map([
|
||||
# Static URLs
|
||||
Rule('/', endpoint='static/index'),
|
||||
Rule('/about', endpoint='static/about'),
|
||||
Rule('/help', endpoint='static/help'),
|
||||
# Knowledge Base
|
||||
Subdomain('kb', [
|
||||
Rule('/', endpoint='kb/index'),
|
||||
Rule('/browse/', endpoint='kb/browse'),
|
||||
Rule('/browse/<int:id>/', endpoint='kb/browse'),
|
||||
Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
|
||||
])
|
||||
], default_subdomain='www')
|
||||
|
||||
If the application doesn't use subdomains it's perfectly fine to not set
|
||||
the default subdomain and not use the `Subdomain` rule factory. The
|
||||
endpoint in the rules can be anything, for example import paths or
|
||||
unique identifiers. The WSGI application can use those endpoints to get the
|
||||
handler for that URL. It doesn't have to be a string at all but it's
|
||||
recommended.
|
||||
|
||||
Now it's possible to create a URL adapter for one of the subdomains and
|
||||
build URLs:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
c = m.bind('example.com')
|
||||
|
||||
c.build("kb/browse", dict(id=42))
|
||||
'http://kb.example.com/browse/42/'
|
||||
|
||||
c.build("kb/browse", dict())
|
||||
'http://kb.example.com/browse/'
|
||||
|
||||
c.build("kb/browse", dict(id=42, page=3))
|
||||
'http://kb.example.com/browse/42/3'
|
||||
|
||||
c.build("static/about")
|
||||
'/about'
|
||||
|
||||
c.build("static/index", force_external=True)
|
||||
'http://www.example.com/'
|
||||
|
||||
c = m.bind('example.com', subdomain='kb')
|
||||
|
||||
c.build("static/about")
|
||||
'http://www.example.com/about'
|
||||
|
||||
The first argument to bind is the server name *without* the subdomain.
|
||||
Per default it will assume that the script is mounted on the root, but
|
||||
often that's not the case so you can provide the real mount point as
|
||||
second argument:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
c = m.bind('example.com', '/applications/example')
|
||||
|
||||
The third argument can be the subdomain, if not given the default
|
||||
subdomain is used. For more details about binding have a look at the
|
||||
documentation of the `MapAdapter`.
|
||||
|
||||
And here is how you can match URLs:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
c = m.bind('example.com')
|
||||
|
||||
c.match("/")
|
||||
('static/index', {})
|
||||
|
||||
c.match("/about")
|
||||
('static/about', {})
|
||||
|
||||
c = m.bind('example.com', '/', 'kb')
|
||||
|
||||
c.match("/")
|
||||
('kb/index', {})
|
||||
|
||||
c.match("/browse/42/23")
|
||||
('kb/browse', {'id': 42, 'page': 23})
|
||||
|
||||
If matching fails you get a ``NotFound`` exception, if the rule thinks
|
||||
it's a good idea to redirect (for example because the URL was defined
|
||||
to have a slash at the end but the request was missing that slash) it
|
||||
will raise a ``RequestRedirect`` exception. Both are subclasses of
|
||||
``HTTPException`` so you can use those errors as responses in the
|
||||
application.
|
||||
|
||||
If matching succeeded but the URL rule was incompatible to the given
|
||||
method (for example there were only rules for ``GET`` and ``HEAD`` but
|
||||
routing tried to match a ``POST`` request) a ``MethodNotAllowed``
|
||||
exception is raised.
|
||||
"""
|
||||
|
||||
from .converters import AnyConverter as AnyConverter
|
||||
from .converters import BaseConverter as BaseConverter
|
||||
from .converters import FloatConverter as FloatConverter
|
||||
from .converters import IntegerConverter as IntegerConverter
|
||||
from .converters import PathConverter as PathConverter
|
||||
from .converters import UnicodeConverter as UnicodeConverter
|
||||
from .converters import UUIDConverter as UUIDConverter
|
||||
from .converters import ValidationError as ValidationError
|
||||
from .exceptions import BuildError as BuildError
|
||||
from .exceptions import NoMatch as NoMatch
|
||||
from .exceptions import RequestAliasRedirect as RequestAliasRedirect
|
||||
from .exceptions import RequestPath as RequestPath
|
||||
from .exceptions import RequestRedirect as RequestRedirect
|
||||
from .exceptions import RoutingException as RoutingException
|
||||
from .exceptions import WebsocketMismatch as WebsocketMismatch
|
||||
from .map import Map as Map
|
||||
from .map import MapAdapter as MapAdapter
|
||||
from .matcher import StateMachineMatcher as StateMachineMatcher
|
||||
from .rules import EndpointPrefix as EndpointPrefix
|
||||
from .rules import parse_converter_args as parse_converter_args
|
||||
from .rules import Rule as Rule
|
||||
from .rules import RuleFactory as RuleFactory
|
||||
from .rules import RuleTemplate as RuleTemplate
|
||||
from .rules import RuleTemplateFactory as RuleTemplateFactory
|
||||
from .rules import Subdomain as Subdomain
|
||||
from .rules import Submount as Submount
|
||||
261
lib/python3.11/site-packages/werkzeug/routing/converters.py
Normal file
261
lib/python3.11/site-packages/werkzeug/routing/converters.py
Normal file
@ -0,0 +1,261 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import typing as t
|
||||
import uuid
|
||||
from urllib.parse import quote
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from .map import Map
|
||||
|
||||
|
||||
class ValidationError(ValueError):
    """Validation error. If a rule converter raises this exception the rule
    does not match the current URL and the next URL is tried.
    """
|
||||
|
||||
|
||||
class BaseConverter:
    """Base class for all converters.

    .. versionchanged:: 2.3
        ``part_isolating`` defaults to ``False`` if ``regex`` contains a ``/``.
    """

    regex = "[^/]+"
    weight = 100
    part_isolating = True

    def __init_subclass__(cls, **kwargs: t.Any) -> None:
        super().__init_subclass__(**kwargs)

        # A subclass that redefines ``regex`` without explicitly choosing
        # ``part_isolating`` gets it derived from the pattern: a regex
        # that may match "/" cannot be applied one path segment at a time.
        overrides_regex = "regex" in cls.__dict__
        overrides_isolating = "part_isolating" in cls.__dict__

        if overrides_regex and not overrides_isolating:
            cls.part_isolating = "/" not in cls.regex

    def __init__(self, map: Map, *args: t.Any, **kwargs: t.Any) -> None:
        self.map = map

    def to_python(self, value: str) -> t.Any:
        """Convert the matched string; the default keeps it unchanged."""
        return value

    def to_url(self, value: t.Any) -> str:
        """Quote ``value`` for use as a URL path segment.

        safe = https://url.spec.whatwg.org/#url-path-segment-string
        """
        return quote(str(value), safe="!$&'()*+,/:;=@")
|
||||
|
||||
|
||||
class UnicodeConverter(BaseConverter):
    """The default converter. Accepts any string, but only within a single
    path segment — the value can never contain a slash.

    Example::

        Rule('/pages/<page>'),
        Rule('/<string(length=2):lang_code>')

    :param map: the :class:`Map`.
    :param minlength: the minimum length of the string. Must be greater
                      or equal 1.
    :param maxlength: the maximum length of the string.
    :param length: the exact length of the string.
    """

    def __init__(
        self,
        map: Map,
        minlength: int = 1,
        maxlength: int | None = None,
        length: int | None = None,
    ) -> None:
        super().__init__(map)

        # An exact ``length`` takes precedence over the min/max pair.
        if length is not None:
            quantifier = f"{{{int(length)}}}"
        else:
            upper = "" if maxlength is None else str(int(maxlength))
            quantifier = f"{{{int(minlength)},{upper}}}"

        self.regex = f"[^/]{quantifier}"
|
||||
|
||||
|
||||
class AnyConverter(BaseConverter):
    """Matches one of the items provided. Items can either be Python
    identifiers or strings::

        Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')

    :param map: the :class:`Map`.
    :param items: the accepted values, given as positional arguments.

    .. versionchanged:: 2.2
        Value is validated when building a URL.
    """

    def __init__(self, map: Map, *items: str) -> None:
        super().__init__(map)
        self.items = set(items)
        alternatives = "|".join(re.escape(item) for item in items)
        self.regex = f"(?:{alternatives})"

    def to_url(self, value: t.Any) -> str:
        # Reject values outside the declared set at build time so bad
        # URLs fail loudly instead of producing a non-matching link.
        if value not in self.items:
            valid_values = ", ".join(f"'{item}'" for item in sorted(self.items))
            raise ValueError(f"'{value}' is not one of {valid_values}")

        return str(value)
|
||||
|
||||
|
||||
class PathConverter(BaseConverter):
    """Like the default :class:`UnicodeConverter`, but it also matches
    slashes. This is useful for wikis and similar applications::

        Rule('/<path:wikipage>')
        Rule('/<path:wikipage>/edit')

    :param map: the :class:`Map`.
    """

    # The regex spans slashes, so matching cannot be segment-isolated.
    part_isolating = False
    # Must not start with "/" (non-greedy so trailing rule parts match).
    regex = "[^/].*?"
    weight = 200
|
||||
|
||||
|
||||
class NumberConverter(BaseConverter):
    """Shared implementation behind `IntegerConverter` and
    `FloatConverter`.

    :internal:
    """

    weight = 50
    num_convert: t.Callable[[t.Any], t.Any] = int

    def __init__(
        self,
        map: Map,
        fixed_digits: int = 0,
        min: int | None = None,
        max: int | None = None,
        signed: bool = False,
    ) -> None:
        # The signed variant just allows an optional leading minus sign.
        if signed:
            self.regex = self.signed_regex

        super().__init__(map)
        self.fixed_digits = fixed_digits
        self.min = min
        self.max = max
        self.signed = signed

    def to_python(self, value: str) -> t.Any:
        # A fixed-width rule only matches values of exactly that width.
        if self.fixed_digits and len(value) != self.fixed_digits:
            raise ValidationError()

        converted = self.num_convert(value)
        below_min = self.min is not None and converted < self.min
        above_max = self.max is not None and converted > self.max

        if below_min or above_max:
            raise ValidationError()

        return converted

    def to_url(self, value: t.Any) -> str:
        rendered = str(self.num_convert(value))

        if self.fixed_digits:
            rendered = rendered.zfill(self.fixed_digits)

        return rendered

    @property
    def signed_regex(self) -> str:
        return f"-?{self.regex}"
|
||||
|
||||
|
||||
class IntegerConverter(NumberConverter):
    """This converter only accepts integer values::

        Rule("/page/<int:page>")

    By default it only accepts unsigned, positive values. The ``signed``
    parameter will enable signed, negative values. ::

        Rule("/page/<int(signed=True):page>")

    :param map: The :class:`Map`.
    :param fixed_digits: The number of fixed digits in the URL. If you
        set this to ``4`` for example, the rule will only match if the
        URL looks like ``/0001/``. The default is variable length.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """

    # Digits only; ``num_convert`` stays the inherited ``int``.
    regex = r"\d+"
|
||||
|
||||
|
||||
class FloatConverter(NumberConverter):
    """This converter only accepts floating point values::

        Rule("/probability/<float:probability>")

    By default it only accepts unsigned, positive values. The ``signed``
    parameter will enable signed, negative values. ::

        Rule("/offset/<float(signed=True):offset>")

    :param map: The :class:`Map`.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """

    # Requires an explicit decimal point, so plain integers don't match.
    regex = r"\d+\.\d+"
    num_convert = float

    def __init__(
        self,
        map: Map,
        min: float | None = None,
        max: float | None = None,
        signed: bool = False,
    ) -> None:
        # Narrows the parent signature: floats don't support fixed_digits.
        super().__init__(map, min=min, max=max, signed=signed)  # type: ignore
|
||||
|
||||
|
||||
class UUIDConverter(BaseConverter):
    """This converter only accepts UUID strings::

        Rule('/object/<uuid:identifier>')

    .. versionadded:: 0.10

    :param map: the :class:`Map`.
    """

    # Canonical 8-4-4-4-12 hex form, accepting either letter case.
    regex = (
        r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
        r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
    )

    def to_python(self, value: str) -> uuid.UUID:
        return uuid.UUID(value)

    def to_url(self, value: uuid.UUID) -> str:
        return str(value)
|
||||
|
||||
|
||||
#: the default converter mapping for the map.
DEFAULT_CONVERTERS: t.Mapping[str, type[BaseConverter]] = {
    "default": UnicodeConverter,
    # "string" is an alias for the default converter.
    "string": UnicodeConverter,
    "any": AnyConverter,
    "path": PathConverter,
    "int": IntegerConverter,
    "float": FloatConverter,
    "uuid": UUIDConverter,
}
|
||||
152
lib/python3.11/site-packages/werkzeug/routing/exceptions.py
Normal file
152
lib/python3.11/site-packages/werkzeug/routing/exceptions.py
Normal file
@ -0,0 +1,152 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import difflib
|
||||
import typing as t
|
||||
|
||||
from ..exceptions import BadRequest
|
||||
from ..exceptions import HTTPException
|
||||
from ..utils import cached_property
|
||||
from ..utils import redirect
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
from ..wrappers.request import Request
|
||||
from ..wrappers.response import Response
|
||||
from .map import MapAdapter
|
||||
from .rules import Rule
|
||||
|
||||
|
||||
class RoutingException(Exception):
    """Special exceptions that require the application to redirect, notifying
    about missing urls, etc.

    :internal:
    """
|
||||
|
||||
|
||||
class RequestRedirect(HTTPException, RoutingException):
    """Raised when the map wants the client redirected — for example when
    `strict_slashes` is active and the matched URL is missing its required
    trailing slash.

    The attribute `new_url` contains the absolute destination url.
    """

    # 308 Permanent Redirect preserves the request method on follow-up.
    code = 308

    def __init__(self, new_url: str) -> None:
        super().__init__(new_url)
        self.new_url = new_url

    def get_response(
        self,
        environ: WSGIEnvironment | Request | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> Response:
        # Both arguments exist for interface compatibility only; the
        # response is fully determined by the stored destination URL.
        return redirect(self.new_url, self.code)
|
||||
|
||||
|
||||
class RequestPath(RoutingException):
    """Internal exception."""

    # Only carries the replacement path; no message or other state.
    __slots__ = ("path_info",)

    def __init__(self, path_info: str) -> None:
        super().__init__()
        # The path the matcher wants the request re-matched against.
        self.path_info = path_info
|
||||
|
||||
|
||||
class RequestAliasRedirect(RoutingException):  # noqa: B903
    """This rule is an alias and wants to redirect to the canonical URL."""

    def __init__(self, matched_values: t.Mapping[str, t.Any], endpoint: t.Any) -> None:
        super().__init__()
        # The converter values captured while matching the alias rule.
        self.matched_values = matched_values
        # The endpoint whose canonical URL should be built instead.
        self.endpoint = endpoint
|
||||
|
||||
|
||||
class BuildError(RoutingException, LookupError):
    """Raised if the build system cannot find a URL for an endpoint with the
    values provided.
    """

    def __init__(
        self,
        endpoint: t.Any,
        values: t.Mapping[str, t.Any],
        method: str | None,
        adapter: MapAdapter | None = None,
    ) -> None:
        super().__init__(endpoint, values, method)
        self.endpoint = endpoint
        self.values = values
        self.method = method
        self.adapter = adapter

    @cached_property
    def suggested(self) -> Rule | None:
        # Lazily computed "did you mean" candidate; None when no adapter
        # (or no rules) is available.
        return self.closest_rule(self.adapter)

    def closest_rule(self, adapter: MapAdapter | None) -> Rule | None:
        """Return the registered rule most similar to the failed request."""

        # Endpoint-name similarity dominates the score (weight 0.98);
        # small bonuses apply when the provided values fit the rule's
        # arguments and when the rule accepts the requested method.
        def _score_rule(rule: Rule) -> float:
            return sum(
                [
                    0.98
                    * difflib.SequenceMatcher(
                        # endpoints can be any type, compare as strings
                        None,
                        str(rule.endpoint),
                        str(self.endpoint),
                    ).ratio(),
                    0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
                    0.01 * bool(rule.methods and self.method in rule.methods),
                ]
            )

        if adapter and adapter.map._rules:
            return max(adapter.map._rules, key=_score_rule)

        return None

    def __str__(self) -> str:
        # Assemble the message from fragments, then append the most
        # helpful hint available: wrong method, missing values, or a
        # similarly named endpoint.
        message = [f"Could not build url for endpoint {self.endpoint!r}"]
        if self.method:
            message.append(f" ({self.method!r})")
        if self.values:
            message.append(f" with values {sorted(self.values)!r}")
        message.append(".")
        if self.suggested:
            if self.endpoint == self.suggested.endpoint:
                if (
                    self.method
                    and self.suggested.methods is not None
                    and self.method not in self.suggested.methods
                ):
                    message.append(
                        " Did you mean to use methods"
                        f" {sorted(self.suggested.methods)!r}?"
                    )
                missing_values = self.suggested.arguments.union(
                    set(self.suggested.defaults or ())
                ) - set(self.values.keys())
                if missing_values:
                    message.append(
                        f" Did you forget to specify values {sorted(missing_values)!r}?"
                    )
            else:
                message.append(f" Did you mean {self.suggested.endpoint!r} instead?")
        return "".join(message)
|
||||
|
||||
|
||||
class WebsocketMismatch(BadRequest):
    """The only matched rule is either a WebSocket and the request is
    HTTP, or the rule is HTTP and the request is a WebSocket.
    """
|
||||
|
||||
|
||||
class NoMatch(Exception):
    """Internal signal that matching failed, carrying the details needed
    to pick the right HTTP error afterwards.
    """

    __slots__ = ("have_match_for", "websocket_mismatch")

    def __init__(self, have_match_for: set[str], websocket_mismatch: bool) -> None:
        # HTTP methods that would have matched the path (405 candidates).
        self.have_match_for = have_match_for
        # True when the only match failed on an HTTP/WebSocket mismatch.
        self.websocket_mismatch = websocket_mismatch
|
||||
951
lib/python3.11/site-packages/werkzeug/routing/map.py
Normal file
951
lib/python3.11/site-packages/werkzeug/routing/map.py
Normal file
@ -0,0 +1,951 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
import warnings
|
||||
from pprint import pformat
|
||||
from threading import Lock
|
||||
from urllib.parse import quote
|
||||
from urllib.parse import urljoin
|
||||
from urllib.parse import urlunsplit
|
||||
|
||||
from .._internal import _get_environ
|
||||
from .._internal import _wsgi_decoding_dance
|
||||
from ..datastructures import ImmutableDict
|
||||
from ..datastructures import MultiDict
|
||||
from ..exceptions import BadHost
|
||||
from ..exceptions import HTTPException
|
||||
from ..exceptions import MethodNotAllowed
|
||||
from ..exceptions import NotFound
|
||||
from ..urls import _urlencode
|
||||
from ..wsgi import get_host
|
||||
from .converters import DEFAULT_CONVERTERS
|
||||
from .exceptions import BuildError
|
||||
from .exceptions import NoMatch
|
||||
from .exceptions import RequestAliasRedirect
|
||||
from .exceptions import RequestPath
|
||||
from .exceptions import RequestRedirect
|
||||
from .exceptions import WebsocketMismatch
|
||||
from .matcher import StateMachineMatcher
|
||||
from .rules import _simple_rule_re
|
||||
from .rules import Rule
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
from ..wrappers.request import Request
|
||||
from .converters import BaseConverter
|
||||
from .rules import RuleFactory
|
||||
|
||||
|
||||
class Map:
|
||||
"""The map class stores all the URL rules and some configuration
|
||||
parameters. Some of the configuration values are only stored on the
|
||||
`Map` instance since those affect all rules, others are just defaults
|
||||
and can be overridden for each rule. Note that you have to specify all
|
||||
arguments besides the `rules` as keyword arguments!
|
||||
|
||||
:param rules: sequence of url rules for this map.
|
||||
:param default_subdomain: The default subdomain for rules without a
|
||||
subdomain defined.
|
||||
:param strict_slashes: If a rule ends with a slash but the matched
|
||||
URL does not, redirect to the URL with a trailing slash.
|
||||
:param merge_slashes: Merge consecutive slashes when matching or
|
||||
building URLs. Matches will redirect to the normalized URL.
|
||||
Slashes in variable parts are not merged.
|
||||
:param redirect_defaults: This will redirect to the default rule if it
|
||||
wasn't visited that way. This helps creating
|
||||
unique URLs.
|
||||
:param converters: A dict of converters that adds additional converters
|
||||
to the list of converters. If you redefine one
|
||||
converter this will override the original one.
|
||||
:param sort_parameters: If set to `True` the url parameters are sorted.
|
||||
See `url_encode` for more details.
|
||||
:param sort_key: The sort key function for `url_encode`.
|
||||
:param host_matching: if set to `True` it enables the host matching
|
||||
feature and disables the subdomain one. If
|
||||
enabled the `host` parameter to rules is used
|
||||
instead of the `subdomain` one.
|
||||
|
||||
.. versionchanged:: 3.0
|
||||
The ``charset`` and ``encoding_errors`` parameters were removed.
|
||||
|
||||
.. versionchanged:: 1.0
|
||||
If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules will match.
|
||||
|
||||
.. versionchanged:: 1.0
|
||||
The ``merge_slashes`` parameter was added.
|
||||
|
||||
.. versionchanged:: 0.7
|
||||
The ``encoding_errors`` and ``host_matching`` parameters were added.
|
||||
|
||||
.. versionchanged:: 0.5
|
||||
The ``sort_parameters`` and ``sort_key`` paramters were added.
|
||||
"""
|
||||
|
||||
#: A dict of default converters to be used.
|
||||
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
|
||||
|
||||
#: The type of lock to use when updating.
|
||||
#:
|
||||
#: .. versionadded:: 1.0
|
||||
lock_class = Lock
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
rules: t.Iterable[RuleFactory] | None = None,
|
||||
default_subdomain: str = "",
|
||||
strict_slashes: bool = True,
|
||||
merge_slashes: bool = True,
|
||||
redirect_defaults: bool = True,
|
||||
converters: t.Mapping[str, type[BaseConverter]] | None = None,
|
||||
sort_parameters: bool = False,
|
||||
sort_key: t.Callable[[t.Any], t.Any] | None = None,
|
||||
host_matching: bool = False,
|
||||
) -> None:
|
||||
self._matcher = StateMachineMatcher(merge_slashes)
|
||||
self._rules_by_endpoint: dict[t.Any, list[Rule]] = {}
|
||||
self._remap = True
|
||||
self._remap_lock = self.lock_class()
|
||||
|
||||
self.default_subdomain = default_subdomain
|
||||
self.strict_slashes = strict_slashes
|
||||
self.redirect_defaults = redirect_defaults
|
||||
self.host_matching = host_matching
|
||||
|
||||
self.converters = self.default_converters.copy()
|
||||
if converters:
|
||||
self.converters.update(converters)
|
||||
|
||||
self.sort_parameters = sort_parameters
|
||||
self.sort_key = sort_key
|
||||
|
||||
for rulefactory in rules or ():
|
||||
self.add(rulefactory)
|
||||
|
||||
    @property
    def merge_slashes(self) -> bool:
        """Whether consecutive slashes are merged when matching.

        The value is stored on the underlying matcher, not on the map itself.
        """
        return self._matcher.merge_slashes
|
||||
|
||||
    @merge_slashes.setter
    def merge_slashes(self, value: bool) -> None:
        # Forward the setting to the matcher, which owns the behavior.
        self._matcher.merge_slashes = value
|
||||
|
||||
def is_endpoint_expecting(self, endpoint: t.Any, *arguments: str) -> bool:
|
||||
"""Iterate over all rules and check if the endpoint expects
|
||||
the arguments provided. This is for example useful if you have
|
||||
some URLs that expect a language code and others that do not and
|
||||
you want to wrap the builder a bit so that the current language
|
||||
code is automatically added if not provided but endpoints expect
|
||||
it.
|
||||
|
||||
:param endpoint: the endpoint to check.
|
||||
:param arguments: this function accepts one or more arguments
|
||||
as positional arguments. Each one of them is
|
||||
checked.
|
||||
"""
|
||||
self.update()
|
||||
arguments_set = set(arguments)
|
||||
for rule in self._rules_by_endpoint[endpoint]:
|
||||
if arguments_set.issubset(rule.arguments):
|
||||
return True
|
||||
return False
|
||||
|
||||
@property
|
||||
def _rules(self) -> list[Rule]:
|
||||
return [rule for rules in self._rules_by_endpoint.values() for rule in rules]
|
||||
|
||||
def iter_rules(self, endpoint: t.Any | None = None) -> t.Iterator[Rule]:
|
||||
"""Iterate over all rules or the rules of an endpoint.
|
||||
|
||||
:param endpoint: if provided only the rules for that endpoint
|
||||
are returned.
|
||||
:return: an iterator
|
||||
"""
|
||||
self.update()
|
||||
if endpoint is not None:
|
||||
return iter(self._rules_by_endpoint[endpoint])
|
||||
return iter(self._rules)
|
||||
|
||||
def add(self, rulefactory: RuleFactory) -> None:
|
||||
"""Add a new rule or factory to the map and bind it. Requires that the
|
||||
rule is not bound to another map.
|
||||
|
||||
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
|
||||
"""
|
||||
for rule in rulefactory.get_rules(self):
|
||||
rule.bind(self)
|
||||
if not rule.build_only:
|
||||
self._matcher.add(rule)
|
||||
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
|
||||
self._remap = True
|
||||
|
||||
    def bind(
        self,
        server_name: str,
        script_name: str | None = None,
        subdomain: str | None = None,
        url_scheme: str = "http",
        default_method: str = "GET",
        path_info: str | None = None,
        query_args: t.Mapping[str, t.Any] | str | None = None,
    ) -> MapAdapter:
        """Return a new :class:`MapAdapter` with the details specified to the
        call. Note that `script_name` will default to ``'/'`` if not further
        specified or `None`. The `server_name` at least is a requirement
        because the HTTP RFC requires absolute URLs for redirects and so all
        redirect exceptions raised by Werkzeug will contain the full canonical
        URL.

        If no path_info is passed to :meth:`match` it will use the default path
        info passed to bind. While this doesn't really make sense for
        manual bind calls, it's useful if you bind a map to a WSGI
        environment which already contains the path info.

        `subdomain` will default to the `default_subdomain` for this map if
        none is defined. If there is no `default_subdomain` you cannot use
        the subdomain feature.

        .. versionchanged:: 1.0
            If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
            will match.

        .. versionchanged:: 0.15
            ``path_info`` defaults to ``'/'`` if ``None``.

        .. versionchanged:: 0.8
            ``query_args`` can be a string.

        .. versionchanged:: 0.7
            Added ``query_args``.
        """
        server_name = server_name.lower()
        if self.host_matching:
            # Host matching and the subdomain feature are mutually exclusive.
            if subdomain is not None:
                raise RuntimeError("host matching enabled and a subdomain was provided")
        elif subdomain is None:
            subdomain = self.default_subdomain
        if script_name is None:
            script_name = "/"
        if path_info is None:
            path_info = "/"

        # Port isn't part of IDNA, and might push a name over the 63 octet limit.
        server_name, port_sep, port = server_name.partition(":")

        try:
            # Normalize internationalized host names; an invalid host is a
            # client error, not a server bug.
            server_name = server_name.encode("idna").decode("ascii")
        except UnicodeError as e:
            raise BadHost() from e

        return MapAdapter(
            self,
            f"{server_name}{port_sep}{port}",
            script_name,
            subdomain,
            url_scheme,
            path_info,
            default_method,
            query_args,
        )
|
||||
|
||||
    def bind_to_environ(
        self,
        environ: WSGIEnvironment | Request,
        server_name: str | None = None,
        subdomain: str | None = None,
    ) -> MapAdapter:
        """Like :meth:`bind` but you can pass it an WSGI environment and it
        will fetch the information from that dictionary. Note that because of
        limitations in the protocol there is no way to get the current
        subdomain and real `server_name` from the environment. If you don't
        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as used `server_name` with disabled subdomain
        feature.

        If `subdomain` is `None` but an environment and a server name is
        provided it will calculate the current subdomain automatically.
        Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
        in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
        subdomain will be ``'staging.dev'``.

        If the object passed as environ has an environ attribute, the value of
        this attribute is used instead. This allows you to pass request
        objects. Additionally `PATH_INFO` added as a default of the
        :class:`MapAdapter` so that you don't have to pass the path info to
        the match method.

        .. versionchanged:: 1.0.0
            If the passed server name specifies port 443, it will match
            if the incoming scheme is ``https`` without a port.

        .. versionchanged:: 1.0.0
            A warning is shown when the passed server name does not
            match the incoming WSGI server name.

        .. versionchanged:: 0.8
            This will no longer raise a ValueError when an unexpected server
            name was passed.

        .. versionchanged:: 0.5
            previously this method accepted a bogus `calculate_subdomain`
            parameter that did not have any effect. It was removed because
            of that.

        :param environ: a WSGI environment.
        :param server_name: an optional server name hint (see above).
        :param subdomain: optionally the current subdomain (see above).
        """
        env = _get_environ(environ)
        wsgi_server_name = get_host(env).lower()
        scheme = env["wsgi.url_scheme"]
        # HTTP_CONNECTION may be a comma-separated token list; any token
        # equal to "upgrade" marks a potential protocol upgrade.
        upgrade = any(
            v.strip() == "upgrade"
            for v in env.get("HTTP_CONNECTION", "").lower().split(",")
        )

        # Treat an upgrade-to-websocket request as a ws/wss bind so that
        # only WebSocket rules match.
        if upgrade and env.get("HTTP_UPGRADE", "").lower() == "websocket":
            scheme = "wss" if scheme == "https" else "ws"

        if server_name is None:
            server_name = wsgi_server_name
        else:
            server_name = server_name.lower()

            # strip standard port to match get_host()
            if scheme in {"http", "ws"} and server_name.endswith(":80"):
                server_name = server_name[:-3]
            elif scheme in {"https", "wss"} and server_name.endswith(":443"):
                server_name = server_name[:-4]

        if subdomain is None and not self.host_matching:
            cur_server_name = wsgi_server_name.split(".")
            real_server_name = server_name.split(".")
            # Compare only the trailing labels: anything in front of the
            # configured name is the subdomain.
            offset = -len(real_server_name)

            if cur_server_name[offset:] != real_server_name:
                # This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
                # Instead of raising an exception like in Werkzeug 0.7 or
                # earlier we go by an invalid subdomain which will result
                # in a 404 error on matching.
                warnings.warn(
                    f"Current server name {wsgi_server_name!r} doesn't match configured"
                    f" server name {server_name!r}",
                    stacklevel=2,
                )
                subdomain = "<invalid>"
            else:
                subdomain = ".".join(filter(None, cur_server_name[:offset]))

        def _get_wsgi_string(name: str) -> str | None:
            # WSGI strings are latin-1 encoded; decode them to real text.
            val = env.get(name)
            if val is not None:
                return _wsgi_decoding_dance(val)
            return None

        script_name = _get_wsgi_string("SCRIPT_NAME")
        path_info = _get_wsgi_string("PATH_INFO")
        query_args = _get_wsgi_string("QUERY_STRING")
        return Map.bind(
            self,
            server_name,
            script_name,
            subdomain,
            scheme,
            env["REQUEST_METHOD"],
            path_info,
            query_args=query_args,
        )
|
||||
|
||||
    def update(self) -> None:
        """Called before matching and building to keep the compiled rules
        in the correct order after things changed.
        """
        # Cheap unlocked fast path when nothing changed.
        if not self._remap:
            return

        with self._remap_lock:
            # Double-checked: another thread may have remapped while we
            # waited for the lock.
            if not self._remap:
                return

            self._matcher.update()
            for rules in self._rules_by_endpoint.values():
                rules.sort(key=lambda x: x.build_compare_key())
            self._remap = False
|
||||
|
||||
def __repr__(self) -> str:
|
||||
rules = self.iter_rules()
|
||||
return f"{type(self).__name__}({pformat(list(rules))})"
|
||||
|
||||
|
||||
class MapAdapter:
    """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
    the URL matching and building based on runtime information.
    """

    def __init__(
        self,
        map: Map,
        server_name: str,
        script_name: str,
        subdomain: str | None,
        url_scheme: str,
        path_info: str,
        default_method: str,
        query_args: t.Mapping[str, t.Any] | str | None = None,
    ) -> None:
        self.map = map
        self.server_name = server_name

        # Normalize so the mount point always ends with a slash; URL
        # building below relies on this invariant.
        if not script_name.endswith("/"):
            script_name += "/"

        self.script_name = script_name
        self.subdomain = subdomain
        self.url_scheme = url_scheme
        self.path_info = path_info
        self.default_method = default_method
        self.query_args = query_args
        # A ws/wss bind matches WebSocket rules instead of HTTP ones.
        self.websocket = self.url_scheme in {"ws", "wss"}

    def dispatch(
        self,
        view_func: t.Callable[[str, t.Mapping[str, t.Any]], WSGIApplication],
        path_info: str | None = None,
        method: str | None = None,
        catch_http_exceptions: bool = False,
    ) -> WSGIApplication:
        """Does the complete dispatching process. `view_func` is called with
        the endpoint and a dict with the values for the view. It should
        look up the view function, call it, and return a response object
        or WSGI application. http exceptions are not caught by default
        so that applications can display nicer error messages by just
        catching them by hand. If you want to stick with the default
        error messages you can pass it ``catch_http_exceptions=True`` and
        it will catch the http exceptions.

        Here a small example for the dispatch usage::

            from werkzeug.wrappers import Request, Response
            from werkzeug.wsgi import responder
            from werkzeug.routing import Map, Rule

            def on_index(request):
                return Response('Hello from the index')

            url_map = Map([Rule('/', endpoint='index')])
            views = {'index': on_index}

            @responder
            def application(environ, start_response):
                request = Request(environ)
                urls = url_map.bind_to_environ(environ)
                return urls.dispatch(lambda e, v: views[e](request, **v),
                                     catch_http_exceptions=True)

        Keep in mind that this method might return exception objects, too, so
        use :class:`Response.force_type` to get a response object.

        :param view_func: a function that is called with the endpoint as
                          first argument and the value dict as second. Has
                          to dispatch to the actual view function with this
                          information. (see above)
        :param path_info: the path info to use for matching. Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching. Overrides the
                       method specified on binding.
        :param catch_http_exceptions: set to `True` to catch any of the
                                      werkzeug :class:`HTTPException`\\s.
        """
        try:
            try:
                endpoint, args = self.match(path_info, method)
            except RequestRedirect as e:
                # A redirect is itself a WSGI response; return it directly.
                return e
            return view_func(endpoint, args)
        except HTTPException as e:
            if catch_http_exceptions:
                return e
            raise

    @t.overload
    def match(
        self,
        path_info: str | None = None,
        method: str | None = None,
        return_rule: t.Literal[False] = False,
        query_args: t.Mapping[str, t.Any] | str | None = None,
        websocket: bool | None = None,
    ) -> tuple[t.Any, t.Mapping[str, t.Any]]: ...

    @t.overload
    def match(
        self,
        path_info: str | None = None,
        method: str | None = None,
        return_rule: t.Literal[True] = True,
        query_args: t.Mapping[str, t.Any] | str | None = None,
        websocket: bool | None = None,
    ) -> tuple[Rule, t.Mapping[str, t.Any]]: ...

    def match(
        self,
        path_info: str | None = None,
        method: str | None = None,
        return_rule: bool = False,
        query_args: t.Mapping[str, t.Any] | str | None = None,
        websocket: bool | None = None,
    ) -> tuple[t.Any | Rule, t.Mapping[str, t.Any]]:
        """The usage is simple: you just pass the match method the current
        path info as well as the method (which defaults to `GET`). The
        following things can then happen:

        - you receive a `NotFound` exception that indicates that no URL is
          matching. A `NotFound` exception is also a WSGI application you
          can call to get a default page not found page (happens to be the
          same object as `werkzeug.exceptions.NotFound`)

        - you receive a `MethodNotAllowed` exception that indicates that there
          is a match for this URL but not for the current request method.
          This is useful for RESTful applications.

        - you receive a `RequestRedirect` exception with a `new_url`
          attribute. This exception is used to notify you about a request
          Werkzeug requests from your WSGI application. This is for example the
          case if you request ``/foo`` although the correct URL is ``/foo/``
          You can use the `RequestRedirect` instance as response-like object
          similar to all other subclasses of `HTTPException`.

        - you receive a ``WebsocketMismatch`` exception if the only
          match is a WebSocket rule but the bind is an HTTP request, or
          if the match is an HTTP rule but the bind is a WebSocket
          request.

        - you get a tuple in the form ``(endpoint, arguments)`` if there is
          a match (unless `return_rule` is True, in which case you get a tuple
          in the form ``(rule, arguments)``)

        If the path info is not passed to the match method the default path
        info of the map is used (defaults to the root URL if not defined
        explicitly).

        All of the exceptions raised are subclasses of `HTTPException` so they
        can be used as WSGI responses. They will all render generic error or
        redirect pages.

        Here is a small example for matching:

        >>> m = Map([
        ...     Rule('/', endpoint='index'),
        ...     Rule('/downloads/', endpoint='downloads/index'),
        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
        ... ])
        >>> urls = m.bind("example.com", "/")
        >>> urls.match("/", "GET")
        ('index', {})
        >>> urls.match("/downloads/42")
        ('downloads/show', {'id': 42})

        And here is what happens on redirect and missing URLs:

        >>> urls.match("/downloads")
        Traceback (most recent call last):
          ...
        RequestRedirect: http://example.com/downloads/
        >>> urls.match("/missing")
        Traceback (most recent call last):
          ...
        NotFound: 404 Not Found

        :param path_info: the path info to use for matching. Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching. Overrides the
                       method specified on binding.
        :param return_rule: return the rule that matched instead of just the
                            endpoint (defaults to `False`).
        :param query_args: optional query arguments that are used for
                           automatic redirects as string or dictionary. It's
                           currently not possible to use the query arguments
                           for URL matching.
        :param websocket: Match WebSocket instead of HTTP requests. A
                          websocket request has a ``ws`` or ``wss``
                          :attr:`url_scheme`. This overrides that detection.

        .. versionadded:: 1.0
            Added ``websocket``.

        .. versionchanged:: 0.8
            ``query_args`` can be a string.

        .. versionadded:: 0.7
            Added ``query_args``.

        .. versionadded:: 0.6
            Added ``return_rule``.
        """
        self.map.update()
        if path_info is None:
            path_info = self.path_info
        if query_args is None:
            query_args = self.query_args or {}
        method = (method or self.default_method).upper()

        if websocket is None:
            websocket = self.websocket

        # The "domain part" fed to the matcher is either the full host
        # (host matching) or just the subdomain.
        domain_part = self.server_name

        if not self.map.host_matching and self.subdomain is not None:
            domain_part = self.subdomain

        path_part = f"/{path_info.lstrip('/')}" if path_info else ""

        try:
            result = self.map._matcher.match(domain_part, path_part, method, websocket)
        except RequestPath as e:
            # The matcher wants a slash redirect to a normalized path.
            # safe = https://url.spec.whatwg.org/#url-path-segment-string
            new_path = quote(e.path_info, safe="!$&'()*+,/:;=@")
            raise RequestRedirect(
                self.make_redirect_url(new_path, query_args)
            ) from None
        except RequestAliasRedirect as e:
            raise RequestRedirect(
                self.make_alias_redirect_url(
                    f"{domain_part}|{path_part}",
                    e.endpoint,
                    e.matched_values,
                    method,
                    query_args,
                )
            ) from None
        except NoMatch as e:
            # Prefer the most specific failure: wrong method, then wrong
            # protocol (ws vs http), then plain 404.
            if e.have_match_for:
                raise MethodNotAllowed(valid_methods=list(e.have_match_for)) from None

            if e.websocket_mismatch:
                raise WebsocketMismatch() from None

            raise NotFound() from None
        else:
            rule, rv = result

            if self.map.redirect_defaults:
                redirect_url = self.get_default_redirect(rule, method, rv, query_args)
                if redirect_url is not None:
                    raise RequestRedirect(redirect_url)

            if rule.redirect_to is not None:
                if isinstance(rule.redirect_to, str):
                    # Substitute <name> placeholders in the redirect target
                    # using the matched values and their converters.
                    def _handle_match(match: t.Match[str]) -> str:
                        value = rv[match.group(1)]
                        return rule._converters[match.group(1)].to_url(value)

                    redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
                else:
                    # A callable redirect target builds the URL itself.
                    redirect_url = rule.redirect_to(self, **rv)

                if self.subdomain:
                    netloc = f"{self.subdomain}.{self.server_name}"
                else:
                    netloc = self.server_name

                raise RequestRedirect(
                    urljoin(
                        f"{self.url_scheme or 'http'}://{netloc}{self.script_name}",
                        redirect_url,
                    )
                )

            if return_rule:
                return rule, rv
            else:
                return rule.endpoint, rv

    def test(self, path_info: str | None = None, method: str | None = None) -> bool:
        """Test if a rule would match. Works like `match` but returns `True`
        if the URL matches, or `False` if it does not exist.

        :param path_info: the path info to use for matching. Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching. Overrides the
                       method specified on binding.
        """
        try:
            self.match(path_info, method)
        except RequestRedirect:
            # A redirect still means the URL exists.
            pass
        except HTTPException:
            return False
        return True

    def allowed_methods(self, path_info: str | None = None) -> t.Iterable[str]:
        """Returns the valid methods that match for a given path.

        .. versionadded:: 0.7
        """
        try:
            # "--" can never be a real method, so a match failure reports
            # the methods that would have been valid.
            self.match(path_info, method="--")
        except MethodNotAllowed as e:
            return e.valid_methods  # type: ignore
        except HTTPException:
            pass
        return []

    def get_host(self, domain_part: str | None) -> str:
        """Figures out the full host name for the given domain part. The
        domain part is a subdomain in case host matching is disabled or
        a full host name.
        """
        if self.map.host_matching:
            if domain_part is None:
                return self.server_name

            return domain_part

        if domain_part is None:
            subdomain = self.subdomain
        else:
            subdomain = domain_part

        if subdomain:
            return f"{subdomain}.{self.server_name}"
        else:
            return self.server_name

    def get_default_redirect(
        self,
        rule: Rule,
        method: str,
        values: t.MutableMapping[str, t.Any],
        query_args: t.Mapping[str, t.Any] | str,
    ) -> str | None:
        """A helper that returns the URL to redirect to if it finds one.
        This is used for default redirecting only.

        :internal:
        """
        assert self.map.redirect_defaults
        for r in self.map._rules_by_endpoint[rule.endpoint]:
            # every rule that comes after this one, including ourself
            # has a lower priority for the defaults. We order the ones
            # with the highest priority up for building.
            if r is rule:
                break
            if r.provides_defaults_for(rule) and r.suitable_for(values, method):
                values.update(r.defaults)  # type: ignore
                domain_part, path = r.build(values)  # type: ignore
                return self.make_redirect_url(path, query_args, domain_part=domain_part)
        return None

    def encode_query_args(self, query_args: t.Mapping[str, t.Any] | str) -> str:
        # Strings pass through unchanged; mappings are URL-encoded.
        if not isinstance(query_args, str):
            return _urlencode(query_args)
        return query_args

    def make_redirect_url(
        self,
        path_info: str,
        query_args: t.Mapping[str, t.Any] | str | None = None,
        domain_part: str | None = None,
    ) -> str:
        """Creates a redirect URL.

        :internal:
        """
        if query_args is None:
            query_args = self.query_args

        if query_args:
            query_str = self.encode_query_args(query_args)
        else:
            query_str = None

        scheme = self.url_scheme or "http"
        host = self.get_host(domain_part)
        path = "/".join((self.script_name.strip("/"), path_info.lstrip("/")))
        return urlunsplit((scheme, host, path, query_str, None))

    def make_alias_redirect_url(
        self,
        path: str,
        endpoint: t.Any,
        values: t.Mapping[str, t.Any],
        method: str,
        query_args: t.Mapping[str, t.Any] | str,
    ) -> str:
        """Internally called to make an alias redirect URL."""
        url = self.build(
            endpoint, values, method, append_unknown=False, force_external=True
        )
        if query_args:
            url += f"?{self.encode_query_args(query_args)}"
        assert url != path, "detected invalid alias setting. No canonical URL found"
        return url

    def _partial_build(
        self,
        endpoint: t.Any,
        values: t.Mapping[str, t.Any],
        method: str | None,
        append_unknown: bool,
    ) -> tuple[str, str, bool] | None:
        """Helper for :meth:`build`. Returns subdomain and path for the
        rule that accepts this endpoint, values and method.

        :internal:
        """
        # in case the method is none, try with the default method first
        if method is None:
            rv = self._partial_build(
                endpoint, values, self.default_method, append_unknown
            )
            if rv is not None:
                return rv

        # Default method did not match or a specific method is passed.
        # Check all for first match with matching host. If no matching
        # host is found, go with first result.
        first_match = None

        for rule in self.map._rules_by_endpoint.get(endpoint, ()):
            if rule.suitable_for(values, method):
                build_rv = rule.build(values, append_unknown)

                if build_rv is not None:
                    rv = (build_rv[0], build_rv[1], rule.websocket)
                    if self.map.host_matching:
                        if rv[0] == self.server_name:
                            return rv
                        elif first_match is None:
                            first_match = rv
                    else:
                        return rv

        return first_match

    def build(
        self,
        endpoint: t.Any,
        values: t.Mapping[str, t.Any] | None = None,
        method: str | None = None,
        force_external: bool = False,
        append_unknown: bool = True,
        url_scheme: str | None = None,
    ) -> str:
        """Building URLs works pretty much the other way round. Instead of
        `match` you call `build` and pass it the endpoint and a dict of
        arguments for the placeholders.

        The `build` function also accepts an argument called `force_external`
        which, if you set it to `True` will force external URLs. Per default
        external URLs (include the server name) will only be used if the
        target URL is on a different subdomain.

        >>> m = Map([
        ...     Rule('/', endpoint='index'),
        ...     Rule('/downloads/', endpoint='downloads/index'),
        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
        ... ])
        >>> urls = m.bind("example.com", "/")
        >>> urls.build("index", {})
        '/'
        >>> urls.build("downloads/show", {'id': 42})
        '/downloads/42'
        >>> urls.build("downloads/show", {'id': 42}, force_external=True)
        'http://example.com/downloads/42'

        Because URLs cannot contain non ASCII data you will always get
        bytes back. Non ASCII characters are urlencoded with the
        charset defined on the map instance.

        Additional values are converted to strings and appended to the URL as
        URL querystring parameters:

        >>> urls.build("index", {'q': 'My Searchstring'})
        '/?q=My+Searchstring'

        When processing those additional values, lists are furthermore
        interpreted as multiple values (as per
        :py:class:`werkzeug.datastructures.MultiDict`):

        >>> urls.build("index", {'q': ['a', 'b', 'c']})
        '/?q=a&q=b&q=c'

        Passing a ``MultiDict`` will also add multiple values:

        >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
        '/?p=z&q=a&q=b'

        If a rule does not exist when building a `BuildError` exception is
        raised.

        The build method accepts an argument called `method` which allows you
        to specify the method you want to have an URL built for if you have
        different methods for the same endpoint specified.

        :param endpoint: the endpoint of the URL to build.
        :param values: the values for the URL to build. Unhandled values are
                       appended to the URL as query parameters.
        :param method: the HTTP method for the rule if there are different
                       URLs for different methods on the same endpoint.
        :param force_external: enforce full canonical external URLs. If the URL
                               scheme is not provided, this will generate
                               a protocol-relative URL.
        :param append_unknown: unknown parameters are appended to the generated
                               URL as query string argument. Disable this
                               if you want the builder to ignore those.
        :param url_scheme: Scheme to use in place of the bound
                           :attr:`url_scheme`.

        .. versionchanged:: 2.0
            Added the ``url_scheme`` parameter.

        .. versionadded:: 0.6
            Added the ``append_unknown`` parameter.
        """
        self.map.update()

        if values:
            if isinstance(values, MultiDict):
                # Collapse single-item lists to scalars; drop empty keys.
                values = {
                    k: (v[0] if len(v) == 1 else v)
                    for k, v in dict.items(values)
                    if len(v) != 0
                }
            else:  # plain dict
                values = {k: v for k, v in values.items() if v is not None}
        else:
            values = {}

        rv = self._partial_build(endpoint, values, method, append_unknown)
        if rv is None:
            raise BuildError(endpoint, values, method, self)

        domain_part, path, websocket = rv
        host = self.get_host(domain_part)

        if url_scheme is None:
            url_scheme = self.url_scheme

        # Always build WebSocket routes with the scheme (browsers
        # require full URLs). If bound to a WebSocket, ensure that HTTP
        # routes are built with an HTTP scheme.
        secure = url_scheme in {"https", "wss"}

        if websocket:
            force_external = True
            url_scheme = "wss" if secure else "ws"
        elif url_scheme:
            url_scheme = "https" if secure else "http"

        # shortcut this.
        if not force_external and (
            (self.map.host_matching and host == self.server_name)
            or (not self.map.host_matching and domain_part == self.subdomain)
        ):
            return f"{self.script_name.rstrip('/')}/{path.lstrip('/')}"

        scheme = f"{url_scheme}:" if url_scheme else ""
        return f"{scheme}//{host}{self.script_name[:-1]}/{path.lstrip('/')}"
|
||||
202
lib/python3.11/site-packages/werkzeug/routing/matcher.py
Normal file
202
lib/python3.11/site-packages/werkzeug/routing/matcher.py
Normal file
@ -0,0 +1,202 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import typing as t
|
||||
from dataclasses import dataclass
|
||||
from dataclasses import field
|
||||
|
||||
from .converters import ValidationError
|
||||
from .exceptions import NoMatch
|
||||
from .exceptions import RequestAliasRedirect
|
||||
from .exceptions import RequestPath
|
||||
from .rules import Rule
|
||||
from .rules import RulePart
|
||||
|
||||
|
||||
class SlashRequired(Exception):
    """Internal control-flow exception used by the state machine matcher.

    NOTE(review): the raising site is in ``StateMachineMatcher.match``;
    the name suggests it signals that a trailing slash is required —
    confirm against that method.
    """

    pass
|
||||
|
||||
|
||||
@dataclass
class State:
    """A representation of a rule state.

    This includes the *rules* that correspond to the state and the
    possible *static* and *dynamic* transitions to the next state.
    """

    # Dynamic transitions as (rule part, next state) pairs; sorted by the
    # part's weight in StateMachineMatcher.update().
    dynamic: list[tuple[RulePart, State]] = field(default_factory=list)
    # Rules that terminate at this state.
    rules: list[Rule] = field(default_factory=list)
    # Static transitions keyed by the part's exact content string.
    static: dict[str, State] = field(default_factory=dict)
|
||||
|
||||
|
||||
class StateMachineMatcher:
|
||||
def __init__(self, merge_slashes: bool) -> None:
|
||||
self._root = State()
|
||||
self.merge_slashes = merge_slashes
|
||||
|
||||
def add(self, rule: Rule) -> None:
|
||||
state = self._root
|
||||
for part in rule._parts:
|
||||
if part.static:
|
||||
state.static.setdefault(part.content, State())
|
||||
state = state.static[part.content]
|
||||
else:
|
||||
for test_part, new_state in state.dynamic:
|
||||
if test_part == part:
|
||||
state = new_state
|
||||
break
|
||||
else:
|
||||
new_state = State()
|
||||
state.dynamic.append((part, new_state))
|
||||
state = new_state
|
||||
state.rules.append(rule)
|
||||
|
||||
def update(self) -> None:
|
||||
# For every state the dynamic transitions should be sorted by
|
||||
# the weight of the transition
|
||||
state = self._root
|
||||
|
||||
def _update_state(state: State) -> None:
|
||||
state.dynamic.sort(key=lambda entry: entry[0].weight)
|
||||
for new_state in state.static.values():
|
||||
_update_state(new_state)
|
||||
for _, new_state in state.dynamic:
|
||||
_update_state(new_state)
|
||||
|
||||
_update_state(state)
|
||||
|
||||
def match(
|
||||
self, domain: str, path: str, method: str, websocket: bool
|
||||
) -> tuple[Rule, t.MutableMapping[str, t.Any]]:
|
||||
# To match to a rule we need to start at the root state and
|
||||
# try to follow the transitions until we find a match, or find
|
||||
# there is no transition to follow.
|
||||
|
||||
have_match_for = set()
|
||||
websocket_mismatch = False
|
||||
|
||||
def _match(
|
||||
state: State, parts: list[str], values: list[str]
|
||||
) -> tuple[Rule, list[str]] | None:
|
||||
# This function is meant to be called recursively, and will attempt
|
||||
# to match the head part to the state's transitions.
|
||||
nonlocal have_match_for, websocket_mismatch
|
||||
|
||||
# The base case is when all parts have been matched via
|
||||
# transitions. Hence if there is a rule with methods &
|
||||
# websocket that work return it and the dynamic values
|
||||
# extracted.
|
||||
if parts == []:
|
||||
for rule in state.rules:
|
||||
if rule.methods is not None and method not in rule.methods:
|
||||
have_match_for.update(rule.methods)
|
||||
elif rule.websocket != websocket:
|
||||
websocket_mismatch = True
|
||||
else:
|
||||
return rule, values
|
||||
|
||||
# Test if there is a match with this path with a
|
||||
# trailing slash, if so raise an exception to report
|
||||
# that matching is possible with an additional slash
|
||||
if "" in state.static:
|
||||
for rule in state.static[""].rules:
|
||||
if websocket == rule.websocket and (
|
||||
rule.methods is None or method in rule.methods
|
||||
):
|
||||
if rule.strict_slashes:
|
||||
raise SlashRequired()
|
||||
else:
|
||||
return rule, values
|
||||
return None
|
||||
|
||||
part = parts[0]
|
||||
# To match this part try the static transitions first
|
||||
if part in state.static:
|
||||
rv = _match(state.static[part], parts[1:], values)
|
||||
if rv is not None:
|
||||
return rv
|
||||
# No match via the static transitions, so try the dynamic
|
||||
# ones.
|
||||
for test_part, new_state in state.dynamic:
|
||||
target = part
|
||||
remaining = parts[1:]
|
||||
# A final part indicates a transition that always
|
||||
# consumes the remaining parts i.e. transitions to a
|
||||
# final state.
|
||||
if test_part.final:
|
||||
target = "/".join(parts)
|
||||
remaining = []
|
||||
match = re.compile(test_part.content).match(target)
|
||||
if match is not None:
|
||||
if test_part.suffixed:
|
||||
# If a part_isolating=False part has a slash suffix, remove the
|
||||
# suffix from the match and check for the slash redirect next.
|
||||
suffix = match.groups()[-1]
|
||||
if suffix == "/":
|
||||
remaining = [""]
|
||||
|
||||
converter_groups = sorted(
|
||||
match.groupdict().items(), key=lambda entry: entry[0]
|
||||
)
|
||||
groups = [
|
||||
value
|
||||
for key, value in converter_groups
|
||||
if key[:11] == "__werkzeug_"
|
||||
]
|
||||
rv = _match(new_state, remaining, values + groups)
|
||||
if rv is not None:
|
||||
return rv
|
||||
|
||||
# If there is no match and the only part left is a
|
||||
# trailing slash ("") consider rules that aren't
|
||||
# strict-slashes as these should match if there is a final
|
||||
# slash part.
|
||||
if parts == [""]:
|
||||
for rule in state.rules:
|
||||
if rule.strict_slashes:
|
||||
continue
|
||||
if rule.methods is not None and method not in rule.methods:
|
||||
have_match_for.update(rule.methods)
|
||||
elif rule.websocket != websocket:
|
||||
websocket_mismatch = True
|
||||
else:
|
||||
return rule, values
|
||||
|
||||
return None
|
||||
|
||||
try:
|
||||
rv = _match(self._root, [domain, *path.split("/")], [])
|
||||
except SlashRequired:
|
||||
raise RequestPath(f"{path}/") from None
|
||||
|
||||
if self.merge_slashes and rv is None:
|
||||
# Try to match again, but with slashes merged
|
||||
path = re.sub("/{2,}?", "/", path)
|
||||
try:
|
||||
rv = _match(self._root, [domain, *path.split("/")], [])
|
||||
except SlashRequired:
|
||||
raise RequestPath(f"{path}/") from None
|
||||
if rv is None or rv[0].merge_slashes is False:
|
||||
raise NoMatch(have_match_for, websocket_mismatch)
|
||||
else:
|
||||
raise RequestPath(f"{path}")
|
||||
elif rv is not None:
|
||||
rule, values = rv
|
||||
|
||||
result = {}
|
||||
for name, value in zip(rule._converters.keys(), values):
|
||||
try:
|
||||
value = rule._converters[name].to_python(value)
|
||||
except ValidationError:
|
||||
raise NoMatch(have_match_for, websocket_mismatch) from None
|
||||
result[str(name)] = value
|
||||
if rule.defaults:
|
||||
result.update(rule.defaults)
|
||||
|
||||
if rule.alias and rule.map.redirect_defaults:
|
||||
raise RequestAliasRedirect(result, rule.endpoint)
|
||||
|
||||
return rule, result
|
||||
|
||||
raise NoMatch(have_match_for, websocket_mismatch)
|
||||
928
lib/python3.11/site-packages/werkzeug/routing/rules.py
Normal file
928
lib/python3.11/site-packages/werkzeug/routing/rules.py
Normal file
@ -0,0 +1,928 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
import re
|
||||
import typing as t
|
||||
from dataclasses import dataclass
|
||||
from string import Template
|
||||
from types import CodeType
|
||||
from urllib.parse import quote
|
||||
|
||||
from ..datastructures import iter_multi_items
|
||||
from ..urls import _urlencode
|
||||
from .converters import ValidationError
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from .converters import BaseConverter
|
||||
from .map import Map
|
||||
|
||||
|
||||
class Weighting(t.NamedTuple):
|
||||
number_static_weights: int
|
||||
static_weights: list[tuple[int, int]]
|
||||
number_argument_weights: int
|
||||
argument_weights: list[int]
|
||||
|
||||
|
||||
@dataclass
|
||||
class RulePart:
|
||||
"""A part of a rule.
|
||||
|
||||
Rules can be represented by parts as delimited by `/` with
|
||||
instances of this class representing those parts. The *content* is
|
||||
either the raw content if *static* or a regex string to match
|
||||
against. The *weight* can be used to order parts when matching.
|
||||
|
||||
"""
|
||||
|
||||
content: str
|
||||
final: bool
|
||||
static: bool
|
||||
suffixed: bool
|
||||
weight: Weighting
|
||||
|
||||
|
||||
_part_re = re.compile(
|
||||
r"""
|
||||
(?:
|
||||
(?P<slash>/) # a slash
|
||||
|
|
||||
(?P<static>[^</]+) # static rule data
|
||||
|
|
||||
(?:
|
||||
<
|
||||
(?:
|
||||
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
|
||||
(?:\((?P<arguments>.*?)\))? # converter arguments
|
||||
: # variable delimiter
|
||||
)?
|
||||
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
|
||||
>
|
||||
)
|
||||
)
|
||||
""",
|
||||
re.VERBOSE,
|
||||
)
|
||||
|
||||
_simple_rule_re = re.compile(r"<([^>]+)>")
|
||||
_converter_args_re = re.compile(
|
||||
r"""
|
||||
\s*
|
||||
((?P<name>\w+)\s*=\s*)?
|
||||
(?P<value>
|
||||
True|False|
|
||||
\d+.\d+|
|
||||
\d+.|
|
||||
\d+|
|
||||
[\w\d_.]+|
|
||||
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
|
||||
)\s*,
|
||||
""",
|
||||
re.VERBOSE,
|
||||
)
|
||||
|
||||
|
||||
_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
|
||||
|
||||
|
||||
def _find(value: str, target: str, pos: int) -> int:
|
||||
"""Find the *target* in *value* after *pos*.
|
||||
|
||||
Returns the *value* length if *target* isn't found.
|
||||
"""
|
||||
try:
|
||||
return value.index(target, pos)
|
||||
except ValueError:
|
||||
return len(value)
|
||||
|
||||
|
||||
def _pythonize(value: str) -> None | bool | int | float | str:
|
||||
if value in _PYTHON_CONSTANTS:
|
||||
return _PYTHON_CONSTANTS[value]
|
||||
for convert in int, float:
|
||||
try:
|
||||
return convert(value)
|
||||
except ValueError:
|
||||
pass
|
||||
if value[:1] == value[-1:] and value[0] in "\"'":
|
||||
value = value[1:-1]
|
||||
return str(value)
|
||||
|
||||
|
||||
def parse_converter_args(argstr: str) -> tuple[tuple[t.Any, ...], dict[str, t.Any]]:
|
||||
argstr += ","
|
||||
args = []
|
||||
kwargs = {}
|
||||
position = 0
|
||||
|
||||
for item in _converter_args_re.finditer(argstr):
|
||||
if item.start() != position:
|
||||
raise ValueError(
|
||||
f"Cannot parse converter argument '{argstr[position:item.start()]}'"
|
||||
)
|
||||
|
||||
value = item.group("stringval")
|
||||
if value is None:
|
||||
value = item.group("value")
|
||||
value = _pythonize(value)
|
||||
if not item.group("name"):
|
||||
args.append(value)
|
||||
else:
|
||||
name = item.group("name")
|
||||
kwargs[name] = value
|
||||
position = item.end()
|
||||
|
||||
return tuple(args), kwargs
|
||||
|
||||
|
||||
class RuleFactory:
|
||||
"""As soon as you have more complex URL setups it's a good idea to use rule
|
||||
factories to avoid repetitive tasks. Some of them are builtin, others can
|
||||
be added by subclassing `RuleFactory` and overriding `get_rules`.
|
||||
"""
|
||||
|
||||
def get_rules(self, map: Map) -> t.Iterable[Rule]:
|
||||
"""Subclasses of `RuleFactory` have to override this method and return
|
||||
an iterable of rules."""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class Subdomain(RuleFactory):
|
||||
"""All URLs provided by this factory have the subdomain set to a
|
||||
specific domain. For example if you want to use the subdomain for
|
||||
the current language this can be a good setup::
|
||||
|
||||
url_map = Map([
|
||||
Rule('/', endpoint='#select_language'),
|
||||
Subdomain('<string(length=2):lang_code>', [
|
||||
Rule('/', endpoint='index'),
|
||||
Rule('/about', endpoint='about'),
|
||||
Rule('/help', endpoint='help')
|
||||
])
|
||||
])
|
||||
|
||||
All the rules except for the ``'#select_language'`` endpoint will now
|
||||
listen on a two letter long subdomain that holds the language code
|
||||
for the current request.
|
||||
"""
|
||||
|
||||
def __init__(self, subdomain: str, rules: t.Iterable[RuleFactory]) -> None:
|
||||
self.subdomain = subdomain
|
||||
self.rules = rules
|
||||
|
||||
def get_rules(self, map: Map) -> t.Iterator[Rule]:
|
||||
for rulefactory in self.rules:
|
||||
for rule in rulefactory.get_rules(map):
|
||||
rule = rule.empty()
|
||||
rule.subdomain = self.subdomain
|
||||
yield rule
|
||||
|
||||
|
||||
class Submount(RuleFactory):
|
||||
"""Like `Subdomain` but prefixes the URL rule with a given string::
|
||||
|
||||
url_map = Map([
|
||||
Rule('/', endpoint='index'),
|
||||
Submount('/blog', [
|
||||
Rule('/', endpoint='blog/index'),
|
||||
Rule('/entry/<entry_slug>', endpoint='blog/show')
|
||||
])
|
||||
])
|
||||
|
||||
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
|
||||
"""
|
||||
|
||||
def __init__(self, path: str, rules: t.Iterable[RuleFactory]) -> None:
|
||||
self.path = path.rstrip("/")
|
||||
self.rules = rules
|
||||
|
||||
def get_rules(self, map: Map) -> t.Iterator[Rule]:
|
||||
for rulefactory in self.rules:
|
||||
for rule in rulefactory.get_rules(map):
|
||||
rule = rule.empty()
|
||||
rule.rule = self.path + rule.rule
|
||||
yield rule
|
||||
|
||||
|
||||
class EndpointPrefix(RuleFactory):
|
||||
"""Prefixes all endpoints (which must be strings for this factory) with
|
||||
another string. This can be useful for sub applications::
|
||||
|
||||
url_map = Map([
|
||||
Rule('/', endpoint='index'),
|
||||
EndpointPrefix('blog/', [Submount('/blog', [
|
||||
Rule('/', endpoint='index'),
|
||||
Rule('/entry/<entry_slug>', endpoint='show')
|
||||
])])
|
||||
])
|
||||
"""
|
||||
|
||||
def __init__(self, prefix: str, rules: t.Iterable[RuleFactory]) -> None:
|
||||
self.prefix = prefix
|
||||
self.rules = rules
|
||||
|
||||
def get_rules(self, map: Map) -> t.Iterator[Rule]:
|
||||
for rulefactory in self.rules:
|
||||
for rule in rulefactory.get_rules(map):
|
||||
rule = rule.empty()
|
||||
rule.endpoint = self.prefix + rule.endpoint
|
||||
yield rule
|
||||
|
||||
|
||||
class RuleTemplate:
|
||||
"""Returns copies of the rules wrapped and expands string templates in
|
||||
the endpoint, rule, defaults or subdomain sections.
|
||||
|
||||
Here a small example for such a rule template::
|
||||
|
||||
from werkzeug.routing import Map, Rule, RuleTemplate
|
||||
|
||||
resource = RuleTemplate([
|
||||
Rule('/$name/', endpoint='$name.list'),
|
||||
Rule('/$name/<int:id>', endpoint='$name.show')
|
||||
])
|
||||
|
||||
url_map = Map([resource(name='user'), resource(name='page')])
|
||||
|
||||
When a rule template is called the keyword arguments are used to
|
||||
replace the placeholders in all the string parameters.
|
||||
"""
|
||||
|
||||
def __init__(self, rules: t.Iterable[Rule]) -> None:
|
||||
self.rules = list(rules)
|
||||
|
||||
def __call__(self, *args: t.Any, **kwargs: t.Any) -> RuleTemplateFactory:
|
||||
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
|
||||
|
||||
|
||||
class RuleTemplateFactory(RuleFactory):
|
||||
"""A factory that fills in template variables into rules. Used by
|
||||
`RuleTemplate` internally.
|
||||
|
||||
:internal:
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, rules: t.Iterable[RuleFactory], context: dict[str, t.Any]
|
||||
) -> None:
|
||||
self.rules = rules
|
||||
self.context = context
|
||||
|
||||
def get_rules(self, map: Map) -> t.Iterator[Rule]:
|
||||
for rulefactory in self.rules:
|
||||
for rule in rulefactory.get_rules(map):
|
||||
new_defaults = subdomain = None
|
||||
if rule.defaults:
|
||||
new_defaults = {}
|
||||
for key, value in rule.defaults.items():
|
||||
if isinstance(value, str):
|
||||
value = Template(value).substitute(self.context)
|
||||
new_defaults[key] = value
|
||||
if rule.subdomain is not None:
|
||||
subdomain = Template(rule.subdomain).substitute(self.context)
|
||||
new_endpoint = rule.endpoint
|
||||
if isinstance(new_endpoint, str):
|
||||
new_endpoint = Template(new_endpoint).substitute(self.context)
|
||||
yield Rule(
|
||||
Template(rule.rule).substitute(self.context),
|
||||
new_defaults,
|
||||
subdomain,
|
||||
rule.methods,
|
||||
rule.build_only,
|
||||
new_endpoint,
|
||||
rule.strict_slashes,
|
||||
)
|
||||
|
||||
|
||||
_ASTT = t.TypeVar("_ASTT", bound=ast.AST)
|
||||
|
||||
|
||||
def _prefix_names(src: str, expected_type: type[_ASTT]) -> _ASTT:
|
||||
"""ast parse and prefix names with `.` to avoid collision with user vars"""
|
||||
tree: ast.AST = ast.parse(src).body[0]
|
||||
if isinstance(tree, ast.Expr):
|
||||
tree = tree.value
|
||||
if not isinstance(tree, expected_type):
|
||||
raise TypeError(
|
||||
f"AST node is of type {type(tree).__name__}, not {expected_type.__name__}"
|
||||
)
|
||||
for node in ast.walk(tree):
|
||||
if isinstance(node, ast.Name):
|
||||
node.id = f".{node.id}"
|
||||
return tree
|
||||
|
||||
|
||||
_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
|
||||
_IF_KWARGS_URL_ENCODE_CODE = """\
|
||||
if kwargs:
|
||||
params = self._encode_query_vars(kwargs)
|
||||
q = "?" if params else ""
|
||||
else:
|
||||
q = params = ""
|
||||
"""
|
||||
_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE, ast.If)
|
||||
_URL_ENCODE_AST_NAMES = (
|
||||
_prefix_names("q", ast.Name),
|
||||
_prefix_names("params", ast.Name),
|
||||
)
|
||||
|
||||
|
||||
class Rule(RuleFactory):
|
||||
"""A Rule represents one URL pattern. There are some options for `Rule`
|
||||
that change the way it behaves and are passed to the `Rule` constructor.
|
||||
Note that besides the rule-string all arguments *must* be keyword arguments
|
||||
in order to not break the application on Werkzeug upgrades.
|
||||
|
||||
`string`
|
||||
Rule strings basically are just normal URL paths with placeholders in
|
||||
the format ``<converter(arguments):name>`` where the converter and the
|
||||
arguments are optional. If no converter is defined the `default`
|
||||
converter is used which means `string` in the normal configuration.
|
||||
|
||||
URL rules that end with a slash are branch URLs, others are leaves.
|
||||
If you have `strict_slashes` enabled (which is the default), all
|
||||
branch URLs that are matched without a trailing slash will trigger a
|
||||
redirect to the same URL with the missing slash appended.
|
||||
|
||||
The converters are defined on the `Map`.
|
||||
|
||||
`endpoint`
|
||||
The endpoint for this rule. This can be anything. A reference to a
|
||||
function, a string, a number etc. The preferred way is using a string
|
||||
because the endpoint is used for URL generation.
|
||||
|
||||
`defaults`
|
||||
An optional dict with defaults for other rules with the same endpoint.
|
||||
This is a bit tricky but useful if you want to have unique URLs::
|
||||
|
||||
url_map = Map([
|
||||
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
|
||||
Rule('/all/page/<int:page>', endpoint='all_entries')
|
||||
])
|
||||
|
||||
If a user now visits ``http://example.com/all/page/1`` they will be
|
||||
redirected to ``http://example.com/all/``. If `redirect_defaults` is
|
||||
disabled on the `Map` instance this will only affect the URL
|
||||
generation.
|
||||
|
||||
`subdomain`
|
||||
The subdomain rule string for this rule. If not specified the rule
|
||||
only matches for the `default_subdomain` of the map. If the map is
|
||||
not bound to a subdomain this feature is disabled.
|
||||
|
||||
Can be useful if you want to have user profiles on different subdomains
|
||||
and all subdomains are forwarded to your application::
|
||||
|
||||
url_map = Map([
|
||||
Rule('/', subdomain='<username>', endpoint='user/homepage'),
|
||||
Rule('/stats', subdomain='<username>', endpoint='user/stats')
|
||||
])
|
||||
|
||||
`methods`
|
||||
A sequence of http methods this rule applies to. If not specified, all
|
||||
methods are allowed. For example this can be useful if you want different
|
||||
endpoints for `POST` and `GET`. If methods are defined and the path
|
||||
matches but the method matched against is not in this list or in the
|
||||
list of another rule for that path the error raised is of the type
|
||||
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
|
||||
list of methods and `HEAD` is not, `HEAD` is added automatically.
|
||||
|
||||
`strict_slashes`
|
||||
Override the `Map` setting for `strict_slashes` only for this rule. If
|
||||
not specified the `Map` setting is used.
|
||||
|
||||
`merge_slashes`
|
||||
Override :attr:`Map.merge_slashes` for this rule.
|
||||
|
||||
`build_only`
|
||||
Set this to True and the rule will never match but will create a URL
|
||||
that can be build. This is useful if you have resources on a subdomain
|
||||
or folder that are not handled by the WSGI application (like static data)
|
||||
|
||||
`redirect_to`
|
||||
If given this must be either a string or callable. In case of a
|
||||
callable it's called with the url adapter that triggered the match and
|
||||
the values of the URL as keyword arguments and has to return the target
|
||||
for the redirect, otherwise it has to be a string with placeholders in
|
||||
rule syntax::
|
||||
|
||||
def foo_with_slug(adapter, id):
|
||||
# ask the database for the slug for the old id. this of
|
||||
# course has nothing to do with werkzeug.
|
||||
return f'foo/{Foo.get_slug_for_id(id)}'
|
||||
|
||||
url_map = Map([
|
||||
Rule('/foo/<slug>', endpoint='foo'),
|
||||
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
|
||||
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
|
||||
])
|
||||
|
||||
When the rule is matched the routing system will raise a
|
||||
`RequestRedirect` exception with the target for the redirect.
|
||||
|
||||
Keep in mind that the URL will be joined against the URL root of the
|
||||
script so don't use a leading slash on the target URL unless you
|
||||
really mean root of that domain.
|
||||
|
||||
`alias`
|
||||
If enabled this rule serves as an alias for another rule with the same
|
||||
endpoint and arguments.
|
||||
|
||||
`host`
|
||||
If provided and the URL map has host matching enabled this can be
|
||||
used to provide a match rule for the whole host. This also means
|
||||
that the subdomain feature is disabled.
|
||||
|
||||
`websocket`
|
||||
If ``True``, this rule is only matches for WebSocket (``ws://``,
|
||||
``wss://``) requests. By default, rules will only match for HTTP
|
||||
requests.
|
||||
|
||||
.. versionchanged:: 2.1
|
||||
Percent-encoded newlines (``%0a``), which are decoded by WSGI
|
||||
servers, are considered when routing instead of terminating the
|
||||
match early.
|
||||
|
||||
.. versionadded:: 1.0
|
||||
Added ``websocket``.
|
||||
|
||||
.. versionadded:: 1.0
|
||||
Added ``merge_slashes``.
|
||||
|
||||
.. versionadded:: 0.7
|
||||
Added ``alias`` and ``host``.
|
||||
|
||||
.. versionchanged:: 0.6.1
|
||||
``HEAD`` is added to ``methods`` if ``GET`` is present.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
string: str,
|
||||
defaults: t.Mapping[str, t.Any] | None = None,
|
||||
subdomain: str | None = None,
|
||||
methods: t.Iterable[str] | None = None,
|
||||
build_only: bool = False,
|
||||
endpoint: t.Any | None = None,
|
||||
strict_slashes: bool | None = None,
|
||||
merge_slashes: bool | None = None,
|
||||
redirect_to: str | t.Callable[..., str] | None = None,
|
||||
alias: bool = False,
|
||||
host: str | None = None,
|
||||
websocket: bool = False,
|
||||
) -> None:
|
||||
if not string.startswith("/"):
|
||||
raise ValueError(f"URL rule '{string}' must start with a slash.")
|
||||
|
||||
self.rule = string
|
||||
self.is_leaf = not string.endswith("/")
|
||||
self.is_branch = string.endswith("/")
|
||||
|
||||
self.map: Map = None # type: ignore
|
||||
self.strict_slashes = strict_slashes
|
||||
self.merge_slashes = merge_slashes
|
||||
self.subdomain = subdomain
|
||||
self.host = host
|
||||
self.defaults = defaults
|
||||
self.build_only = build_only
|
||||
self.alias = alias
|
||||
self.websocket = websocket
|
||||
|
||||
if methods is not None:
|
||||
if isinstance(methods, str):
|
||||
raise TypeError("'methods' should be a list of strings.")
|
||||
|
||||
methods = {x.upper() for x in methods}
|
||||
|
||||
if "HEAD" not in methods and "GET" in methods:
|
||||
methods.add("HEAD")
|
||||
|
||||
if websocket and methods - {"GET", "HEAD", "OPTIONS"}:
|
||||
raise ValueError(
|
||||
"WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods."
|
||||
)
|
||||
|
||||
self.methods = methods
|
||||
self.endpoint: t.Any = endpoint
|
||||
self.redirect_to = redirect_to
|
||||
|
||||
if defaults:
|
||||
self.arguments = set(map(str, defaults))
|
||||
else:
|
||||
self.arguments = set()
|
||||
|
||||
self._converters: dict[str, BaseConverter] = {}
|
||||
self._trace: list[tuple[bool, str]] = []
|
||||
self._parts: list[RulePart] = []
|
||||
|
||||
def empty(self) -> Rule:
|
||||
"""
|
||||
Return an unbound copy of this rule.
|
||||
|
||||
This can be useful if want to reuse an already bound URL for another
|
||||
map. See ``get_empty_kwargs`` to override what keyword arguments are
|
||||
provided to the new copy.
|
||||
"""
|
||||
return type(self)(self.rule, **self.get_empty_kwargs())
|
||||
|
||||
def get_empty_kwargs(self) -> t.Mapping[str, t.Any]:
|
||||
"""
|
||||
Provides kwargs for instantiating empty copy with empty()
|
||||
|
||||
Use this method to provide custom keyword arguments to the subclass of
|
||||
``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
|
||||
has custom keyword arguments that are needed at instantiation.
|
||||
|
||||
Must return a ``dict`` that will be provided as kwargs to the new
|
||||
instance of ``Rule``, following the initial ``self.rule`` value which
|
||||
is always provided as the first, required positional argument.
|
||||
"""
|
||||
defaults = None
|
||||
if self.defaults:
|
||||
defaults = dict(self.defaults)
|
||||
return dict(
|
||||
defaults=defaults,
|
||||
subdomain=self.subdomain,
|
||||
methods=self.methods,
|
||||
build_only=self.build_only,
|
||||
endpoint=self.endpoint,
|
||||
strict_slashes=self.strict_slashes,
|
||||
redirect_to=self.redirect_to,
|
||||
alias=self.alias,
|
||||
host=self.host,
|
||||
)
|
||||
|
||||
def get_rules(self, map: Map) -> t.Iterator[Rule]:
|
||||
yield self
|
||||
|
||||
def refresh(self) -> None:
|
||||
"""Rebinds and refreshes the URL. Call this if you modified the
|
||||
rule in place.
|
||||
|
||||
:internal:
|
||||
"""
|
||||
self.bind(self.map, rebind=True)
|
||||
|
||||
def bind(self, map: Map, rebind: bool = False) -> None:
|
||||
"""Bind the url to a map and create a regular expression based on
|
||||
the information from the rule itself and the defaults from the map.
|
||||
|
||||
:internal:
|
||||
"""
|
||||
if self.map is not None and not rebind:
|
||||
raise RuntimeError(f"url rule {self!r} already bound to map {self.map!r}")
|
||||
self.map = map
|
||||
if self.strict_slashes is None:
|
||||
self.strict_slashes = map.strict_slashes
|
||||
if self.merge_slashes is None:
|
||||
self.merge_slashes = map.merge_slashes
|
||||
if self.subdomain is None:
|
||||
self.subdomain = map.default_subdomain
|
||||
self.compile()
|
||||
|
||||
def get_converter(
|
||||
self,
|
||||
variable_name: str,
|
||||
converter_name: str,
|
||||
args: tuple[t.Any, ...],
|
||||
kwargs: t.Mapping[str, t.Any],
|
||||
) -> BaseConverter:
|
||||
"""Looks up the converter for the given parameter.
|
||||
|
||||
.. versionadded:: 0.9
|
||||
"""
|
||||
if converter_name not in self.map.converters:
|
||||
raise LookupError(f"the converter {converter_name!r} does not exist")
|
||||
return self.map.converters[converter_name](self.map, *args, **kwargs)
|
||||
|
||||
def _encode_query_vars(self, query_vars: t.Mapping[str, t.Any]) -> str:
|
||||
items: t.Iterable[tuple[str, str]] = iter_multi_items(query_vars)
|
||||
|
||||
if self.map.sort_parameters:
|
||||
items = sorted(items, key=self.map.sort_key)
|
||||
|
||||
return _urlencode(items)
|
||||
|
||||
def _parse_rule(self, rule: str) -> t.Iterable[RulePart]:
|
||||
content = ""
|
||||
static = True
|
||||
argument_weights = []
|
||||
static_weights: list[tuple[int, int]] = []
|
||||
final = False
|
||||
convertor_number = 0
|
||||
|
||||
pos = 0
|
||||
while pos < len(rule):
|
||||
match = _part_re.match(rule, pos)
|
||||
if match is None:
|
||||
raise ValueError(f"malformed url rule: {rule!r}")
|
||||
|
||||
data = match.groupdict()
|
||||
if data["static"] is not None:
|
||||
static_weights.append((len(static_weights), -len(data["static"])))
|
||||
self._trace.append((False, data["static"]))
|
||||
content += data["static"] if static else re.escape(data["static"])
|
||||
|
||||
if data["variable"] is not None:
|
||||
if static:
|
||||
# Switching content to represent regex, hence the need to escape
|
||||
content = re.escape(content)
|
||||
static = False
|
||||
c_args, c_kwargs = parse_converter_args(data["arguments"] or "")
|
||||
convobj = self.get_converter(
|
||||
data["variable"], data["converter"] or "default", c_args, c_kwargs
|
||||
)
|
||||
self._converters[data["variable"]] = convobj
|
||||
self.arguments.add(data["variable"])
|
||||
if not convobj.part_isolating:
|
||||
final = True
|
||||
content += f"(?P<__werkzeug_{convertor_number}>{convobj.regex})"
|
||||
convertor_number += 1
|
||||
argument_weights.append(convobj.weight)
|
||||
self._trace.append((True, data["variable"]))
|
||||
|
||||
if data["slash"] is not None:
|
||||
self._trace.append((False, "/"))
|
||||
if final:
|
||||
content += "/"
|
||||
else:
|
||||
if not static:
|
||||
content += r"\Z"
|
||||
weight = Weighting(
|
||||
-len(static_weights),
|
||||
static_weights,
|
||||
-len(argument_weights),
|
||||
argument_weights,
|
||||
)
|
||||
yield RulePart(
|
||||
content=content,
|
||||
final=final,
|
||||
static=static,
|
||||
suffixed=False,
|
||||
weight=weight,
|
||||
)
|
||||
content = ""
|
||||
static = True
|
||||
argument_weights = []
|
||||
static_weights = []
|
||||
final = False
|
||||
convertor_number = 0
|
||||
|
||||
pos = match.end()
|
||||
|
||||
suffixed = False
|
||||
if final and content[-1] == "/":
|
||||
# If a converter is part_isolating=False (matches slashes) and ends with a
|
||||
# slash, augment the regex to support slash redirects.
|
||||
suffixed = True
|
||||
content = content[:-1] + "(?<!/)(/?)"
|
||||
if not static:
|
||||
content += r"\Z"
|
||||
weight = Weighting(
|
||||
-len(static_weights),
|
||||
static_weights,
|
||||
-len(argument_weights),
|
||||
argument_weights,
|
||||
)
|
||||
yield RulePart(
|
||||
content=content,
|
||||
final=final,
|
||||
static=static,
|
||||
suffixed=suffixed,
|
||||
weight=weight,
|
||||
)
|
||||
if suffixed:
|
||||
yield RulePart(
|
||||
content="", final=False, static=True, suffixed=False, weight=weight
|
||||
)
|
||||
|
||||
def compile(self) -> None:
|
||||
"""Compiles the regular expression and stores it."""
|
||||
assert self.map is not None, "rule not bound"
|
||||
|
||||
if self.map.host_matching:
|
||||
domain_rule = self.host or ""
|
||||
else:
|
||||
domain_rule = self.subdomain or ""
|
||||
self._parts = []
|
||||
self._trace = []
|
||||
self._converters = {}
|
||||
if domain_rule == "":
|
||||
self._parts = [
|
||||
RulePart(
|
||||
content="",
|
||||
final=False,
|
||||
static=True,
|
||||
suffixed=False,
|
||||
weight=Weighting(0, [], 0, []),
|
||||
)
|
||||
]
|
||||
else:
|
||||
self._parts.extend(self._parse_rule(domain_rule))
|
||||
self._trace.append((False, "|"))
|
||||
rule = self.rule
|
||||
if self.merge_slashes:
|
||||
rule = re.sub("/{2,}?", "/", self.rule)
|
||||
self._parts.extend(self._parse_rule(rule))
|
||||
|
||||
self._build: t.Callable[..., tuple[str, str]]
|
||||
self._build = self._compile_builder(False).__get__(self, None)
|
||||
self._build_unknown: t.Callable[..., tuple[str, str]]
|
||||
self._build_unknown = self._compile_builder(True).__get__(self, None)
|
||||
|
||||
@staticmethod
|
||||
def _get_func_code(code: CodeType, name: str) -> t.Callable[..., tuple[str, str]]:
|
||||
globs: dict[str, t.Any] = {}
|
||||
locs: dict[str, t.Any] = {}
|
||||
exec(code, globs, locs)
|
||||
return locs[name] # type: ignore
|
||||
|
||||
def _compile_builder(
    self, append_unknown: bool = True
) -> t.Callable[..., tuple[str, str]]:
    """Generate and compile a specialized builder function for this rule.

    The returned callable produces a ``(domain, path)`` tuple. It is
    generated as an :mod:`ast` tree — static rule segments become string
    constants (folded together where adjacent) and dynamic segments
    become converter ``to_url`` calls — then compiled with ``compile``.

    :param append_unknown: when True, code is appended (via the
        module-level ``_IF_KWARGS_URL_ENCODE_AST`` /
        ``_URL_ENCODE_AST_NAMES`` templates, defined elsewhere) that
        presumably serializes extra keyword arguments into the URL —
        TODO confirm against those templates.
    """
    defaults = self.defaults or {}
    # Ops are (is_dynamic, text) pairs, split into domain ops and URL
    # ops at the "|" separator recorded in ``self._trace``.
    dom_ops: list[tuple[bool, str]] = []
    url_ops: list[tuple[bool, str]] = []

    opl = dom_ops
    for is_dynamic, data in self._trace:
        if data == "|" and opl is dom_ops:
            opl = url_ops
            continue
        # this seems like a silly case to ever come up but:
        # if a default is given for a value that appears in the rule,
        # resolve it to a constant ahead of time
        if is_dynamic and data in defaults:
            data = self._converters[data].to_url(defaults[data])
            opl.append((False, data))
        elif not is_dynamic:
            # safe = https://url.spec.whatwg.org/#url-path-segment-string
            opl.append((False, quote(data, safe="!$&'()*+,/:;=@")))
        else:
            opl.append((True, data))

    def _convert(elem: str) -> ast.Call:
        # Build an AST call of the converter for *elem*, taking the
        # like-named function argument as its input.
        ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem), ast.Call)
        ret.args = [ast.Name(elem, ast.Load())]
        return ret

    def _parts(ops: list[tuple[bool, str]]) -> list[ast.expr]:
        # Turn ops into AST expressions: constants for static text,
        # converter calls for dynamic segments.
        parts: list[ast.expr] = [
            _convert(elem) if is_dynamic else ast.Constant(elem)
            for is_dynamic, elem in ops
        ]
        parts = parts or [ast.Constant("")]
        # constant fold
        ret = [parts[0]]
        for p in parts[1:]:
            if isinstance(p, ast.Constant) and isinstance(ret[-1], ast.Constant):
                ret[-1] = ast.Constant(ret[-1].value + p.value)
            else:
                ret.append(p)
        return ret

    dom_parts = _parts(dom_ops)
    url_parts = _parts(url_ops)
    body: list[ast.stmt]
    if not append_unknown:
        body = []
    else:
        body = [_IF_KWARGS_URL_ENCODE_AST]
        url_parts.extend(_URL_ENCODE_AST_NAMES)

    def _join(parts: list[ast.expr]) -> ast.expr:
        # A single expression needs no f-string wrapper.
        if len(parts) == 1:  # shortcut
            return parts[0]
        return ast.JoinedStr(parts)

    body.append(
        ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load()))
    )

    # Positional parameters: every dynamic segment without a default.
    pargs = [
        elem
        for is_dynamic, elem in dom_ops + url_ops
        if is_dynamic and elem not in defaults
    ]
    kargs = [str(k) for k in defaults]

    # Start from a parsed stub so all FunctionDef fields exist, then
    # rewrite its name and signature. The dotted names (".self",
    # ".kwargs") cannot collide with real rule arguments.
    func_ast = _prefix_names("def _(): pass", ast.FunctionDef)
    func_ast.name = f"<builder:{self.rule!r}>"
    func_ast.args.args.append(ast.arg(".self", None))
    for arg in pargs + kargs:
        func_ast.args.args.append(ast.arg(arg, None))
    func_ast.args.kwarg = ast.arg(".kwargs", None)
    for _ in kargs:
        func_ast.args.defaults.append(ast.Constant(""))
    func_ast.body = body

    # Use `ast.parse` instead of `ast.Module` for better portability, since the
    # signature of `ast.Module` can change.
    module = ast.parse("")
    module.body = [func_ast]

    # mark everything as on line 1, offset 0
    # less error-prone than `ast.fix_missing_locations`
    # bad line numbers cause an assert to fail in debug builds
    for node in ast.walk(module):
        if "lineno" in node._attributes:
            node.lineno = 1  # type: ignore[attr-defined]
        if "end_lineno" in node._attributes:
            node.end_lineno = node.lineno  # type: ignore[attr-defined]
        if "col_offset" in node._attributes:
            node.col_offset = 0  # type: ignore[attr-defined]
        if "end_col_offset" in node._attributes:
            node.end_col_offset = node.col_offset  # type: ignore[attr-defined]

    code = compile(module, "<werkzeug routing>", "exec")
    return self._get_func_code(code, func_ast.name)
|
||||
|
||||
def build(
    self, values: t.Mapping[str, t.Any], append_unknown: bool = True
) -> tuple[str, str] | None:
    """Assemble the ``(subdomain, path)`` pair for this rule from
    *values*, or ``None`` when the values cannot be converted.

    :internal:
    """
    builder = self._build_unknown if append_unknown else self._build

    try:
        return builder(**values)
    except ValidationError:
        return None
|
||||
|
||||
def provides_defaults_for(self, rule: Rule) -> bool:
    """Return whether this rule can provide defaults for *rule*: it must
    be buildable, carry defaults, target the same endpoint with the
    same arguments, and not be *rule* itself.

    :internal:
    """
    if self.build_only or not self.defaults:
        return False

    if self == rule or self.endpoint != rule.endpoint:
        return False

    return self.arguments == rule.arguments
|
||||
|
||||
def suitable_for(
    self, values: t.Mapping[str, t.Any], method: str | None = None
) -> bool:
    """Return whether *values* carries enough data to build a URL for
    this rule, optionally restricted to *method*.

    :internal:
    """
    # An explicitly requested method must be one this rule answers to.
    if (
        method is not None
        and self.methods is not None
        and method not in self.methods
    ):
        return False

    defaults = self.defaults or ()

    # Every argument of the rule must be supplied by the caller or by
    # a default, otherwise the URL cannot be built.
    if any(key not in defaults and key not in values for key in self.arguments):
        return False

    # A caller-supplied value that shadows a default must match it
    # exactly, or this rule is not the right one to build from.
    if defaults:
        conflicts = (
            key in values and default != values[key]
            for key, default in defaults.items()
        )
        if any(conflicts):
            return False

    return True
|
||||
|
||||
def build_compare_key(self) -> tuple[int, int, int]:
    """Sort key used when choosing among rules for building: aliases
    sort after non-aliases, and rules with more arguments or more
    defaults sort earlier (negated lengths).

    :internal:
    """
    alias_rank = 1 if self.alias else 0
    argument_rank = -len(self.arguments)
    default_rank = -len(self.defaults or ())
    return (alias_rank, argument_rank, default_rank)
|
||||
|
||||
def __eq__(self, other: object) -> bool:
    """Rules compare equal when they are the same type and were parsed
    into an identical trace."""
    if not isinstance(other, type(self)):
        return False

    return self._trace == other._trace


# Rules define equality but are mutable, so they are explicitly
# unhashable.
__hash__ = None  # type: ignore
|
||||
|
||||
def __str__(self) -> str:
    """Return the rule string as given at construction."""
    return self.rule
|
||||
|
||||
def __repr__(self) -> str:
    """Debug representation: the rendered rule, accepted methods (if
    restricted), and the endpoint; notes when the rule is unbound."""
    if self.map is None:
        return f"<{type(self).__name__} (unbound)>"

    rendered = "".join(
        f"<{data}>" if is_dynamic else data for is_dynamic, data in self._trace
    ).lstrip("|")

    if self.methods is not None:
        methods = f" ({', '.join(self.methods)})"
    else:
        methods = ""

    return f"<{type(self).__name__} {rendered!r}{methods} -> {self.endpoint}>"
|
||||
170
lib/python3.11/site-packages/werkzeug/sansio/http.py
Normal file
170
lib/python3.11/site-packages/werkzeug/sansio/http.py
Normal file
@ -0,0 +1,170 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import typing as t
|
||||
from datetime import datetime
|
||||
|
||||
from .._internal import _dt_as_utc
|
||||
from ..http import generate_etag
|
||||
from ..http import parse_date
|
||||
from ..http import parse_etags
|
||||
from ..http import parse_if_range_header
|
||||
from ..http import unquote_etag
|
||||
|
||||
# Matches one entry of an ETag header value: an optional weak marker
# ("W/" / "w/"), then either a quoted or a bare tag, up to the next
# comma separator or end of string.
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
|
||||
|
||||
|
||||
def is_resource_modified(
    http_range: str | None = None,
    http_if_range: str | None = None,
    http_if_modified_since: str | None = None,
    http_if_none_match: str | None = None,
    http_if_match: str | None = None,
    etag: str | None = None,
    data: bytes | None = None,
    last_modified: datetime | str | None = None,
    ignore_if_range: bool = True,
) -> bool:
    """Convenience method for conditional requests.

    :param http_range: Range HTTP header
    :param http_if_range: If-Range HTTP header
    :param http_if_modified_since: If-Modified-Since HTTP header
    :param http_if_none_match: If-None-Match HTTP header
    :param http_if_match: If-Match HTTP header
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
        generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :param ignore_if_range: If `False`, `If-Range` header will be taken into
        account.
    :return: `True` if the resource was modified, otherwise `False`.
    :raises TypeError: if both ``data`` and ``etag`` are given.

    .. versionadded:: 2.2
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError("both data and etag given")

    unmodified = False
    if isinstance(last_modified, str):
        last_modified = parse_date(last_modified)

    # HTTP doesn't use microsecond, remove it to avoid false positive
    # comparisons. Mark naive datetimes as UTC.
    if last_modified is not None:
        last_modified = _dt_as_utc(last_modified.replace(microsecond=0))

    if_range = None
    if not ignore_if_range and http_range is not None:
        # https://tools.ietf.org/html/rfc7233#section-3.2
        # A server MUST ignore an If-Range header field received in a request
        # that does not contain a Range header field.
        if_range = parse_if_range_header(http_if_range)

    # A date in If-Range takes precedence over If-Modified-Since.
    if if_range is not None and if_range.date is not None:
        modified_since: datetime | None = if_range.date
    else:
        modified_since = parse_date(http_if_modified_since)

    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True

    # ETag comparisons may overwrite the date-based result above: the
    # later checks are the authoritative ones when an etag is present.
    if etag:
        etag, _ = unquote_etag(etag)

        if if_range is not None and if_range.etag is not None:
            unmodified = parse_etags(if_range.etag).contains(etag)
        else:
            if_none_match = parse_etags(http_if_none_match)
            if if_none_match:
                # https://tools.ietf.org/html/rfc7232#section-3.2
                # "A recipient MUST use the weak comparison function when comparing
                # entity-tags for If-None-Match"
                unmodified = if_none_match.contains_weak(etag)

            # https://tools.ietf.org/html/rfc7232#section-3.1
            # "Origin server MUST use the strong comparison function when
            # comparing entity-tags for If-Match"
            if_match = parse_etags(http_if_match)
            if if_match:
                unmodified = not if_match.is_strong(etag)

    return not unmodified
|
||||
|
||||
|
||||
# Splits a Cookie header into key/value pairs. The value may be a
# quoted string with backslash escapes or any run of characters up to
# the next ";". ``parse_cookie`` appends a trailing ";" so every pair
# ends with the separator this pattern requires.
_cookie_re = re.compile(
    r"""
    ([^=;]*)
    (?:\s*=\s*
      (
        "(?:[^\\"]|\\.)*"
      |
        .*?
      )
    )?
    \s*;\s*
    """,
    flags=re.ASCII | re.VERBOSE,
)
# Matches one backslash escape inside a quoted cookie value: either a
# three-digit octal byte or a single escaped character.
_cookie_unslash_re = re.compile(rb"\\([0-3][0-7]{2}|.)")
|
||||
|
||||
|
||||
def _cookie_unslash_replace(m: t.Match[bytes]) -> bytes:
    """Resolve one backslash escape matched by ``_cookie_unslash_re``:
    a single escaped character stands for itself, while a three-digit
    octal sequence stands for that byte value.
    """
    escaped = m.group(1)

    if len(escaped) == 1:
        return escaped

    return int(escaped, 8).to_bytes(1, "big")
|
||||
|
||||
|
||||
def parse_cookie(
    cookie: str | None = None,
    cls: type[ds.MultiDict[str, str]] | None = None,
) -> ds.MultiDict[str, str]:
    """Parse a cookie from a string.

    The same key can be provided multiple times, the values are stored
    in-order. The default :class:`MultiDict` will have the first value
    first, and all values can be retrieved with
    :meth:`MultiDict.getlist`.

    :param cookie: The cookie header as a string.
    :param cls: A dict-like class to store the parsed cookies in.
        Defaults to :class:`MultiDict`.

    .. versionchanged:: 3.0
        Passing bytes, and the ``charset`` and ``errors`` parameters, were removed.

    .. versionadded:: 2.2
    """
    if cls is None:
        cls = t.cast("type[ds.MultiDict[str, str]]", ds.MultiDict)

    if not cookie:
        return cls()

    items = []

    # The trailing ";" guarantees every key/value pair ends with the
    # separator that ``_cookie_re`` requires.
    for key, value in _cookie_re.findall(f"{cookie};"):
        key = key.strip()
        value = value.strip()

        if not key:
            continue

        if len(value) >= 2 and value[0] == value[-1] == '"':
            # Unquote and resolve backslash escapes at the byte level,
            # since a UTF-8 character can span multiple bytes.
            unslashed = _cookie_unslash_re.sub(
                _cookie_unslash_replace, value[1:-1].encode()
            )
            value = unslashed.decode(errors="replace")

        items.append((key, value))

    return cls(items)
|
||||
|
||||
|
||||
# circular dependencies
|
||||
from .. import datastructures as ds
|
||||
323
lib/python3.11/site-packages/werkzeug/sansio/multipart.py
Normal file
323
lib/python3.11/site-packages/werkzeug/sansio/multipart.py
Normal file
@ -0,0 +1,323 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import typing as t
|
||||
from dataclasses import dataclass
|
||||
from enum import auto
|
||||
from enum import Enum
|
||||
|
||||
from ..datastructures import Headers
|
||||
from ..exceptions import RequestEntityTooLarge
|
||||
from ..http import parse_options_header
|
||||
|
||||
|
||||
class Event:
    """Base class for all events produced by the multipart decoder and
    consumed by the multipart encoder."""

    pass
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Preamble(Event):
    """Bytes that appeared before the first boundary."""

    data: bytes
||||
|
||||
|
||||
@dataclass(frozen=True)
class Field(Event):
    """Start of a form-data part without a filename."""

    name: str
    headers: Headers
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class File(Event):
    """Start of a form-data part that carries a filename."""

    name: str
    filename: str
    headers: Headers
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Data(Event):
    """A chunk of a part's body. ``more_data`` is True while further
    chunks of the same part are expected."""

    data: bytes
    more_data: bool
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Epilogue(Event):
    """Bytes that appeared after the closing boundary."""

    data: bytes
|
||||
|
||||
|
||||
class NeedData(Event):
    """Sentinel event: more input is required before the decoder can
    emit another event."""

    pass


# Shared singleton returned by ``MultipartDecoder.next_event`` when it
# needs more data.
NEED_DATA = NeedData()
|
||||
|
||||
|
||||
class State(Enum):
    """Positions of the decoder/encoder state machine."""

    PREAMBLE = auto()  # before the first boundary
    PART = auto()  # reading a part's headers
    DATA = auto()  # streaming a part's body
    DATA_START = auto()  # body about to start, leading line break pending
    EPILOGUE = auto()  # after the closing ("--") boundary
    COMPLETE = auto()  # all input consumed
|
||||
|
||||
|
||||
# Multipart line breaks MUST be CRLF (\r\n) by RFC-7578, except that
# many implementations break this and either use CR or LF alone.
LINE_BREAK = b"(?:\r\n|\n|\r)"
# A blank line (two consecutive breaks of the same style) separates the
# part headers from the part body.
BLANK_LINE_RE = re.compile(b"(?:\r\n\r\n|\r\r|\n\n)", re.MULTILINE)
LINE_BREAK_RE = re.compile(LINE_BREAK, re.MULTILINE)
# Header values can be continued via a space or tab after the linebreak, as
# per RFC2231
HEADER_CONTINUATION_RE = re.compile(b"%s[ \t]" % LINE_BREAK, re.MULTILINE)
# This must be long enough to contain any line breaks plus any
# additional boundary markers (--) such that they will be found in a
# subsequent search
SEARCH_EXTRA_LENGTH = 8
|
||||
|
||||
|
||||
class MultipartDecoder:
    """Decodes a multipart message as bytes into Python events.

    The part data is returned as available to allow the caller to save
    the data from memory to disk, if desired.
    """

    def __init__(
        self,
        boundary: bytes,
        max_form_memory_size: int | None = None,
        *,
        max_parts: int | None = None,
    ) -> None:
        # Bytes received but not yet converted into events.
        self.buffer = bytearray()
        # Becomes True once ``receive_data(None)`` signals end of input.
        self.complete = False
        self.max_form_memory_size = max_form_memory_size
        self.max_parts = max_parts
        self.state = State.PREAMBLE
        self.boundary = boundary

        # Note in the below \h i.e. horizontal whitespace is used
        # as [^\S\n\r] as \h isn't supported in python.

        # The preamble must end with a boundary where the boundary is
        # prefixed by a line break, RFC2046. Except that many
        # implementations including Werkzeug's tests omit the line
        # break prefix. In addition the first boundary could be the
        # epilogue boundary (for empty form-data) hence the matching
        # group to understand if it is an epilogue boundary.
        self.preamble_re = re.compile(
            rb"%s?--%s(--[^\S\n\r]*%s?|[^\S\n\r]*%s)"
            % (LINE_BREAK, re.escape(boundary), LINE_BREAK, LINE_BREAK),
            re.MULTILINE,
        )
        # A boundary must include a line break prefix and suffix, and
        # may include trailing whitespace. In addition the boundary
        # could be the epilogue boundary hence the matching group to
        # understand if it is an epilogue boundary.
        self.boundary_re = re.compile(
            rb"%s--%s(--[^\S\n\r]*%s?|[^\S\n\r]*%s)"
            % (LINE_BREAK, re.escape(boundary), LINE_BREAK, LINE_BREAK),
            re.MULTILINE,
        )
        # Offset into ``buffer`` from which the next search may start;
        # everything before it has already been searched.
        self._search_position = 0
        # Number of parts seen so far, checked against ``max_parts``.
        self._parts_decoded = 0

    def last_newline(self, data: bytes) -> int:
        """Return the index of the earlier of the last LF and the last
        CR in *data* (``len(data)`` when one of them is absent).

        Used as a safe cut point, since a partial boundary at the end
        of the buffer always starts with a line break.
        """
        try:
            last_nl = data.rindex(b"\n")
        except ValueError:
            last_nl = len(data)
        try:
            last_cr = data.rindex(b"\r")
        except ValueError:
            last_cr = len(data)

        return min(last_nl, last_cr)

    def receive_data(self, data: bytes | None) -> None:
        """Append *data* to the internal buffer; ``None`` marks the end
        of input.

        :raises RequestEntityTooLarge: if buffering *data* would exceed
            ``max_form_memory_size``.
        """
        if data is None:
            self.complete = True
        elif (
            self.max_form_memory_size is not None
            and len(self.buffer) + len(data) > self.max_form_memory_size
        ):
            # Ensure that data within single event does not exceed limit.
            # Also checked across accumulated events in MultiPartParser.
            raise RequestEntityTooLarge()
        else:
            self.buffer.extend(data)

    def next_event(self) -> Event:
        """Return the next parsed event, or ``NEED_DATA`` when the
        buffer does not yet hold enough bytes for one.

        :raises ValueError: if a part is missing its Content-Disposition
            header, or input ended while parsing was still in progress.
        :raises RequestEntityTooLarge: when ``max_parts`` is exceeded.
        """
        event: Event = NEED_DATA

        if self.state == State.PREAMBLE:
            match = self.preamble_re.search(self.buffer, self._search_position)
            if match is not None:
                # A "--" suffix on the first boundary means an empty
                # message: go straight to the epilogue.
                if match.group(1).startswith(b"--"):
                    self.state = State.EPILOGUE
                else:
                    self.state = State.PART
                data = bytes(self.buffer[: match.start()])
                del self.buffer[: match.end()]
                event = Preamble(data=data)
                self._search_position = 0
            else:
                # Update the search start position to be equal to the
                # current buffer length (already searched) minus a
                # safe buffer for part of the search target.
                self._search_position = max(
                    0, len(self.buffer) - len(self.boundary) - SEARCH_EXTRA_LENGTH
                )

        elif self.state == State.PART:
            match = BLANK_LINE_RE.search(self.buffer, self._search_position)
            if match is not None:
                headers = self._parse_headers(self.buffer[: match.start()])
                # The final header ends with a single CRLF, however a
                # blank line indicates the start of the
                # body. Therefore the end is after the first CRLF.
                headers_end = (match.start() + match.end()) // 2
                del self.buffer[:headers_end]

                if "content-disposition" not in headers:
                    raise ValueError("Missing Content-Disposition header")

                disposition, extra = parse_options_header(
                    headers["content-disposition"]
                )
                name = t.cast(str, extra.get("name"))
                filename = extra.get("filename")
                # The presence of a filename distinguishes an uploaded
                # file from a plain form field.
                if filename is not None:
                    event = File(
                        filename=filename,
                        headers=headers,
                        name=name,
                    )
                else:
                    event = Field(
                        headers=headers,
                        name=name,
                    )
                self.state = State.DATA_START
                self._search_position = 0
                self._parts_decoded += 1

                if self.max_parts is not None and self._parts_decoded > self.max_parts:
                    raise RequestEntityTooLarge()
            else:
                # Update the search start position to be equal to the
                # current buffer length (already searched) minus a
                # safe buffer for part of the search target.
                self._search_position = max(0, len(self.buffer) - SEARCH_EXTRA_LENGTH)

        elif self.state == State.DATA_START:
            data, del_index, more_data = self._parse_data(self.buffer, start=True)
            del self.buffer[:del_index]
            event = Data(data=data, more_data=more_data)
            if more_data:
                self.state = State.DATA

        elif self.state == State.DATA:
            data, del_index, more_data = self._parse_data(self.buffer, start=False)
            del self.buffer[:del_index]
            # Avoid emitting empty intermediate chunks; an empty final
            # chunk is still emitted to close the part.
            if data or not more_data:
                event = Data(data=data, more_data=more_data)

        elif self.state == State.EPILOGUE and self.complete:
            event = Epilogue(data=bytes(self.buffer))
            del self.buffer[:]
            self.state = State.COMPLETE

        if self.complete and isinstance(event, NeedData):
            raise ValueError(f"Invalid form-data cannot parse beyond {self.state}")

        return event

    def _parse_headers(self, data: bytes) -> Headers:
        """Parse the raw header block of a part into :class:`Headers`."""
        headers: list[tuple[str, str]] = []
        # Merge the continued headers into one line
        data = HEADER_CONTINUATION_RE.sub(b" ", data)
        # Now there is one header per line
        for line in data.splitlines():
            line = line.strip()

            if line != b"":
                name, _, value = line.decode().partition(":")
                headers.append((name.strip(), value.strip()))
        return Headers(headers)

    def _parse_data(self, data: bytes, *, start: bool) -> tuple[bytes, int, bool]:
        """Extract as much body data as is safely available from *data*.

        Returns ``(chunk, del_index, more_data)`` where *del_index* is
        how many bytes the caller should drop from the buffer and
        *more_data* is True while the part's closing boundary has not
        been seen. NOTE(review): this searches ``self.buffer`` directly
        in one place; callers pass the buffer itself as *data*.
        """
        # Body parts must start with CRLF (or CR or LF)
        if start:
            match = LINE_BREAK_RE.match(data)
            data_start = t.cast(t.Match[bytes], match).end()
        else:
            data_start = 0

        boundary = b"--" + self.boundary

        if self.buffer.find(boundary) == -1:
            # No complete boundary in the buffer, but there may be
            # a partial boundary at the end. As the boundary
            # starts with either a nl or cr find the earliest and
            # return up to that as data.
            data_end = del_index = self.last_newline(data[data_start:]) + data_start
            # If amount of data after last newline is far from
            # possible length of partial boundary, we should
            # assume that there is no partial boundary in the buffer
            # and return all pending data.
            if (len(data) - data_end) > len(b"\n" + boundary):
                data_end = del_index = len(data)
            more_data = True
        else:
            match = self.boundary_re.search(data)
            if match is not None:
                # A "--" suffix marks the final boundary: epilogue next.
                if match.group(1).startswith(b"--"):
                    self.state = State.EPILOGUE
                else:
                    self.state = State.PART
                data_end = match.start()
                del_index = match.end()
            else:
                data_end = del_index = self.last_newline(data[data_start:]) + data_start
            more_data = match is None

        return bytes(data[data_start:data_end]), del_index, more_data
|
||||
|
||||
|
||||
class MultipartEncoder:
    """Encodes multipart events back into bytes; the counterpart to
    :class:`MultipartDecoder`. Events must be sent in an order that is
    valid for the current state."""

    def __init__(self, boundary: bytes) -> None:
        self.boundary = boundary
        self.state = State.PREAMBLE

    def send_event(self, event: Event) -> bytes:
        """Serialize *event* to bytes and advance the state machine.

        :raises ValueError: if *event* is not valid in the current state.
        """
        if isinstance(event, Preamble) and self.state == State.PREAMBLE:
            self.state = State.PART
            return event.data
        elif isinstance(event, (Field, File)) and self.state in {
            State.PREAMBLE,
            State.PART,
            State.DATA,
        }:
            # Start a new part: boundary line followed by its headers.
            data = b"\r\n--" + self.boundary + b"\r\n"
            data += b'Content-Disposition: form-data; name="%s"' % event.name.encode()
            if isinstance(event, File):
                data += b'; filename="%s"' % event.filename.encode()
            data += b"\r\n"
            for name, value in t.cast(Field, event).headers:
                # Content-Disposition was already written above.
                if name.lower() != "content-disposition":
                    data += f"{name}: {value}\r\n".encode()
            self.state = State.DATA_START
            return data
        elif isinstance(event, Data) and self.state == State.DATA_START:
            self.state = State.DATA
            # The line break closing the header block is only written
            # when there is body data to follow it.
            if len(event.data) > 0:
                return b"\r\n" + event.data
            else:
                return event.data
        elif isinstance(event, Data) and self.state == State.DATA:
            return event.data
        elif isinstance(event, Epilogue):
            self.state = State.COMPLETE
            return b"\r\n--" + self.boundary + b"--\r\n" + event.data
        else:
            raise ValueError(f"Cannot generate {event} in state: {self.state}")
|
||||
534
lib/python3.11/site-packages/werkzeug/sansio/request.py
Normal file
534
lib/python3.11/site-packages/werkzeug/sansio/request.py
Normal file
@ -0,0 +1,534 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
from datetime import datetime
|
||||
from urllib.parse import parse_qsl
|
||||
|
||||
from ..datastructures import Accept
|
||||
from ..datastructures import Authorization
|
||||
from ..datastructures import CharsetAccept
|
||||
from ..datastructures import ETags
|
||||
from ..datastructures import Headers
|
||||
from ..datastructures import HeaderSet
|
||||
from ..datastructures import IfRange
|
||||
from ..datastructures import ImmutableList
|
||||
from ..datastructures import ImmutableMultiDict
|
||||
from ..datastructures import LanguageAccept
|
||||
from ..datastructures import MIMEAccept
|
||||
from ..datastructures import MultiDict
|
||||
from ..datastructures import Range
|
||||
from ..datastructures import RequestCacheControl
|
||||
from ..http import parse_accept_header
|
||||
from ..http import parse_cache_control_header
|
||||
from ..http import parse_date
|
||||
from ..http import parse_etags
|
||||
from ..http import parse_if_range_header
|
||||
from ..http import parse_list_header
|
||||
from ..http import parse_options_header
|
||||
from ..http import parse_range_header
|
||||
from ..http import parse_set_header
|
||||
from ..user_agent import UserAgent
|
||||
from ..utils import cached_property
|
||||
from ..utils import header_property
|
||||
from .http import parse_cookie
|
||||
from .utils import get_content_length
|
||||
from .utils import get_current_url
|
||||
from .utils import get_host
|
||||
|
||||
|
||||
class Request:
|
||||
"""Represents the non-IO parts of a HTTP request, including the
|
||||
method, URL info, and headers.
|
||||
|
||||
This class is not meant for general use. It should only be used when
|
||||
implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
|
||||
provides a WSGI implementation at :cls:`werkzeug.wrappers.Request`.
|
||||
|
||||
:param method: The method the request was made with, such as
|
||||
``GET``.
|
||||
:param scheme: The URL scheme of the protocol the request used, such
|
||||
as ``https`` or ``wss``.
|
||||
:param server: The address of the server. ``(host, port)``,
|
||||
``(path, None)`` for unix sockets, or ``None`` if not known.
|
||||
:param root_path: The prefix that the application is mounted under.
|
||||
This is prepended to generated URLs, but is not part of route
|
||||
matching.
|
||||
:param path: The path part of the URL after ``root_path``.
|
||||
:param query_string: The part of the URL after the "?".
|
||||
:param headers: The headers received with the request.
|
||||
:param remote_addr: The address of the client sending the request.
|
||||
|
||||
.. versionchanged:: 3.0
|
||||
The ``charset``, ``url_charset``, and ``encoding_errors`` attributes
|
||||
were removed.
|
||||
|
||||
.. versionadded:: 2.0
|
||||
"""
|
||||
|
||||
#: the class to use for `args` and `form`. The default is an
|
||||
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
|
||||
#: multiple values per key. A :class:`~werkzeug.datastructures.ImmutableDict`
|
||||
#: is faster but only remembers the last key. It is also
|
||||
#: possible to use mutable structures, but this is not recommended.
|
||||
#:
|
||||
#: .. versionadded:: 0.6
|
||||
parameter_storage_class: type[MultiDict[str, t.Any]] = ImmutableMultiDict
|
||||
|
||||
#: The type to be used for dict values from the incoming WSGI
|
||||
#: environment. (For example for :attr:`cookies`.) By default an
|
||||
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.
|
||||
#:
|
||||
#: .. versionchanged:: 1.0.0
|
||||
#: Changed to ``ImmutableMultiDict`` to support multiple values.
|
||||
#:
|
||||
#: .. versionadded:: 0.6
|
||||
dict_storage_class: type[MultiDict[str, t.Any]] = ImmutableMultiDict
|
||||
|
||||
#: the type to be used for list values from the incoming WSGI environment.
|
||||
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
|
||||
#: (for example for :attr:`access_list`).
|
||||
#:
|
||||
#: .. versionadded:: 0.6
|
||||
list_storage_class: type[list[t.Any]] = ImmutableList
|
||||
|
||||
user_agent_class: type[UserAgent] = UserAgent
|
||||
"""The class used and returned by the :attr:`user_agent` property to
|
||||
parse the header. Defaults to
|
||||
:class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An
|
||||
extension can provide a subclass that uses a parser to provide other
|
||||
data.
|
||||
|
||||
.. versionadded:: 2.0
|
||||
"""
|
||||
|
||||
#: Valid host names when handling requests. By default all hosts are
|
||||
#: trusted, which means that whatever the client says the host is
|
||||
#: will be accepted.
|
||||
#:
|
||||
#: Because ``Host`` and ``X-Forwarded-Host`` headers can be set to
|
||||
#: any value by a malicious client, it is recommended to either set
|
||||
#: this property or implement similar validation in the proxy (if
|
||||
#: the application is being run behind one).
|
||||
#:
|
||||
#: .. versionadded:: 0.9
|
||||
trusted_hosts: list[str] | None = None
|
||||
|
||||
def __init__(
    self,
    method: str,
    scheme: str,
    server: tuple[str, int | None] | None,
    root_path: str,
    path: str,
    query_string: bytes,
    headers: Headers,
    remote_addr: str | None,
) -> None:
    """Store the raw request components; see the class docstring for
    parameter details."""
    #: The method the request was made with, such as ``GET``.
    # Normalized to upper case so comparisons are case-insensitive.
    self.method = method.upper()
    #: The URL scheme of the protocol the request used, such as
    #: ``https`` or ``wss``.
    self.scheme = scheme
    #: The address of the server. ``(host, port)``, ``(path, None)``
    #: for unix sockets, or ``None`` if not known.
    self.server = server
    #: The prefix that the application is mounted under, without a
    #: trailing slash. :attr:`path` comes after this.
    self.root_path = root_path.rstrip("/")
    #: The path part of the URL after :attr:`root_path`. This is the
    #: path used for routing within the application.
    # Normalized to always carry exactly one leading slash.
    self.path = "/" + path.lstrip("/")
    #: The part of the URL after the "?". This is the raw value, use
    #: :attr:`args` for the parsed values.
    self.query_string = query_string
    #: The headers received with the request.
    self.headers = headers
    #: The address of the client sending the request.
    self.remote_addr = remote_addr
|
||||
|
||||
def __repr__(self) -> str:
    """Show the request URL and method, tolerating URLs that cannot
    be built (e.g. an invalid host)."""
    try:
        url_repr = repr(self.url)
    except Exception as e:
        url_repr = repr(f"(invalid URL: {e})")

    return f"<{type(self).__name__} {url_repr} [{self.method}]>"
|
||||
|
||||
@cached_property
def args(self) -> MultiDict[str, str]:
    """The parsed URL parameters (the part in the URL after the question
    mark).

    By default an
    :class:`~werkzeug.datastructures.ImmutableMultiDict`
    is returned from this function. This can be changed by setting
    :attr:`parameter_storage_class` to a different type. This might
    be necessary if the order of the form data is important.

    .. versionchanged:: 2.3
        Invalid bytes remain percent encoded.
    """
    return self.parameter_storage_class(
        parse_qsl(
            self.query_string.decode(),
            keep_blank_values=True,
            # Custom codec error handler — presumably registered
            # elsewhere in werkzeug to keep undecodable bytes
            # percent-encoded (see versionchanged note above).
            errors="werkzeug.url_quote",
        )
    )
|
||||
|
||||
@cached_property
def access_route(self) -> list[str]:
    """If a forwarded header exists this is a list of all ip addresses
    from the client ip to the last proxy server.
    """
    if "X-Forwarded-For" in self.headers:
        addresses = parse_list_header(self.headers["X-Forwarded-For"])
        return self.list_storage_class(addresses)

    if self.remote_addr is not None:
        return self.list_storage_class([self.remote_addr])

    return self.list_storage_class()
|
||||
|
||||
@cached_property
def full_path(self) -> str:
    """Requested path, including the query string."""
    query = self.query_string.decode()
    return f"{self.path}?{query}"
|
||||
|
||||
@property
def is_secure(self) -> bool:
    """``True`` if the request was made with a secure protocol
    (HTTPS or WSS).
    """
    # Only the scheme is inspected; no TLS details are available at
    # this layer.
    return self.scheme in {"https", "wss"}
|
||||
|
||||
@cached_property
def url(self) -> str:
    """The full request URL with the scheme, host, root path, path,
    and query string."""
    return get_current_url(
        self.scheme, self.host, self.root_path, self.path, self.query_string
    )
|
||||
|
||||
@cached_property
def base_url(self) -> str:
    """Like :attr:`url` but without the query string."""
    return get_current_url(self.scheme, self.host, self.root_path, self.path)
|
||||
|
||||
@cached_property
def root_url(self) -> str:
    """The request URL scheme, host, and root path. This is the root
    that the application is accessed from.
    """
    return get_current_url(self.scheme, self.host, self.root_path)
|
||||
|
||||
@cached_property
|
||||
def host_url(self) -> str:
|
||||
"""The request URL scheme and host only."""
|
||||
return get_current_url(self.scheme, self.host)
|
||||
|
||||
    @cached_property
    def host(self) -> str:
        """The host name the request was made to, including the port if
        it's non-standard. Validated with :attr:`trusted_hosts`.
        """
        # get_host falls back to the server tuple when no Host header is
        # present and raises if the host is not in trusted_hosts.
        return get_host(
            self.scheme, self.headers.get("host"), self.server, self.trusted_hosts
        )
|
||||
|
||||
    @cached_property
    def cookies(self) -> ImmutableMultiDict[str, str]:
        """A :class:`dict` with the contents of all cookies transmitted with
        the request."""
        # Multiple Cookie headers are joined with ";" so they parse as one
        # cookie string.
        wsgi_combined_cookie = ";".join(self.headers.getlist("Cookie"))
        return parse_cookie(  # type: ignore
            wsgi_combined_cookie, cls=self.dict_storage_class
        )
|
||||
|
||||
    # Common Descriptors

    # Read-only access to the raw Content-Type header value.
    content_type = header_property[str](
        "Content-Type",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
        read_only=True,
    )
|
||||
|
||||
    @cached_property
    def content_length(self) -> int | None:
        """The Content-Length entity-header field indicates the size of the
        entity-body in bytes or, in the case of the HEAD method, the size of
        the entity-body that would have been sent had the request been a
        GET.
        """
        # Transfer-Encoding is passed through because a chunked body has
        # no meaningful Content-Length; the helper resolves the conflict.
        return get_content_length(
            http_content_length=self.headers.get("Content-Length"),
            http_transfer_encoding=self.headers.get("Transfer-Encoding"),
        )
|
||||
|
||||
    # Read-only descriptors for further standard request headers. Each one
    # proxies self.headers; `date` and `max_forwards` also parse the raw
    # value via the given load function.
    content_encoding = header_property[str](
        "Content-Encoding",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type. When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.

        .. versionadded:: 0.9""",
        read_only=True,
    )
    content_md5 = header_property[str](
        "Content-MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body. (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)

        .. versionadded:: 0.9""",
        read_only=True,
    )
    referrer = header_property[str](
        "Referer",
        doc="""The Referer[sic] request-header field allows the client
        to specify, for the server's benefit, the address (URI) of the
        resource from which the Request-URI was obtained (the
        "referrer", although the header field is misspelled).""",
        read_only=True,
    )
    date = header_property(
        "Date",
        None,
        parse_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
        read_only=True,
    )
    max_forwards = header_property(
        "Max-Forwards",
        None,
        int,
        doc="""The Max-Forwards request-header field provides a
        mechanism with the TRACE and OPTIONS methods to limit the number
        of proxies or gateways that can forward the request to the next
        inbound server.""",
        read_only=True,
    )
|
||||
|
||||
    def _parse_content_type(self) -> None:
        """Parse the ``Content-Type`` header once and memoize the result.

        Stores a ``(mimetype, params)`` tuple on ``_parsed_content_type``
        for use by :attr:`mimetype` and :attr:`mimetype_params`.
        """
        # hasattr guard makes repeated calls cheap: parse only once.
        if not hasattr(self, "_parsed_content_type"):
            self._parsed_content_type = parse_options_header(
                self.headers.get("Content-Type", "")
            )
|
||||
|
||||
    @property
    def mimetype(self) -> str:
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase. For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        self._parse_content_type()
        # Index 0 of the parsed tuple is the bare media type.
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self) -> dict[str, str]:
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        self._parse_content_type()
        # Index 1 of the parsed tuple holds the header parameters.
        return self._parsed_content_type[1]
|
||||
|
||||
    @cached_property
    def pragma(self) -> HeaderSet:
        """The Pragma general-header field is used to include
        implementation-specific directives that might apply to any recipient
        along the request/response chain. All pragma directives specify
        optional behavior from the viewpoint of the protocol; however, some
        systems MAY require that behavior be consistent with the directives.
        """
        return parse_set_header(self.headers.get("Pragma", ""))
|
||||
|
||||
    # Accept

    @cached_property
    def accept_mimetypes(self) -> MIMEAccept:
        """List of mimetypes this client supports as
        :class:`~werkzeug.datastructures.MIMEAccept` object.
        """
        return parse_accept_header(self.headers.get("Accept"), MIMEAccept)

    @cached_property
    def accept_charsets(self) -> CharsetAccept:
        """List of charsets this client supports as
        :class:`~werkzeug.datastructures.CharsetAccept` object.
        """
        return parse_accept_header(self.headers.get("Accept-Charset"), CharsetAccept)

    @cached_property
    def accept_encodings(self) -> Accept:
        """List of encodings this client accepts. Encodings in a HTTP term
        are compression encodings such as gzip. For charsets have a look at
        :attr:`accept_charset`.
        """
        return parse_accept_header(self.headers.get("Accept-Encoding"))

    @cached_property
    def accept_languages(self) -> LanguageAccept:
        """List of languages this client accepts as
        :class:`~werkzeug.datastructures.LanguageAccept` object.

        .. versionchanged 0.5
           In previous versions this was a regular
           :class:`~werkzeug.datastructures.Accept` object.
        """
        return parse_accept_header(self.headers.get("Accept-Language"), LanguageAccept)
|
||||
|
||||
    # ETag

    @cached_property
    def cache_control(self) -> RequestCacheControl:
        """A :class:`~werkzeug.datastructures.RequestCacheControl` object
        for the incoming cache control headers.
        """
        cache_control = self.headers.get("Cache-Control")
        # No on_update callback: the request-side object is effectively
        # read-only, unlike the response-side counterpart.
        return parse_cache_control_header(cache_control, None, RequestCacheControl)

    @cached_property
    def if_match(self) -> ETags:
        """An object containing all the etags in the `If-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.headers.get("If-Match"))

    @cached_property
    def if_none_match(self) -> ETags:
        """An object containing all the etags in the `If-None-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.headers.get("If-None-Match"))
|
||||
|
||||
    @cached_property
    def if_modified_since(self) -> datetime | None:
        """The parsed `If-Modified-Since` header as a datetime object.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """
        return parse_date(self.headers.get("If-Modified-Since"))

    @cached_property
    def if_unmodified_since(self) -> datetime | None:
        """The parsed `If-Unmodified-Since` header as a datetime object.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """
        return parse_date(self.headers.get("If-Unmodified-Since"))

    @cached_property
    def if_range(self) -> IfRange:
        """The parsed ``If-Range`` header.

        .. versionchanged:: 2.0
            ``IfRange.date`` is timezone-aware.

        .. versionadded:: 0.7
        """
        return parse_if_range_header(self.headers.get("If-Range"))

    @cached_property
    def range(self) -> Range | None:
        """The parsed `Range` header.

        .. versionadded:: 0.7

        :rtype: :class:`~werkzeug.datastructures.Range`
        """
        return parse_range_header(self.headers.get("Range"))
|
||||
|
||||
    # User Agent

    @cached_property
    def user_agent(self) -> UserAgent:
        """The user agent. Use ``user_agent.string`` to get the header
        value. Set :attr:`user_agent_class` to a subclass of
        :class:`~werkzeug.user_agent.UserAgent` to provide parsing for
        the other properties or other extended data.

        .. versionchanged:: 2.1
            The built-in parser was removed. Set ``user_agent_class`` to a ``UserAgent``
            subclass to parse data from the string.
        """
        # A missing header yields an empty-string UserAgent rather than None.
        return self.user_agent_class(self.headers.get("User-Agent", ""))
|
||||
|
||||
    # Authorization

    @cached_property
    def authorization(self) -> Authorization | None:
        """The ``Authorization`` header parsed into an :class:`.Authorization` object.
        ``None`` if the header is not present.

        .. versionchanged:: 2.3
            :class:`Authorization` is no longer a ``dict``. The ``token`` attribute
            was added for auth schemes that use a token instead of parameters.
        """
        return Authorization.from_header(self.headers.get("Authorization"))
|
||||
|
||||
    # CORS

    # Read-only descriptors for the CORS preflight request headers.
    origin = header_property[str](
        "Origin",
        doc=(
            "The host that the request originated from. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_origin` on"
            " the response to indicate which origins are allowed."
        ),
        read_only=True,
    )

    access_control_request_headers = header_property(
        "Access-Control-Request-Headers",
        load_func=parse_set_header,
        doc=(
            "Sent with a preflight request to indicate which headers"
            " will be sent with the cross origin request. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_headers`"
            " on the response to indicate which headers are allowed."
        ),
        read_only=True,
    )

    access_control_request_method = header_property[str](
        "Access-Control-Request-Method",
        doc=(
            "Sent with a preflight request to indicate which method"
            " will be used for the cross origin request. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_methods`"
            " on the response to indicate which methods are allowed."
        ),
        read_only=True,
    )
|
||||
|
||||
@property
|
||||
def is_json(self) -> bool:
|
||||
"""Check if the mimetype indicates JSON data, either
|
||||
:mimetype:`application/json` or :mimetype:`application/*+json`.
|
||||
"""
|
||||
mt = self.mimetype
|
||||
return (
|
||||
mt == "application/json"
|
||||
or mt.startswith("application/")
|
||||
and mt.endswith("+json")
|
||||
)
|
||||
763
lib/python3.11/site-packages/werkzeug/sansio/response.py
Normal file
763
lib/python3.11/site-packages/werkzeug/sansio/response.py
Normal file
@ -0,0 +1,763 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
from datetime import datetime
|
||||
from datetime import timedelta
|
||||
from datetime import timezone
|
||||
from http import HTTPStatus
|
||||
|
||||
from ..datastructures import CallbackDict
|
||||
from ..datastructures import ContentRange
|
||||
from ..datastructures import ContentSecurityPolicy
|
||||
from ..datastructures import Headers
|
||||
from ..datastructures import HeaderSet
|
||||
from ..datastructures import ResponseCacheControl
|
||||
from ..datastructures import WWWAuthenticate
|
||||
from ..http import COEP
|
||||
from ..http import COOP
|
||||
from ..http import dump_age
|
||||
from ..http import dump_cookie
|
||||
from ..http import dump_header
|
||||
from ..http import dump_options_header
|
||||
from ..http import http_date
|
||||
from ..http import HTTP_STATUS_CODES
|
||||
from ..http import parse_age
|
||||
from ..http import parse_cache_control_header
|
||||
from ..http import parse_content_range_header
|
||||
from ..http import parse_csp_header
|
||||
from ..http import parse_date
|
||||
from ..http import parse_options_header
|
||||
from ..http import parse_set_header
|
||||
from ..http import quote_etag
|
||||
from ..http import unquote_etag
|
||||
from ..utils import get_content_type
|
||||
from ..utils import header_property
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from ..datastructures.cache_control import _CacheControl
|
||||
|
||||
|
||||
def _set_property(name: str, doc: str | None = None) -> property:
    """Build a property exposing a ``HeaderSet``-valued header.

    :param name: the header name the property reads and writes.
    :param doc: docstring attached to the returned property.
    :return: a read/write property whose getter returns a live
        :class:`HeaderSet` that writes back to the header on mutation.
    """

    def fget(self: Response) -> HeaderSet:
        # Mutations of the returned HeaderSet trigger on_update, which
        # keeps the underlying header in sync (or removes it when empty).
        def on_update(header_set: HeaderSet) -> None:
            if not header_set and name in self.headers:
                del self.headers[name]
            elif header_set:
                self.headers[name] = header_set.to_header()

        return parse_set_header(self.headers.get(name), on_update)

    def fset(
        self: Response,
        value: None | (str | dict[str, str | int] | t.Iterable[str]),
    ) -> None:
        # Falsy value removes the header; strings pass through verbatim;
        # anything else is serialized with dump_header.
        if not value:
            del self.headers[name]
        elif isinstance(value, str):
            self.headers[name] = value
        else:
            self.headers[name] = dump_header(value)

    return property(fget, fset, doc=doc)
|
||||
|
||||
|
||||
class Response:
|
||||
"""Represents the non-IO parts of an HTTP response, specifically the
|
||||
status and headers but not the body.
|
||||
|
||||
This class is not meant for general use. It should only be used when
|
||||
implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
|
||||
provides a WSGI implementation at :cls:`werkzeug.wrappers.Response`.
|
||||
|
||||
:param status: The status code for the response. Either an int, in
|
||||
which case the default status message is added, or a string in
|
||||
the form ``{code} {message}``, like ``404 Not Found``. Defaults
|
||||
to 200.
|
||||
:param headers: A :class:`~werkzeug.datastructures.Headers` object,
|
||||
or a list of ``(key, value)`` tuples that will be converted to a
|
||||
``Headers`` object.
|
||||
:param mimetype: The mime type (content type without charset or
|
||||
other parameters) of the response. If the value starts with
|
||||
``text/`` (or matches some other special cases), the charset
|
||||
will be added to create the ``content_type``.
|
||||
:param content_type: The full content type of the response.
|
||||
Overrides building the value from ``mimetype``.
|
||||
|
||||
.. versionchanged:: 3.0
|
||||
The ``charset`` attribute was removed.
|
||||
|
||||
.. versionadded:: 2.0
|
||||
"""
|
||||
|
||||
    #: the default status if none is provided.
    default_status = 200

    #: the default mimetype if none is provided.
    default_mimetype: str | None = "text/plain"

    #: Warn if a cookie header exceeds this size. The default, 4093, should be
    #: safely `supported by most browsers <cookie_>`_. A cookie larger than
    #: this size will still be sent, but it may be ignored or handled
    #: incorrectly by some browsers. Set to 0 to disable this check.
    #:
    #: .. versionadded:: 0.13
    #:
    #: .. _`cookie`: http://browsercookielimits.squawky.net/
    max_cookie_size = 4093

    # A :class:`Headers` object representing the response headers.
    headers: Headers
|
||||
|
||||
    def __init__(
        self,
        status: int | str | HTTPStatus | None = None,
        headers: t.Mapping[str, str | t.Iterable[str]]
        | t.Iterable[tuple[str, str]]
        | None = None,
        mimetype: str | None = None,
        content_type: str | None = None,
    ) -> None:
        """Set up the status and headers; see the class docstring for the
        meaning of each parameter.
        """
        # Reuse an existing Headers object directly; otherwise build one
        # (an empty/None value becomes a fresh empty Headers).
        if isinstance(headers, Headers):
            self.headers = headers
        elif not headers:
            self.headers = Headers()
        else:
            self.headers = Headers(headers)

        # content_type wins over mimetype; mimetype falls back to the
        # class default only when no Content-Type header was given.
        if content_type is None:
            if mimetype is None and "content-type" not in self.headers:
                mimetype = self.default_mimetype
            if mimetype is not None:
                mimetype = get_content_type(mimetype, "utf-8")
            content_type = mimetype
        if content_type is not None:
            self.headers["Content-Type"] = content_type
        if status is None:
            status = self.default_status
        # The status setter normalizes ints/strings/HTTPStatus values.
        self.status = status  # type: ignore
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<{type(self).__name__} [{self.status}]>"
|
||||
|
||||
    @property
    def status_code(self) -> int:
        """The HTTP status code as a number."""
        return self._status_code

    @status_code.setter
    def status_code(self, code: int) -> None:
        # Route through the status setter so both representations stay in sync.
        self.status = code  # type: ignore

    @property
    def status(self) -> str:
        """The HTTP status code as a string."""
        return self._status

    @status.setter
    def status(self, value: str | int | HTTPStatus) -> None:
        # _clean_status normalizes the value to ("code message", code).
        self._status, self._status_code = self._clean_status(value)
|
||||
|
||||
    def _clean_status(self, value: str | int | HTTPStatus) -> tuple[str, int]:
        """Normalize a status value to a ``(status_line, status_code)`` pair.

        :param value: an int/``HTTPStatus``, or a string that may be a bare
            code, a bare message, or ``"code message"``.
        :raises ValueError: if a string value is empty after stripping.
        """
        if isinstance(value, (int, HTTPStatus)):
            status_code = int(value)
        else:
            value = value.strip()

            if not value:
                raise ValueError("Empty status argument")

            code_str, sep, _ = value.partition(" ")

            try:
                status_code = int(code_str)
            except ValueError:
                # only message — use code 0 as a placeholder
                return f"0 {value}", 0

            if sep:
                # code and message
                return value, status_code

        # only code, look up message
        try:
            status = f"{status_code} {HTTP_STATUS_CODES[status_code].upper()}"
        except KeyError:
            status = f"{status_code} UNKNOWN"

        return status, status_code
|
||||
|
||||
    def set_cookie(
        self,
        key: str,
        value: str = "",
        max_age: timedelta | int | None = None,
        expires: str | datetime | int | float | None = None,
        path: str | None = "/",
        domain: str | None = None,
        secure: bool = False,
        httponly: bool = False,
        samesite: str | None = None,
        partitioned: bool = False,
    ) -> None:
        """Sets a cookie.

        A warning is raised if the size of the cookie header exceeds
        :attr:`max_cookie_size`, but the header will still be set.

        :param key: the key (name) of the cookie to be set.
        :param value: the value of the cookie.
        :param max_age: should be a number of seconds, or `None` (default) if
                        the cookie should last only as long as the client's
                        browser session.
        :param expires: should be a `datetime` object or UNIX timestamp.
        :param path: limits the cookie to a given path, per default it will
                     span the whole domain.
        :param domain: if you want to set a cross-domain cookie. For example,
                      ``domain="example.com"`` will set a cookie that is
                      readable by the domain ``www.example.com``,
                      ``foo.example.com`` etc. Otherwise, a cookie will only
                      be readable by the domain that set it.
        :param secure: If ``True``, the cookie will only be available
            via HTTPS.
        :param httponly: Disallow JavaScript access to the cookie.
        :param samesite: Limit the scope of the cookie to only be
            attached to requests that are "same-site".
        :param partitioned: If ``True``, the cookie will be partitioned.

        .. versionchanged:: 3.1
            The ``partitioned`` parameter was added.
        """
        # headers.add (not __setitem__) so multiple Set-Cookie headers can
        # coexist; dump_cookie performs serialization and the size warning.
        self.headers.add(
            "Set-Cookie",
            dump_cookie(
                key,
                value=value,
                max_age=max_age,
                expires=expires,
                path=path,
                domain=domain,
                secure=secure,
                httponly=httponly,
                max_size=self.max_cookie_size,
                samesite=samesite,
                partitioned=partitioned,
            ),
        )
|
||||
|
||||
    def delete_cookie(
        self,
        key: str,
        path: str | None = "/",
        domain: str | None = None,
        secure: bool = False,
        httponly: bool = False,
        samesite: str | None = None,
        partitioned: bool = False,
    ) -> None:
        """Delete a cookie. Fails silently if key doesn't exist.

        :param key: the key (name) of the cookie to be deleted.
        :param path: if the cookie that should be deleted was limited to a
                     path, the path has to be defined here.
        :param domain: if the cookie that should be deleted was limited to a
                       domain, that domain has to be defined here.
        :param secure: If ``True``, the cookie will only be available
            via HTTPS.
        :param httponly: Disallow JavaScript access to the cookie.
        :param samesite: Limit the scope of the cookie to only be
            attached to requests that are "same-site".
        :param partitioned: If ``True``, the cookie will be partitioned.
        """
        # Deleting means setting an already-expired cookie with the same
        # scoping attributes so the browser discards it.
        self.set_cookie(
            key,
            expires=0,
            max_age=0,
            path=path,
            domain=domain,
            secure=secure,
            httponly=httponly,
            samesite=samesite,
            partitioned=partitioned,
        )
|
||||
|
||||
    @property
    def is_json(self) -> bool:
        """Check if the mimetype indicates JSON data, either
        :mimetype:`application/json` or :mimetype:`application/*+json`.
        """
        # mimetype may be None on a response without Content-Type,
        # hence the explicit None guard (unlike the request side).
        mt = self.mimetype
        return mt is not None and (
            mt == "application/json"
            or mt.startswith("application/")
            and mt.endswith("+json")
        )
|
||||
|
||||
# Common Descriptors
|
||||
|
||||
    @property
    def mimetype(self) -> str | None:
        """The mimetype (content type without charset etc.)"""
        ct = self.headers.get("content-type")

        if ct:
            # Strip parameters: everything before the first ";".
            return ct.split(";")[0].strip()
        else:
            return None

    @mimetype.setter
    def mimetype(self, value: str) -> None:
        # get_content_type may append the charset parameter.
        self.headers["Content-Type"] = get_content_type(value, "utf-8")

    @property
    def mimetype_params(self) -> dict[str, str]:
        """The mimetype parameters as dict. For example if the
        content type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.5
        """

        # Mutating the returned dict re-serializes Content-Type via
        # the on_update callback.
        def on_update(d: CallbackDict[str, str]) -> None:
            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)

        d = parse_options_header(self.headers.get("content-type", ""))[1]
        return CallbackDict(d, on_update)
|
||||
|
||||
    # Read/write descriptors for common response headers. Variants with a
    # load/dump function pair (e.g. ``date``) parse on read and serialize
    # on write.
    location = header_property[str](
        "Location",
        doc="""The Location response-header field is used to redirect
        the recipient to a location other than the Request-URI for
        completion of the request or identification of a new
        resource.""",
    )
    age = header_property(
        "Age",
        None,
        parse_age,
        dump_age,  # type: ignore
        doc="""The Age response-header field conveys the sender's
        estimate of the amount of time since the response (or its
        revalidation) was generated at the origin server.

        Age values are non-negative decimal integers, representing time
        in seconds.""",
    )
    content_type = header_property[str](
        "Content-Type",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
    )
    content_length = header_property(
        "Content-Length",
        None,
        int,
        str,
        doc="""The Content-Length entity-header field indicates the size
        of the entity-body, in decimal number of OCTETs, sent to the
        recipient or, in the case of the HEAD method, the size of the
        entity-body that would have been sent had the request been a
        GET.""",
    )
    content_location = header_property[str](
        "Content-Location",
        doc="""The Content-Location entity-header field MAY be used to
        supply the resource location for the entity enclosed in the
        message when that entity is accessible from a location separate
        from the requested resource's URI.""",
    )
    content_encoding = header_property[str](
        "Content-Encoding",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type. When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.""",
    )
    content_md5 = header_property[str](
        "Content-MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body. (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)""",
    )
    date = header_property(
        "Date",
        None,
        parse_date,
        http_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )
    expires = header_property(
        "Expires",
        None,
        parse_date,
        http_date,
        doc="""The Expires entity-header field gives the date/time after
        which the response is considered stale. A stale cache entry may
        not normally be returned by a cache.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )
    last_modified = header_property(
        "Last-Modified",
        None,
        parse_date,
        http_date,
        doc="""The Last-Modified entity-header field indicates the date
        and time at which the origin server believes the variant was
        last modified.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )
|
||||
|
||||
    @property
    def retry_after(self) -> datetime | None:
        """The Retry-After response-header field can be used with a
        503 (Service Unavailable) response to indicate how long the
        service is expected to be unavailable to the requesting client.

        Time in seconds until expiration or date.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """
        value = self.headers.get("retry-after")
        if value is None:
            return None

        # The header may be either an integer number of seconds or an
        # HTTP date; a seconds value is converted to an absolute time.
        try:
            seconds = int(value)
        except ValueError:
            return parse_date(value)

        return datetime.now(timezone.utc) + timedelta(seconds=seconds)

    @retry_after.setter
    def retry_after(self, value: datetime | int | str | None) -> None:
        if value is None:
            # None unsets the header (if present).
            if "retry-after" in self.headers:
                del self.headers["retry-after"]
            return
        elif isinstance(value, datetime):
            value = http_date(value)
        else:
            value = str(value)
        self.headers["Retry-After"] = value
|
||||
|
||||
    # HeaderSet-valued descriptors built by the module-level _set_property
    # factory; reads return a live set, writes serialize it back.
    vary = _set_property(
        "Vary",
        doc="""The Vary field value indicates the set of request-header
        fields that fully determines, while the response is fresh,
        whether a cache is permitted to use the response to reply to a
        subsequent request without revalidation.""",
    )
    content_language = _set_property(
        "Content-Language",
        doc="""The Content-Language entity-header field describes the
        natural language(s) of the intended audience for the enclosed
        entity. Note that this might not be equivalent to all the
        languages used within the entity-body.""",
    )
    allow = _set_property(
        "Allow",
        doc="""The Allow entity-header field lists the set of methods
        supported by the resource identified by the Request-URI. The
        purpose of this field is strictly to inform the recipient of
        valid methods associated with the resource. An Allow header
        field MUST be present in a 405 (Method Not Allowed)
        response.""",
    )
|
||||
|
||||
# ETag
|
||||
|
||||
    @property
    def cache_control(self) -> ResponseCacheControl:
        """The Cache-Control general-header field is used to specify
        directives that MUST be obeyed by all caching mechanisms along the
        request/response chain.
        """

        # Mutating the returned object writes the header back (or removes
        # it when all directives are cleared).
        def on_update(cache_control: _CacheControl) -> None:
            if not cache_control and "cache-control" in self.headers:
                del self.headers["cache-control"]
            elif cache_control:
                self.headers["Cache-Control"] = cache_control.to_header()

        return parse_cache_control_header(
            self.headers.get("cache-control"), on_update, ResponseCacheControl
        )
|
||||
|
||||
    def set_etag(self, etag: str, weak: bool = False) -> None:
        """Set the etag, and override the old one if there was one."""
        # quote_etag adds the surrounding quotes and the W/ prefix for
        # weak tags.
        self.headers["ETag"] = quote_etag(etag, weak)

    def get_etag(self) -> tuple[str, bool] | tuple[None, None]:
        """Return a tuple in the form ``(etag, is_weak)``. If there is no
        ETag the return value is ``(None, None)``.
        """
        return unquote_etag(self.headers.get("ETag"))
|
||||
|
||||
    accept_ranges = header_property[str](
        "Accept-Ranges",
        doc="""The `Accept-Ranges` header. Even though the name would
        indicate that multiple values are supported, it must be one
        string token only.

        The values ``'bytes'`` and ``'none'`` are common.

        .. versionadded:: 0.7""",
    )
|
||||
|
||||
    @property
    def content_range(self) -> ContentRange:
        """The ``Content-Range`` header as a
        :class:`~werkzeug.datastructures.ContentRange` object. Available
        even if the header is not set.

        .. versionadded:: 0.7
        """

        # Mutations on the returned object propagate to the header.
        def on_update(rng: ContentRange) -> None:
            if not rng:
                del self.headers["content-range"]
            else:
                self.headers["Content-Range"] = rng.to_header()

        rv = parse_content_range_header(self.headers.get("content-range"), on_update)
        # always provide a content range object to make the descriptor
        # more user friendly. It provides an unset() method that can be
        # used to remove the header quickly.
        if rv is None:
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv

    @content_range.setter
    def content_range(self, value: ContentRange | str | None) -> None:
        # Falsy removes the header; strings pass through; ContentRange
        # objects are serialized.
        if not value:
            del self.headers["content-range"]
        elif isinstance(value, str):
            self.headers["Content-Range"] = value
        else:
            self.headers["Content-Range"] = value.to_header()
|
||||
|
||||
# Authorization
|
||||
|
||||
    @property
    def www_authenticate(self) -> WWWAuthenticate:
        """The ``WWW-Authenticate`` header parsed into a :class:`.WWWAuthenticate`
        object. Modifying the object will modify the header value.

        This header is not set by default. To set this header, assign an instance of
        :class:`.WWWAuthenticate` to this attribute.

        .. code-block:: python

            response.www_authenticate = WWWAuthenticate(
                "basic", {"realm": "Authentication Required"}
            )

        Multiple values for this header can be sent to give the client multiple options.
        Assign a list to set multiple headers. However, modifying the items in the list
        will not automatically update the header values, and accessing this attribute
        will only ever return the first value.

        To unset this header, assign ``None`` or use ``del``.

        .. versionchanged:: 2.3
            This attribute can be assigned to to set the header. A list can be assigned
            to set multiple header values. Use ``del`` to unset the header.

        .. versionchanged:: 2.3
            :class:`WWWAuthenticate` is no longer a ``dict``. The ``token`` attribute
            was added for auth challenges that use a token instead of parameters.
        """
        value = WWWAuthenticate.from_header(self.headers.get("WWW-Authenticate"))

        # With no header set, return a default "basic" challenge object so
        # mutations can still write the header via on_update.
        if value is None:
            value = WWWAuthenticate("basic")

        def on_update(value: WWWAuthenticate) -> None:
            self.www_authenticate = value

        value._on_update = on_update
        return value
|
||||
|
||||
@www_authenticate.setter
|
||||
def www_authenticate(
|
||||
self, value: WWWAuthenticate | list[WWWAuthenticate] | None
|
||||
) -> None:
|
||||
if not value: # None or empty list
|
||||
del self.www_authenticate
|
||||
elif isinstance(value, list):
|
||||
# Clear any existing header by setting the first item.
|
||||
self.headers.set("WWW-Authenticate", value[0].to_header())
|
||||
|
||||
for item in value[1:]:
|
||||
# Add additional header lines for additional items.
|
||||
self.headers.add("WWW-Authenticate", item.to_header())
|
||||
else:
|
||||
self.headers.set("WWW-Authenticate", value.to_header())
|
||||
|
||||
def on_update(value: WWWAuthenticate) -> None:
|
||||
self.www_authenticate = value
|
||||
|
||||
# When setting a single value, allow updating it directly.
|
||||
value._on_update = on_update
|
||||
|
||||
@www_authenticate.deleter
|
||||
def www_authenticate(self) -> None:
|
||||
if "WWW-Authenticate" in self.headers:
|
||||
del self.headers["WWW-Authenticate"]
|
||||
|
||||
# CSP
|
||||
|
||||
@property
|
||||
def content_security_policy(self) -> ContentSecurityPolicy:
|
||||
"""The ``Content-Security-Policy`` header as a
|
||||
:class:`~werkzeug.datastructures.ContentSecurityPolicy` object. Available
|
||||
even if the header is not set.
|
||||
|
||||
The Content-Security-Policy header adds an additional layer of
|
||||
security to help detect and mitigate certain types of attacks.
|
||||
"""
|
||||
|
||||
def on_update(csp: ContentSecurityPolicy) -> None:
|
||||
if not csp:
|
||||
del self.headers["content-security-policy"]
|
||||
else:
|
||||
self.headers["Content-Security-Policy"] = csp.to_header()
|
||||
|
||||
rv = parse_csp_header(self.headers.get("content-security-policy"), on_update)
|
||||
if rv is None:
|
||||
rv = ContentSecurityPolicy(None, on_update=on_update)
|
||||
return rv
|
||||
|
||||
@content_security_policy.setter
|
||||
def content_security_policy(
|
||||
self, value: ContentSecurityPolicy | str | None
|
||||
) -> None:
|
||||
if not value:
|
||||
del self.headers["content-security-policy"]
|
||||
elif isinstance(value, str):
|
||||
self.headers["Content-Security-Policy"] = value
|
||||
else:
|
||||
self.headers["Content-Security-Policy"] = value.to_header()
|
||||
|
||||
@property
|
||||
def content_security_policy_report_only(self) -> ContentSecurityPolicy:
|
||||
"""The ``Content-Security-policy-report-only`` header as a
|
||||
:class:`~werkzeug.datastructures.ContentSecurityPolicy` object. Available
|
||||
even if the header is not set.
|
||||
|
||||
The Content-Security-Policy-Report-Only header adds a csp policy
|
||||
that is not enforced but is reported thereby helping detect
|
||||
certain types of attacks.
|
||||
"""
|
||||
|
||||
def on_update(csp: ContentSecurityPolicy) -> None:
|
||||
if not csp:
|
||||
del self.headers["content-security-policy-report-only"]
|
||||
else:
|
||||
self.headers["Content-Security-policy-report-only"] = csp.to_header()
|
||||
|
||||
rv = parse_csp_header(
|
||||
self.headers.get("content-security-policy-report-only"), on_update
|
||||
)
|
||||
if rv is None:
|
||||
rv = ContentSecurityPolicy(None, on_update=on_update)
|
||||
return rv
|
||||
|
||||
@content_security_policy_report_only.setter
|
||||
def content_security_policy_report_only(
|
||||
self, value: ContentSecurityPolicy | str | None
|
||||
) -> None:
|
||||
if not value:
|
||||
del self.headers["content-security-policy-report-only"]
|
||||
elif isinstance(value, str):
|
||||
self.headers["Content-Security-policy-report-only"] = value
|
||||
else:
|
||||
self.headers["Content-Security-policy-report-only"] = value.to_header()
|
||||
|
||||
# CORS
|
||||
|
||||
@property
|
||||
def access_control_allow_credentials(self) -> bool:
|
||||
"""Whether credentials can be shared by the browser to
|
||||
JavaScript code. As part of the preflight request it indicates
|
||||
whether credentials can be used on the cross origin request.
|
||||
"""
|
||||
return "Access-Control-Allow-Credentials" in self.headers
|
||||
|
||||
@access_control_allow_credentials.setter
|
||||
def access_control_allow_credentials(self, value: bool | None) -> None:
|
||||
if value is True:
|
||||
self.headers["Access-Control-Allow-Credentials"] = "true"
|
||||
else:
|
||||
self.headers.pop("Access-Control-Allow-Credentials", None)
|
||||
|
||||
    # Set-valued CORS headers use parse_set_header/dump_header so callers
    # can manipulate them as HeaderSet objects while the header itself is
    # serialized to a single comma-separated line.
    access_control_allow_headers = header_property(
        "Access-Control-Allow-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be sent with the cross origin request.",
    )

    access_control_allow_methods = header_property(
        "Access-Control-Allow-Methods",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which methods can be used for the cross origin request.",
    )

    access_control_allow_origin = header_property[str](
        "Access-Control-Allow-Origin",
        doc="The origin or '*' for any origin that may make cross origin requests.",
    )

    access_control_expose_headers = header_property(
        "Access-Control-Expose-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be shared by the browser to JavaScript code.",
    )

    # Stored as the header string, exposed as an int (seconds).
    access_control_max_age = header_property(
        "Access-Control-Max-Age",
        load_func=int,
        dump_func=str,
        doc="The maximum age in seconds the access control settings can be cached for.",
    )

    # Enum-valued isolation headers: round-trip through the enum's string
    # value and report a default when the header is absent.
    cross_origin_opener_policy = header_property[COOP](
        "Cross-Origin-Opener-Policy",
        load_func=lambda value: COOP(value),
        dump_func=lambda value: value.value,
        default=COOP.UNSAFE_NONE,
        doc="""Allows control over sharing of browsing context group with cross-origin
        documents. Values must be a member of the :class:`werkzeug.http.COOP` enum.""",
    )

    cross_origin_embedder_policy = header_property[COEP](
        "Cross-Origin-Embedder-Policy",
        load_func=lambda value: COEP(value),
        dump_func=lambda value: value.value,
        default=COEP.UNSAFE_NONE,
        doc="""Prevents a document from loading any cross-origin resources that do not
        explicitly grant the document permission. Values must be a member of the
        :class:`werkzeug.http.COEP` enum.""",
    )
|
||||
167
lib/python3.11/site-packages/werkzeug/sansio/utils.py
Normal file
167
lib/python3.11/site-packages/werkzeug/sansio/utils.py
Normal file
@ -0,0 +1,167 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
from urllib.parse import quote
|
||||
|
||||
from .._internal import _plain_int
|
||||
from ..exceptions import SecurityError
|
||||
from ..urls import uri_to_iri
|
||||
|
||||
|
||||
def host_is_trusted(hostname: str | None, trusted_list: t.Iterable[str]) -> bool:
    """Check if a host matches a list of trusted names.

    :param hostname: The name to check.
    :param trusted_list: A list of valid names to match. If a name
        starts with a dot it will match all subdomains.

    .. versionadded:: 0.9
    """
    if not hostname:
        return False

    try:
        # Strip any port and normalize through IDNA so Unicode domains
        # compare equal to their punycode form.
        hostname = hostname.partition(":")[0].encode("idna").decode("ascii")
    except UnicodeEncodeError:
        return False

    if isinstance(trusted_list, str):
        trusted_list = [trusted_list]

    for ref in trusted_list:
        suffix_match = ref.startswith(".")

        if suffix_match:
            # A leading dot means "this domain and any subdomain".
            ref = ref[1:]

        try:
            ref = ref.partition(":")[0].encode("idna").decode("ascii")
        except UnicodeEncodeError:
            return False

        if hostname == ref:
            return True

        if suffix_match and hostname.endswith(f".{ref}"):
            return True

    return False
|
||||
|
||||
|
||||
def get_host(
    scheme: str,
    host_header: str | None,
    server: tuple[str, int | None] | None = None,
    trusted_hosts: t.Iterable[str] | None = None,
) -> str:
    """Return the host for the given parameters.

    This first checks the ``host_header``. If it's not present, then
    ``server`` is used. The host will only contain the port if it is
    different than the standard port for the protocol.

    Optionally, verify that the host is trusted using
    :func:`host_is_trusted` and raise a
    :exc:`~werkzeug.exceptions.SecurityError` if it is not.

    :param scheme: The protocol the request used, like ``"https"``.
    :param host_header: The ``Host`` header value.
    :param server: Address of the server. ``(host, port)``, or
        ``(path, None)`` for unix sockets.
    :param trusted_hosts: A list of trusted host names.

    :return: Host, with port if necessary.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not
        trusted.

    .. versionchanged:: 3.1.3
        If ``SERVER_NAME`` is IPv6, it is wrapped in ``[]``.
    """
    if host_header is not None:
        host = host_header
    elif server is not None:
        name, port = server

        # An IPv6 SERVER_NAME contains ":"; wrap it in brackets to match
        # the Host header form. Domains and IPv4 never contain ":".
        if ":" in name and not name.startswith("["):
            name = f"[{name}]"

        host = name if port is None else f"{name}:{port}"
    else:
        host = ""

    # Drop the port when it is the scheme's default.
    if scheme in {"http", "ws"} and host.endswith(":80"):
        host = host[:-3]
    elif scheme in {"https", "wss"} and host.endswith(":443"):
        host = host[:-4]

    if trusted_hosts is not None and not host_is_trusted(host, trusted_hosts):
        raise SecurityError(f"Host {host!r} is not trusted.")

    return host
|
||||
|
||||
|
||||
def get_current_url(
    scheme: str,
    host: str,
    root_path: str | None = None,
    path: str | None = None,
    query_string: bytes | None = None,
) -> str:
    """Recreate the URL for a request. If an optional part isn't
    provided, it and subsequent parts are not included in the URL.

    The URL is an IRI, not a URI, so it may contain Unicode characters.
    Use :func:`~werkzeug.urls.iri_to_uri` to convert it to ASCII.

    :param scheme: The protocol the request used, like ``"https"``.
    :param host: The host the request was made to. See :func:`get_host`.
    :param root_path: Prefix that the application is mounted under. This
        is prepended to ``path``.
    :param path: The path part of the URL after ``root_path``.
    :param query_string: The portion of the URL after the "?".
    """
    # safe = https://url.spec.whatwg.org/#url-path-segment-string
    # as well as percent for things that are already quoted
    safe = "!$&'()*+,/:;=@%"
    parts = [scheme, "://", host]

    if root_path is None:
        return uri_to_iri("".join(parts) + "/")

    parts.append(quote(root_path.rstrip("/"), safe=safe))
    parts.append("/")

    if path is None:
        return uri_to_iri("".join(parts))

    parts.append(quote(path.lstrip("/"), safe=safe))

    if query_string:
        parts.append("?")
        parts.append(quote(query_string, safe=safe + "?"))

    return uri_to_iri("".join(parts))
|
||||
|
||||
|
||||
def get_content_length(
    http_content_length: str | None = None,
    http_transfer_encoding: str | None = None,
) -> int | None:
    """Return the ``Content-Length`` header value as an int. If the header is not given
    or the ``Transfer-Encoding`` header is ``chunked``, ``None`` is returned to indicate
    a streaming request. If the value is not an integer, or negative, 0 is returned.

    :param http_content_length: The Content-Length HTTP header.
    :param http_transfer_encoding: The Transfer-Encoding HTTP header.

    .. versionadded:: 2.2
    """
    # No header, or chunked transfer: length is unknown (streaming).
    if http_content_length is None or http_transfer_encoding == "chunked":
        return None

    try:
        length = _plain_int(http_content_length)
    except ValueError:
        # Not a plain integer: treat as an empty body.
        return 0

    # Negative values are invalid; clamp to 0.
    return length if length >= 0 else 0
|
||||
166
lib/python3.11/site-packages/werkzeug/security.py
Normal file
166
lib/python3.11/site-packages/werkzeug/security.py
Normal file
@ -0,0 +1,166 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import hmac
|
||||
import os
|
||||
import posixpath
|
||||
import secrets
|
||||
|
||||
# Alphanumeric alphabet used for password salts.
SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
# Default pbkdf2 iteration count when the method string omits it.
DEFAULT_PBKDF2_ITERATIONS = 1_000_000

# Path separators other than "/" that the current OS understands;
# safe_join treats any of these in user input as an escape attempt.
_os_alt_seps: list[str] = [
    sep for sep in (os.sep, os.path.altsep) if sep is not None and sep != "/"
]


def gen_salt(length: int) -> str:
    """Generate a random string of SALT_CHARS with specified ``length``."""
    if length <= 0:
        raise ValueError("Salt length must be at least 1.")

    # secrets.choice draws from the CSPRNG, suitable for security use.
    picked = [secrets.choice(SALT_CHARS) for _ in range(length)]
    return "".join(picked)
|
||||
|
||||
|
||||
def _hash_internal(method: str, salt: str, password: str) -> tuple[str, str]:
|
||||
method, *args = method.split(":")
|
||||
salt_bytes = salt.encode()
|
||||
password_bytes = password.encode()
|
||||
|
||||
if method == "scrypt":
|
||||
if not args:
|
||||
n = 2**15
|
||||
r = 8
|
||||
p = 1
|
||||
else:
|
||||
try:
|
||||
n, r, p = map(int, args)
|
||||
except ValueError:
|
||||
raise ValueError("'scrypt' takes 3 arguments.") from None
|
||||
|
||||
maxmem = 132 * n * r * p # ideally 128, but some extra seems needed
|
||||
return (
|
||||
hashlib.scrypt(
|
||||
password_bytes, salt=salt_bytes, n=n, r=r, p=p, maxmem=maxmem
|
||||
).hex(),
|
||||
f"scrypt:{n}:{r}:{p}",
|
||||
)
|
||||
elif method == "pbkdf2":
|
||||
len_args = len(args)
|
||||
|
||||
if len_args == 0:
|
||||
hash_name = "sha256"
|
||||
iterations = DEFAULT_PBKDF2_ITERATIONS
|
||||
elif len_args == 1:
|
||||
hash_name = args[0]
|
||||
iterations = DEFAULT_PBKDF2_ITERATIONS
|
||||
elif len_args == 2:
|
||||
hash_name = args[0]
|
||||
iterations = int(args[1])
|
||||
else:
|
||||
raise ValueError("'pbkdf2' takes 2 arguments.")
|
||||
|
||||
return (
|
||||
hashlib.pbkdf2_hmac(
|
||||
hash_name, password_bytes, salt_bytes, iterations
|
||||
).hex(),
|
||||
f"pbkdf2:{hash_name}:{iterations}",
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Invalid hash method '{method}'.")
|
||||
|
||||
|
||||
def generate_password_hash(
    password: str, method: str = "scrypt", salt_length: int = 16
) -> str:
    """Securely hash a password for storage. A password can be compared to a stored hash
    using :func:`check_password_hash`.

    The following methods are supported:

    - ``scrypt``, the default. The parameters are ``n``, ``r``, and ``p``, the default
      is ``scrypt:32768:8:1``. See :func:`hashlib.scrypt`.
    - ``pbkdf2``, less secure. The parameters are ``hash_method`` and ``iterations``,
      the default is ``pbkdf2:sha256:1000000``. See :func:`hashlib.pbkdf2_hmac`.

    Default parameters may be updated to reflect current guidelines, and methods may be
    deprecated and removed if they are no longer considered secure. To migrate old
    hashes, you may generate a new hash when checking an old hash, or you may contact
    users with a link to reset their password.

    :param password: The plaintext password.
    :param method: The key derivation function and parameters.
    :param salt_length: The number of characters to generate for the salt.

    .. versionchanged:: 3.1
        The default iterations for pbkdf2 was increased to 1,000,000.

    .. versionchanged:: 2.3
        Scrypt support was added.

    .. versionchanged:: 2.3
        The default iterations for pbkdf2 was increased to 600,000.

    .. versionchanged:: 2.3
        All plain hashes are deprecated and will not be supported in Werkzeug 3.0.
    """
    # Fresh random salt per hash. The fully-parameterized method string
    # returned by _hash_internal is embedded in the result so
    # check_password_hash can reproduce the exact derivation later.
    salt = gen_salt(salt_length)
    h, actual_method = _hash_internal(method, salt, password)
    return f"{actual_method}${salt}${h}"
|
||||
|
||||
|
||||
def check_password_hash(pwhash: str, password: str) -> bool:
    """Securely check that the given stored password hash, previously generated using
    :func:`generate_password_hash`, matches the given password.

    Methods may be deprecated and removed if they are no longer considered secure. To
    migrate old hashes, you may generate a new hash when checking an old hash, or you
    may contact users with a link to reset their password.

    :param pwhash: The hashed password.
    :param password: The plaintext password.

    .. versionchanged:: 2.3
        All plain hashes are deprecated and will not be supported in Werkzeug 3.0.
    """
    # A valid stored hash is exactly "method$salt$hexdigest".
    pieces = pwhash.split("$", 2)

    if len(pieces) != 3:
        return False

    method, salt, hashval = pieces
    # compare_digest avoids timing side channels on the comparison.
    return hmac.compare_digest(_hash_internal(method, salt, password)[0], hashval)
|
||||
|
||||
|
||||
def safe_join(directory: str, *pathnames: str) -> str | None:
    """Safely join zero or more untrusted path components to a base
    directory to avoid escaping the base directory.

    :param directory: The trusted base directory.
    :param pathnames: The untrusted path components relative to the
        base directory.
    :return: A safe path, otherwise ``None``.
    """
    if not directory:
        # Ensure we end up with ./path if directory="" is given,
        # otherwise the first untrusted part could become trusted.
        directory = "."

    parts = [directory]

    for filename in pathnames:
        if filename != "":
            filename = posixpath.normpath(filename)

        escapes_base = (
            any(sep in filename for sep in _os_alt_seps)
            or os.path.isabs(filename)
            # ntpath.isabs doesn't catch this on Python < 3.11
            or filename.startswith("/")
            or filename == ".."
            or filename.startswith("../")
        )

        if escapes_base:
            return None

        parts.append(filename)

    return posixpath.join(*parts)
|
||||
1125
lib/python3.11/site-packages/werkzeug/serving.py
Normal file
1125
lib/python3.11/site-packages/werkzeug/serving.py
Normal file
File diff suppressed because it is too large
Load Diff
1464
lib/python3.11/site-packages/werkzeug/test.py
Normal file
1464
lib/python3.11/site-packages/werkzeug/test.py
Normal file
File diff suppressed because it is too large
Load Diff
194
lib/python3.11/site-packages/werkzeug/testapp.py
Normal file
194
lib/python3.11/site-packages/werkzeug/testapp.py
Normal file
@ -0,0 +1,194 @@
|
||||
"""A small application that can be used to test a WSGI server and check
|
||||
it for WSGI compliance.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.metadata
|
||||
import os
|
||||
import sys
|
||||
import typing as t
|
||||
from textwrap import wrap
|
||||
|
||||
from markupsafe import escape
|
||||
|
||||
from .wrappers.request import Request
|
||||
from .wrappers.response import Response
|
||||
|
||||
TEMPLATE = """\
|
||||
<!doctype html>
|
||||
<html lang=en>
|
||||
<title>WSGI Information</title>
|
||||
<style type="text/css">
|
||||
@import url(https://fonts.googleapis.com/css?family=Ubuntu);
|
||||
|
||||
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
|
||||
'Verdana', sans-serif; background-color: white; color: #000;
|
||||
font-size: 15px; text-align: center; }
|
||||
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
|
||||
background-color: white; }
|
||||
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
|
||||
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
|
||||
h1 { margin: 0 0 30px 0; }
|
||||
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
|
||||
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
|
||||
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
|
||||
font-weight: normal; width: 18em; vertical-align: top;
|
||||
padding: 0.5em 0 0.1em 0.5em; }
|
||||
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
|
||||
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
|
||||
monospace; font-size: 0.7em; }
|
||||
ul li { line-height: 1.5em; }
|
||||
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
|
||||
list-style: none; background: #E8EFF0; }
|
||||
ul.path li { line-height: 1.6em; }
|
||||
li.virtual { color: #999; text-decoration: underline; }
|
||||
li.exp { background: white; }
|
||||
</style>
|
||||
<div class="box">
|
||||
<h1>WSGI Information</h1>
|
||||
<p>
|
||||
This page displays all available information about the WSGI server and
|
||||
the underlying Python interpreter.
|
||||
<h2 id="python-interpreter">Python Interpreter</h2>
|
||||
<table>
|
||||
<tr>
|
||||
<th>Python Version
|
||||
<td>%(python_version)s
|
||||
<tr>
|
||||
<th>Platform
|
||||
<td>%(platform)s [%(os)s]
|
||||
<tr>
|
||||
<th>API Version
|
||||
<td>%(api_version)s
|
||||
<tr>
|
||||
<th>Byteorder
|
||||
<td>%(byteorder)s
|
||||
<tr>
|
||||
<th>Werkzeug Version
|
||||
<td>%(werkzeug_version)s
|
||||
</table>
|
||||
<h2 id="wsgi-environment">WSGI Environment</h2>
|
||||
<table>%(wsgi_env)s</table>
|
||||
<h2 id="installed-eggs">Installed Eggs</h2>
|
||||
<p>
|
||||
The following python packages were installed on the system as
|
||||
Python eggs:
|
||||
<ul>%(python_eggs)s</ul>
|
||||
<h2 id="sys-path">System Path</h2>
|
||||
<p>
|
||||
The following paths are the current contents of the load path. The
|
||||
following entries are looked up for Python packages. Note that not
|
||||
all items in this path are folders. Gray and underlined items are
|
||||
entries pointing to invalid resources or used by custom import hooks
|
||||
such as the zip importer.
|
||||
<p>
|
||||
Items with a bright background were expanded for display from a relative
|
||||
path. If you encounter such paths in the output you might want to check
|
||||
your setup as relative paths are usually problematic in multithreaded
|
||||
environments.
|
||||
<ul class="path">%(sys_path)s</ul>
|
||||
</div>
|
||||
"""
|
||||
|
||||
|
||||
def iter_sys_path() -> t.Iterator[tuple[str, bool, bool]]:
    """Yield ``(display_path, is_missing, was_expanded)`` for every entry
    on :data:`sys.path`.

    ``display_path`` abbreviates the user's home directory to ``~`` on
    POSIX, ``is_missing`` is True when the entry is not an existing
    directory, and ``was_expanded`` is True when the entry was relative
    and had to be resolved against the current working directory.
    """
    if os.name == "posix":

        def strip(x: str) -> str:
            # Abbreviate paths under the user's home directory.
            prefix = os.path.expanduser("~")
            if x.startswith(prefix):
                return f"~{x[len(prefix) :]}"
            return x

    else:

        def strip(x: str) -> str:
            return x

    cwd = os.path.abspath(os.getcwd())

    for item in sys.path:
        # Empty entries mean the current directory.
        path = os.path.join(cwd, item or os.path.curdir)
        yield strip(os.path.normpath(path)), not os.path.isdir(path), path != item
|
||||
|
||||
|
||||
@Request.application
def test_app(req: Request) -> Response:
    """Simple test application that dumps the environment. You can use
    it to check if Werkzeug is working properly:

    .. sourcecode:: pycon

        >>> from werkzeug.serving import run_simple
        >>> from werkzeug.testapp import test_app
        >>> run_simple('localhost', 3000, test_app)
         * Running on http://localhost:3000/

    The application displays important information from the WSGI environment,
    the Python interpreter and the installed libraries.
    """
    # pkg_resources is deprecated and may be absent on newer installs;
    # fall back to an empty egg list rather than failing the whole page.
    try:
        import pkg_resources
    except ImportError:
        eggs: t.Iterable[t.Any] = ()
    else:
        eggs = sorted(
            pkg_resources.working_set,
            key=lambda x: x.project_name.lower(),
        )
    python_eggs = []
    for egg in eggs:
        try:
            version = egg.version
        except (ValueError, AttributeError):
            version = "unknown"
        python_eggs.append(
            f"<li>{escape(egg.project_name)} <small>[{escape(version)}]</small>"
        )

    wsgi_env = []
    # Sort on repr() so bytes/str keys compare without raising.
    sorted_environ = sorted(req.environ.items(), key=lambda x: repr(x[0]).lower())
    for key, value in sorted_environ:
        # repr() then escape so control characters and markup are safe;
        # wrap() breaks very long values across lines.
        value = "".join(wrap(str(escape(repr(value)))))
        wsgi_env.append(f"<tr><th>{escape(key)}<td><code>{value}</code>")

    sys_path = []
    for item, virtual, expanded in iter_sys_path():
        css = []
        if virtual:
            css.append("virtual")
        if expanded:
            css.append("exp")
        class_str = f' class="{" ".join(css)}"' if css else ""
        sys_path.append(f"<li{class_str}>{escape(item)}")

    # Fill the %-style TEMPLATE; every value above is already escaped.
    context = {
        "python_version": "<br>".join(escape(sys.version).splitlines()),
        "platform": escape(sys.platform),
        "os": escape(os.name),
        "api_version": sys.api_version,
        "byteorder": sys.byteorder,
        "werkzeug_version": _get_werkzeug_version(),
        "python_eggs": "\n".join(python_eggs),
        "wsgi_env": "\n".join(wsgi_env),
        "sys_path": "\n".join(sys_path),
    }
    return Response(TEMPLATE % context, mimetype="text/html")
|
||||
|
||||
|
||||
# Cached Werkzeug version string; filled in lazily on first use.
_werkzeug_version = ""


def _get_werkzeug_version() -> str:
    """Return the installed Werkzeug version, caching the metadata lookup."""
    global _werkzeug_version

    if _werkzeug_version == "":
        # importlib.metadata reads the installed distribution's metadata;
        # do it once and memoize the result.
        _werkzeug_version = importlib.metadata.version("werkzeug")

    return _werkzeug_version
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow running this module directly to serve the test page locally.
    from .serving import run_simple

    run_simple("localhost", 5000, test_app, use_reloader=True)
|
||||
203
lib/python3.11/site-packages/werkzeug/urls.py
Normal file
203
lib/python3.11/site-packages/werkzeug/urls.py
Normal file
@ -0,0 +1,203 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import codecs
|
||||
import re
|
||||
import typing as t
|
||||
import urllib.parse
|
||||
from urllib.parse import quote
|
||||
from urllib.parse import unquote
|
||||
from urllib.parse import urlencode
|
||||
from urllib.parse import urlsplit
|
||||
from urllib.parse import urlunsplit
|
||||
|
||||
from .datastructures import iter_multi_items
|
||||
|
||||
|
||||
def _codec_error_url_quote(e: UnicodeError) -> tuple[str, int]:
|
||||
"""Used in :func:`uri_to_iri` after unquoting to re-quote any
|
||||
invalid bytes.
|
||||
"""
|
||||
# the docs state that UnicodeError does have these attributes,
|
||||
# but mypy isn't picking them up
|
||||
out = quote(e.object[e.start : e.end], safe="") # type: ignore
|
||||
return out, e.end # type: ignore
|
||||
|
||||
|
||||
codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)
|
||||
|
||||
|
||||
def _make_unquote_part(name: str, chars: str) -> t.Callable[[str], str]:
|
||||
"""Create a function that unquotes all percent encoded characters except those
|
||||
given. This allows working with unquoted characters if possible while not changing
|
||||
the meaning of a given part of a URL.
|
||||
"""
|
||||
choices = "|".join(f"{ord(c):02X}" for c in sorted(chars))
|
||||
pattern = re.compile(f"((?:%(?:{choices}))+)", re.I)
|
||||
|
||||
def _unquote_partial(value: str) -> str:
|
||||
parts = iter(pattern.split(value))
|
||||
out = []
|
||||
|
||||
for part in parts:
|
||||
out.append(unquote(part, "utf-8", "werkzeug.url_quote"))
|
||||
out.append(next(parts, ""))
|
||||
|
||||
return "".join(out)
|
||||
|
||||
_unquote_partial.__name__ = f"_unquote_{name}"
|
||||
return _unquote_partial
|
||||
|
||||
|
||||
# characters that should remain quoted in URL parts
# based on https://url.spec.whatwg.org/#percent-encoded-bytes
# always keep all controls, space, and % quoted
_always_unsafe = bytes((*range(0x21), 0x25, 0x7F)).decode()
# Per-part unquoters: each additionally keeps percent-encoded the
# characters that would change the meaning of that URL part.
_unquote_fragment = _make_unquote_part("fragment", _always_unsafe)
_unquote_query = _make_unquote_part("query", _always_unsafe + "&=+#")
_unquote_path = _make_unquote_part("path", _always_unsafe + "/?#")
_unquote_user = _make_unquote_part("user", _always_unsafe + ":@/?#")
|
||||
|
||||
|
||||
def uri_to_iri(uri: str) -> str:
    """Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
    leaving all reserved and invalid characters quoted. If the URL has
    a domain, it is decoded from Punycode.

    >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
    'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'

    :param uri: The URI to convert.

    .. versionchanged:: 3.0
        Passing a tuple or bytes, and the ``charset`` and ``errors`` parameters,
        are removed.

    .. versionchanged:: 2.3
        Which characters remain quoted is specific to each part of the URL.

    .. versionchanged:: 0.15
        All reserved and invalid characters remain quoted. Previously,
        only some reserved characters were preserved, and invalid bytes
        were replaced instead of left quoted.

    .. versionadded:: 0.6
    """
    parts = urlsplit(uri)
    # Each part uses its own unquoter so delimiters stay percent-encoded.
    path = _unquote_path(parts.path)
    query = _unquote_query(parts.query)
    fragment = _unquote_fragment(parts.fragment)

    if parts.hostname:
        netloc = _decode_idna(parts.hostname)
    else:
        netloc = ""

    # A ":" in the decoded host means an IPv6 literal; re-add the
    # brackets that urlsplit stripped.
    if ":" in netloc:
        netloc = f"[{netloc}]"

    if parts.port:
        netloc = f"{netloc}:{parts.port}"

    if parts.username:
        auth = _unquote_user(parts.username)

        if parts.password:
            password = _unquote_user(parts.password)
            auth = f"{auth}:{password}"

        netloc = f"{auth}@{netloc}"

    return urlunsplit((parts.scheme, netloc, path, query, fragment))
|
||||
|
||||
|
||||
def iri_to_uri(iri: str) -> str:
    """Convert an IRI to a URI. All non-ASCII and unsafe characters are
    quoted. If the URL has a domain, it is encoded to Punycode.

    >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
    'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'

    :param iri: The IRI to convert.

    .. versionchanged:: 3.0
        Passing a tuple or bytes, the ``charset`` and ``errors`` parameters,
        and the ``safe_conversion`` parameter, are removed.

    .. versionchanged:: 2.3
        Which characters remain unquoted is specific to each part of the URL.

    .. versionchanged:: 0.15
        All reserved characters remain unquoted. Previously, only some reserved
        characters were left unquoted.

    .. versionchanged:: 0.9.6
        The ``safe_conversion`` parameter was added.

    .. versionadded:: 0.6
    """
    split = urlsplit(iri)
    # Non-ASCII host names become Punycode via the "idna" codec.
    host = split.hostname.encode("idna").decode("ascii") if split.hostname else ""

    if ":" in host:
        # IPv6 literals must be bracketed in the netloc.
        host = f"[{host}]"

    if split.port:
        host = f"{host}:{split.port}"

    if split.username:
        userinfo = quote(split.username, safe="%!$&'()*+,;=")

        if split.password:
            pw = quote(split.password, safe="%!$&'()*+,;=")
            userinfo = f"{userinfo}:{pw}"

        host = f"{userinfo}@{host}"

    # safe sets follow https://url.spec.whatwg.org/#url-path-segment-string,
    # plus "%" so sequences that are already quoted stay untouched.
    return urlunsplit(
        (
            split.scheme,
            host,
            quote(split.path, safe="%!$&'()*+,/:;=@"),
            quote(split.query, safe="%!$&'()*+,/:;=?@"),
            quote(split.fragment, safe="%!#$&'()*+,/:;=?@"),
        )
    )
|
||||
|
||||
|
||||
# Python < 3.12
# itms-services was worked around in previous iri_to_uri implementations, but
# we can tell Python directly that it needs to preserve the // after the
# scheme by registering the scheme with urllib.
if "itms-services" not in urllib.parse.uses_netloc:
    urllib.parse.uses_netloc.append("itms-services")
|
||||
|
||||
|
||||
def _decode_idna(domain: str) -> str:
|
||||
try:
|
||||
data = domain.encode("ascii")
|
||||
except UnicodeEncodeError:
|
||||
# If the domain is not ASCII, it's decoded already.
|
||||
return domain
|
||||
|
||||
try:
|
||||
# Try decoding in one shot.
|
||||
return data.decode("idna")
|
||||
except UnicodeDecodeError:
|
||||
pass
|
||||
|
||||
# Decode each part separately, leaving invalid parts as punycode.
|
||||
parts = []
|
||||
|
||||
for part in data.split(b"."):
|
||||
try:
|
||||
parts.append(part.decode("idna"))
|
||||
except UnicodeDecodeError:
|
||||
parts.append(part.decode("ascii"))
|
||||
|
||||
return ".".join(parts)
|
||||
|
||||
|
||||
def _urlencode(query: t.Mapping[str, str] | t.Iterable[tuple[str, str]]) -> str:
    """Encode a mapping or iterable of pairs as a query string, skipping
    pairs whose value is ``None``.
    """
    pairs = [(key, value) for key, value in iter_multi_items(query) if value is not None]
    # safe = https://url.spec.whatwg.org/#percent-encoded-bytes
    return urlencode(pairs, safe="!$'()*,/:;?@")
|
||||
47
lib/python3.11/site-packages/werkzeug/user_agent.py
Normal file
47
lib/python3.11/site-packages/werkzeug/user_agent.py
Normal file
@ -0,0 +1,47 @@
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
class UserAgent:
    """A parsed ``User-Agent`` header value.

    This base implementation performs no parsing; only the
    :attr:`string` attribute is set. A subclass may parse the string to
    set the common attributes or expose other information. Set
    :attr:`werkzeug.wrappers.Request.user_agent_class` to use a
    subclass.

    :param string: The header value to parse.

    .. versionadded:: 2.0
        This replaces the previous ``useragents`` module, but does not
        provide a built-in parser.
    """

    # Each of these stays ``None`` unless a parsing subclass fills it in.
    platform: str | None = None  # OS name
    browser: str | None = None  # browser name
    version: str | None = None  # browser version
    language: str | None = None  # browser language

    def __init__(self, string: str) -> None:
        # The original, unparsed header value.
        self.string: str = string

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.browser}/{self.version}>"

    def __str__(self) -> str:
        return self.string

    def __bool__(self) -> bool:
        # Truthy only when a parser identified the browser.
        return bool(self.browser)

    def to_header(self) -> str:
        """Serialize back to a header value (the original string)."""
        return self.string
|
||||
691
lib/python3.11/site-packages/werkzeug/utils.py
Normal file
691
lib/python3.11/site-packages/werkzeug/utils.py
Normal file
@ -0,0 +1,691 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import mimetypes
|
||||
import os
|
||||
import pkgutil
|
||||
import re
|
||||
import sys
|
||||
import typing as t
|
||||
import unicodedata
|
||||
from datetime import datetime
|
||||
from time import time
|
||||
from urllib.parse import quote
|
||||
from zlib import adler32
|
||||
|
||||
from markupsafe import escape
|
||||
|
||||
from ._internal import _DictAccessorProperty
|
||||
from ._internal import _missing
|
||||
from ._internal import _TAccessorValue
|
||||
from .datastructures import Headers
|
||||
from .exceptions import NotFound
|
||||
from .exceptions import RequestedRangeNotSatisfiable
|
||||
from .security import safe_join
|
||||
from .wsgi import wrap_file
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
from .wrappers.request import Request
|
||||
from .wrappers.response import Response
|
||||
|
||||
# Generic type variable used by cached_property for its value type.
_T = t.TypeVar("_T")

# Matches an HTML/XML entity such as "&amp;"; the name is captured
# without the surrounding "&" and ";".
_entity_re = re.compile(r"&([^;]+);")
# Characters stripped from filenames by secure_filename: anything that
# is not an ASCII letter, digit, "_", "." or "-".
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
# Reserved device file names on Windows (compared case-insensitively in
# secure_filename): CON, PRN, AUX, NUL, COM0-COM9, LPT0-LPT9.
_windows_device_files = {
    "CON",
    "PRN",
    "AUX",
    "NUL",
    *(f"COM{i}" for i in range(10)),
    *(f"LPT{i}" for i in range(10)),
}
|
||||
|
||||
|
||||
class cached_property(property, t.Generic[_T]):
    """A :func:`property` that is only evaluated once. Subsequent access
    returns the cached value. Setting the property sets the cached
    value. Deleting the property clears the cached value, accessing it
    again will evaluate it again.

    .. code-block:: python

        class Example:
            @cached_property
            def value(self):
                # calculate something important here
                return 42

        e = Example()
        e.value  # evaluates
        e.value  # uses cache
        e.value = 16  # sets cache
        del e.value  # clears cache

    If the class defines ``__slots__``, it must add ``_cache_{name}`` as
    a slot. Alternatively, it can add ``__dict__``, but that's usually
    not desirable.

    .. versionchanged:: 2.1
        Works with ``__slots__``.

    .. versionchanged:: 2.0
        ``del obj.name`` clears the cached value.
    """

    def __init__(
        self,
        fget: t.Callable[[t.Any], _T],
        name: str | None = None,
        doc: str | None = None,
    ) -> None:
        super().__init__(fget, doc=doc)
        self.__name__ = name or fget.__name__
        # Attribute name used to store the cached value on slotted
        # classes (classes without a per-instance ``__dict__``).
        self.slot_name = f"_cache_{self.__name__}"
        self.__module__ = fget.__module__

    def __set__(self, obj: object, value: _T) -> None:
        # Instances with a ``__dict__`` cache under the property's own
        # name; slotted instances use the dedicated slot attribute.
        if hasattr(obj, "__dict__"):
            obj.__dict__[self.__name__] = value
        else:
            setattr(obj, self.slot_name, value)

    def __get__(self, obj: object, type: type = None) -> _T:  # type: ignore
        # Accessed on the class rather than an instance: return the
        # descriptor itself, like a plain property.
        if obj is None:
            return self  # type: ignore

        obj_dict = getattr(obj, "__dict__", None)

        # Look up a previously cached value; ``_missing`` is the
        # sentinel for "not cached yet".
        if obj_dict is not None:
            value: _T = obj_dict.get(self.__name__, _missing)
        else:
            value = getattr(obj, self.slot_name, _missing)  # type: ignore[arg-type]

        if value is _missing:
            # First access: evaluate the wrapped function and cache the
            # result in whichever storage the instance supports.
            value = self.fget(obj)  # type: ignore

            if obj_dict is not None:
                obj.__dict__[self.__name__] = value
            else:
                setattr(obj, self.slot_name, value)

        return value

    def __delete__(self, obj: object) -> None:
        # Clearing removes the dict entry, or resets the slot back to
        # the ``_missing`` sentinel so the next access re-evaluates.
        if hasattr(obj, "__dict__"):
            del obj.__dict__[self.__name__]
        else:
            setattr(obj, self.slot_name, _missing)
|
||||
|
||||
|
||||
class environ_property(_DictAccessorProperty[_TAccessorValue]):
    """A property that maps request attributes to WSGI environ keys.
    It works not only for the Werkzeug request object but for any other
    object exposing an ``environ`` attribute:

    >>> class Test(object):
    ...     environ = {'key': 'value'}
    ...     test = environ_property('key')
    >>> var = Test()
    >>> var.test
    'value'

    A second argument acts as the default when the key is missing. A
    third argument may be a converter applied to the raw value; if the
    converter raises :exc:`ValueError` or :exc:`TypeError`, the default
    is used instead. Without an explicit default, `None` is returned.

    The property is read-only by default; pass ``read_only=False`` to
    the constructor to make it writable.
    """

    read_only = True

    def lookup(self, obj: Request) -> WSGIEnvironment:
        # Values are read from (and written to) the request's environ.
        return obj.environ
|
||||
|
||||
|
||||
class header_property(_DictAccessorProperty[_TAccessorValue]):
    """Like :class:`environ_property`, but reads values from the
    object's ``headers`` rather than its WSGI environ.
    """

    def lookup(self, obj: Request | Response) -> Headers:  # type: ignore[override]
        return obj.headers
|
||||
|
||||
|
||||
# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
# https://www.iana.org/assignments/media-types/media-types.xhtml
# Types listed in the XDG mime info that have a charset in the IANA
# registration. get_content_type appends a charset parameter for these
# even though they don't start with "text/".
_charset_mimetypes = {
    "application/ecmascript",
    "application/javascript",
    "application/sql",
    "application/xml",
    "application/xml-dtd",
    "application/xml-external-parsed-entity",
}
|
||||
|
||||
|
||||
def get_content_type(mimetype: str, charset: str) -> str:
    """Return the full content type string with charset for a mimetype.

    If the mimetype represents text, the charset parameter is appended;
    otherwise the mimetype is returned unchanged.

    :param mimetype: The mimetype to be used as content type.
    :param charset: The charset to be appended for text mimetypes.
    :return: The content type.

    .. versionchanged:: 0.15
        Any type that ends with ``+xml`` gets a charset, not just those
        that start with ``application/``. Known text types such as
        ``application/javascript`` are also given charsets.
    """
    # "Text-like" means text/*, a known charset-bearing type, or any
    # XML-based type (*+xml).
    wants_charset = (
        mimetype.startswith("text/")
        or mimetype in _charset_mimetypes
        or mimetype.endswith("+xml")
    )
    return f"{mimetype}; charset={charset}" if wants_charset else mimetype
|
||||
|
||||
|
||||
def secure_filename(filename: str) -> str:
    r"""Pass it a filename and it will return a secure version of it. This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`. The filename returned is an ASCII only string
    for maximum portability.

    On windows systems the function also makes sure that the file is not
    named after one of the special device files.

    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename('i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'

    The function might return an empty filename. It's your responsibility
    to ensure that the filename is unique and that you abort or
    generate a random filename if the function returned an empty one.

    .. versionadded:: 0.5

    :param filename: the filename to secure
    """
    # Normalize combined characters, then drop everything non-ASCII.
    filename = unicodedata.normalize("NFKD", filename)
    filename = filename.encode("ascii", "ignore").decode("ascii")

    # Replace path separators so ".." and directory components can't
    # escape the target directory.
    for sep in os.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, " ")
    filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
        "._"
    )

    # on nt a couple of special files are present in each folder. We
    # have to ensure that the target file is not such a filename. In
    # this case we prepend an underline.
    # BUG FIX: the previous code assigned the literal f"_(unknown)",
    # discarding the sanitized name entirely; the underscore must be
    # prepended to the filename itself.
    if (
        os.name == "nt"
        and filename
        and filename.split(".")[0].upper() in _windows_device_files
    ):
        filename = f"_{filename}"

    return filename
|
||||
|
||||
|
||||
def redirect(
    location: str, code: int = 302, Response: type[Response] | None = None
) -> Response:
    """Return a response object (a WSGI application) that, if called,
    redirects the client to the target location. Supported codes are
    301, 302, 303, 305, 307, and 308. 300 is not supported because
    it's not a real redirect and 304 because it's the answer for a
    request with a request with defined If-Modified-Since headers.

    .. versionadded:: 0.6
        The location can now be a unicode string that is encoded using
        the :func:`iri_to_uri` function.

    .. versionadded:: 0.10
        The class used for the Response object can now be passed in.

    :param location: the location the response should redirect to.
    :param code: the redirect status code. defaults to 302.
    :param class Response: a Response class to use when instantiating a
        response. The default is :class:`werkzeug.wrappers.Response` if
        unspecified.
    """
    if Response is None:
        from .wrappers import Response

    # Escape the location for safe embedding in the fallback HTML body.
    display_location = escape(location)
    body = (
        "<!doctype html>\n"
        "<html lang=en>\n"
        "<title>Redirecting...</title>\n"
        "<h1>Redirecting...</h1>\n"
        "<p>You should be redirected automatically to the target URL: "
        f'<a href="{display_location}">{display_location}</a>. If not, click the link.\n'
    )
    response = Response(body, code, mimetype="text/html")  # type: ignore[misc]
    response.headers["Location"] = location
    return response
|
||||
|
||||
|
||||
def append_slash_redirect(environ: WSGIEnvironment, code: int = 308) -> Response:
    """Redirect to the current URL with a slash appended.

    If the current URL is ``/user/42``, the redirect URL will be
    ``42/``. When joined to the current URL during response
    processing or by the browser, this will produce ``/user/42/``.

    The behavior is undefined if the path ends with a slash already. If
    called unconditionally on a URL, it may produce a redirect loop.

    :param environ: Use the path and query from this WSGI environment
        to produce the redirect URL.
    :param code: the status code for the redirect.

    .. versionchanged:: 2.1
        Produce a relative URL that only modifies the last segment.
        Relevant when the current path has multiple segments.

    .. versionchanged:: 2.1
        The default status code is 308 instead of 301. This preserves
        the request method and body.
    """
    # Only the last path segment is needed for a relative redirect.
    last_segment = environ["PATH_INFO"].rpartition("/")[2]
    target = f"{last_segment}/" if last_segment else "./"

    query_string = environ.get("QUERY_STRING")

    if query_string:
        target = f"{target}?{query_string}"

    return redirect(target, code)
|
||||
|
||||
|
||||
def send_file(
    path_or_file: os.PathLike[str] | str | t.IO[bytes],
    environ: WSGIEnvironment,
    mimetype: str | None = None,
    as_attachment: bool = False,
    download_name: str | None = None,
    conditional: bool = True,
    etag: bool | str = True,
    last_modified: datetime | int | float | None = None,
    max_age: None | (int | t.Callable[[str | None], int | None]) = None,
    use_x_sendfile: bool = False,
    response_class: type[Response] | None = None,
    _root_path: os.PathLike[str] | str | None = None,
) -> Response:
    """Send the contents of a file to the client.

    The first argument can be a file path or a file-like object. Paths
    are preferred in most cases because Werkzeug can manage the file and
    get extra information from the path. Passing a file-like object
    requires that the file is opened in binary mode, and is mostly
    useful when building a file in memory with :class:`io.BytesIO`.

    Never pass file paths provided by a user. The path is assumed to be
    trusted, so a user could craft a path to access a file you didn't
    intend. Use :func:`send_from_directory` to safely serve user-provided paths.

    If the WSGI server sets a ``file_wrapper`` in ``environ``, it is
    used, otherwise Werkzeug's built-in wrapper is used. Alternatively,
    if the HTTP server supports ``X-Sendfile``, ``use_x_sendfile=True``
    will tell the server to send the given path, which is much more
    efficient than reading it in Python.

    :param path_or_file: The path to the file to send, relative to the
        current working directory if a relative path is given.
        Alternatively, a file-like object opened in binary mode. Make
        sure the file pointer is seeked to the start of the data.
    :param environ: The WSGI environ for the current request.
    :param mimetype: The MIME type to send for the file. If not
        provided, it will try to detect it from the file name.
    :param as_attachment: Indicate to a browser that it should offer to
        save the file instead of displaying it.
    :param download_name: The default name browsers will use when saving
        the file. Defaults to the passed file name.
    :param conditional: Enable conditional and range responses based on
        request headers. Requires passing a file path and ``environ``.
    :param etag: Calculate an ETag for the file, which requires passing
        a file path. Can also be a string to use instead.
    :param last_modified: The last modified time to send for the file,
        in seconds. If not provided, it will try to detect it from the
        file path.
    :param max_age: How long the client should cache the file, in
        seconds. If set, ``Cache-Control`` will be ``public``, otherwise
        it will be ``no-cache`` to prefer conditional caching.
    :param use_x_sendfile: Set the ``X-Sendfile`` header to let the
        server to efficiently send the file. Requires support from the
        HTTP server. Requires passing a file path.
    :param response_class: Build the response using this class. Defaults
        to :class:`~werkzeug.wrappers.Response`.
    :param _root_path: Do not use. For internal use only. Use
        :func:`send_from_directory` to safely send files under a path.

    .. versionchanged:: 2.0.2
        ``send_file`` only sets a detected ``Content-Encoding`` if
        ``as_attachment`` is disabled.

    .. versionadded:: 2.0
        Adapted from Flask's implementation.

    .. versionchanged:: 2.0
        ``download_name`` replaces Flask's ``attachment_filename``
        parameter. If ``as_attachment=False``, it is passed with
        ``Content-Disposition: inline`` instead.

    .. versionchanged:: 2.0
        ``max_age`` replaces Flask's ``cache_timeout`` parameter.
        ``conditional`` is enabled and ``max_age`` is not set by
        default.

    .. versionchanged:: 2.0
        ``etag`` replaces Flask's ``add_etags`` parameter. It can be a
        string to use instead of generating one.

    .. versionchanged:: 2.0
        If an encoding is returned when guessing ``mimetype`` from
        ``download_name``, set the ``Content-Encoding`` header.
    """
    if response_class is None:
        from .wrappers import Response

        response_class = Response

    path: str | None = None
    file: t.IO[bytes] | None = None
    size: int | None = None
    mtime: float | None = None
    headers = Headers()

    # A path was given (os.PathLike, str, or anything with __fspath__);
    # otherwise treat the argument as an already-open binary file.
    if isinstance(path_or_file, (os.PathLike, str)) or hasattr(
        path_or_file, "__fspath__"
    ):
        path_or_file = t.cast("t.Union[os.PathLike[str], str]", path_or_file)

        # Flask will pass app.root_path, allowing its send_file wrapper
        # to not have to deal with paths.
        if _root_path is not None:
            path = os.path.join(_root_path, path_or_file)
        else:
            path = os.path.abspath(path_or_file)

        stat = os.stat(path)
        size = stat.st_size
        mtime = stat.st_mtime
    else:
        file = path_or_file

    if download_name is None and path is not None:
        download_name = os.path.basename(path)

    if mimetype is None:
        if download_name is None:
            raise TypeError(
                "Unable to detect the MIME type because a file name is"
                " not available. Either set 'download_name', pass a"
                " path instead of a file, or set 'mimetype'."
            )

        mimetype, encoding = mimetypes.guess_type(download_name)

        if mimetype is None:
            mimetype = "application/octet-stream"

        # Don't send encoding for attachments, it causes browsers to
        # save decompress tar.gz files.
        if encoding is not None and not as_attachment:
            headers.set("Content-Encoding", encoding)

    if download_name is not None:
        try:
            download_name.encode("ascii")
        except UnicodeEncodeError:
            # Non-ASCII name: send an ASCII-stripped "filename" plus the
            # RFC 5987 encoded "filename*" variant.
            simple = unicodedata.normalize("NFKD", download_name)
            simple = simple.encode("ascii", "ignore").decode("ascii")
            # safe = RFC 5987 attr-char
            quoted = quote(download_name, safe="!#$&+-.^_`|~")
            names = {"filename": simple, "filename*": f"UTF-8''{quoted}"}
        else:
            names = {"filename": download_name}

        value = "attachment" if as_attachment else "inline"
        headers.set("Content-Disposition", value, **names)
    elif as_attachment:
        raise TypeError(
            "No name provided for attachment. Either set"
            " 'download_name' or pass a path instead of a file."
        )

    if use_x_sendfile and path is not None:
        # Let the HTTP server send the file; no body data from Python.
        headers["X-Sendfile"] = path
        data = None
    else:
        if file is None:
            file = open(path, "rb")  # type: ignore
        elif isinstance(file, io.BytesIO):
            size = file.getbuffer().nbytes
        elif isinstance(file, io.TextIOBase):
            raise ValueError("Files must be opened in binary mode or use BytesIO.")

        data = wrap_file(environ, file)

    rv = response_class(
        data, mimetype=mimetype, headers=headers, direct_passthrough=True
    )

    if size is not None:
        rv.content_length = size

    if last_modified is not None:
        rv.last_modified = last_modified  # type: ignore
    elif mtime is not None:
        rv.last_modified = mtime  # type: ignore

    rv.cache_control.no_cache = True

    # Flask will pass app.get_send_file_max_age, allowing its send_file
    # wrapper to not have to deal with paths.
    if callable(max_age):
        max_age = max_age(path)

    if max_age is not None:
        if max_age > 0:
            rv.cache_control.no_cache = None
            rv.cache_control.public = True

        rv.cache_control.max_age = max_age
        rv.expires = int(time() + max_age)  # type: ignore

    if isinstance(etag, str):
        rv.set_etag(etag)
    elif etag and path is not None:
        # ETag derived from path checksum, mtime, and size.
        check = adler32(path.encode()) & 0xFFFFFFFF
        rv.set_etag(f"{mtime}-{size}-{check}")

    if conditional:
        try:
            rv = rv.make_conditional(environ, accept_ranges=True, complete_length=size)
        except RequestedRangeNotSatisfiable:
            if file is not None:
                file.close()

            raise

        # Some x-sendfile implementations incorrectly ignore the 304
        # status code and send the file anyway.
        if rv.status_code == 304:
            rv.headers.pop("x-sendfile", None)

    return rv
|
||||
|
||||
|
||||
def send_from_directory(
    directory: os.PathLike[str] | str,
    path: os.PathLike[str] | str,
    environ: WSGIEnvironment,
    **kwargs: t.Any,
) -> Response:
    """Send a file from within a directory using :func:`send_file`.

    This is a secure way to serve files from a folder, such as static
    files or uploads. Uses :func:`~werkzeug.security.safe_join` to
    ensure the path coming from the client is not maliciously crafted to
    point outside the specified directory.

    If the final path does not point to an existing regular file,
    returns a 404 :exc:`~werkzeug.exceptions.NotFound` error.

    :param directory: The directory that ``path`` must be located under. This *must not*
        be a value provided by the client, otherwise it becomes insecure.
    :param path: The path to the file to send, relative to ``directory``. This is the
        part of the path provided by the client, which is checked for security.
    :param environ: The WSGI environ for the current request.
    :param kwargs: Arguments to pass to :func:`send_file`.

    .. versionadded:: 2.0
        Adapted from Flask's implementation.
    """
    # safe_join rejects paths that would escape the directory.
    safe_path = safe_join(os.fspath(directory), os.fspath(path))

    if safe_path is None:
        raise NotFound()

    # Flask will pass app.root_path, allowing its send_from_directory
    # wrapper to not have to deal with paths.
    if "_root_path" in kwargs:
        safe_path = os.path.join(kwargs["_root_path"], safe_path)

    if not os.path.isfile(safe_path):
        raise NotFound()

    return send_file(safe_path, environ, **kwargs)
|
||||
|
||||
|
||||
def import_string(import_name: str, silent: bool = False) -> t.Any:
    """Import an object based on a string. This is useful if you want to
    use import paths as endpoints or something similar. An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
        `None` is returned instead.
    :return: imported object
    """
    # Normalize "module:attr" to plain dotted notation.
    target = import_name.replace(":", ".")

    try:
        try:
            __import__(target)
        except ImportError:
            # Without a dot there is no attribute part to split off, so
            # the failure is final.
            if "." not in target:
                raise
        else:
            # The whole string named an importable module.
            return sys.modules[target]

        # Otherwise treat the last segment as an attribute of the
        # remaining module path.
        module_path, attr_name = target.rsplit(".", 1)
        module = __import__(module_path, globals(), locals(), [attr_name])

        try:
            return getattr(module, attr_name)
        except AttributeError as exc:
            raise ImportError(exc) from None
    except ImportError as exc:
        if not silent:
            raise ImportStringError(target, exc).with_traceback(
                sys.exc_info()[2]
            ) from None

    return None
|
||||
|
||||
|
||||
def find_modules(
    import_path: str, include_packages: bool = False, recursive: bool = False
) -> t.Iterator[str]:
    """Find all the modules below a package. This can be useful to
    automatically import all views / controllers so that their metaclasses /
    function decorators have a chance to register themselves on the
    application.

    Packages are not returned unless `include_packages` is `True`. This can
    also recursively list modules but in that case it will import all the
    packages to get the correct load path of that module.

    :param import_path: the dotted name for the package to find child modules.
    :param include_packages: set to `True` if packages should be returned, too.
    :param recursive: set to `True` if recursion should happen.
    :return: generator
    """
    package = import_string(import_path)
    search_path = getattr(package, "__path__", None)

    # Plain modules have no __path__; only packages can be searched.
    if search_path is None:
        raise ValueError(f"{import_path!r} is not a package")

    prefix = f"{package.__name__}."

    for _importer, name, is_pkg in pkgutil.iter_modules(search_path):
        full_name = prefix + name

        if not is_pkg:
            yield full_name
            continue

        if include_packages:
            yield full_name

        if recursive:
            yield from find_modules(full_name, include_packages, True)
|
||||
|
||||
|
||||
class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""

    #: String in dotted notation that failed to be imported.
    import_name: str
    #: Wrapped exception.
    exception: BaseException

    def __init__(self, import_name: str, exception: BaseException) -> None:
        self.import_name = import_name
        self.exception = exception
        msg = import_name
        partial = ""
        found: list[tuple[str, str | None]] = []

        # Re-import each prefix of the dotted path until one fails, to
        # pinpoint where the import broke.
        for segment in import_name.replace(":", ".").split("."):
            partial = f"{partial}.{segment}" if partial else segment
            candidate = import_string(partial, silent=True)

            if candidate:
                found.append((partial, getattr(candidate, "__file__", None)))
                continue

            lines = [f"- {n!r} found in {i!r}." for n, i in found]
            lines.append(f"- {partial!r} not found.")
            track_str = "\n".join(lines)
            msg = (
                f"import_string() failed for {import_name!r}. Possible reasons"
                f" are:\n\n"
                "- missing __init__.py in a package;\n"
                "- package or module path not included in sys.path;\n"
                "- duplicated package or module name taking precedence in"
                " sys.path;\n"
                "- missing module, class, function or variable;\n\n"
                f"Debugged import:\n\n{track_str}\n\n"
                f"Original exception:\n\n{type(exception).__name__}: {exception}"
            )
            break

        super().__init__(msg)

    def __repr__(self) -> str:
        return f"<{type(self).__name__}({self.import_name!r}, {self.exception!r})>"
|
||||
@ -0,0 +1,3 @@
|
||||
from .request import Request as Request
|
||||
from .response import Response as Response
|
||||
from .response import ResponseStream as ResponseStream
|
||||
650
lib/python3.11/site-packages/werkzeug/wrappers/request.py
Normal file
650
lib/python3.11/site-packages/werkzeug/wrappers/request.py
Normal file
@ -0,0 +1,650 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as cabc
|
||||
import functools
|
||||
import json
|
||||
import typing as t
|
||||
from io import BytesIO
|
||||
|
||||
from .._internal import _wsgi_decoding_dance
|
||||
from ..datastructures import CombinedMultiDict
|
||||
from ..datastructures import EnvironHeaders
|
||||
from ..datastructures import FileStorage
|
||||
from ..datastructures import ImmutableMultiDict
|
||||
from ..datastructures import iter_multi_items
|
||||
from ..datastructures import MultiDict
|
||||
from ..exceptions import BadRequest
|
||||
from ..exceptions import UnsupportedMediaType
|
||||
from ..formparser import default_stream_factory
|
||||
from ..formparser import FormDataParser
|
||||
from ..sansio.request import Request as _SansIORequest
|
||||
from ..utils import cached_property
|
||||
from ..utils import environ_property
|
||||
from ..wsgi import _get_server
|
||||
from ..wsgi import get_input_stream
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
|
||||
class Request(_SansIORequest):
    """Represents an incoming WSGI HTTP request, with headers and body
    taken from the WSGI environment. Has properties and methods for
    using the functionality defined by various HTTP specs. The data in
    request objects is read-only.

    Text data is assumed to use UTF-8 encoding, which should be true for
    the vast majority of modern clients. Using an encoding set by the
    client is unsafe in Python due to extra encodings it provides, such
    as ``zip``. To change the assumed encoding, subclass and replace
    :attr:`charset`.

    :param environ: The WSGI environ is generated by the WSGI server and
        contains information about the server configuration and client
        request.
    :param populate_request: Add this request object to the WSGI environ
        as ``environ['werkzeug.request']``. Can be useful when
        debugging.
    :param shallow: Makes reading from :attr:`stream` (and any method
        that would read from it) raise a :exc:`RuntimeError`. Useful to
        prevent consuming the form data in middleware, which would make
        it unavailable to the final application.

    .. versionchanged:: 3.0
        The ``charset``, ``url_charset``, and ``encoding_errors`` parameters
        were removed.

    .. versionchanged:: 2.1
        Old ``BaseRequest`` and mixin classes were removed.

    .. versionchanged:: 2.1
        Remove the ``disable_data_descriptor`` attribute.

    .. versionchanged:: 2.0
        Combine ``BaseRequest`` and mixins into a single ``Request``
        class.

    .. versionchanged:: 0.5
        Read-only mode is enforced with immutable classes for all data.
    """

    #: the maximum content length. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: parsing fails because more than the specified value is transmitted
    #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: .. versionadded:: 0.5
    max_content_length: int | None = None

    #: the maximum form field size. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: data in memory for post data is longer than the specified value a
    #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: .. versionchanged:: 3.1
    #:     Defaults to 500kB instead of unlimited.
    #:
    #: .. versionadded:: 0.5
    max_form_memory_size: int | None = 500_000

    #: The maximum number of multipart parts to parse, passed to
    #: :attr:`form_data_parser_class`. Parsing form data with more than this
    #: many parts will raise :exc:`~.RequestEntityTooLarge`.
    #:
    #: .. versionadded:: 2.2.3
    max_form_parts = 1000

    #: The form data parser that should be used. Can be replaced to customize
    #: the form date parsing.
    form_data_parser_class: type[FormDataParser] = FormDataParser

    #: The WSGI environment containing HTTP headers and information from
    #: the WSGI server.
    environ: WSGIEnvironment

    #: Set when creating the request object. If ``True``, reading from
    #: the request body will cause a ``RuntimeException``. Useful to
    #: prevent modifying the stream from middleware.
    shallow: bool

    def __init__(
        self,
        environ: WSGIEnvironment,
        populate_request: bool = True,
        shallow: bool = False,
    ) -> None:
        super().__init__(
            method=environ.get("REQUEST_METHOD", "GET"),
            scheme=environ.get("wsgi.url_scheme", "http"),
            server=_get_server(environ),
            root_path=_wsgi_decoding_dance(environ.get("SCRIPT_NAME") or ""),
            path=_wsgi_decoding_dance(environ.get("PATH_INFO") or ""),
            # Per WSGI, the query string arrives latin1-decoded; re-encode
            # so the sans-IO layer can decode it consistently.
            query_string=environ.get("QUERY_STRING", "").encode("latin1"),
            headers=EnvironHeaders(environ),
            remote_addr=environ.get("REMOTE_ADDR"),
        )
        self.environ = environ
        self.shallow = shallow

        if populate_request and not shallow:
            self.environ["werkzeug.request"] = self

    @classmethod
    def from_values(cls, *args: t.Any, **kwargs: t.Any) -> Request:
        """Create a new request object based on the values provided. If
        environ is given missing values are filled from there. This method is
        useful for small scripts when you need to simulate a request from an URL.
        Do not use this method for unittesting, there is a full featured client
        object (:class:`Client`) that allows to create multipart requests,
        support for cookies etc.

        This accepts the same options as the
        :class:`~werkzeug.test.EnvironBuilder`.

        .. versionchanged:: 0.5
            This method now accepts the same arguments as
            :class:`~werkzeug.test.EnvironBuilder`. Because of this the
            `environ` parameter is now called `environ_overrides`.

        :return: request object
        """
        from ..test import EnvironBuilder

        builder = EnvironBuilder(*args, **kwargs)
        try:
            return builder.get_request(cls)
        finally:
            builder.close()

    @classmethod
    def application(cls, f: t.Callable[[Request], WSGIApplication]) -> WSGIApplication:
        """Decorate a function as responder that accepts the request as
        the last argument. This works like the :func:`responder`
        decorator but the function is passed the request object as the
        last argument and the request object will be closed
        automatically::

            @Request.application
            def my_wsgi_app(request):
                return Response('Hello World!')

        As of Werkzeug 0.14 HTTP exceptions are automatically caught and
        converted to responses instead of failing.

        :param f: the WSGI callable to decorate
        :return: a new WSGI callable
        """
        #: return a callable that wraps the -2nd argument with the request
        #: and calls the function with all the arguments up to that one and
        #: the request. The return value is then called with the latest
        #: two arguments. This makes it possible to use this decorator for
        #: both standalone WSGI functions as well as bound methods and
        #: partially applied functions.
        from ..exceptions import HTTPException

        @functools.wraps(f)
        def application(*args: t.Any) -> cabc.Iterable[bytes]:
            # args[-2] is the WSGI environ, args[-1] is start_response;
            # anything before that (e.g. ``self``) is passed through.
            request = cls(args[-2])
            with request:
                try:
                    resp = f(*args[:-2] + (request,))
                except HTTPException as e:
                    resp = t.cast("WSGIApplication", e.get_response(args[-2]))
                return resp(*args[-2:])

        return t.cast("WSGIApplication", application)

    def _get_file_stream(
        self,
        total_content_length: int | None,
        content_type: str | None,
        filename: str | None = None,
        content_length: int | None = None,
    ) -> t.IO[bytes]:
        """Called to get a stream for the file upload.

        This must provide a file-like class with `read()`, `readline()`
        and `seek()` methods that is both writeable and readable.

        The default implementation returns a temporary file if the total
        content length is higher than 500KB. Because many browsers do not
        provide a content length for the files only the total content
        length matters.

        :param total_content_length: the total content length of all the
                                     data in the request combined. This value
                                     is guaranteed to be there.
        :param content_type: the mimetype of the uploaded file.
        :param filename: the filename of the uploaded file. May be `None`.
        :param content_length: the length of this file. This value is usually
                               not provided because webbrowsers do not provide
                               this value.
        """
        return default_stream_factory(
            total_content_length=total_content_length,
            filename=filename,
            content_type=content_type,
            content_length=content_length,
        )

    @property
    def want_form_data_parsed(self) -> bool:
        """``True`` if the request method carries content. By default
        this is true if a ``Content-Type`` is sent.

        .. versionadded:: 0.8
        """
        return bool(self.environ.get("CONTENT_TYPE"))

    def make_form_data_parser(self) -> FormDataParser:
        """Creates the form data parser. Instantiates the
        :attr:`form_data_parser_class` with some parameters.

        .. versionadded:: 0.8
        """
        return self.form_data_parser_class(
            stream_factory=self._get_file_stream,
            max_form_memory_size=self.max_form_memory_size,
            max_content_length=self.max_content_length,
            max_form_parts=self.max_form_parts,
            cls=self.parameter_storage_class,
        )

    def _load_form_data(self) -> None:
        """Method used internally to retrieve submitted data. After calling
        this sets `form` and `files` on the request object to multi dicts
        filled with the incoming form data. As a matter of fact the input
        stream will be empty afterwards. You can also call this method to
        force the parsing of the form data.

        .. versionadded:: 0.8
        """
        # abort early if we have already consumed the stream
        if "form" in self.__dict__:
            return

        if self.want_form_data_parsed:
            parser = self.make_form_data_parser()
            data = parser.parse(
                self._get_stream_for_parsing(),
                self.mimetype,
                self.content_length,
                self.mimetype_params,
            )
        else:
            # No Content-Type: leave the stream untouched and expose
            # empty form/files containers.
            data = (
                self.stream,
                self.parameter_storage_class(),
                self.parameter_storage_class(),
            )

        # inject the values into the instance dict so that we bypass
        # our cached_property non-data descriptor.
        d = self.__dict__
        d["stream"], d["form"], d["files"] = data

    def _get_stream_for_parsing(self) -> t.IO[bytes]:
        """This is the same as accessing :attr:`stream` with the difference
        that if it finds cached data from calling :meth:`get_data` first it
        will create a new stream out of the cached data.

        .. versionadded:: 0.9.3
        """
        cached_data = getattr(self, "_cached_data", None)
        if cached_data is not None:
            return BytesIO(cached_data)
        return self.stream

    def close(self) -> None:
        """Closes associated resources of this request object. This
        closes all file handles explicitly. You can also use the request
        object in a with statement which will automatically close it.

        .. versionadded:: 0.9
        """
        # __dict__.get avoids triggering form parsing via the ``files``
        # cached_property when the body was never parsed.
        files = self.__dict__.get("files")
        for _key, value in iter_multi_items(files or ()):
            value.close()

    def __enter__(self) -> Request:
        return self

    def __exit__(self, exc_type, exc_value, tb) -> None:  # type: ignore
        self.close()

    @cached_property
    def stream(self) -> t.IO[bytes]:
        """The WSGI input stream, with safety checks. This stream can only be consumed
        once.

        Use :meth:`get_data` to get the full data as bytes or text. The :attr:`data`
        attribute will contain the full bytes only if they do not represent form data.
        The :attr:`form` attribute will contain the parsed form data in that case.

        Unlike :attr:`input_stream`, this stream guards against infinite streams or
        reading past :attr:`content_length` or :attr:`max_content_length`.

        If ``max_content_length`` is set, it can be enforced on streams if
        ``wsgi.input_terminated`` is set. Otherwise, an empty stream is returned.

        If the limit is reached before the underlying stream is exhausted (such as a
        file that is too large, or an infinite stream), the remaining contents of the
        stream cannot be read safely. Depending on how the server handles this, clients
        may show a "connection reset" failure instead of seeing the 413 response.

        .. versionchanged:: 2.3
            Check ``max_content_length`` preemptively and while reading.

        .. versionchanged:: 0.9
            The stream is always set (but may be consumed) even if form parsing was
            accessed first.
        """
        if self.shallow:
            raise RuntimeError(
                "This request was created with 'shallow=True', reading"
                " from the input stream is disabled."
            )

        return get_input_stream(
            self.environ, max_content_length=self.max_content_length
        )

    input_stream = environ_property[t.IO[bytes]](
        "wsgi.input",
        doc="""The raw WSGI input stream, without any safety checks.

        This is dangerous to use. It does not guard against infinite streams or reading
        past :attr:`content_length` or :attr:`max_content_length`.

        Use :attr:`stream` instead.
        """,
    )

    @cached_property
    def data(self) -> bytes:
        """The raw data read from :attr:`stream`. Will be empty if the request
        represents form data.

        To get the raw data even if it represents form data, use :meth:`get_data`.
        """
        return self.get_data(parse_form_data=True)

    @t.overload
    def get_data(
        self,
        cache: bool = True,
        as_text: t.Literal[False] = False,
        parse_form_data: bool = False,
    ) -> bytes: ...

    @t.overload
    def get_data(
        self,
        cache: bool = True,
        as_text: t.Literal[True] = ...,
        parse_form_data: bool = False,
    ) -> str: ...

    def get_data(
        self, cache: bool = True, as_text: bool = False, parse_form_data: bool = False
    ) -> bytes | str:
        """This reads the buffered incoming data from the client into one
        bytes object. By default this is cached but that behavior can be
        changed by setting `cache` to `False`.

        Usually it's a bad idea to call this method without checking the
        content length first as a client could send dozens of megabytes or more
        to cause memory problems on the server.

        Note that if the form data was already parsed this method will not
        return anything as form data parsing does not cache the data like
        this method does. To implicitly invoke form data parsing function
        set `parse_form_data` to `True`. When this is done the return value
        of this method will be an empty string if the form parser handles
        the data. This generally is not necessary as if the whole data is
        cached (which is the default) the form parser will use the cached
        data to parse the form data. Please be generally aware of checking
        the content length first in any case before calling this method
        to avoid exhausting server memory.

        If `as_text` is set to `True` the return value will be a decoded
        string.

        .. versionadded:: 0.9
        """
        rv = getattr(self, "_cached_data", None)
        if rv is None:
            if parse_form_data:
                self._load_form_data()
            rv = self.stream.read()
            if cache:
                self._cached_data = rv
        if as_text:
            rv = rv.decode(errors="replace")
        return rv

    @cached_property
    def form(self) -> ImmutableMultiDict[str, str]:
        """The form parameters. By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.

        Please keep in mind that file uploads will not end up here, but instead
        in the :attr:`files` attribute.

        .. versionchanged:: 0.9

            Previous to Werkzeug 0.9 this would only contain form data for POST
            and PUT requests.
        """
        self._load_form_data()
        # _load_form_data injected "form" into __dict__, so this lookup
        # hits the parsed data rather than recursing into this property.
        return self.form

    @cached_property
    def values(self) -> CombinedMultiDict[str, str]:
        """A :class:`werkzeug.datastructures.CombinedMultiDict` that
        combines :attr:`args` and :attr:`form`.

        For GET requests, only ``args`` are present, not ``form``.

        .. versionchanged:: 2.0
            For GET requests, only ``args`` are present, not ``form``.
        """
        sources = [self.args]

        if self.method != "GET":
            # GET requests can have a body, and some caching proxies
            # might not treat that differently than a normal GET
            # request, allowing form data to "invisibly" affect the
            # cache without indication in the query string / URL.
            sources.append(self.form)

        args = []

        for d in sources:
            if not isinstance(d, MultiDict):
                d = MultiDict(d)

            args.append(d)

        return CombinedMultiDict(args)

    @cached_property
    def files(self) -> ImmutableMultiDict[str, FileStorage]:
        """:class:`~werkzeug.datastructures.MultiDict` object containing
        all uploaded files. Each key in :attr:`files` is the name from the
        ``<input type="file" name="">``. Each value in :attr:`files` is a
        Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.

        It basically behaves like a standard file object you know from Python,
        with the difference that it also has a
        :meth:`~werkzeug.datastructures.FileStorage.save` function that can
        store the file on the filesystem.

        Note that :attr:`files` will only contain data if the request method was
        POST, PUT or PATCH and the ``<form>`` that posted to the request had
        ``enctype="multipart/form-data"``. It will be empty otherwise.

        See the :class:`~werkzeug.datastructures.MultiDict` /
        :class:`~werkzeug.datastructures.FileStorage` documentation for
        more details about the used data structure.
        """
        self._load_form_data()
        # As with ``form``, the parsed value now lives in __dict__.
        return self.files

    @property
    def script_root(self) -> str:
        """Alias for :attr:`self.root_path`. ``environ["SCRIPT_ROOT"]``
        without a trailing slash.
        """
        return self.root_path

    @cached_property
    def url_root(self) -> str:
        """Alias for :attr:`root_url`. The URL with scheme, host, and
        root path. For example, ``https://example.com/app/``.
        """
        return self.root_url

    remote_user = environ_property[str](
        "REMOTE_USER",
        doc="""If the server supports user authentication, and the
        script is protected, this attribute contains the username the
        user has authenticated as.""",
    )
    is_multithread = environ_property[bool](
        "wsgi.multithread",
        doc="""boolean that is `True` if the application is served by a
        multithreaded WSGI server.""",
    )
    is_multiprocess = environ_property[bool](
        "wsgi.multiprocess",
        doc="""boolean that is `True` if the application is served by a
        WSGI server that spawns multiple processes.""",
    )
    is_run_once = environ_property[bool](
        "wsgi.run_once",
        doc="""boolean that is `True` if the application will be
        executed only once in a process lifetime. This is the case for
        CGI for example, but it's not guaranteed that the execution only
        happens one time.""",
    )

    # JSON

    #: A module or other object that has ``dumps`` and ``loads``
    #: functions that match the API of the built-in :mod:`json` module.
    json_module = json

    @property
    def json(self) -> t.Any | None:
        """The parsed JSON data if :attr:`mimetype` indicates JSON
        (:mimetype:`application/json`, see :attr:`is_json`).

        Calls :meth:`get_json` with default arguments.

        If the request content type is not ``application/json``, this
        will raise a 415 Unsupported Media Type error.

        .. versionchanged:: 2.3
            Raise a 415 error instead of 400.

        .. versionchanged:: 2.1
            Raise a 400 error if the content type is incorrect.
        """
        return self.get_json()

    # Cached values for ``(silent=False, silent=True)``. Initialized
    # with sentinel values.
    _cached_json: tuple[t.Any, t.Any] = (Ellipsis, Ellipsis)

    @t.overload
    def get_json(
        self, force: bool = ..., silent: t.Literal[False] = ..., cache: bool = ...
    ) -> t.Any: ...

    @t.overload
    def get_json(
        self, force: bool = ..., silent: bool = ..., cache: bool = ...
    ) -> t.Any | None: ...

    def get_json(
        self, force: bool = False, silent: bool = False, cache: bool = True
    ) -> t.Any | None:
        """Parse :attr:`data` as JSON.

        If the mimetype does not indicate JSON
        (:mimetype:`application/json`, see :attr:`is_json`), or parsing
        fails, :meth:`on_json_loading_failed` is called and
        its return value is used as the return value. By default this
        raises a 415 Unsupported Media Type resp.

        :param force: Ignore the mimetype and always try to parse JSON.
        :param silent: Silence mimetype and parsing errors, and
            return ``None`` instead.
        :param cache: Store the parsed JSON to return for subsequent
            calls.

        .. versionchanged:: 2.3
            Raise a 415 error instead of 400.

        .. versionchanged:: 2.1
            Raise a 400 error if the content type is incorrect.
        """
        # ``silent`` is used as a tuple index: False -> slot 0, True -> slot 1.
        if cache and self._cached_json[silent] is not Ellipsis:
            return self._cached_json[silent]

        if not (force or self.is_json):
            if not silent:
                return self.on_json_loading_failed(None)
            else:
                return None

        data = self.get_data(cache=cache)

        try:
            rv = self.json_module.loads(data)
        except ValueError as e:
            if silent:
                rv = None

                if cache:
                    # Only the ``silent=True`` slot learned anything here.
                    normal_rv, _ = self._cached_json
                    self._cached_json = (normal_rv, rv)
            else:
                rv = self.on_json_loading_failed(e)

                if cache:
                    _, silent_rv = self._cached_json
                    self._cached_json = (rv, silent_rv)
        else:
            if cache:
                self._cached_json = (rv, rv)

        return rv

    def on_json_loading_failed(self, e: ValueError | None) -> t.Any:
        """Called if :meth:`get_json` fails and isn't silenced.

        If this method returns a value, it is used as the return value
        for :meth:`get_json`. The default implementation raises
        :exc:`~werkzeug.exceptions.BadRequest`.

        :param e: If parsing failed, this is the exception. It will be
            ``None`` if the content type wasn't ``application/json``.

        .. versionchanged:: 2.3
            Raise a 415 error instead of 400.
        """
        if e is not None:
            raise BadRequest(f"Failed to decode JSON object: {e}")

        raise UnsupportedMediaType(
            "Did not attempt to load JSON data because the request"
            " Content-Type was not 'application/json'."
        )
|
||||
831
lib/python3.11/site-packages/werkzeug/wrappers/response.py
Normal file
831
lib/python3.11/site-packages/werkzeug/wrappers/response.py
Normal file
@ -0,0 +1,831 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import typing as t
|
||||
from http import HTTPStatus
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from .._internal import _get_environ
|
||||
from ..datastructures import Headers
|
||||
from ..http import generate_etag
|
||||
from ..http import http_date
|
||||
from ..http import is_resource_modified
|
||||
from ..http import parse_etags
|
||||
from ..http import parse_range_header
|
||||
from ..http import remove_entity_headers
|
||||
from ..sansio.response import Response as _SansIOResponse
|
||||
from ..urls import iri_to_uri
|
||||
from ..utils import cached_property
|
||||
from ..wsgi import _RangeWrapper
|
||||
from ..wsgi import ClosingIterator
|
||||
from ..wsgi import get_current_url
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import StartResponse
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
from .request import Request
|
||||
|
||||
|
||||
def _iter_encoded(iterable: t.Iterable[str | bytes]) -> t.Iterator[bytes]:
|
||||
for item in iterable:
|
||||
if isinstance(item, str):
|
||||
yield item.encode()
|
||||
else:
|
||||
yield item
|
||||
|
||||
|
||||
class Response(_SansIOResponse):
|
||||
"""Represents an outgoing WSGI HTTP response with body, status, and
|
||||
headers. Has properties and methods for using the functionality
|
||||
defined by various HTTP specs.
|
||||
|
||||
The response body is flexible to support different use cases. The
|
||||
simple form is passing bytes, or a string which will be encoded as
|
||||
UTF-8. Passing an iterable of bytes or strings makes this a
|
||||
streaming response. A generator is particularly useful for building
|
||||
a CSV file in memory or using SSE (Server Sent Events). A file-like
|
||||
object is also iterable, although the
|
||||
:func:`~werkzeug.utils.send_file` helper should be used in that
|
||||
case.
|
||||
|
||||
The response object is itself a WSGI application callable. When
|
||||
called (:meth:`__call__`) with ``environ`` and ``start_response``,
|
||||
it will pass its status and headers to ``start_response`` then
|
||||
return its body as an iterable.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from werkzeug.wrappers.response import Response
|
||||
|
||||
def index():
|
||||
return Response("Hello, World!")
|
||||
|
||||
def application(environ, start_response):
|
||||
path = environ.get("PATH_INFO") or "/"
|
||||
|
||||
if path == "/":
|
||||
response = index()
|
||||
else:
|
||||
response = Response("Not Found", status=404)
|
||||
|
||||
return response(environ, start_response)
|
||||
|
||||
:param response: The data for the body of the response. A string or
|
||||
bytes, or tuple or list of strings or bytes, for a fixed-length
|
||||
response, or any other iterable of strings or bytes for a
|
||||
streaming response. Defaults to an empty body.
|
||||
:param status: The status code for the response. Either an int, in
|
||||
which case the default status message is added, or a string in
|
||||
the form ``{code} {message}``, like ``404 Not Found``. Defaults
|
||||
to 200.
|
||||
:param headers: A :class:`~werkzeug.datastructures.Headers` object,
|
||||
or a list of ``(key, value)`` tuples that will be converted to a
|
||||
``Headers`` object.
|
||||
:param mimetype: The mime type (content type without charset or
|
||||
other parameters) of the response. If the value starts with
|
||||
``text/`` (or matches some other special cases), the charset
|
||||
will be added to create the ``content_type``.
|
||||
:param content_type: The full content type of the response.
|
||||
Overrides building the value from ``mimetype``.
|
||||
:param direct_passthrough: Pass the response body directly through
|
||||
as the WSGI iterable. This can be used when the body is a binary
|
||||
file or other iterator of bytes, to skip some unnecessary
|
||||
checks. Use :func:`~werkzeug.utils.send_file` instead of setting
|
||||
this manually.
|
||||
|
||||
.. versionchanged:: 2.1
|
||||
Old ``BaseResponse`` and mixin classes were removed.
|
||||
|
||||
.. versionchanged:: 2.0
|
||||
Combine ``BaseResponse`` and mixins into a single ``Response``
|
||||
class.
|
||||
|
||||
.. versionchanged:: 0.5
|
||||
The ``direct_passthrough`` parameter was added.
|
||||
"""
|
||||
|
||||
#: if set to `False` accessing properties on the response object will
|
||||
#: not try to consume the response iterator and convert it into a list.
|
||||
#:
|
||||
#: .. versionadded:: 0.6.2
|
||||
#:
|
||||
#: That attribute was previously called `implicit_seqence_conversion`.
|
||||
#: (Notice the typo). If you did use this feature, you have to adapt
|
||||
#: your code to the name change.
|
||||
implicit_sequence_conversion = True
|
||||
|
||||
#: If a redirect ``Location`` header is a relative URL, make it an
|
||||
#: absolute URL, including scheme and domain.
|
||||
#:
|
||||
#: .. versionchanged:: 2.1
|
||||
#: This is disabled by default, so responses will send relative
|
||||
#: redirects.
|
||||
#:
|
||||
#: .. versionadded:: 0.8
|
||||
autocorrect_location_header = False
|
||||
|
||||
#: Should this response object automatically set the content-length
|
||||
#: header if possible? This is true by default.
|
||||
#:
|
||||
#: .. versionadded:: 0.8
|
||||
automatically_set_content_length = True
|
||||
|
||||
#: The response body to send as the WSGI iterable. A list of strings
|
||||
#: or bytes represents a fixed-length response, any other iterable
|
||||
#: is a streaming response. Strings are encoded to bytes as UTF-8.
|
||||
#:
|
||||
#: Do not set to a plain string or bytes, that will cause sending
|
||||
#: the response to be very inefficient as it will iterate one byte
|
||||
#: at a time.
|
||||
response: t.Iterable[str] | t.Iterable[bytes]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
response: t.Iterable[bytes] | bytes | t.Iterable[str] | str | None = None,
|
||||
status: int | str | HTTPStatus | None = None,
|
||||
headers: t.Mapping[str, str | t.Iterable[str]]
|
||||
| t.Iterable[tuple[str, str]]
|
||||
| None = None,
|
||||
mimetype: str | None = None,
|
||||
content_type: str | None = None,
|
||||
direct_passthrough: bool = False,
|
||||
) -> None:
|
||||
super().__init__(
|
||||
status=status,
|
||||
headers=headers,
|
||||
mimetype=mimetype,
|
||||
content_type=content_type,
|
||||
)
|
||||
|
||||
#: Pass the response body directly through as the WSGI iterable.
|
||||
#: This can be used when the body is a binary file or other
|
||||
#: iterator of bytes, to skip some unnecessary checks. Use
|
||||
#: :func:`~werkzeug.utils.send_file` instead of setting this
|
||||
#: manually.
|
||||
self.direct_passthrough = direct_passthrough
|
||||
self._on_close: list[t.Callable[[], t.Any]] = []
|
||||
|
||||
# we set the response after the headers so that if a class changes
|
||||
# the charset attribute, the data is set in the correct charset.
|
||||
if response is None:
|
||||
self.response = []
|
||||
elif isinstance(response, (str, bytes, bytearray)):
|
||||
self.set_data(response)
|
||||
else:
|
||||
self.response = response
|
||||
|
||||
def call_on_close(self, func: t.Callable[[], t.Any]) -> t.Callable[[], t.Any]:
|
||||
"""Adds a function to the internal list of functions that should
|
||||
be called as part of closing down the response. Since 0.7 this
|
||||
function also returns the function that was passed so that this
|
||||
can be used as a decorator.
|
||||
|
||||
.. versionadded:: 0.6
|
||||
"""
|
||||
self._on_close.append(func)
|
||||
return func
|
||||
|
||||
def __repr__(self) -> str:
    """Debug representation showing body size (or streaming state) and status."""
    if self.is_sequence:
        total = sum(len(chunk) for chunk in self.iter_encoded())
        body_info = f"{total} bytes"
    elif self.is_streamed:
        body_info = "streamed"
    else:
        body_info = "likely-streamed"
    return f"<{type(self).__name__} {body_info} [{self.status}]>"
|
||||
|
||||
@classmethod
def force_type(
    cls, response: Response, environ: WSGIEnvironment | None = None
) -> Response:
    """Enforce that the WSGI response is a response object of the current
    type. Werkzeug will use the :class:`Response` internally in many
    situations like the exceptions. If you call :meth:`get_response` on an
    exception you will get back a regular :class:`Response` object, even
    if you are using a custom subclass.

    This method can enforce a given response type, and it will also
    convert arbitrary WSGI callables into response objects if an environ
    is provided::

        # convert a Werkzeug response object into an instance of the
        # MyResponseClass subclass.
        response = MyResponseClass.force_type(response)

        # convert any WSGI application into a response object
        response = MyResponseClass.force_type(response, environ)

    This is especially useful if you want to post-process responses in
    the main dispatcher and use functionality provided by your subclass.

    Keep in mind that this will modify response objects in place if
    possible!

    :param response: a response object or wsgi application.
    :param environ: a WSGI environment object.
    :return: a response object.
    """
    if not isinstance(response, Response):
        # A bare WSGI callable can only be turned into a response by
        # actually running it, which requires an environ.
        if environ is None:
            raise TypeError(
                "cannot convert WSGI application into response"
                " objects without an environ"
            )

        # Local import: run_wsgi_app lives in werkzeug.test.
        from ..test import run_wsgi_app

        response = Response(*run_wsgi_app(response, environ))

    # Re-brand the (possibly freshly built) response as the target
    # subclass in place, without copying headers or body.
    response.__class__ = cls
    return response
|
||||
|
||||
@classmethod
def from_app(
    cls, app: WSGIApplication, environ: WSGIEnvironment, buffered: bool = False
) -> Response:
    """Create a new response object from an application output. This
    works best if you pass it an application that returns a generator all
    the time. Sometimes applications may use the `write()` callable
    returned by the `start_response` function. This tries to resolve such
    edge cases automatically. But if you don't get the expected output
    you should set `buffered` to `True` which enforces buffering.

    :param app: the WSGI application to execute.
    :param environ: the WSGI environment to execute against.
    :param buffered: set to `True` to enforce buffering.
    :return: a response object.
    """
    # Local import: run_wsgi_app lives in werkzeug.test.
    from ..test import run_wsgi_app

    return cls(*run_wsgi_app(app, environ, buffered))
|
||||
|
||||
@t.overload
def get_data(self, as_text: t.Literal[False] = False) -> bytes: ...

@t.overload
def get_data(self, as_text: t.Literal[True]) -> str: ...

def get_data(self, as_text: bool = False) -> bytes | str:
    """The string representation of the response body. Whenever you call
    this property the response iterable is encoded and flattened. This
    can lead to unwanted behavior if you stream big data.

    This behavior can be disabled by setting
    :attr:`implicit_sequence_conversion` to `False`.

    If `as_text` is set to `True` the return value will be a decoded
    string.

    .. versionadded:: 0.9
    """
    self._ensure_sequence()
    body = b"".join(self.iter_encoded())
    return body.decode() if as_text else body
|
||||
|
||||
def set_data(self, value: bytes | str) -> None:
    """Sets a new string as response body. The value must be a string or
    bytes. A string is encoded to the charset of the response (utf-8 by
    default) first.

    .. versionadded:: 0.9
    """
    encoded = value.encode() if isinstance(value, str) else value
    self.response = [encoded]
    if self.automatically_set_content_length:
        self.headers["Content-Length"] = str(len(encoded))
|
||||
|
||||
#: Read/write access to the full response body: reading calls
#: :meth:`get_data`, assignment calls :meth:`set_data`.
data = property(
    get_data,
    set_data,
    doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.",
)
|
||||
|
||||
def calculate_content_length(self) -> int | None:
    """Returns the content length if available or `None` otherwise."""
    try:
        self._ensure_sequence()
    except RuntimeError:
        # Body cannot be buffered (passthrough / conversion disabled).
        return None
    return sum(map(len, self.iter_encoded()))
|
||||
|
||||
def _ensure_sequence(self, mutable: bool = False) -> None:
    """Ensure the response body is a buffered sequence, raising if an
    implicit conversion is not allowed.

    If *mutable* is true, the sequence is additionally coerced to a
    plain Python list so callers may modify it in place.

    .. versionadded:: 0.6
    """
    if self.is_sequence:
        # Already buffered; optionally coerce tuple -> list.
        if mutable and not isinstance(self.response, list):
            self.response = list(self.response)  # type: ignore
        return

    if self.direct_passthrough:
        raise RuntimeError(
            "Attempted implicit sequence conversion but the"
            " response object is in direct passthrough mode."
        )

    if not self.implicit_sequence_conversion:
        raise RuntimeError(
            "The response object required the iterable to be a"
            " sequence, but the implicit conversion was disabled."
            " Call make_sequence() yourself."
        )

    self.make_sequence()
|
||||
|
||||
def make_sequence(self) -> None:
    """Buffer the response iterator into a list, encoding every item.

    By default this happens automatically when required. If
    `implicit_sequence_conversion` is disabled, this method is not
    called automatically and some properties may raise exceptions.

    .. versionadded:: 0.6
    """
    if self.is_sequence:
        return
    # Consuming the iterable makes us responsible for invoking its
    # close() method (if any) when the response is torn down.
    close = getattr(self.response, "close", None)
    self.response = list(self.iter_encoded())
    if close is not None:
        self.call_on_close(close)
|
||||
|
||||
def iter_encoded(self) -> t.Iterator[bytes]:
    """Iter the response encoded with the encoding of the response.
    If the response object is invoked as WSGI application the return
    value of this method is used as application iterator unless
    :attr:`direct_passthrough` was activated.
    """
    # Encode in a separate function so that self.response is fetched
    # early. This allows us to wrap the response with the return
    # value from get_app_iter or iter_encoded.
    return _iter_encoded(self.response)
|
||||
|
||||
@property
def is_streamed(self) -> bool:
    """``True`` when the response body has no length information, which
    usually means a generator was passed to the response object.

    Useful for deciding whether post-processing that needs the whole
    body should be skipped for streamed responses.
    """
    try:
        len(self.response)  # type: ignore
    except (TypeError, AttributeError):
        # No usable __len__ -> streamed.
        return True
    else:
        return False
|
||||
|
||||
@property
def is_sequence(self) -> bool:
    """``True`` when the iterator is buffered, i.e. the response
    attribute is a list or a tuple.

    .. versionadded:: 0.6
    """
    return isinstance(self.response, (list, tuple))
|
||||
|
||||
def close(self) -> None:
    """Close the wrapped response if possible, then run every callback
    registered via :meth:`call_on_close`. The response can also be used
    as a context manager, which closes it automatically.

    .. versionadded:: 0.9
        Can now be used in a with statement.
    """
    response_close = getattr(self.response, "close", None)
    if response_close is not None:
        response_close()
    for callback in self._on_close:
        callback()
|
||||
|
||||
def __enter__(self) -> Response:
    """Enter the context manager; the response itself is the target."""
    return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, tb):  # type: ignore
    """Exit the context manager, closing the response."""
    self.close()
|
||||
|
||||
def freeze(self) -> None:
    """Make the response object ready to be pickled. Does the
    following:

    * Buffer the response into a list, ignoring
      :attr:`implicit_sequence_conversion` and
      :attr:`direct_passthrough`.
    * Set the ``Content-Length`` header.
    * Generate an ``ETag`` header if one is not already set.

    .. versionchanged:: 2.1
        Removed the ``no_etag`` parameter.

    .. versionchanged:: 2.0
        An ``ETag`` header is always added.

    .. versionchanged:: 0.6
        The ``Content-Length`` header is set.
    """
    # Always freeze the encoded response body, ignore
    # implicit_sequence_conversion and direct_passthrough.
    self.response = list(self.iter_encoded())
    self.headers["Content-Length"] = str(sum(map(len, self.response)))
    self.add_etag()
|
||||
|
||||
def get_wsgi_headers(self, environ: WSGIEnvironment) -> Headers:
    """This is automatically called right before the response is started
    and returns headers modified for the given environment. It returns a
    copy of the headers from the response with some modifications applied
    if necessary.

    For example the location header (if present) is joined with the root
    URL of the environment. Also the content length is automatically set
    to zero here for certain status codes.

    .. versionchanged:: 0.6
        Previously that function was called `fix_headers` and modified
        the response object in place. Also since 0.6, IRIs in location
        and content-location headers are handled properly.

        Also starting with 0.6, Werkzeug will attempt to set the content
        length if it is able to figure it out on its own. This is the
        case if all the strings in the response iterable are already
        encoded and the iterable is buffered.

    :param environ: the WSGI environment of the request.
    :return: returns a new :class:`~werkzeug.datastructures.Headers`
             object.
    """
    headers = Headers(self.headers)
    location: str | None = None
    content_location: str | None = None
    content_length: str | int | None = None
    status = self.status_code

    # iterate over the headers to find all values in one go. Because
    # get_wsgi_headers is used each response that gives us a tiny
    # speedup.
    for key, value in headers:
        ikey = key.lower()
        if ikey == "location":
            location = value
        elif ikey == "content-location":
            content_location = value
        elif ikey == "content-length":
            content_length = value

    if location is not None:
        # IRIs are not valid on the wire; convert to an ASCII URI.
        location = iri_to_uri(location)

        if self.autocorrect_location_header:
            # Make the location header an absolute URL.
            current_url = get_current_url(environ, strip_querystring=True)
            current_url = iri_to_uri(current_url)
            location = urljoin(current_url, location)

        headers["Location"] = location

    # make sure the content location is a URL
    if content_location is not None:
        headers["Content-Location"] = iri_to_uri(content_location)

    if 100 <= status < 200 or status == 204:
        # Per section 3.3.2 of RFC 7230, "a server MUST NOT send a
        # Content-Length header field in any response with a status
        # code of 1xx (Informational) or 204 (No Content)."
        headers.remove("Content-Length")
    elif status == 304:
        remove_entity_headers(headers)

    # if we can determine the content length automatically, we
    # should try to do that. But only if this does not involve
    # flattening the iterator or encoding of strings in the
    # response. We however should not do that if we have a 304
    # response.
    if (
        self.automatically_set_content_length
        and self.is_sequence
        and content_length is None
        and status not in (204, 304)
        and not (100 <= status < 200)
    ):
        content_length = sum(len(x) for x in self.iter_encoded())
        headers["Content-Length"] = str(content_length)

    return headers
|
||||
|
||||
def get_app_iter(self, environ: WSGIEnvironment) -> t.Iterable[bytes]:
    """Returns the application iterator for the given environ. Depending
    on the request method and the current status code the return value
    might be an empty response rather than the one from the response.

    If the request method is `HEAD` or the status code is in a range
    where the HTTP specification requires an empty response, an empty
    iterable is returned.

    .. versionadded:: 0.6

    :param environ: the WSGI environment of the request.
    :return: a response iterable.
    """
    status = self.status_code
    if (
        environ["REQUEST_METHOD"] == "HEAD"
        or 100 <= status < 200
        or status in (204, 304)
    ):
        # HEAD and bodiless status codes: send no body at all.
        iterable: t.Iterable[bytes] = ()
    elif self.direct_passthrough:
        # Passthrough skips encoding AND the ClosingIterator wrapper.
        return self.response  # type: ignore
    else:
        iterable = self.iter_encoded()
    # Ensure self.close() runs when the WSGI server closes the iterator.
    return ClosingIterator(iterable, self.close)
|
||||
|
||||
def get_wsgi_response(
    self, environ: WSGIEnvironment
) -> tuple[t.Iterable[bytes], str, list[tuple[str, str]]]:
    """Returns the final WSGI response as tuple. The first item in
    the tuple is the application iterator, the second the status and
    the third the list of headers. The response returned is created
    specially for the given environment. For example if the request
    method in the WSGI environment is ``'HEAD'`` the response will
    be empty and only the headers and status code will be present.

    .. versionadded:: 0.6

    :param environ: the WSGI environment of the request.
    :return: an ``(app_iter, status, headers)`` tuple.
    """
    # Headers are computed first, then the body iterator.
    headers = self.get_wsgi_headers(environ)
    app_iter = self.get_app_iter(environ)
    return app_iter, self.status, headers.to_wsgi_list()
|
||||
|
||||
def __call__(
    self, environ: WSGIEnvironment, start_response: StartResponse
) -> t.Iterable[bytes]:
    """Process this response as WSGI application.

    :param environ: the WSGI environment.
    :param start_response: the response callable provided by the WSGI
        server.
    :return: an application iterator
    """
    body_iter, status_line, header_list = self.get_wsgi_response(environ)
    start_response(status_line, header_list)
    return body_iter
|
||||
|
||||
# JSON

#: A module or other object that has ``dumps`` and ``loads``
#: functions that match the API of the built-in :mod:`json` module.
#: Used by :meth:`get_json` to deserialize the body.
json_module = json
|
||||
|
||||
@property
def json(self) -> t.Any | None:
    """The parsed JSON data if :attr:`mimetype` indicates JSON
    (:mimetype:`application/json`, see :attr:`is_json`).

    Calls :meth:`get_json` with default arguments.
    """
    return self.get_json()
|
||||
|
||||
@t.overload
def get_json(self, force: bool = ..., silent: t.Literal[False] = ...) -> t.Any: ...

@t.overload
def get_json(self, force: bool = ..., silent: bool = ...) -> t.Any | None: ...

def get_json(self, force: bool = False, silent: bool = False) -> t.Any | None:
    """Parse :attr:`data` as JSON. Useful during testing.

    If the mimetype does not indicate JSON
    (:mimetype:`application/json`, see :attr:`is_json`), this
    returns ``None``.

    Unlike :meth:`Request.get_json`, the result is not cached.

    :param force: Ignore the mimetype and always try to parse JSON.
    :param silent: Silence parsing errors and return ``None``
        instead.
    """
    if not force and not self.is_json:
        return None

    try:
        return self.json_module.loads(self.get_data())
    except ValueError:
        if silent:
            return None
        raise
|
||||
|
||||
# Stream

@cached_property
def stream(self) -> ResponseStream:
    """The response iterable as write-only stream."""
    # cached_property: the same ResponseStream instance is returned on
    # every access.
    return ResponseStream(self)
|
||||
|
||||
def _wrap_range_response(self, start: int, length: int) -> None:
    """Wrap the existing body in a :class:`_RangeWrapper` when serving a
    206 Partial Content response; otherwise do nothing."""
    if self.status_code != 206:
        return
    self.response = _RangeWrapper(self.response, start, length)  # type: ignore
|
||||
|
||||
def _is_range_request_processable(self, environ: WSGIEnvironment) -> bool:
    """Return ``True`` if `Range` header is present and if underlying
    resource is considered unchanged when compared with `If-Range` header.
    """
    # Processable when a Range header is present AND either no If-Range
    # header was sent, or is_resource_modified() reports the resource
    # as unchanged (etag / last-modified still match).
    return (
        "HTTP_IF_RANGE" not in environ
        or not is_resource_modified(
            environ,
            self.headers.get("etag"),
            None,
            self.headers.get("last-modified"),
            ignore_if_range=False,
        )
    ) and "HTTP_RANGE" in environ
|
||||
|
||||
def _process_range_request(
    self,
    environ: WSGIEnvironment,
    complete_length: int | None,
    accept_ranges: bool | str,
) -> bool:
    """Handle Range Request related headers (RFC7233). If `Accept-Ranges`
    header is valid, and Range Request is processable, we set the headers
    as described by the RFC, and wrap the underlying response in a
    RangeWrapper.

    Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.

    :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
             if `Range` header could not be parsed or satisfied.

    .. versionchanged:: 2.0
        Returns ``False`` if the length is 0.
    """
    from ..exceptions import RequestedRangeNotSatisfiable

    # Bail out early when ranges are disabled, the total length is
    # unknown/zero, or the request's preconditions are not met.
    if (
        not accept_ranges
        or complete_length is None
        or complete_length == 0
        or not self._is_range_request_processable(environ)
    ):
        return False

    if accept_ranges is True:
        accept_ranges = "bytes"

    parsed_range = parse_range_header(environ.get("HTTP_RANGE"))

    if parsed_range is None:
        raise RequestedRangeNotSatisfiable(complete_length)

    range_tuple = parsed_range.range_for_length(complete_length)
    content_range_header = parsed_range.to_content_range_header(complete_length)

    if range_tuple is None or content_range_header is None:
        raise RequestedRangeNotSatisfiable(complete_length)

    # range_tuple is (start, stop); set the partial-content headers and
    # narrow the body to that slice.
    content_length = range_tuple[1] - range_tuple[0]
    self.headers["Content-Length"] = str(content_length)
    self.headers["Accept-Ranges"] = accept_ranges
    self.content_range = content_range_header  # type: ignore
    self.status_code = 206
    self._wrap_range_response(range_tuple[0], content_length)
    return True
|
||||
|
||||
def make_conditional(
    self,
    request_or_environ: WSGIEnvironment | Request,
    accept_ranges: bool | str = False,
    complete_length: int | None = None,
) -> Response:
    """Make the response conditional to the request. This method works
    best if an etag was defined for the response already. The `add_etag`
    method can be used to do that. If called without etag just the date
    header is set.

    This does nothing if the request method in the request or environ is
    anything but GET or HEAD.

    For optimal performance when handling range requests, it's recommended
    that your response data object implements `seekable`, `seek` and `tell`
    methods as described by :py:class:`io.IOBase`. Objects returned by
    :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.

    It does not remove the body of the response because that's something
    the :meth:`__call__` function does for us automatically.

    Returns self so that you can do ``return resp.make_conditional(req)``
    but modifies the object in-place.

    :param request_or_environ: a request object or WSGI environment to be
                               used to make the response conditional
                               against.
    :param accept_ranges: This parameter dictates the value of
                          `Accept-Ranges` header. If ``False`` (default),
                          the header is not set. If ``True``, it will be set
                          to ``"bytes"``. If it's a string, it will use this
                          value.
    :param complete_length: Will be used only in valid Range Requests.
                            It will set `Content-Range` complete length
                            value and compute `Content-Length` real value.
                            This parameter is mandatory for successful
                            Range Requests completion.
    :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
             if `Range` header could not be parsed or satisfied.

    .. versionchanged:: 2.0
        Range processing is skipped if length is 0 instead of
        raising a 416 Range Not Satisfiable error.
    """
    environ = _get_environ(request_or_environ)
    if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
        # if the date is not in the headers, add it now. We however
        # will not override an already existing header. Unfortunately
        # this header will be overridden by many WSGI servers including
        # wsgiref.
        if "date" not in self.headers:
            self.headers["Date"] = http_date()
        is206 = self._process_range_request(environ, complete_length, accept_ranges)
        if not is206 and not is_resource_modified(
            environ,
            self.headers.get("etag"),
            None,
            self.headers.get("last-modified"),
        ):
            # Unmodified resource: a failing If-Match means 412
            # Precondition Failed, otherwise answer 304 Not Modified.
            if parse_etags(environ.get("HTTP_IF_MATCH")):
                self.status_code = 412
            else:
                self.status_code = 304
        if (
            self.automatically_set_content_length
            and "content-length" not in self.headers
        ):
            length = self.calculate_content_length()
            if length is not None:
                self.headers["Content-Length"] = str(length)
    return self
|
||||
|
||||
def add_etag(self, overwrite: bool = False, weak: bool = False) -> None:
    """Add an etag for the current response if there is none yet.

    .. versionchanged:: 2.0
        SHA-1 is used to generate the value. MD5 may not be
        available in some environments.
    """
    if not overwrite and "etag" in self.headers:
        return
    self.set_etag(generate_etag(self.get_data()), weak)
|
||||
|
||||
|
||||
class ResponseStream:
    """A write-only, file-descriptor-like object used by
    :meth:`Response.stream` to represent the body of the stream. Writes
    are appended directly to the response object's body sequence.
    """

    # Advertised file mode, mirroring the file-object API.
    mode = "wb+"

    def __init__(self, response: Response):
        self.response = response
        self.closed = False

    def _check_open(self) -> None:
        # Shared guard used by the file-API methods below.
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def write(self, value: bytes) -> int:
        self._check_open()
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)  # type: ignore
        # The body changed, so any previously set length is stale.
        self.response.headers.pop("Content-Length", None)
        return len(value)

    def writelines(self, seq: t.Iterable[bytes]) -> None:
        for chunk in seq:
            self.write(chunk)

    def close(self) -> None:
        self.closed = True

    def flush(self) -> None:
        self._check_open()

    def isatty(self) -> bool:
        self._check_open()
        return False

    def tell(self) -> int:
        # Position equals the total number of bytes buffered so far.
        self.response._ensure_sequence()
        return sum(map(len, self.response.response))

    @property
    def encoding(self) -> str:
        return "utf-8"
|
||||
595
lib/python3.11/site-packages/werkzeug/wsgi.py
Normal file
595
lib/python3.11/site-packages/werkzeug/wsgi.py
Normal file
@ -0,0 +1,595 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import typing as t
|
||||
from functools import partial
|
||||
from functools import update_wrapper
|
||||
|
||||
from .exceptions import ClientDisconnected
|
||||
from .exceptions import RequestEntityTooLarge
|
||||
from .sansio import utils as _sansio_utils
|
||||
from .sansio.utils import host_is_trusted # noqa: F401 # Imported as part of API
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from _typeshed.wsgi import WSGIApplication
|
||||
from _typeshed.wsgi import WSGIEnvironment
|
||||
|
||||
|
||||
def responder(f: t.Callable[..., WSGIApplication]) -> WSGIApplication:
    """Marks a function as responder. Decorate a function with it and it
    will automatically call the return value as WSGI application.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """

    def wrapper(*args):
        # The last two positional arguments are (environ, start_response);
        # f's return value is invoked as a WSGI application with them.
        return f(*args)(*args[-2:])

    return update_wrapper(wrapper, f)
|
||||
|
||||
|
||||
def get_current_url(
    environ: WSGIEnvironment,
    root_only: bool = False,
    strip_querystring: bool = False,
    host_only: bool = False,
    trusted_hosts: t.Iterable[str] | None = None,
) -> str:
    """Recreate the URL for a request from the parts in a WSGI
    environment.

    The URL is an IRI, not a URI, so it may contain Unicode characters.
    Use :func:`~werkzeug.urls.iri_to_uri` to convert it to ASCII.

    :param environ: The WSGI environment to get the URL parts from.
    :param root_only: Only build the root path, don't include the
        remaining path or query string.
    :param strip_querystring: Don't include the query string.
    :param host_only: Only build the scheme and host.
    :param trusted_hosts: A list of trusted host names to validate the
        host against.
    """
    # Build only the parts requested; the sansio helper assembles them.
    parts = {
        "scheme": environ["wsgi.url_scheme"],
        "host": get_host(environ, trusted_hosts),
    }

    if not host_only:
        parts["root_path"] = environ.get("SCRIPT_NAME", "")

        if not root_only:
            parts["path"] = environ.get("PATH_INFO", "")

            if not strip_querystring:
                # WSGI carries the query string latin-1 encoded in a str.
                parts["query_string"] = environ.get("QUERY_STRING", "").encode("latin1")

    return _sansio_utils.get_current_url(**parts)
|
||||
|
||||
|
||||
def _get_server(
|
||||
environ: WSGIEnvironment,
|
||||
) -> tuple[str, int | None] | None:
|
||||
name = environ.get("SERVER_NAME")
|
||||
|
||||
if name is None:
|
||||
return None
|
||||
|
||||
try:
|
||||
port: int | None = int(environ.get("SERVER_PORT", None))
|
||||
except (TypeError, ValueError):
|
||||
# unix socket
|
||||
port = None
|
||||
|
||||
return name, port
|
||||
|
||||
|
||||
def get_host(
    environ: WSGIEnvironment, trusted_hosts: t.Iterable[str] | None = None
) -> str:
    """Return the host for the given WSGI environment.

    The ``Host`` header is preferred, then ``SERVER_NAME`` if it's not
    set. The returned host will only contain the port if it is different
    than the standard port for the protocol.

    Optionally, verify that the host is trusted using
    :func:`host_is_trusted` and raise a
    :exc:`~werkzeug.exceptions.SecurityError` if it is not.

    :param environ: A WSGI environment dict.
    :param trusted_hosts: A list of trusted host names.

    :return: Host, with port if necessary.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not
        trusted.
    """
    # Delegates entirely to the sansio helper; this wrapper only maps
    # WSGI environ keys to its arguments.
    return _sansio_utils.get_host(
        environ["wsgi.url_scheme"],
        environ.get("HTTP_HOST"),
        _get_server(environ),
        trusted_hosts,
    )
|
||||
|
||||
|
||||
def get_content_length(environ: WSGIEnvironment) -> int | None:
    """Return the ``Content-Length`` header value as an int. If the header is not given
    or the ``Transfer-Encoding`` header is ``chunked``, ``None`` is returned to indicate
    a streaming request. If the value is not an integer, or negative, 0 is returned.

    :param environ: The WSGI environ to get the content length from.

    .. versionadded:: 0.9
    """
    # Parsing/validation is done by the shared sansio helper.
    return _sansio_utils.get_content_length(
        http_content_length=environ.get("CONTENT_LENGTH"),
        http_transfer_encoding=environ.get("HTTP_TRANSFER_ENCODING"),
    )
|
||||
|
||||
|
||||
def get_input_stream(
    environ: WSGIEnvironment,
    safe_fallback: bool = True,
    max_content_length: int | None = None,
) -> t.IO[bytes]:
    """Return the WSGI input stream, wrapped so that it may be read safely without going
    past the ``Content-Length`` header value or ``max_content_length``.

    If ``Content-Length`` exceeds ``max_content_length``, a
    :exc:`RequestEntityTooLarge`` ``413 Content Too Large`` error is raised.

    If the WSGI server sets ``environ["wsgi.input_terminated"]``, it indicates that the
    server handles terminating the stream, so it is safe to read directly. For example,
    a server that knows how to handle chunked requests safely would set this.

    If ``max_content_length`` is set, it can be enforced on streams if
    ``wsgi.input_terminated`` is set. Otherwise, an empty stream is returned unless the
    user explicitly disables this safe fallback.

    If the limit is reached before the underlying stream is exhausted (such as a file
    that is too large, or an infinite stream), the remaining contents of the stream
    cannot be read safely. Depending on how the server handles this, clients may show a
    "connection reset" failure instead of seeing the 413 response.

    :param environ: The WSGI environ containing the stream.
    :param safe_fallback: Return an empty stream when ``Content-Length`` is not set.
        Disabling this allows infinite streams, which can be a denial-of-service risk.
    :param max_content_length: The maximum length that content-length or streaming
        requests may not exceed.

    .. versionchanged:: 2.3.2
        ``max_content_length`` is only applied to streaming requests if the server sets
        ``wsgi.input_terminated``.

    .. versionchanged:: 2.3
        Check ``max_content_length`` and raise an error if it is exceeded.

    .. versionadded:: 0.9
    """
    stream = t.cast(t.IO[bytes], environ["wsgi.input"])
    # None when the header is missing or Transfer-Encoding is chunked.
    content_length = get_content_length(environ)

    if content_length is not None and max_content_length is not None:
        if content_length > max_content_length:
            raise RequestEntityTooLarge()

    # A WSGI server can set this to indicate that it terminates the input stream. In
    # that case the stream is safe without wrapping, or can enforce a max length.
    if "wsgi.input_terminated" in environ:
        if max_content_length is not None:
            # If this is moved above, it can cause the stream to hang if a read attempt
            # is made when the client sends no data. For example, the development server
            # does not handle buffering except for chunked encoding.
            return t.cast(
                t.IO[bytes], LimitedStream(stream, max_content_length, is_max=True)
            )

        return stream

    # No limit given, return an empty stream unless the user explicitly allows the
    # potentially infinite stream. An infinite stream is dangerous if it's not expected,
    # as it can tie up a worker indefinitely.
    if content_length is None:
        return io.BytesIO() if safe_fallback else stream

    return t.cast(t.IO[bytes], LimitedStream(stream, content_length))
|
||||
|
||||
|
||||
def get_path_info(environ: WSGIEnvironment) -> str:
    """Return ``PATH_INFO`` from the WSGI environment.

    :param environ: WSGI environment to get the path from.

    .. versionchanged:: 3.0
        The ``charset`` and ``errors`` parameters were removed.

    .. versionadded:: 0.9
    """
    # WSGI transports the path as latin-1 text; round-trip it back to bytes
    # and decode as UTF-8, replacing undecodable sequences.
    return environ.get("PATH_INFO", "").encode("latin1").decode(errors="replace")
|
||||
|
||||
|
||||
class ClosingIterator:
    """Wrap a WSGI response iterable so that extra cleanup callbacks run when
    it is closed.

    The WSGI specification requires that all middlewares and gateways respect
    the ``close`` callback of the iterable returned by the application.
    Iteration is forwarded to the wrapped iterable; on ``close``, the
    iterable's own ``close`` (if present) runs first, followed by each extra
    callback::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    If there is just one close function it can be passed instead of the list.

    A closing iterator is not needed if the application uses response objects
    and finishes the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(
        self,
        iterable: t.Iterable[bytes],
        callbacks: None
        | (t.Callable[[], None] | t.Iterable[t.Callable[[], None]]) = None,
    ) -> None:
        inner = iter(iterable)
        self._next = t.cast(t.Callable[[], bytes], partial(next, inner))

        # Normalize ``callbacks`` into a fresh list we own.
        if callbacks is None:
            cb_list: list[t.Callable[[], None]] = []
        elif callable(callbacks):
            cb_list = [callbacks]
        else:
            cb_list = list(callbacks)

        # The wrapped iterable's own close runs before the extra callbacks.
        inner_close = getattr(iterable, "close", None)

        if inner_close:
            cb_list.insert(0, inner_close)

        self._callbacks = cb_list

    def __iter__(self) -> ClosingIterator:
        return self

    def __next__(self) -> bytes:
        return self._next()

    def close(self) -> None:
        for cb in self._callbacks:
            cb()
|
||||
|
||||
|
||||
def wrap_file(
|
||||
environ: WSGIEnvironment, file: t.IO[bytes], buffer_size: int = 8192
|
||||
) -> t.Iterable[bytes]:
|
||||
"""Wraps a file. This uses the WSGI server's file wrapper if available
|
||||
or otherwise the generic :class:`FileWrapper`.
|
||||
|
||||
.. versionadded:: 0.5
|
||||
|
||||
If the file wrapper from the WSGI server is used it's important to not
|
||||
iterate over it from inside the application but to pass it through
|
||||
unchanged. If you want to pass out a file wrapper inside a response
|
||||
object you have to set :attr:`Response.direct_passthrough` to `True`.
|
||||
|
||||
More information about file wrappers are available in :pep:`333`.
|
||||
|
||||
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
|
||||
:param buffer_size: number of bytes for one iteration.
|
||||
"""
|
||||
return environ.get("wsgi.file_wrapper", FileWrapper)( # type: ignore
|
||||
file, buffer_size
|
||||
)
|
||||
|
||||
|
||||
class FileWrapper:
    """Convert a :class:`file`-like object into an iterable that yields
    ``buffer_size``-sized blocks until the file is fully read.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`Response` you have
    to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file: t.IO[bytes], buffer_size: int = 8192) -> None:
        self.file = file
        self.buffer_size = buffer_size

    def close(self) -> None:
        inner = self.file
        if hasattr(inner, "close"):
            inner.close()

    def seekable(self) -> bool:
        inner = self.file
        if hasattr(inner, "seekable"):
            return inner.seekable()
        # No ``seekable`` method: treat the mere presence of ``seek`` as yes.
        return hasattr(inner, "seek")

    def seek(self, *args: t.Any) -> None:
        inner = self.file
        if hasattr(inner, "seek"):
            inner.seek(*args)

    def tell(self) -> int | None:
        inner = self.file
        return inner.tell() if hasattr(inner, "tell") else None

    def __iter__(self) -> FileWrapper:
        return self

    def __next__(self) -> bytes:
        # An empty read marks the end of the file.
        if chunk := self.file.read(self.buffer_size):
            return chunk
        raise StopIteration()
|
||||
|
||||
|
||||
class _RangeWrapper:
    # private for now, but should we make it public in the future ?

    """This class can be used to convert an iterable object into
    an iterable that will only yield a piece of the underlying content.
    It yields blocks until the underlying stream range is fully read.
    The yielded blocks will have a size that can't exceed the original
    iterator defined block size, but that can be smaller.

    If you're using this object together with a :class:`Response` you have
    to use the `direct_passthrough` mode.

    :param iterable: an iterable object with a :meth:`__next__` method.
    :param start_byte: byte from which read will start.
    :param byte_range: how many bytes to read.
    """

    def __init__(
        self,
        iterable: t.Iterable[bytes] | t.IO[bytes],
        start_byte: int = 0,
        byte_range: int | None = None,
    ):
        self.iterable = iter(iterable)
        self.byte_range = byte_range
        self.start_byte = start_byte
        # Exclusive end offset; ``None`` means read until the iterable ends.
        self.end_byte = None

        if byte_range is not None:
            self.end_byte = start_byte + byte_range

        # Total bytes consumed from the underlying iterable so far (not the
        # number of bytes yielded to the caller).
        self.read_length = 0
        # NOTE: checked on the original ``iterable`` argument, not on the
        # ``iter()`` result.  For file-like objects ``iter()`` returns the
        # object itself, so the seek/tell calls below remain available.
        self.seekable = hasattr(iterable, "seekable") and iterable.seekable()
        self.end_reached = False

    def __iter__(self) -> _RangeWrapper:
        return self

    def _next_chunk(self) -> bytes:
        """Pull one block from the underlying iterable, tracking consumed
        bytes and remembering when the iterable is exhausted."""
        try:
            chunk = next(self.iterable)
            self.read_length += len(chunk)
            return chunk
        except StopIteration:
            self.end_reached = True
            raise

    def _first_iteration(self) -> tuple[bytes | None, int]:
        """Position the stream at ``start_byte``.

        Returns the first chunk to yield (or ``None`` if a fresh read is
        still needed) and the offset the caller should measure ``end_byte``
        against.
        """
        chunk = None
        if self.seekable:
            # Fast path: jump directly to the start position.
            self.iterable.seek(self.start_byte)  # type: ignore
            self.read_length = self.iterable.tell()  # type: ignore
            contextual_read_length = self.read_length
        else:
            # Slow path: consume and discard chunks until the start position
            # falls inside the current chunk, then trim its leading bytes.
            while self.read_length <= self.start_byte:
                chunk = self._next_chunk()
            if chunk is not None:
                chunk = chunk[self.start_byte - self.read_length :]
            contextual_read_length = self.start_byte
        return chunk, contextual_read_length

    def _next(self) -> bytes:
        """Return the next chunk, trimmed so the range end is not exceeded.

        May return ``b""``; ``__next__`` translates that into StopIteration.
        """
        if self.end_reached:
            raise StopIteration()
        chunk = None
        contextual_read_length = self.read_length
        if self.read_length == 0:
            chunk, contextual_read_length = self._first_iteration()
        if chunk is None:
            chunk = self._next_chunk()
        if self.end_byte is not None and self.read_length >= self.end_byte:
            # This chunk extends past the requested range: cut it and mark
            # the range as finished.
            self.end_reached = True
            return chunk[: self.end_byte - contextual_read_length]
        return chunk

    def __next__(self) -> bytes:
        chunk = self._next()
        if chunk:
            return chunk
        self.end_reached = True
        raise StopIteration()

    def close(self) -> None:
        # Close the underlying iterable if it supports it.
        if hasattr(self.iterable, "close"):
            self.iterable.close()
|
||||
|
||||
|
||||
class LimitedStream(io.RawIOBase):
    """Wrap a stream so that it doesn't read more than a given limit. This is used to
    limit ``wsgi.input`` to the ``Content-Length`` header value or
    :attr:`.Request.max_content_length`.

    When attempting to read after the limit has been reached, :meth:`on_exhausted` is
    called. When the limit is a maximum, this raises :exc:`.RequestEntityTooLarge`.

    If reading from the stream returns zero bytes or raises an error,
    :meth:`on_disconnect` is called, which raises :exc:`.ClientDisconnected`. When the
    limit is a maximum and zero bytes were read, no error is raised, since it may be the
    end of the stream.

    If the limit is reached before the underlying stream is exhausted (such as a file
    that is too large, or an infinite stream), the remaining contents of the stream
    cannot be read safely. Depending on how the server handles this, clients may show a
    "connection reset" failure instead of seeing the 413 response.

    :param stream: The stream to read from. Must be a readable binary IO object.
    :param limit: The limit in bytes to not read past. Should be either the
        ``Content-Length`` header value or ``request.max_content_length``.
    :param is_max: Whether the given ``limit`` is ``request.max_content_length`` instead
        of the ``Content-Length`` header value. This changes how exhausted and
        disconnect events are handled.

    .. versionchanged:: 2.3
        Handle ``max_content_length`` differently than ``Content-Length``.

    .. versionchanged:: 2.3
        Implements ``io.RawIOBase`` rather than ``io.IOBase``.
    """

    def __init__(self, stream: t.IO[bytes], limit: int, is_max: bool = False) -> None:
        self._stream = stream
        # Bytes read from the wrapped stream so far.
        self._pos = 0
        self.limit = limit
        # True when ``limit`` is ``max_content_length`` rather than the exact
        # ``Content-Length``; changes how ``on_exhausted`` and
        # ``on_disconnect`` behave.
        self._limit_is_max = is_max

    @property
    def is_exhausted(self) -> bool:
        """Whether the current stream position has reached the limit."""
        return self._pos >= self.limit

    def on_exhausted(self) -> None:
        """Called when attempting to read after the limit has been reached.

        The default behavior is to do nothing, unless the limit is a maximum, in which
        case it raises :exc:`.RequestEntityTooLarge`.

        .. versionchanged:: 2.3
            Raises ``RequestEntityTooLarge`` if the limit is a maximum.

        .. versionchanged:: 2.3
            Any return value is ignored.
        """
        if self._limit_is_max:
            raise RequestEntityTooLarge()

    def on_disconnect(self, error: Exception | None = None) -> None:
        """Called when an attempted read receives zero bytes before the limit was
        reached. This indicates that the client disconnected before sending the full
        request body.

        The default behavior is to raise :exc:`.ClientDisconnected`, unless the limit is
        a maximum and no error was raised.

        .. versionchanged:: 2.3
            Added the ``error`` parameter. Do nothing if the limit is a maximum and no
            error was raised.

        .. versionchanged:: 2.3
            Any return value is ignored.
        """
        if not self._limit_is_max or error is not None:
            raise ClientDisconnected()

        # If the limit is a maximum, then we may have read zero bytes because the
        # streaming body is complete. There's no way to distinguish that from the
        # client disconnecting early.

    def exhaust(self) -> bytes:
        """Exhaust the stream by reading until the limit is reached or the client
        disconnects, returning the remaining data.

        .. versionchanged:: 2.3
            Return the remaining data.

        .. versionchanged:: 2.2.3
            Handle case where wrapped stream returns fewer bytes than requested.
        """
        if not self.is_exhausted:
            return self.readall()

        return b""

    def readinto(self, b: bytearray) -> int | None:  # type: ignore[override]
        # Core read primitive: ``io.RawIOBase`` implements ``read``/``readall``
        # on top of this.  Fills ``b`` with at most ``limit - pos`` bytes.
        size = len(b)
        remaining = self.limit - self._pos

        if remaining <= 0:
            self.on_exhausted()
            return 0

        if hasattr(self._stream, "readinto"):
            # Use stream.readinto if it's available.
            if size <= remaining:
                # The size fits in the remaining limit, use the buffer directly.
                try:
                    out_size: int | None = self._stream.readinto(b)
                except (OSError, ValueError) as e:
                    self.on_disconnect(error=e)
                    return 0
            else:
                # Use a temp buffer with the remaining limit as the size.
                temp_b = bytearray(remaining)

                try:
                    out_size = self._stream.readinto(temp_b)
                except (OSError, ValueError) as e:
                    self.on_disconnect(error=e)
                    return 0

                if out_size:
                    b[:out_size] = temp_b
        else:
            # WSGI requires that stream.read is available.
            try:
                data = self._stream.read(min(size, remaining))
            except (OSError, ValueError) as e:
                self.on_disconnect(error=e)
                return 0

            out_size = len(data)
            b[:out_size] = data

        if not out_size:
            # Read zero bytes from the stream.
            self.on_disconnect()
            return 0

        self._pos += out_size
        return out_size

    def readall(self) -> bytes:
        # Read and return everything up to the limit as one bytes object.
        if self.is_exhausted:
            self.on_exhausted()
            return b""

        out = bytearray()

        # The parent implementation uses "while True", which results in an extra read.
        while not self.is_exhausted:
            data = self.read(1024 * 64)

            # Stream may return empty before a max limit is reached.
            if not data:
                break

            out.extend(data)

        return bytes(out)

    def tell(self) -> int:
        """Return the current stream position.

        .. versionadded:: 0.9
        """
        return self._pos

    def readable(self) -> bool:
        return True
|
||||
Reference in New Issue
Block a user