tests attempt 2
This commit is contained in:
parent fdf385fe06
commit c88f7df83a
2363 changed files with 408191 additions and 0 deletions
@@ -0,0 +1,2 @@
"""A package that contains models that represent entities.
"""
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,34 @@
from pip._vendor.packaging.version import parse as parse_version

from pip._internal.models.link import Link
from pip._internal.utils.models import KeyBasedCompareMixin


class InstallationCandidate(KeyBasedCompareMixin):
    """Represents a potential "candidate" for installation."""

    __slots__ = ["name", "version", "link"]

    def __init__(self, name: str, version: str, link: Link) -> None:
        self.name = name
        self.version = parse_version(version)
        self.link = link

        super().__init__(
            key=(self.name, self.version, self.link),
            defining_class=InstallationCandidate,
        )

    def __repr__(self) -> str:
        return "<InstallationCandidate({!r}, {!r}, {!r})>".format(
            self.name,
            self.version,
            self.link,
        )

    def __str__(self) -> str:
        return "{!r} candidate (version {} at {})".format(
            self.name,
            self.version,
            self.link,
        )
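The hunk above appears to be pip's internal models/candidate.py. As a minimal usage sketch (assuming these pip internals are importable from this venv; pip's _internal API is unsupported, and the "demo" package and example.com URLs below are hypothetical), the compare key (name, parsed version, link) means candidates order by version semantics rather than by string order:

from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.link import Link

older = InstallationCandidate("demo", "1.0.2", Link("https://example.com/demo-1.0.2.tar.gz"))
newer = InstallationCandidate("demo", "1.0.10", Link("https://example.com/demo-1.0.10.tar.gz"))
assert newer > older  # parse_version ranks 1.0.10 above 1.0.2; plain string order would not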
@@ -0,0 +1,237 @@
""" PEP 610 """
import json
import re
import urllib.parse
from typing import Any, Dict, Iterable, Optional, Type, TypeVar, Union

__all__ = [
    "DirectUrl",
    "DirectUrlValidationError",
    "DirInfo",
    "ArchiveInfo",
    "VcsInfo",
]

T = TypeVar("T")

DIRECT_URL_METADATA_NAME = "direct_url.json"
ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$")


class DirectUrlValidationError(Exception):
    pass


def _get(
    d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
) -> Optional[T]:
    """Get value from dictionary and verify expected type."""
    if key not in d:
        return default
    value = d[key]
    if not isinstance(value, expected_type):
        raise DirectUrlValidationError(
            "{!r} has unexpected type for {} (expected {})".format(
                value, key, expected_type
            )
        )
    return value


def _get_required(
    d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
) -> T:
    value = _get(d, expected_type, key, default)
    if value is None:
        raise DirectUrlValidationError(f"{key} must have a value")
    return value


def _exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType":
    infos = [info for info in infos if info is not None]
    if not infos:
        raise DirectUrlValidationError(
            "missing one of archive_info, dir_info, vcs_info"
        )
    if len(infos) > 1:
        raise DirectUrlValidationError(
            "more than one of archive_info, dir_info, vcs_info"
        )
    assert infos[0] is not None
    return infos[0]


def _filter_none(**kwargs: Any) -> Dict[str, Any]:
    """Make dict excluding None values."""
    return {k: v for k, v in kwargs.items() if v is not None}


class VcsInfo:
    name = "vcs_info"

    def __init__(
        self,
        vcs: str,
        commit_id: str,
        requested_revision: Optional[str] = None,
    ) -> None:
        self.vcs = vcs
        self.requested_revision = requested_revision
        self.commit_id = commit_id

    @classmethod
    def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]:
        if d is None:
            return None
        return cls(
            vcs=_get_required(d, str, "vcs"),
            commit_id=_get_required(d, str, "commit_id"),
            requested_revision=_get(d, str, "requested_revision"),
        )

    def _to_dict(self) -> Dict[str, Any]:
        return _filter_none(
            vcs=self.vcs,
            requested_revision=self.requested_revision,
            commit_id=self.commit_id,
        )


class ArchiveInfo:
    name = "archive_info"

    def __init__(
        self,
        hash: Optional[str] = None,
        hashes: Optional[Dict[str, str]] = None,
    ) -> None:
        # set hashes before hash, since the hash setter will further populate hashes
        self.hashes = hashes
        self.hash = hash

    @property
    def hash(self) -> Optional[str]:
        return self._hash

    @hash.setter
    def hash(self, value: Optional[str]) -> None:
        if value is not None:
            # Auto-populate the hashes key to upgrade to the new format automatically.
            # We don't back-populate the legacy hash key from hashes.
            try:
                hash_name, hash_value = value.split("=", 1)
            except ValueError:
                raise DirectUrlValidationError(
                    f"invalid archive_info.hash format: {value!r}"
                )
            if self.hashes is None:
                self.hashes = {hash_name: hash_value}
            elif hash_name not in self.hashes:
                self.hashes = self.hashes.copy()
                self.hashes[hash_name] = hash_value
        self._hash = value

    @classmethod
    def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["ArchiveInfo"]:
        if d is None:
            return None
        return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes"))

    def _to_dict(self) -> Dict[str, Any]:
        return _filter_none(hash=self.hash, hashes=self.hashes)


class DirInfo:
    name = "dir_info"

    def __init__(
        self,
        editable: bool = False,
    ) -> None:
        self.editable = editable

    @classmethod
    def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]:
        if d is None:
            return None
        return cls(editable=_get_required(d, bool, "editable", default=False))

    def _to_dict(self) -> Dict[str, Any]:
        return _filter_none(editable=self.editable or None)


InfoType = Union[ArchiveInfo, DirInfo, VcsInfo]


class DirectUrl:
    def __init__(
        self,
        url: str,
        info: InfoType,
        subdirectory: Optional[str] = None,
    ) -> None:
        self.url = url
        self.info = info
        self.subdirectory = subdirectory

    def _remove_auth_from_netloc(self, netloc: str) -> str:
        if "@" not in netloc:
            return netloc
        user_pass, netloc_no_user_pass = netloc.split("@", 1)
        if (
            isinstance(self.info, VcsInfo)
            and self.info.vcs == "git"
            and user_pass == "git"
        ):
            return netloc
        if ENV_VAR_RE.match(user_pass):
            return netloc
        return netloc_no_user_pass

    @property
    def redacted_url(self) -> str:
        """url with user:password part removed unless it is formed with
        environment variables as specified in PEP 610, or it is ``git``
        in the case of a git URL.
        """
        purl = urllib.parse.urlsplit(self.url)
        netloc = self._remove_auth_from_netloc(purl.netloc)
        surl = urllib.parse.urlunsplit(
            (purl.scheme, netloc, purl.path, purl.query, purl.fragment)
        )
        return surl

    def validate(self) -> None:
        self.from_dict(self.to_dict())

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl":
        return DirectUrl(
            url=_get_required(d, str, "url"),
            subdirectory=_get(d, str, "subdirectory"),
            info=_exactly_one_of(
                [
                    ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
                    DirInfo._from_dict(_get(d, dict, "dir_info")),
                    VcsInfo._from_dict(_get(d, dict, "vcs_info")),
                ]
            ),
        )

    def to_dict(self) -> Dict[str, Any]:
        res = _filter_none(
            url=self.redacted_url,
            subdirectory=self.subdirectory,
        )
        res[self.info.name] = self.info._to_dict()
        return res

    @classmethod
    def from_json(cls, s: str) -> "DirectUrl":
        return cls.from_dict(json.loads(s))

    def to_json(self) -> str:
        return json.dumps(self.to_dict(), sort_keys=True)

    def is_local_editable(self) -> bool:
        return isinstance(self.info, DirInfo) and self.info.editable
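A short round-trip sketch of the PEP 610 model above (hypothetical URL and an all-zero placeholder digest): the legacy single "hash" field is auto-upgraded into the "hashes" mapping, and basic auth is redacted on serialization unless it is a ${VAR} reference or the literal "git" user of a git URL.

from pip._internal.models.direct_url import ArchiveInfo, DirectUrl

info = ArchiveInfo(hash="sha256=" + "0" * 64)  # legacy single-hash form
du = DirectUrl(url="https://user:secret@example.com/demo-1.0.tar.gz", info=info)
d = du.to_dict()
assert d["url"] == "https://example.com/demo-1.0.tar.gz"    # user:secret dropped
assert d["archive_info"]["hashes"] == {"sha256": "0" * 64}  # upgraded format kept
assert DirectUrl.from_json(du.to_json()).to_dict() == d     # stable round trip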
@@ -0,0 +1,80 @@
from typing import FrozenSet, Optional, Set

from pip._vendor.packaging.utils import canonicalize_name

from pip._internal.exceptions import CommandError


class FormatControl:
    """Helper for managing formats from which a package can be installed."""

    __slots__ = ["no_binary", "only_binary"]

    def __init__(
        self,
        no_binary: Optional[Set[str]] = None,
        only_binary: Optional[Set[str]] = None,
    ) -> None:
        if no_binary is None:
            no_binary = set()
        if only_binary is None:
            only_binary = set()

        self.no_binary = no_binary
        self.only_binary = only_binary

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return NotImplemented

        if self.__slots__ != other.__slots__:
            return False

        return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)

    def __repr__(self) -> str:
        return "{}({}, {})".format(
            self.__class__.__name__, self.no_binary, self.only_binary
        )

    @staticmethod
    def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None:
        if value.startswith("-"):
            raise CommandError(
                "--no-binary / --only-binary option requires 1 argument."
            )
        new = value.split(",")
        while ":all:" in new:
            other.clear()
            target.clear()
            target.add(":all:")
            del new[: new.index(":all:") + 1]
            # Without a none, we want to discard everything as :all: covers it
            if ":none:" not in new:
                return
        for name in new:
            if name == ":none:":
                target.clear()
                continue
            name = canonicalize_name(name)
            other.discard(name)
            target.add(name)

    def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]:
        result = {"binary", "source"}
        if canonical_name in self.only_binary:
            result.discard("source")
        elif canonical_name in self.no_binary:
            result.discard("binary")
        elif ":all:" in self.only_binary:
            result.discard("source")
        elif ":all:" in self.no_binary:
            result.discard("binary")
        return frozenset(result)

    def disallow_binaries(self) -> None:
        self.handle_mutual_excludes(
            ":all:",
            self.no_binary,
            self.only_binary,
        )
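A sketch of how the exclusion sets above compose (the package name is arbitrary): ":all:" clears both sets and claims everything for its own side, and a later per-package entry on the other side carves out an exception.

from pip._internal.models.format_control import FormatControl

fc = FormatControl()
fc.handle_mutual_excludes(":all:", fc.only_binary, fc.no_binary)       # --only-binary=:all:
fc.handle_mutual_excludes("simplejson", fc.no_binary, fc.only_binary)  # --no-binary=simplejson
assert fc.get_allowed_formats("simplejson") == frozenset({"source"})
assert fc.get_allowed_formats("anything-else") == frozenset({"binary"})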
@@ -0,0 +1,28 @@
import urllib.parse


class PackageIndex:
    """Represents a Package Index and provides easier access to endpoints"""

    __slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"]

    def __init__(self, url: str, file_storage_domain: str) -> None:
        super().__init__()
        self.url = url
        self.netloc = urllib.parse.urlsplit(url).netloc
        self.simple_url = self._url_for_path("simple")
        self.pypi_url = self._url_for_path("pypi")

        # This is part of a temporary hack used to block installs of PyPI
        # packages which depend on external urls only necessary until PyPI can
        # block such packages themselves
        self.file_storage_domain = file_storage_domain

    def _url_for_path(self, path: str) -> str:
        return urllib.parse.urljoin(self.url, path)


PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org")
TestPyPI = PackageIndex(
    "https://test.pypi.org/", file_storage_domain="test-files.pythonhosted.org"
)
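The two module-level constants resolve their endpoints at import time via _url_for_path, for example:

from pip._internal.models.index import PyPI, TestPyPI

assert PyPI.netloc == "pypi.org"
assert PyPI.simple_url == "https://pypi.org/simple"
assert TestPyPI.pypi_url == "https://test.pypi.org/pypi"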
@@ -0,0 +1,53 @@
from typing import Any, Dict, Sequence

from pip._vendor.packaging.markers import default_environment

from pip import __version__
from pip._internal.req.req_install import InstallRequirement


class InstallationReport:
    def __init__(self, install_requirements: Sequence[InstallRequirement]):
        self._install_requirements = install_requirements

    @classmethod
    def _install_req_to_dict(cls, ireq: InstallRequirement) -> Dict[str, Any]:
        assert ireq.download_info, f"No download_info for {ireq}"
        res = {
            # PEP 610 json for the download URL. download_info.archive_info.hashes may
            # be absent when the requirement was installed from the wheel cache
            # and the cache entry was populated by an older pip version that did not
            # record origin.json.
            "download_info": ireq.download_info.to_dict(),
            # is_direct is true if the requirement was a direct URL reference (which
            # includes editable requirements), and false if the requirement was
            # downloaded from a PEP 503 index or --find-links.
            "is_direct": ireq.is_direct,
            # requested is true if the requirement was specified by the user (aka
            # top level requirement), and false if it was installed as a dependency of a
            # requirement. https://peps.python.org/pep-0376/#requested
            "requested": ireq.user_supplied,
            # PEP 566 json encoding for metadata
            # https://www.python.org/dev/peps/pep-0566/#json-compatible-metadata
            "metadata": ireq.get_dist().metadata_dict,
        }
        if ireq.user_supplied and ireq.extras:
            # For top level requirements, the list of requested extras, if any.
            res["requested_extras"] = list(sorted(ireq.extras))
        return res

    def to_dict(self) -> Dict[str, Any]:
        return {
            "version": "1",
            "pip_version": __version__,
            "install": [
                self._install_req_to_dict(ireq) for ireq in self._install_requirements
            ],
            # https://peps.python.org/pep-0508/#environment-markers
            # TODO: currently, the resolver uses the default environment to evaluate
            # environment markers, so that is what we report here. In the future, it
            # should also take into account options such as --python-version or
            # --platform, perhaps under the form of an environment_override field?
            # https://github.com/pypa/pip/issues/11198
            "environment": default_environment(),
        }
venv/lib/python3.11/site-packages/pip/_internal/models/link.py (581 lines, new file)
@@ -0,0 +1,581 @@
import functools
import itertools
import logging
import os
import posixpath
import re
import urllib.parse
from dataclasses import dataclass
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    List,
    Mapping,
    NamedTuple,
    Optional,
    Tuple,
    Union,
)

from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.filetypes import WHEEL_EXTENSION
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.misc import (
    pairwise,
    redact_auth_from_url,
    split_auth_from_netloc,
    splitext,
)
from pip._internal.utils.models import KeyBasedCompareMixin
from pip._internal.utils.urls import path_to_url, url_to_path

if TYPE_CHECKING:
    from pip._internal.index.collector import IndexContent

logger = logging.getLogger(__name__)


# Order matters, earlier hashes have a precedence over later hashes for what
# we will pick to use.
_SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5")


@dataclass(frozen=True)
class LinkHash:
    """Links to content may have embedded hash values. This class parses those.

    `name` must be any member of `_SUPPORTED_HASHES`.

    This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to
    be JSON-serializable to conform to PEP 610, this class contains the logic for
    parsing a hash name and value for correctness, and then checking whether that hash
    conforms to a schema with `.is_hash_allowed()`."""

    name: str
    value: str

    _hash_url_fragment_re = re.compile(
        # NB: we do not validate that the second group (.*) is a valid hex
        # digest. Instead, we simply keep that string in this class, and then check it
        # against Hashes when hash-checking is needed. This is easier to debug than
        # proactively discarding an invalid hex digest, as we handle incorrect hashes
        # and malformed hashes in the same place.
        r"[#&]({choices})=([^&]*)".format(
            choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES)
        ),
    )

    def __post_init__(self) -> None:
        assert self.name in _SUPPORTED_HASHES

    @classmethod
    @functools.lru_cache(maxsize=None)
    def find_hash_url_fragment(cls, url: str) -> Optional["LinkHash"]:
        """Search a string for a checksum algorithm name and encoded output value."""
        match = cls._hash_url_fragment_re.search(url)
        if match is None:
            return None
        name, value = match.groups()
        return cls(name=name, value=value)

    def as_dict(self) -> Dict[str, str]:
        return {self.name: self.value}

    def as_hashes(self) -> Hashes:
        """Return a Hashes instance which checks only for the current hash."""
        return Hashes({self.name: [self.value]})

    def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
        """
        Return True if the current hash is allowed by `hashes`.
        """
        if hashes is None:
            return False
        return hashes.is_hash_allowed(self.name, hex_digest=self.value)


@dataclass(frozen=True)
class MetadataFile:
    """Information about a core metadata file associated with a distribution."""

    hashes: Optional[Dict[str, str]]

    def __post_init__(self) -> None:
        if self.hashes is not None:
            assert all(name in _SUPPORTED_HASHES for name in self.hashes)


def supported_hashes(hashes: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]:
    # Remove any unsupported hash types from the mapping. If this leaves no
    # supported hashes, return None
    if hashes is None:
        return None
    hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES}
    if not hashes:
        return None
    return hashes


def _clean_url_path_part(part: str) -> str:
    """
    Clean a "part" of a URL path (i.e. after splitting on "@" characters).
    """
    # We unquote prior to quoting to make sure nothing is double quoted.
    return urllib.parse.quote(urllib.parse.unquote(part))


def _clean_file_url_path(part: str) -> str:
    """
    Clean the first part of a URL path that corresponds to a local
    filesystem path (i.e. the first part after splitting on "@" characters).
    """
    # We unquote prior to quoting to make sure nothing is double quoted.
    # Also, on Windows the path part might contain a drive letter which
    # should not be quoted. On Linux where drive letters do not
    # exist, the colon should be quoted. We rely on urllib.request
    # to do the right thing here.
    return urllib.request.pathname2url(urllib.request.url2pathname(part))


# percent-encoded: /
_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)


def _clean_url_path(path: str, is_local_path: bool) -> str:
    """
    Clean the path portion of a URL.
    """
    if is_local_path:
        clean_func = _clean_file_url_path
    else:
        clean_func = _clean_url_path_part

    # Split on the reserved characters prior to cleaning so that
    # revision strings in VCS URLs are properly preserved.
    parts = _reserved_chars_re.split(path)

    cleaned_parts = []
    for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
        cleaned_parts.append(clean_func(to_clean))
        # Normalize %xx escapes (e.g. %2f -> %2F)
        cleaned_parts.append(reserved.upper())

    return "".join(cleaned_parts)


def _ensure_quoted_url(url: str) -> str:
    """
    Make sure a link is fully quoted.
    For example, if ' ' occurs in the URL, it will be replaced with "%20",
    and without double-quoting other characters.
    """
    # Split the URL into parts according to the general structure
    # `scheme://netloc/path;parameters?query#fragment`.
    result = urllib.parse.urlparse(url)
    # If the netloc is empty, then the URL refers to a local filesystem path.
    is_local_path = not result.netloc
    path = _clean_url_path(result.path, is_local_path=is_local_path)
    return urllib.parse.urlunparse(result._replace(path=path))


class Link(KeyBasedCompareMixin):
    """Represents a parsed link from a Package Index's simple URL"""

    __slots__ = [
        "_parsed_url",
        "_url",
        "_hashes",
        "comes_from",
        "requires_python",
        "yanked_reason",
        "metadata_file_data",
        "cache_link_parsing",
        "egg_fragment",
    ]

    def __init__(
        self,
        url: str,
        comes_from: Optional[Union[str, "IndexContent"]] = None,
        requires_python: Optional[str] = None,
        yanked_reason: Optional[str] = None,
        metadata_file_data: Optional[MetadataFile] = None,
        cache_link_parsing: bool = True,
        hashes: Optional[Mapping[str, str]] = None,
    ) -> None:
        """
        :param url: url of the resource pointed to (href of the link)
        :param comes_from: instance of IndexContent where the link was found,
            or string.
        :param requires_python: String containing the `Requires-Python`
            metadata field, specified in PEP 345. This may be specified by
            a data-requires-python attribute in the HTML link tag, as
            described in PEP 503.
        :param yanked_reason: the reason the file has been yanked, if the
            file has been yanked, or None if the file hasn't been yanked.
            This is the value of the "data-yanked" attribute, if present, in
            a simple repository HTML link. If the file has been yanked but
            no reason was provided, this should be the empty string. See
            PEP 592 for more information and the specification.
        :param metadata_file_data: the metadata attached to the file, or None if
            no such metadata is provided. This argument, if not None, indicates
            that a separate metadata file exists, and also optionally supplies
            hashes for that file.
        :param cache_link_parsing: A flag that is used elsewhere to determine
            whether resources retrieved from this link should be cached. PyPI
            URLs should generally have this set to False, for example.
        :param hashes: A mapping of hash names to digests to allow us to
            determine the validity of a download.
        """

        # The comes_from, requires_python, and metadata_file_data arguments are
        # only used by classmethods of this class, and are not used in client
        # code directly.

        # url can be a UNC windows share
        if url.startswith("\\\\"):
            url = path_to_url(url)

        self._parsed_url = urllib.parse.urlsplit(url)
        # Store the url as a private attribute to prevent accidentally
        # trying to set a new value.
        self._url = url

        link_hash = LinkHash.find_hash_url_fragment(url)
        hashes_from_link = {} if link_hash is None else link_hash.as_dict()
        if hashes is None:
            self._hashes = hashes_from_link
        else:
            self._hashes = {**hashes, **hashes_from_link}

        self.comes_from = comes_from
        self.requires_python = requires_python if requires_python else None
        self.yanked_reason = yanked_reason
        self.metadata_file_data = metadata_file_data

        super().__init__(key=url, defining_class=Link)

        self.cache_link_parsing = cache_link_parsing
        self.egg_fragment = self._egg_fragment()

    @classmethod
    def from_json(
        cls,
        file_data: Dict[str, Any],
        page_url: str,
    ) -> Optional["Link"]:
        """
        Convert an pypi json document from a simple repository page into a Link.
        """
        file_url = file_data.get("url")
        if file_url is None:
            return None

        url = _ensure_quoted_url(urllib.parse.urljoin(page_url, file_url))
        pyrequire = file_data.get("requires-python")
        yanked_reason = file_data.get("yanked")
        hashes = file_data.get("hashes", {})

        # PEP 714: Indexes must use the name core-metadata, but
        # clients should support the old name as a fallback for compatibility.
        metadata_info = file_data.get("core-metadata")
        if metadata_info is None:
            metadata_info = file_data.get("dist-info-metadata")

        # The metadata info value may be a boolean, or a dict of hashes.
        if isinstance(metadata_info, dict):
            # The file exists, and hashes have been supplied
            metadata_file_data = MetadataFile(supported_hashes(metadata_info))
        elif metadata_info:
            # The file exists, but there are no hashes
            metadata_file_data = MetadataFile(None)
        else:
            # False or not present: the file does not exist
            metadata_file_data = None

        # The Link.yanked_reason expects an empty string instead of a boolean.
        if yanked_reason and not isinstance(yanked_reason, str):
            yanked_reason = ""
        # The Link.yanked_reason expects None instead of False.
        elif not yanked_reason:
            yanked_reason = None

        return cls(
            url,
            comes_from=page_url,
            requires_python=pyrequire,
            yanked_reason=yanked_reason,
            hashes=hashes,
            metadata_file_data=metadata_file_data,
        )

    @classmethod
    def from_element(
        cls,
        anchor_attribs: Dict[str, Optional[str]],
        page_url: str,
        base_url: str,
    ) -> Optional["Link"]:
        """
        Convert an anchor element's attributes in a simple repository page to a Link.
        """
        href = anchor_attribs.get("href")
        if not href:
            return None

        url = _ensure_quoted_url(urllib.parse.urljoin(base_url, href))
        pyrequire = anchor_attribs.get("data-requires-python")
        yanked_reason = anchor_attribs.get("data-yanked")

        # PEP 714: Indexes must use the name data-core-metadata, but
        # clients should support the old name as a fallback for compatibility.
        metadata_info = anchor_attribs.get("data-core-metadata")
        if metadata_info is None:
            metadata_info = anchor_attribs.get("data-dist-info-metadata")
        # The metadata info value may be the string "true", or a string of
        # the form "hashname=hashval"
        if metadata_info == "true":
            # The file exists, but there are no hashes
            metadata_file_data = MetadataFile(None)
        elif metadata_info is None:
            # The file does not exist
            metadata_file_data = None
        else:
            # The file exists, and hashes have been supplied
            hashname, sep, hashval = metadata_info.partition("=")
            if sep == "=":
                metadata_file_data = MetadataFile(supported_hashes({hashname: hashval}))
            else:
                # Error - data is wrong. Treat as no hashes supplied.
                logger.debug(
                    "Index returned invalid data-dist-info-metadata value: %s",
                    metadata_info,
                )
                metadata_file_data = MetadataFile(None)

        return cls(
            url,
            comes_from=page_url,
            requires_python=pyrequire,
            yanked_reason=yanked_reason,
            metadata_file_data=metadata_file_data,
        )

    def __str__(self) -> str:
        if self.requires_python:
            rp = f" (requires-python:{self.requires_python})"
        else:
            rp = ""
        if self.comes_from:
            return "{} (from {}){}".format(
                redact_auth_from_url(self._url), self.comes_from, rp
            )
        else:
            return redact_auth_from_url(str(self._url))

    def __repr__(self) -> str:
        return f"<Link {self}>"

    @property
    def url(self) -> str:
        return self._url

    @property
    def filename(self) -> str:
        path = self.path.rstrip("/")
        name = posixpath.basename(path)
        if not name:
            # Make sure we don't leak auth information if the netloc
            # includes a username and password.
            netloc, user_pass = split_auth_from_netloc(self.netloc)
            return netloc

        name = urllib.parse.unquote(name)
        assert name, f"URL {self._url!r} produced no filename"
        return name

    @property
    def file_path(self) -> str:
        return url_to_path(self.url)

    @property
    def scheme(self) -> str:
        return self._parsed_url.scheme

    @property
    def netloc(self) -> str:
        """
        This can contain auth information.
        """
        return self._parsed_url.netloc

    @property
    def path(self) -> str:
        return urllib.parse.unquote(self._parsed_url.path)

    def splitext(self) -> Tuple[str, str]:
        return splitext(posixpath.basename(self.path.rstrip("/")))

    @property
    def ext(self) -> str:
        return self.splitext()[1]

    @property
    def url_without_fragment(self) -> str:
        scheme, netloc, path, query, fragment = self._parsed_url
        return urllib.parse.urlunsplit((scheme, netloc, path, query, ""))

    _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")

    # Per PEP 508.
    _project_name_re = re.compile(
        r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
    )

    def _egg_fragment(self) -> Optional[str]:
        match = self._egg_fragment_re.search(self._url)
        if not match:
            return None

        # An egg fragment looks like a PEP 508 project name, along with
        # an optional extras specifier. Anything else is invalid.
        project_name = match.group(1)
        if not self._project_name_re.match(project_name):
            deprecated(
                reason=f"{self} contains an egg fragment with a non-PEP 508 name",
                replacement="to use the req @ url syntax, and remove the egg fragment",
                gone_in="25.0",
                issue=11617,
            )

        return project_name

    _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")

    @property
    def subdirectory_fragment(self) -> Optional[str]:
        match = self._subdirectory_fragment_re.search(self._url)
        if not match:
            return None
        return match.group(1)

    def metadata_link(self) -> Optional["Link"]:
        """Return a link to the associated core metadata file (if any)."""
        if self.metadata_file_data is None:
            return None
        metadata_url = f"{self.url_without_fragment}.metadata"
        if self.metadata_file_data.hashes is None:
            return Link(metadata_url)
        return Link(metadata_url, hashes=self.metadata_file_data.hashes)

    def as_hashes(self) -> Hashes:
        return Hashes({k: [v] for k, v in self._hashes.items()})

    @property
    def hash(self) -> Optional[str]:
        return next(iter(self._hashes.values()), None)

    @property
    def hash_name(self) -> Optional[str]:
        return next(iter(self._hashes), None)

    @property
    def show_url(self) -> str:
        return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])

    @property
    def is_file(self) -> bool:
        return self.scheme == "file"

    def is_existing_dir(self) -> bool:
        return self.is_file and os.path.isdir(self.file_path)

    @property
    def is_wheel(self) -> bool:
        return self.ext == WHEEL_EXTENSION

    @property
    def is_vcs(self) -> bool:
        from pip._internal.vcs import vcs

        return self.scheme in vcs.all_schemes

    @property
    def is_yanked(self) -> bool:
        return self.yanked_reason is not None

    @property
    def has_hash(self) -> bool:
        return bool(self._hashes)

    def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
        """
        Return True if the link has a hash and it is allowed by `hashes`.
        """
        if hashes is None:
            return False
        return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items())


class _CleanResult(NamedTuple):
    """Convert link for equivalency check.

    This is used in the resolver to check whether two URL-specified requirements
    likely point to the same distribution and can be considered equivalent. This
    equivalency logic avoids comparing URLs literally, which can be too strict
    (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpecting to users.

    Currently this does three things:

    1. Drop the basic auth part. This is technically wrong since a server can
       serve different content based on auth, but if it does that, it is even
       impossible to guarantee two URLs without auth are equivalent, since
       the user can input different auth information when prompted. So the
       practical solution is to assume the auth doesn't affect the response.
    2. Parse the query to avoid the ordering issue. Note that ordering under the
       same key in the query are NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are
       still considered different.
    3. Explicitly drop most of the fragment part, except ``subdirectory=`` and
       hash values, since it should have no impact the downloaded content. Note
       that this drops the "egg=" part historically used to denote the requested
       project (and extras), which is wrong in the strictest sense, but too many
       people are supplying it inconsistently to cause superfluous resolution
       conflicts, so we choose to also ignore them.
    """

    parsed: urllib.parse.SplitResult
    query: Dict[str, List[str]]
    subdirectory: str
    hashes: Dict[str, str]


def _clean_link(link: Link) -> _CleanResult:
    parsed = link._parsed_url
    netloc = parsed.netloc.rsplit("@", 1)[-1]
    # According to RFC 8089, an empty host in file: means localhost.
    if parsed.scheme == "file" and not netloc:
        netloc = "localhost"
    fragment = urllib.parse.parse_qs(parsed.fragment)
    if "egg" in fragment:
        logger.debug("Ignoring egg= fragment in %s", link)
    try:
        # If there are multiple subdirectory values, use the first one.
        # This matches the behavior of Link.subdirectory_fragment.
        subdirectory = fragment["subdirectory"][0]
    except (IndexError, KeyError):
        subdirectory = ""
    # If there are multiple hash values under the same algorithm, use the
    # first one. This matches the behavior of Link.hash_value.
    hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment}
    return _CleanResult(
        parsed=parsed._replace(netloc=netloc, query="", fragment=""),
        query=urllib.parse.parse_qs(parsed.query),
        subdirectory=subdirectory,
        hashes=hashes,
    )


@functools.lru_cache(maxsize=None)
def links_equivalent(link1: Link, link2: Link) -> bool:
    return _clean_link(link1) == _clean_link(link2)
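Two behaviors of the Link module above in one sketch (hypothetical URLs, all-zero placeholder digest): a hash embedded in the URL fragment feeds the link's hash table, and links_equivalent treats differently ordered query strings as the same resource.

from pip._internal.models.link import Link, links_equivalent

digest = "0" * 64  # placeholder, not a real checksum
link = Link(f"https://example.com/demo-1.0-py3-none-any.whl#sha256={digest}")
assert link.is_wheel
assert link.hash_name == "sha256" and link.hash == digest

assert links_equivalent(
    Link("https://example.com/demo.tar.gz?a=1&b=2"),
    Link("https://example.com/demo.tar.gz?b=2&a=1"),
)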
@@ -0,0 +1,31 @@
"""
For types associated with installation schemes.

For a general overview of available schemes and their context, see
https://docs.python.org/3/install/index.html#alternate-installation.
"""


SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"]


class Scheme:
    """A Scheme holds paths which are used as the base directories for
    artifacts associated with a Python package.
    """

    __slots__ = SCHEME_KEYS

    def __init__(
        self,
        platlib: str,
        purelib: str,
        headers: str,
        scripts: str,
        data: str,
    ) -> None:
        self.platlib = platlib
        self.purelib = purelib
        self.headers = headers
        self.scripts = scripts
        self.data = data
@@ -0,0 +1,132 @@
import itertools
import logging
import os
import posixpath
import urllib.parse
from typing import List

from pip._vendor.packaging.utils import canonicalize_name

from pip._internal.models.index import PyPI
from pip._internal.utils.compat import has_tls
from pip._internal.utils.misc import normalize_path, redact_auth_from_url

logger = logging.getLogger(__name__)


class SearchScope:

    """
    Encapsulates the locations that pip is configured to search.
    """

    __slots__ = ["find_links", "index_urls", "no_index"]

    @classmethod
    def create(
        cls,
        find_links: List[str],
        index_urls: List[str],
        no_index: bool,
    ) -> "SearchScope":
        """
        Create a SearchScope object after normalizing the `find_links`.
        """
        # Build find_links. If an argument starts with ~, it may be
        # a local file relative to a home directory. So try normalizing
        # it and if it exists, use the normalized version.
        # This is deliberately conservative - it might be fine just to
        # blindly normalize anything starting with a ~...
        built_find_links: List[str] = []
        for link in find_links:
            if link.startswith("~"):
                new_link = normalize_path(link)
                if os.path.exists(new_link):
                    link = new_link
            built_find_links.append(link)

        # If we don't have TLS enabled, then WARN if anyplace we're looking
        # relies on TLS.
        if not has_tls():
            for link in itertools.chain(index_urls, built_find_links):
                parsed = urllib.parse.urlparse(link)
                if parsed.scheme == "https":
                    logger.warning(
                        "pip is configured with locations that require "
                        "TLS/SSL, however the ssl module in Python is not "
                        "available."
                    )
                    break

        return cls(
            find_links=built_find_links,
            index_urls=index_urls,
            no_index=no_index,
        )

    def __init__(
        self,
        find_links: List[str],
        index_urls: List[str],
        no_index: bool,
    ) -> None:
        self.find_links = find_links
        self.index_urls = index_urls
        self.no_index = no_index

    def get_formatted_locations(self) -> str:
        lines = []
        redacted_index_urls = []
        if self.index_urls and self.index_urls != [PyPI.simple_url]:
            for url in self.index_urls:
                redacted_index_url = redact_auth_from_url(url)

                # Parse the URL
                purl = urllib.parse.urlsplit(redacted_index_url)

                # URL is generally invalid if scheme and netloc is missing
                # there are issues with Python and URL parsing, so this test
                # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
                # always parse invalid URLs correctly - it should raise
                # exceptions for malformed URLs
                if not purl.scheme and not purl.netloc:
                    logger.warning(
                        'The index url "%s" seems invalid, please provide a scheme.',
                        redacted_index_url,
                    )

                redacted_index_urls.append(redacted_index_url)

            lines.append(
                "Looking in indexes: {}".format(", ".join(redacted_index_urls))
            )

        if self.find_links:
            lines.append(
                "Looking in links: {}".format(
                    ", ".join(redact_auth_from_url(url) for url in self.find_links)
                )
            )
        return "\n".join(lines)

    def get_index_urls_locations(self, project_name: str) -> List[str]:
        """Returns the locations found via self.index_urls

        Checks the url_name on the main (first in the list) index and
        use this url_name to produce all locations
        """

        def mkurl_pypi_url(url: str) -> str:
            loc = posixpath.join(
                url, urllib.parse.quote(canonicalize_name(project_name))
            )
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash. Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's
            # behavior.
            if not loc.endswith("/"):
                loc = loc + "/"
            return loc

        return [mkurl_pypi_url(url) for url in self.index_urls]
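A sketch of the project-URL expansion above, using the default index: the project name is canonicalized and the path gets a trailing slash for easy_install compatibility.

from pip._internal.models.search_scope import SearchScope

scope = SearchScope.create(
    find_links=[], index_urls=["https://pypi.org/simple"], no_index=False
)
assert scope.get_index_urls_locations("My_Project") == [
    "https://pypi.org/simple/my-project/"
]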
@@ -0,0 +1,51 @@
from typing import Optional

from pip._internal.models.format_control import FormatControl


class SelectionPreferences:
    """
    Encapsulates the candidate selection preferences for downloading
    and installing files.
    """

    __slots__ = [
        "allow_yanked",
        "allow_all_prereleases",
        "format_control",
        "prefer_binary",
        "ignore_requires_python",
    ]

    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    def __init__(
        self,
        allow_yanked: bool,
        allow_all_prereleases: bool = False,
        format_control: Optional[FormatControl] = None,
        prefer_binary: bool = False,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """Create a SelectionPreferences object.

        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param format_control: A FormatControl object or None. Used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param prefer_binary: Whether to prefer an old, but valid, binary
            dist over a new source dist.
        :param ignore_requires_python: Whether to ignore incompatible
            "Requires-Python" values in links. Defaults to False.
        """
        if ignore_requires_python is None:
            ignore_requires_python = False

        self.allow_yanked = allow_yanked
        self.allow_all_prereleases = allow_all_prereleases
        self.format_control = format_control
        self.prefer_binary = prefer_binary
        self.ignore_requires_python = ignore_requires_python
@@ -0,0 +1,110 @@
import sys
from typing import List, Optional, Tuple

from pip._vendor.packaging.tags import Tag

from pip._internal.utils.compatibility_tags import get_supported, version_info_to_nodot
from pip._internal.utils.misc import normalize_version_info


class TargetPython:

    """
    Encapsulates the properties of a Python interpreter one is targeting
    for a package install, download, etc.
    """

    __slots__ = [
        "_given_py_version_info",
        "abis",
        "implementation",
        "platforms",
        "py_version",
        "py_version_info",
        "_valid_tags",
    ]

    def __init__(
        self,
        platforms: Optional[List[str]] = None,
        py_version_info: Optional[Tuple[int, ...]] = None,
        abis: Optional[List[str]] = None,
        implementation: Optional[str] = None,
    ) -> None:
        """
        :param platforms: A list of strings or None. If None, searches for
            packages that are supported by the current system. Otherwise, will
            find packages that can be built on the platforms passed in. These
            packages will only be downloaded for distribution: they will
            not be built locally.
        :param py_version_info: An optional tuple of ints representing the
            Python version information to use (e.g. `sys.version_info[:3]`).
            This can have length 1, 2, or 3 when provided.
        :param abis: A list of strings or None. This is passed to
            compatibility_tags.py's get_supported() function as is.
        :param implementation: A string or None. This is passed to
            compatibility_tags.py's get_supported() function as is.
        """
        # Store the given py_version_info for when we call get_supported().
        self._given_py_version_info = py_version_info

        if py_version_info is None:
            py_version_info = sys.version_info[:3]
        else:
            py_version_info = normalize_version_info(py_version_info)

        py_version = ".".join(map(str, py_version_info[:2]))

        self.abis = abis
        self.implementation = implementation
        self.platforms = platforms
        self.py_version = py_version
        self.py_version_info = py_version_info

        # This is used to cache the return value of get_tags().
        self._valid_tags: Optional[List[Tag]] = None

    def format_given(self) -> str:
        """
        Format the given, non-None attributes for display.
        """
        display_version = None
        if self._given_py_version_info is not None:
            display_version = ".".join(
                str(part) for part in self._given_py_version_info
            )

        key_values = [
            ("platforms", self.platforms),
            ("version_info", display_version),
            ("abis", self.abis),
            ("implementation", self.implementation),
        ]
        return " ".join(
            f"{key}={value!r}" for key, value in key_values if value is not None
        )

    def get_tags(self) -> List[Tag]:
        """
        Return the supported PEP 425 tags to check wheel candidates against.

        The tags are returned in order of preference (most preferred first).
        """
        if self._valid_tags is None:
            # Pass versions=None if no py_version_info was given since
            # versions=None uses special default logic.
            py_version_info = self._given_py_version_info
            if py_version_info is None:
                version = None
            else:
                version = version_info_to_nodot(py_version_info)

            tags = get_supported(
                version=version,
                platforms=self.platforms,
                abis=self.abis,
                impl=self.implementation,
            )
            self._valid_tags = tags

        return self._valid_tags
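A sketch of the version normalization above: a two-item tuple is padded to three, py_version keeps only major.minor, and get_tags caches its list on first use.

from pip._internal.models.target_python import TargetPython

tp = TargetPython(py_version_info=(3, 11))
assert tp.py_version == "3.11"
assert tp.py_version_info == (3, 11, 0)  # normalize_version_info pads to length 3
tags = tp.get_tags()
assert tags is tp.get_tags()  # second call returns the cached list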
@@ -0,0 +1,92 @@
"""Represents a wheel file and provides access to the various parts of the
name that have meaning.
"""
import re
from typing import Dict, Iterable, List

from pip._vendor.packaging.tags import Tag

from pip._internal.exceptions import InvalidWheelFilename


class Wheel:
    """A wheel file"""

    wheel_file_re = re.compile(
        r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))
        ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE,
    )

    def __init__(self, filename: str) -> None:
        """
        :raises InvalidWheelFilename: when the filename is invalid for a wheel
        """
        wheel_info = self.wheel_file_re.match(filename)
        if not wheel_info:
            raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.")
        self.filename = filename
        self.name = wheel_info.group("name").replace("_", "-")
        # we'll assume "_" means "-" due to wheel naming scheme
        # (https://github.com/pypa/pip/issues/1150)
        self.version = wheel_info.group("ver").replace("_", "-")
        self.build_tag = wheel_info.group("build")
        self.pyversions = wheel_info.group("pyver").split(".")
        self.abis = wheel_info.group("abi").split(".")
        self.plats = wheel_info.group("plat").split(".")

        # All the tag combinations from this file
        self.file_tags = {
            Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
        }

    def get_formatted_file_tags(self) -> List[str]:
        """Return the wheel's tags as a sorted list of strings."""
        return sorted(str(tag) for tag in self.file_tags)

    def support_index_min(self, tags: List[Tag]) -> int:
        """Return the lowest index that one of the wheel's file_tag combinations
        achieves in the given list of supported tags.

        For example, if there are 8 supported tags and one of the file tags
        is first in the list, then return 0.

        :param tags: the PEP 425 tags to check the wheel against, in order
            with most preferred first.

        :raises ValueError: If none of the wheel's file tags match one of
            the supported tags.
        """
        try:
            return next(i for i, t in enumerate(tags) if t in self.file_tags)
        except StopIteration:
            raise ValueError()

    def find_most_preferred_tag(
        self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
    ) -> int:
        """Return the priority of the most preferred tag that one of the wheel's file
        tag combinations achieves in the given list of supported tags using the given
        tag_to_priority mapping, where lower priorities are more-preferred.

        This is used in place of support_index_min in some cases in order to avoid
        an expensive linear scan of a large list of tags.

        :param tags: the PEP 425 tags to check the wheel against.
        :param tag_to_priority: a mapping from tag to priority of that tag, where
            lower is more preferred.

        :raises ValueError: If none of the wheel's file tags match one of
            the supported tags.
        """
        return min(
            tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
        )

    def supported(self, tags: Iterable[Tag]) -> bool:
        """Return whether the wheel is compatible with one of the given tags.

        :param tags: the PEP 425 tags to check the wheel against.
        """
        return not self.file_tags.isdisjoint(tags)
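Feeding a well-known filename through the regex above (any valid wheel name works the same way):

from pip._vendor.packaging.tags import Tag

from pip._internal.models.wheel import Wheel

w = Wheel("pip-24.0-py3-none-any.whl")
assert (w.name, w.version) == ("pip", "24.0")
assert w.get_formatted_file_tags() == ["py3-none-any"]
assert w.supported([Tag("py3", "none", "any")])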