# Copyright 1999-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import errno
import functools
import io
import logging
import stat
import textwrap
import warnings
import collections
from collections import deque, OrderedDict
from itertools import chain
import portage
from portage import os
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi.DummyTree import DummyTree
from portage.dbapi.IndexedPortdb import IndexedPortdb
from portage.dbapi._similar_name_search import similar_name_search
from portage.dep import (
Atom,
best_match_to_list,
extract_affecting_use,
check_required_use,
human_readable_required_use,
match_from_list,
_repo_separator,
)
from portage.dep._slot_operator import ignore_built_slot_operator_deps, strip_slots
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, _get_eapi_attrs
from portage.exception import (
InvalidAtom,
InvalidData,
InvalidDependString,
PackageNotFound,
PortageException,
)
from portage.localization import _
from portage.output import colorize, create_color_func, darkgreen, green
bad = create_color_func("BAD")
from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import _getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ConfigProtect, shlex_split, new_protect_filename
from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs, normalize_path
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
from portage.util.futures import asyncio
from portage.util._async.TaskScheduler import TaskScheduler
from portage.versions import _pkg_str, catpkgsplit
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
from .chk_updated_cfg_files import chk_updated_cfg_files
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import (
insert_category_into_atom,
is_valid_package_atom,
)
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from _emerge.RootConfig import RootConfig
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.UserQuery import UserQuery
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
from _emerge.resolver.DbapiProvidesIndex import DbapiProvidesIndex
from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
from _emerge.resolver.output import Display, format_unmatched_atom
# Exposes a depgraph interface to dep_check.
_dep_check_graph_interface = collections.namedtuple(
"_dep_check_graph_interface",
(
# Checks if parent package will replace child.
"will_replace_child",
# Indicates a removal action, like depclean or prune.
"removal_action",
# Checks if update is desirable for a given package.
"want_update_pkg",
),
)
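# For example (illustrative), _dynamic_depgraph_config below constructs
# this interface as:
#
#     _dep_check_graph_interface(
#         will_replace_child=depgraph._will_replace_child,
#         removal_action="remove" in myparams,
#         want_update_pkg=depgraph._want_update_pkg,
#     )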
class _scheduler_graph_config:
def __init__(self, trees, pkg_cache, graph, mergelist):
self.trees = trees
self.pkg_cache = pkg_cache
self.graph = graph
self.mergelist = mergelist
def _wildcard_set(atoms):
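    """
    Build an InternalPackageSet from extended-atom strings. For example
    (illustrative), _wildcard_set(["sys-apps/*", "gcc"]) yields a set that
    matches every package in the sys-apps category plus any package named
    gcc: a string that Atom() rejects is retried with a "*/" category
    wildcard prefix.
    """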
pkgs = InternalPackageSet(allow_wildcard=True)
for x in atoms:
try:
x = Atom(x, allow_wildcard=True, allow_repo=False)
except portage.exception.InvalidAtom:
x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
pkgs.add(x)
return pkgs
class _frozen_depgraph_config:
def __init__(self, settings, trees, myopts, params, spinner):
self.settings = settings
self.target_root = settings["EROOT"]
self.myopts = myopts
self.edebug = 0
if settings.get("PORTAGE_DEBUG", "") == "1":
self.edebug = 1
self.spinner = spinner
self.requested_depth = params.get("deep", 0)
self._running_root = trees[trees._running_eroot]["root_config"]
self.pkgsettings = {}
self.trees = {}
self._trees_orig = trees
self.roots = {}
# All Package instances
self._pkg_cache = {}
self._highest_license_masked = {}
# We can't know that an soname dep is unsatisfied if there are
# any unbuilt ebuilds in the graph, since unbuilt ebuilds have
# no soname data. Therefore, only enable soname dependency
# resolution if --usepkgonly is enabled, or for removal actions.
self.soname_deps_enabled = (
"--usepkgonly" in myopts or "remove" in params
) and params.get("ignore_soname_deps") != "y"
dynamic_deps = "dynamic_deps" in params
ignore_built_slot_operator_deps = (
myopts.get("--ignore-built-slot-operator-deps", "n") == "y"
)
for myroot in trees:
self.trees[myroot] = {}
# Create a RootConfig instance that references
# the FakeVartree instead of the real one.
self.roots[myroot] = RootConfig(
trees[myroot]["vartree"].settings,
self.trees[myroot],
trees[myroot]["root_config"].setconfig,
)
for tree in ("porttree", "bintree"):
self.trees[myroot][tree] = trees[myroot][tree]
self.trees[myroot]["vartree"] = FakeVartree(
trees[myroot]["root_config"],
pkg_cache=self._pkg_cache,
pkg_root_config=self.roots[myroot],
dynamic_deps=dynamic_deps,
ignore_built_slot_operator_deps=ignore_built_slot_operator_deps,
soname_deps=self.soname_deps_enabled,
)
self.pkgsettings[myroot] = portage.config(
clone=self.trees[myroot]["vartree"].settings
)
if self.soname_deps_enabled and "remove" not in params:
self.trees[myroot]["bintree"] = DummyTree(
DbapiProvidesIndex(trees[myroot]["bintree"].dbapi)
)
if params.get("ignore_world", False):
self._required_set_names = set()
else:
self._required_set_names = {"world"}
atoms = " ".join(myopts.get("--exclude", [])).split()
self.excluded_pkgs = _wildcard_set(atoms)
atoms = " ".join(myopts.get("--reinstall-atoms", [])).split()
self.reinstall_atoms = _wildcard_set(atoms)
atoms = " ".join(myopts.get("--usepkg-exclude", [])).split()
self.usepkg_exclude = _wildcard_set(atoms)
atoms = " ".join(myopts.get("--useoldpkg-atoms", [])).split()
self.useoldpkg_atoms = _wildcard_set(atoms)
atoms = " ".join(myopts.get("--rebuild-exclude", [])).split()
self.rebuild_exclude = _wildcard_set(atoms)
atoms = " ".join(myopts.get("--rebuild-ignore", [])).split()
self.rebuild_ignore = _wildcard_set(atoms)
self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets:
def __init__(self):
# contains all sets added to the graph
self.sets = {}
# contains non-set atoms given as arguments
self.sets["__non_set_args__"] = InternalPackageSet(allow_repo=True)
# contains all atoms from all sets added to the graph, including
# atoms given as arguments
self.atoms = InternalPackageSet(allow_repo=True)
self.atom_arg_map = {}
class _rebuild_config:
def __init__(self, frozen_config, backtrack_parameters):
self._graph = digraph()
self._frozen_config = frozen_config
self.rebuild_list = backtrack_parameters.rebuild_list.copy()
self.orig_rebuild_list = self.rebuild_list.copy()
self.reinstall_list = backtrack_parameters.reinstall_list.copy()
self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
self.rebuild = (
self.rebuild_if_new_rev
or self.rebuild_if_new_ver
or self.rebuild_if_unbuilt
)
def add(self, dep_pkg, dep):
parent = dep.collapsed_parent
priority = dep.collapsed_priority
rebuild_exclude = self._frozen_config.rebuild_exclude
rebuild_ignore = self._frozen_config.rebuild_ignore
if (
self.rebuild
and isinstance(parent, Package)
and parent.built
and priority.buildtime
and isinstance(dep_pkg, Package)
and not rebuild_exclude.findAtomForPackage(parent)
and not rebuild_ignore.findAtomForPackage(dep_pkg)
):
self._graph.add(dep_pkg, parent, priority)
def _needs_rebuild(self, dep_pkg):
"""Check whether packages that depend on dep_pkg need to be rebuilt."""
dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
return False
if self.rebuild_if_unbuilt:
# dep_pkg is being installed from source, so binary
# packages for parents are invalid. Force rebuild
return True
trees = self._frozen_config.trees
vardb = trees[dep_pkg.root]["vartree"].dbapi
if self.rebuild_if_new_rev:
# Parent packages are valid if a package with the same
# cpv is already installed.
return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
# Otherwise, parent packages are valid if a package with the same
# version (excluding revision) is already installed.
assert self.rebuild_if_new_ver
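        # catpkgsplit("cat/pkg-1.2-r3") returns ("cat", "pkg", "1.2", "r3"),
        # so dropping the last element compares versions while ignoring
        # the revision.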
cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
for inst_cpv in vardb.match(dep_pkg.slot_atom):
inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
if inst_cpv_norev == cpv_norev:
return False
return True
def _trigger_rebuild(self, parent, build_deps):
root_slot = (parent.root, parent.slot_atom)
if root_slot in self.rebuild_list:
return False
trees = self._frozen_config.trees
reinstall = False
for slot_atom, dep_pkg in build_deps.items():
dep_root_slot = (dep_pkg.root, slot_atom)
if self._needs_rebuild(dep_pkg):
self.rebuild_list.add(root_slot)
return True
if "--usepkg" in self._frozen_config.myopts and (
dep_root_slot in self.reinstall_list
or dep_root_slot in self.rebuild_list
or not dep_pkg.installed
):
# A direct rebuild dependency is being installed. We
# should update the parent as well to the latest binary,
# if that binary is valid.
#
# To validate the binary, we check whether all of the
# rebuild dependencies are present on the same binhost.
#
# 1) If parent is present on the binhost, but one of its
# rebuild dependencies is not, then the parent should
# be rebuilt from source.
# 2) Otherwise, the parent binary is assumed to be valid,
# because all of its rebuild dependencies are
# consistent.
bintree = trees[parent.root]["bintree"]
uri = bintree.get_pkgindex_uri(parent.cpv)
dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
bindb = bintree.dbapi
if self.rebuild_if_new_ver and uri and uri != dep_uri:
cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
for cpv in bindb.match(dep_pkg.slot_atom):
if cpv_norev == catpkgsplit(cpv)[:-1]:
dep_uri = bintree.get_pkgindex_uri(cpv)
if uri == dep_uri:
break
if uri and uri != dep_uri:
# 1) Remote binary package is invalid because it was
# built without dep_pkg. Force rebuild.
self.rebuild_list.add(root_slot)
return True
if parent.installed and root_slot not in self.reinstall_list:
try:
(bin_build_time,) = bindb.aux_get(parent.cpv, ["BUILD_TIME"])
except KeyError:
continue
if bin_build_time != str(parent.build_time):
# 2) Remote binary package is valid, and local package
# is not up to date. Force reinstall.
reinstall = True
if reinstall:
self.reinstall_list.add(root_slot)
return reinstall
def trigger_rebuilds(self):
"""
Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
depends on pkgA at both build-time and run-time, pkgB needs to be
rebuilt.
"""
need_restart = False
graph = self._graph
build_deps = {}
leaf_nodes = deque(graph.leaf_nodes())
# Trigger rebuilds bottom-up (starting with the leaves) so that parents
# will always know which children are being rebuilt.
while graph:
if not leaf_nodes:
# We'll have to drop an edge. This should be quite rare.
leaf_nodes.append(graph.order[-1])
node = leaf_nodes.popleft()
if node not in graph:
# This can be triggered by circular dependencies.
continue
slot_atom = node.slot_atom
# Remove our leaf node from the graph, keeping track of deps.
parents = graph.parent_nodes(node)
graph.remove(node)
node_build_deps = build_deps.get(node, {})
for parent in parents:
if parent == node:
# Ignore a direct cycle.
continue
parent_bdeps = build_deps.setdefault(parent, {})
parent_bdeps[slot_atom] = node
if not graph.child_nodes(parent):
leaf_nodes.append(parent)
# Trigger rebuilds for our leaf node. Because all of our children
# have been processed, the build_deps will be completely filled in,
# and self.rebuild_list / self.reinstall_list will tell us whether
# any of our children need to be rebuilt or reinstalled.
if self._trigger_rebuild(node, node_build_deps):
need_restart = True
return need_restart
class _use_changes(tuple):
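    """
    An immutable (new_use, new_changes) pair with a required_use_satisfied
    attribute, e.g. (illustrative) _use_changes(frozenset(["foo"]),
    {"foo": True}).
    """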
def __new__(cls, new_use, new_changes, required_use_satisfied=True):
obj = tuple.__new__(cls, [new_use, new_changes])
obj.required_use_satisfied = required_use_satisfied
return obj
class _dynamic_depgraph_config:
"""
    ``dynamic_depgraph_config`` collects the settings and important data structures used in calculating Portage
    dependencies. Each depgraph created by the depgraph.py code gets its own ``dynamic_depgraph_config``, whereas
    ``frozen_depgraph_config`` is shared among all depgraphs.
**self.digraph**
Of particular importance is the instance variable ``self.digraph``, which is an instance of
``portage.util.digraph``, a directed graph data structure. ``portage.util.digraph`` is used for a variety of
purposes in the Portage codebase, but in this particular scenario as ``self.digraph``, it is used to create a
dependency tree of Portage packages. So for ``self.digraph``, each *node* of the directed graph is a ``Package``,
while *edges* connect nodes and each edge can have a Priority. The Priority setting is used to help resolve
circular dependencies, and should be interpreted in the direction of parent to child.
Conceptually, think of ``self.digraph`` as containing user-specified packages or sets at the very top, with
dependencies hanging down as children, and dependencies of those children as children of children, etc. The depgraph
    is intended to model dependency relationships, not the order in which packages should be installed.
**resolving the digraph**
    To convert the digraph to an ordered list of packages to merge, with all dependencies properly satisfied, we
    would start by looking at leaf nodes, which are nodes that have no dependencies of their own. We could then
    traverse the digraph upwards from the leaf nodes towards the parents. Along the way, depending on emerge options,
    we could make decisions about which packages should be installed or rebuilt. This is how ``self.digraph`` is used
    in the code.
**digraph creation**
The ``depgraph.py`` code creates the digraph by first adding emerge arguments to the digraph as the main parents,
so if ``@world`` is specified, then the world set is added as the main parents. Then, ``emerge`` will determine
the dependencies of these packages, and depending on what options are passed to ``emerge``, will look at installed
packages, binary packages and available ebuilds that could be merged to satisfy dependencies, and these will be
added as children in the digraph. Children of children will be added as dependencies as needed, depending on the
depth setting used by ``emerge``.
As the digraph is created, it is perfectly fine for Packages to be added to the digraph that conflict with one
another. After the digraph has been fully populated to the necessary depth, code within ``depgraph.py`` will
identify any conflicts that are modeled within the digraph and determine the best way to handle them.
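    **digraph example**

    A minimal sketch (illustrative only, with made-up node names) of the ``portage.util.digraph`` API as used for
    ``self.digraph``::

        from portage.util.digraph import digraph

        graph = digraph()
        # add(node, parent, priority=0) records a parent -> child edge.
        graph.add("app-misc/child-1.0", "app-misc/parent-1.0")
        graph.parent_nodes("app-misc/child-1.0")  # ["app-misc/parent-1.0"]
        graph.leaf_nodes()  # ["app-misc/child-1.0"], no children of its own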
"""
def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
self.myparams = myparams.copy()
self._vdb_loaded = False
self._allow_backtracking = allow_backtracking
# Maps nodes to the reasons they were selected for reinstallation.
self._reinstall_nodes = {}
# Contains a filtered view of preferred packages that are selected
# from available repositories.
self._filtered_trees = {}
# Contains installed packages and new packages that have been added
# to the graph.
self._graph_trees = {}
# Caches visible packages returned from _select_package, for use in
# depgraph._iter_atoms_for_pkg() SLOT logic.
self._visible_pkgs = {}
# contains the args created by select_files
self._initial_arg_list = []
self.digraph = portage.digraph()
# manages sets added to the graph
self.sets = {}
# contains all nodes pulled in by self.sets
self._set_nodes = set()
# Contains only Blocker -> Uninstall edges
self._blocker_uninstalls = digraph()
# Contains only Package -> Blocker edges
self._blocker_parents = digraph()
# Contains only irrelevant Package -> Blocker edges
self._irrelevant_blockers = digraph()
# Contains only unsolvable Package -> Blocker edges
self._unsolvable_blockers = digraph()
# Contains all Blocker -> Blocked Package edges
# Do not initialize this until the depgraph _validate_blockers
# method is called, so that the _in_blocker_conflict method can
# assert that _validate_blockers has been called first.
self._blocked_pkgs = None
# Contains world packages that have been protected from
# uninstallation but may not have been added to the graph
# if the graph is not complete yet.
self._blocked_world_pkgs = {}
# Contains packages whose dependencies have been traversed.
        # This is used to check if we have accounted for blockers
# relevant to a package.
self._traversed_pkg_deps = set()
self._parent_atoms = {}
self._slot_conflict_handler = None
self._circular_dependency_handler = None
self._serialized_tasks_cache = None
self._scheduler_graph = None
self._displayed_list = None
self._pprovided_args = []
self._missing_args = []
self._masked_installed = set()
self._masked_license_updates = set()
self._unsatisfied_deps_for_display = []
self._unsatisfied_blockers_for_display = None
self._circular_deps_for_display = None
self._dep_stack = []
self._dep_disjunctive_stack = []
self._unsatisfied_deps = []
self._initially_unsatisfied_deps = []
self._ignored_deps = []
self._highest_pkg_cache = {}
self._highest_pkg_cache_cp_map = {}
self._flatten_atoms_cache = {}
self._changed_deps_pkgs = {}
# Binary packages that have been rejected because their USE
# didn't match the user's config. It maps packages to a set
# of flags causing the rejection.
self.ignored_binaries = {}
self._circular_dependency = backtrack_parameters.circular_dependency
self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
self._needed_license_changes = backtrack_parameters.needed_license_changes
self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
self._slot_operator_replace_installed = (
backtrack_parameters.slot_operator_replace_installed
)
self._prune_rebuilds = backtrack_parameters.prune_rebuilds
self._need_restart = False
self._need_config_reload = False
# For conditions that always require user intervention, such as
# unsatisfied REQUIRED_USE (currently has no autounmask support).
self._skip_restart = False
self._backtrack_infos = {}
self._buildpkgonly_deps_unsatisfied = False
self._quickpkg_direct_deps_unsatisfied = False
self._autounmask = self.myparams["autounmask"]
self._displayed_autounmask = False
self._success_without_autounmask = False
self._autounmask_backtrack_disabled = False
self._required_use_unsatisfied = False
self._traverse_ignored_deps = False
self._complete_mode = False
self._slot_operator_deps = {}
self._installed_sonames = collections.defaultdict(list)
self._package_tracker = PackageTracker(
soname_deps=depgraph._frozen_config.soname_deps_enabled
)
# Track missed updates caused by solved conflicts.
self._conflict_missed_update = collections.defaultdict(dict)
dep_check_iface = _dep_check_graph_interface(
will_replace_child=depgraph._will_replace_child,
removal_action="remove" in myparams,
want_update_pkg=depgraph._want_update_pkg,
)
for myroot in depgraph._frozen_config.trees:
self.sets[myroot] = _depgraph_sets()
vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
# This dbapi instance will model the state that the vdb will
# have after new packages have been installed.
fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)
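            # A bare function is used as a cheap placeholder object here;
            # it only needs to carry a dbapi attribute.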
def graph_tree():
pass
graph_tree.dbapi = fakedb
self._graph_trees[myroot] = {}
self._filtered_trees[myroot] = {}
# Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # that have already been made.
self._graph_trees[myroot]["porttree"] = graph_tree
self._graph_trees[myroot]["vartree"] = graph_tree
self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
self._graph_trees[myroot]["graph"] = self.digraph
self._graph_trees[myroot]["graph_interface"] = dep_check_iface
self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
def filtered_tree():
pass
filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
self._filtered_trees[myroot]["porttree"] = filtered_tree
self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
# Passing in graph_tree as the vartree here could lead to better
# atom selections in some cases by causing atoms for packages that
# have been added to the graph to be preferred over other choices.
# However, it can trigger atom selections that result in
# unresolvable direct circular dependencies. For example, this
# happens with gwydion-dylan which depends on either itself or
# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid an
            # unresolvable direct circular dependency.
#
# To solve the problem described above, pass in "graph_db" so that
# packages that have been added to the graph are distinguishable
# from other available packages and installed packages. Also, pass
# the parent package into self._select_atoms() calls so that
# unresolvable direct circular dependencies can be detected and
# avoided when possible.
self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
self._filtered_trees[myroot]["graph"] = self.digraph
self._filtered_trees[myroot]["vartree"] = depgraph._frozen_config.trees[
myroot
]["vartree"]
self._filtered_trees[myroot]["graph_interface"] = dep_check_iface
self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
dbs = []
# (db, pkg_type, built, installed, db_keys)
if "remove" in self.myparams:
# For removal operations, use _dep_check_composite_db
# for availability and visibility checks. This provides
# consistency with install operations, so we don't
# get install/uninstall cycles like in bug #332719.
self._graph_trees[myroot]["porttree"] = filtered_tree
else:
if "--usepkgonly" not in depgraph._frozen_config.myopts:
portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
db_keys = list(portdb._aux_cache_keys)
dbs.append((portdb, "ebuild", False, False, db_keys))
if "--usepkg" in depgraph._frozen_config.myopts:
bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
db_keys = list(bindb._aux_cache_keys)
dbs.append((bindb, "binary", True, False, db_keys))
vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
db_keys = list(
depgraph._frozen_config._trees_orig[myroot][
"vartree"
].dbapi._aux_cache_keys
)
dbs.append((vardb, "installed", True, True, db_keys))
self._filtered_trees[myroot]["dbs"] = dbs
class depgraph:
# Represents the depth of a node that is unreachable from explicit
# user arguments (or their deep dependencies). Such nodes are pulled
# in by the _complete_graph method.
_UNREACHABLE_DEPTH = object()
pkg_tree_map = RootConfig.pkg_tree_map
def __init__(
self,
settings,
trees,
myopts,
myparams,
spinner,
frozen_config=None,
backtrack_parameters=BacktrackParameter(),
allow_backtracking=False,
):
if frozen_config is None:
frozen_config = _frozen_depgraph_config(
settings, trees, myopts, myparams, spinner
)
self._frozen_config = frozen_config
self._dynamic_config = _dynamic_depgraph_config(
self, myparams, allow_backtracking, backtrack_parameters
)
self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
self._select_atoms = self._select_atoms_highest_available
self._select_package = self._select_pkg_highest_available
self._event_loop = asyncio._safe_loop()
self._select_atoms_parent = None
self.query = UserQuery(myopts).query
def _index_binpkgs(self):
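        """
        Populate each bintree's soname provides index with Package instances
        for all available binary packages. Roots whose index is already
        populated (e.g. by a previous run sharing this frozen_config when
        backtracking) are skipped.
        """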
for root in self._frozen_config.trees:
bindb = self._frozen_config.trees[root]["bintree"].dbapi
if bindb._provides_index:
# don't repeat this when backtracking
continue
root_config = self._frozen_config.roots[root]
for cpv in self._frozen_config._trees_orig[root]["bintree"].dbapi.cpv_all():
bindb._provides_inject(self._pkg(cpv, "binary", root_config))
def _load_vdb(self):
"""
Load installed package metadata if appropriate. This used to be called
from the constructor, but that wasn't very nice since this procedure
is slow and it generates spinner output. So, now it's called on-demand
by various methods when necessary.
"""
if self._dynamic_config._vdb_loaded:
return
for myroot in self._frozen_config.trees:
dynamic_deps = "dynamic_deps" in self._dynamic_config.myparams
preload_installed_pkgs = "--nodeps" not in self._frozen_config.myopts
fake_vartree = self._frozen_config.trees[myroot]["vartree"]
if not fake_vartree.dbapi:
# This needs to be called for the first depgraph, but not for
# backtracking depgraphs that share the same frozen_config.
fake_vartree.sync()
# FakeVartree.sync() populates virtuals, and we want
# self.pkgsettings to have them populated too.
self._frozen_config.pkgsettings[myroot] = portage.config(
clone=fake_vartree.settings
)
if preload_installed_pkgs:
vardb = fake_vartree.dbapi
if not dynamic_deps:
for pkg in vardb:
self._dynamic_config._package_tracker.add_installed_pkg(pkg)
self._add_installed_sonames(pkg)
else:
max_jobs = self._frozen_config.myopts.get("--jobs")
max_load = self._frozen_config.myopts.get("--load-average")
scheduler = TaskScheduler(
self._dynamic_deps_preload(fake_vartree),
max_jobs=max_jobs,
max_load=max_load,
event_loop=fake_vartree._portdb._event_loop,
)
scheduler.start()
scheduler.wait()
self._dynamic_config._vdb_loaded = True
def _dynamic_deps_preload(self, fake_vartree):
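        """
        Yield EbuildMetadataPhase tasks, consumed by the TaskScheduler in
        _load_vdb, for installed packages whose ebuild metadata is not
        already cached; packages with valid cached metadata are preloaded
        immediately instead of yielding a task.
        """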
portdb = fake_vartree._portdb
for pkg in fake_vartree.dbapi:
self._spinner_update()
self._dynamic_config._package_tracker.add_installed_pkg(pkg)
self._add_installed_sonames(pkg)
ebuild_path, repo_path = portdb.findname2(pkg.cpv, myrepo=pkg.repo)
if ebuild_path is None:
fake_vartree.dynamic_deps_preload(pkg, None)
continue
metadata, ebuild_hash = portdb._pull_valid_cache(
pkg.cpv, ebuild_path, repo_path
)
if metadata is not None:
fake_vartree.dynamic_deps_preload(pkg, metadata)
else:
proc = EbuildMetadataPhase(
cpv=pkg.cpv,
ebuild_hash=ebuild_hash,
portdb=portdb,
repo_path=repo_path,
settings=portdb.doebuild_settings,
)
proc.addExitListener(self._dynamic_deps_proc_exit(pkg, fake_vartree))
yield proc
class _dynamic_deps_proc_exit:
__slots__ = ("_pkg", "_fake_vartree")
def __init__(self, pkg, fake_vartree):
self._pkg = pkg
self._fake_vartree = fake_vartree
def __call__(self, proc):
metadata = None
if proc.returncode == os.EX_OK:
metadata = proc.metadata
self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
def _compute_abi_rebuild_info(self):
"""
Fill self._forced_rebuilds with packages that cause rebuilds.
"""
debug = "--debug" in self._frozen_config.myopts
installed_sonames = self._dynamic_config._installed_sonames
package_tracker = self._dynamic_config._package_tracker
# Get all atoms that might have caused a forced rebuild.
atoms = {}
for s in self._dynamic_config._initial_arg_list:
if s.force_reinstall:
root = s.root_config.root
atoms.setdefault(root, set()).update(s.pset)
if debug:
writemsg_level(
"forced reinstall atoms:\n", level=logging.DEBUG, noiselevel=-1
)
for root in atoms:
writemsg_level(
" root: %s\n" % root, level=logging.DEBUG, noiselevel=-1
)
for atom in atoms[root]:
writemsg_level(
" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1
)
writemsg_level("\n\n", level=logging.DEBUG, noiselevel=-1)
# Go through all slot operator deps and check if one of these deps
# has a parent that is matched by one of the atoms from above.
forced_rebuilds = {}
for root, rebuild_atoms in atoms.items():
for slot_atom in rebuild_atoms:
inst_pkg, reinst_pkg = self._select_pkg_from_installed(root, slot_atom)
if inst_pkg is reinst_pkg or reinst_pkg is None:
continue
if inst_pkg is not None and inst_pkg.requires is not None:
for atom in inst_pkg.requires:
initial_providers = installed_sonames.get((root, atom))
if initial_providers is None:
continue
final_provider = next(package_tracker.match(root, atom), None)
if final_provider:
continue
for provider in initial_providers:
# Find the replacement child.
child = next(
(
pkg
for pkg in package_tracker.match(
root, provider.slot_atom
)
if not pkg.installed
),
None,
)
if child is None:
continue
forced_rebuilds.setdefault(root, {}).setdefault(
child, set()
).add(inst_pkg)
# Generate pseudo-deps for any slot-operator deps of
# inst_pkg. Its deps aren't in _slot_operator_deps
# because it hasn't been added to the graph, but we
# are interested in any rebuilds that it triggered.
built_slot_op_atoms = []
if inst_pkg is not None:
selected_atoms = self._select_atoms_probe(inst_pkg.root, inst_pkg)
for atom in selected_atoms:
if atom.slot_operator_built:
built_slot_op_atoms.append(atom)
if not built_slot_op_atoms:
continue
# Use a cloned list, since we may append to it below.
deps = self._dynamic_config._slot_operator_deps.get(
(root, slot_atom), []
)[:]
if built_slot_op_atoms and reinst_pkg is not None:
for child in self._dynamic_config.digraph.child_nodes(reinst_pkg):
if child.installed:
continue
for atom in built_slot_op_atoms:
# NOTE: Since atom comes from inst_pkg, and
# reinst_pkg is the replacement parent, there's
# no guarantee that atom will completely match
# child. So, simply use atom.cp and atom.slot
# for matching.
if atom.cp != child.cp:
continue
if atom.slot and atom.slot != child.slot:
continue
deps.append(
Dependency(
atom=atom,
child=child,
root=child.root,
parent=reinst_pkg,
)
)
for dep in deps:
if dep.child.installed:
# Find the replacement child.
child = next(
(
pkg
for pkg in self._dynamic_config._package_tracker.match(
dep.root, dep.child.slot_atom
)
if not pkg.installed
),
None,
)
if child is None:
continue
inst_child = dep.child
else:
child = dep.child
inst_child = self._select_pkg_from_installed(
child.root, child.slot_atom
)[0]
# Make sure the child's slot/subslot has changed. If it
# hasn't, then another child has forced this rebuild.
if (
inst_child
and inst_child.slot == child.slot
and inst_child.sub_slot == child.sub_slot
):
continue
if dep.parent.installed:
# Find the replacement parent.
parent = next(
(
pkg
for pkg in self._dynamic_config._package_tracker.match(
dep.parent.root, dep.parent.slot_atom
)
if not pkg.installed
),
None,
)
if parent is None:
continue
else:
parent = dep.parent
# The child has forced a rebuild of the parent
forced_rebuilds.setdefault(root, {}).setdefault(child, set()).add(
parent
)
if debug:
writemsg_level(
"slot operator dependencies:\n", level=logging.DEBUG, noiselevel=-1
)
for (
root,
slot_atom,
), deps in self._dynamic_config._slot_operator_deps.items():
writemsg_level(
" (%s, %s)\n" % (root, slot_atom),
level=logging.DEBUG,
noiselevel=-1,
)
for dep in deps:
writemsg_level(
" parent: %s\n" % dep.parent,
level=logging.DEBUG,
noiselevel=-1,
)
writemsg_level(
" child: %s (%s)\n" % (dep.child, dep.priority),
level=logging.DEBUG,
noiselevel=-1,
)
writemsg_level("\n\n", level=logging.DEBUG, noiselevel=-1)
writemsg_level("forced rebuilds:\n", level=logging.DEBUG, noiselevel=-1)
for root in forced_rebuilds:
writemsg_level(
" root: %s\n" % root, level=logging.DEBUG, noiselevel=-1
)
for child in forced_rebuilds[root]:
writemsg_level(
" child: %s\n" % child, level=logging.DEBUG, noiselevel=-1
)
for parent in forced_rebuilds[root][child]:
writemsg_level(
" parent: %s\n" % parent,
level=logging.DEBUG,
noiselevel=-1,
)
writemsg_level("\n\n", level=logging.DEBUG, noiselevel=-1)
self._forced_rebuilds = forced_rebuilds
def _show_abi_rebuild_info(self):
if not self._forced_rebuilds:
return
writemsg_stdout(
"\nThe following packages are causing rebuilds:\n\n", noiselevel=-1
)
for root in self._forced_rebuilds:
for child in self._forced_rebuilds[root]:
writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
for parent in self._forced_rebuilds[root][child]:
writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
def _eliminate_ignored_binaries(self):
"""
Eliminate any package from self._dynamic_config.ignored_binaries
for which a more optimal alternative exists.
"""
for pkg in list(self._dynamic_config.ignored_binaries):
for selected_pkg in self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom
):
if selected_pkg > pkg:
self._dynamic_config.ignored_binaries.pop(pkg)
break
# NOTE: The Package.__ge__ implementation accounts for
# differences in build_time, so the warning about "ignored"
# packages will be triggered if both packages are the same
# version and selected_pkg is not the most recent build.
if selected_pkg.type_name == "binary" and selected_pkg >= pkg:
self._dynamic_config.ignored_binaries.pop(pkg)
break
if (
selected_pkg.installed
and selected_pkg.cpv == pkg.cpv
and selected_pkg.build_time == pkg.build_time
):
# We don't care about ignored binaries when an
# identical installed instance is selected to
# fill the slot.
self._dynamic_config.ignored_binaries.pop(pkg)
break
def _ignored_binaries_autounmask_backtrack(self):
"""
Check if there are ignored binaries that would have been
accepted with the current autounmask USE changes.
@rtype: bool
@return: True if there are unnecessary rebuilds that
can be avoided by backtracking
"""
if not all(
[
self._dynamic_config._allow_backtracking,
self._dynamic_config._needed_use_config_changes,
self._dynamic_config.ignored_binaries,
]
):
return False
self._eliminate_ignored_binaries()
# _eliminate_ignored_binaries may have eliminated
# all of the ignored binaries
if not self._dynamic_config.ignored_binaries:
return False
use_changes = collections.defaultdict(
functools.partial(collections.defaultdict, dict)
)
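        # Maps root -> slot_atom -> (pkg, new_use) for packages in the graph.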
for pkg, (
new_use,
changes,
) in self._dynamic_config._needed_use_config_changes.items():
if pkg in self._dynamic_config.digraph:
use_changes[pkg.root][pkg.slot_atom] = (pkg, new_use)
for pkg in self._dynamic_config.ignored_binaries:
selected_pkg, new_use = use_changes[pkg.root].get(
pkg.slot_atom, (None, None)
)
if new_use is None:
continue
if new_use != pkg.use.enabled:
continue
if selected_pkg > pkg:
continue
return True
return False
def _changed_deps_report(self):
"""
Report ebuilds for which the ebuild dependencies have
changed since the installed instance was built. This is
completely silent in the following cases:
* --changed-deps or --dynamic-deps is enabled
* none of the packages with changed deps are in the graph
"""
if (
self._dynamic_config.myparams.get("changed_deps", "n") == "y"
or "dynamic_deps" in self._dynamic_config.myparams
):
return
report_pkgs = []
for pkg, ebuild in self._dynamic_config._changed_deps_pkgs.items():
if pkg.repo != ebuild.repo:
continue
report_pkgs.append((pkg, ebuild))
if not report_pkgs:
return
# TODO: Detect and report various issues:
# - packages with unsatisfiable dependencies
# - packages involved directly in slot or blocker conflicts
# - direct parents of conflict packages
# - packages that prevent upgrade of dependencies to latest versions
graph = self._dynamic_config.digraph
in_graph = False
for pkg, ebuild in report_pkgs:
if pkg in graph:
in_graph = True
break
# Packages with changed deps are harmless if they're not in the
# graph, so it's safe to silently ignore them. This suppresses
# noise for the unaffected user, even though some of the changed
# dependencies might be worthy of revision bumps.
if not in_graph:
return
writemsg(
"\n%s\n\n"
% colorize(
"WARN",
"!!! Detected ebuild dependency change(s) without revision bump:",
),
noiselevel=-1,
)
for pkg, ebuild in report_pkgs:
writemsg(" %s::%s" % (pkg.cpv, pkg.repo), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
msg = []
if "--quiet" not in self._frozen_config.myopts:
msg.extend(
[
"",
"NOTE: Refer to the following page for more information about dependency",
" change(s) without revision bump:",
"",
" https://wiki.gentoo.org/wiki/Project:Portage/Changed_Deps",
"",
" In order to suppress reports about dependency changes, add",
" --changed-deps-report=n to the EMERGE_DEFAULT_OPTS variable in",
" '/etc/portage/make.conf'.",
]
)
# Include this message for --quiet mode, since the user may be experiencing
# problems that are solvable by using --changed-deps.
msg.extend(
[
"",
"HINT: In order to avoid problems involving changed dependencies, use the",
" --changed-deps option to automatically trigger rebuilds when changed",
" dependencies are detected. Refer to the emerge man page for more",
" information about this option.",
]
)
for line in msg:
if line:
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
def _show_ignored_binaries(self):
"""
Show binaries that have been ignored because their USE didn't
match the user's config.
"""
if (
not self._dynamic_config.ignored_binaries
or "--quiet" in self._frozen_config.myopts
):
return
self._eliminate_ignored_binaries()
ignored_binaries = {}
for pkg in self._dynamic_config.ignored_binaries:
for reason, info in self._dynamic_config.ignored_binaries[pkg].items():
ignored_binaries.setdefault(reason, {})[pkg] = info
if self._dynamic_config.myparams.get("binpkg_respect_use") in ("y", "n"):
ignored_binaries.pop("respect_use", None)
if self._dynamic_config.myparams.get("binpkg_changed_deps") in ("y", "n"):
ignored_binaries.pop("changed_deps", None)
if not ignored_binaries:
return
self._show_merge_list()
if "respect_use" in ignored_binaries:
self._show_ignored_binaries_respect_use(ignored_binaries["respect_use"])
if "changed_deps" in ignored_binaries:
self._show_ignored_binaries_changed_deps(ignored_binaries["changed_deps"])
def _show_ignored_binaries_respect_use(self, respect_use):
writemsg(
"\n!!! The following binary packages have been ignored "
+ "due to non matching USE:\n\n",
noiselevel=-1,
)
for pkg, flags in respect_use.items():
flag_display = []
for flag in sorted(flags):
if flag not in pkg.use.enabled:
flag = "-" + flag
flag_display.append(flag)
flag_display = " ".join(flag_display)
# The user can paste this line into package.use
writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
msg = [
"",
"NOTE: The --binpkg-respect-use=n option will prevent emerge",
" from ignoring these binary packages if possible.",
" Using --binpkg-respect-use=y will silence this warning.",
]
for line in msg:
if line:
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
def _show_ignored_binaries_changed_deps(self, changed_deps):
writemsg(
"\n!!! The following binary packages have been "
"ignored due to changed dependencies:\n\n",
noiselevel=-1,
)
for pkg in changed_deps:
msg = " %s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
if pkg.root_config.settings["ROOT"] != "/":
msg += " for %s" % pkg.root
writemsg("%s\n" % msg, noiselevel=-1)
msg = [
"",
"NOTE: The --binpkg-changed-deps=n option will prevent emerge",
" from ignoring these binary packages if possible.",
" Using --binpkg-changed-deps=y will silence this warning.",
]
for line in msg:
if line:
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
def _get_missed_updates(self):
# In order to minimize noise, show only the highest
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in chain(
self._dynamic_config._runtime_pkg_mask.items(),
self._dynamic_config._conflict_missed_update.items(),
):
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
continue
missed_update = True
any_selected = False
for chosen_pkg in self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom
):
any_selected = True
if chosen_pkg > pkg or (
not chosen_pkg.installed and chosen_pkg.version == pkg.version
):
missed_update = False
break
if any_selected and missed_update:
k = (pkg.root, pkg.slot_atom)
if k in missed_updates:
other_pkg, mask_type, parent_atoms = missed_updates[k]
if other_pkg > pkg:
continue
for mask_type, parent_atoms in mask_reasons.items():
if not parent_atoms:
continue
missed_updates[k] = (pkg, mask_type, parent_atoms)
break
return missed_updates
def _show_missed_update(self):
missed_updates = self._get_missed_updates()
if not missed_updates:
return
missed_update_types = {}
for pkg, mask_type, parent_atoms in missed_updates.values():
missed_update_types.setdefault(mask_type, []).append((pkg, parent_atoms))
if (
"--quiet" in self._frozen_config.myopts
and "--debug" not in self._frozen_config.myopts
):
missed_update_types.pop("slot conflict", None)
missed_update_types.pop("missing dependency", None)
self._show_missed_update_slot_conflicts(
missed_update_types.get("slot conflict")
)
self._show_missed_update_unsatisfied_dep(
missed_update_types.get("missing dependency")
)
def _show_missed_update_unsatisfied_dep(self, missed_updates):
if not missed_updates:
return
self._show_merge_list()
backtrack_masked = []
for pkg, parent_atoms in missed_updates:
try:
for parent, root, atom in parent_atoms:
self._show_unsatisfied_dep(
root, atom, myparent=parent, check_backtrack=True
)
except self._backtrack_mask:
# This is displayed below in abbreviated form.
backtrack_masked.append((pkg, parent_atoms))
continue
writemsg(
"\n!!! The following update has been skipped "
+ "due to unsatisfied dependencies:\n\n",
noiselevel=-1,
)
writemsg(str(pkg.slot_atom), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n\n", noiselevel=-1)
selected_pkg = next(
self._dynamic_config._package_tracker.match(pkg.root, pkg.slot_atom),
None,
)
writemsg(" selected: %s\n" % (selected_pkg,), noiselevel=-1)
writemsg(
" skipped: %s (see unsatisfied dependency below)\n" % (pkg,),
noiselevel=-1,
)
for parent, root, atom in parent_atoms:
self._show_unsatisfied_dep(root, atom, myparent=parent)
writemsg("\n", noiselevel=-1)
if backtrack_masked:
# These are shown in abbreviated form, in order to avoid terminal
# flooding from mask messages as reported in bug #285832.
writemsg(
"\n!!! The following update(s) have been skipped "
+ "due to unsatisfied dependencies\n"
+ "!!! triggered by backtracking:\n\n",
noiselevel=-1,
)
for pkg, parent_atoms in backtrack_masked:
writemsg(str(pkg.slot_atom), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
def _show_missed_update_slot_conflicts(self, missed_updates):
if not missed_updates:
return
self._show_merge_list()
msg = [
"\nWARNING: One or more updates/rebuilds have been "
"skipped due to a dependency conflict:\n\n"
]
indent = " "
for pkg, parent_atoms in missed_updates:
msg.append(str(pkg.slot_atom))
if pkg.root_config.settings["ROOT"] != "/":
msg.append(" for %s" % (pkg.root,))
msg.append("\n\n")
msg.append(indent)
msg.append(
"%s %s"
% (
pkg,
pkg_use_display(
pkg,
self._frozen_config.myopts,
modified_use=self._pkg_use_enabled(pkg),
),
)
)
msg.append(" conflicts with\n")
for parent, atom in parent_atoms:
if isinstance(parent, (PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
msg.append(2 * indent)
msg.append(str(parent))
msg.append("\n")
else:
# Display the specific atom from SetArg or
# Package types.
atom, marker = format_unmatched_atom(
pkg, atom, self._pkg_use_enabled
)
if isinstance(parent, Package):
use_display = pkg_use_display(
parent,
self._frozen_config.myopts,
modified_use=self._pkg_use_enabled(parent),
)
else:
use_display = ""
msg.append(2 * indent)
msg.append("%s required by %s %s\n" % (atom, parent, use_display))
msg.append(2 * indent)
msg.append(marker)
msg.append("\n")
msg.append("\n")
writemsg("".join(msg), noiselevel=-1)
def _show_slot_collision_notice(self):
"""Show an informational message advising the user to mask one of the
the packages. In some cases it may be possible to resolve this
automatically, but support for backtracking (removal nodes that have
already been selected) will be required in order to handle all possible
cases.
"""
if not any(self._dynamic_config._package_tracker.slot_conflicts()):
return
self._show_merge_list()
if self._dynamic_config._slot_conflict_handler is None:
self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
handler = self._dynamic_config._slot_conflict_handler
conflict = handler.get_conflict()
writemsg(conflict, noiselevel=-1)
explanation = handler.get_explanation()
if explanation:
writemsg(explanation, noiselevel=-1)
return
if "--quiet" in self._frozen_config.myopts:
return
msg = [
"It may be possible to solve this problem "
"by using package.mask to prevent one of "
"those packages from being selected. "
"However, it is also possible that conflicting "
"dependencies exist such that they are impossible to "
"satisfy simultaneously. If such a conflict exists in "
"the dependencies of two different packages, then those "
"packages can not be installed simultaneously."
]
backtrack_opt = self._frozen_config.myopts.get("--backtrack")
if not self._dynamic_config._allow_backtracking and (
backtrack_opt is None or (backtrack_opt > 0 and backtrack_opt < 30)
):
msg.append(
" You may want to try a larger value of the "
"--backtrack option, such as --backtrack=30, "
"in order to see if that will solve this conflict "
"automatically."
)
for line in textwrap.wrap("".join(msg), 70):
writemsg(line + "\n", noiselevel=-1)
writemsg("\n", noiselevel=-1)
msg = (
"For more information, see MASKED PACKAGES "
"section in the emerge man page or refer "
"to the Gentoo Handbook."
)
for line in textwrap.wrap(msg, 70):
writemsg(line + "\n", noiselevel=-1)
writemsg("\n", noiselevel=-1)
def _solve_non_slot_operator_slot_conflicts(self):
"""
        This function solves slot conflicts which can be solved by simply
        choosing one of the conflicting packages and removing all the others.
        It is also able to solve somewhat more complex cases where several
        conflicts can only be solved simultaneously.
"""
debug = "--debug" in self._frozen_config.myopts
# List all conflicts. Ignore those that involve slot operator rebuilds
# as the logic there needs special slot conflict behavior which isn't
# provided by this function.
conflicts = []
for conflict in self._dynamic_config._package_tracker.slot_conflicts():
slot_key = conflict.root, conflict.atom
if slot_key not in self._dynamic_config._slot_operator_replace_installed:
conflicts.append(conflict)
if not conflicts:
return
if debug:
writemsg_level(
"\n!!! Slot conflict handler started.\n",
level=logging.DEBUG,
noiselevel=-1,
)
# Get a set of all conflicting packages.
conflict_pkgs = set()
for conflict in conflicts:
conflict_pkgs.update(conflict)
# Get the list of other packages which are only
# required by conflict packages.
indirect_conflict_candidates = set()
for pkg in conflict_pkgs:
indirect_conflict_candidates.update(
self._dynamic_config.digraph.child_nodes(pkg)
)
indirect_conflict_candidates -= conflict_pkgs
indirect_conflict_pkgs = set()
while indirect_conflict_candidates:
pkg = indirect_conflict_candidates.pop()
only_conflict_parents = True
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
only_conflict_parents = False
break
if not only_conflict_parents:
continue
indirect_conflict_pkgs.add(pkg)
for child in self._dynamic_config.digraph.child_nodes(pkg):
if child in conflict_pkgs or child in indirect_conflict_pkgs:
continue
indirect_conflict_candidates.add(child)
# Create a graph containing the conflict packages
# and a special 'non_conflict_node' that represents
# all non-conflict packages.
conflict_graph = digraph()
non_conflict_node = "(non-conflict package)"
conflict_graph.add(non_conflict_node, None)
for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
conflict_graph.add(pkg, None)
# Add parent->child edges for each conflict package.
        # Parents which aren't conflict packages are represented
# by 'non_conflict_node'.
# If several conflicting packages are matched, but not all,
# add a tuple with the matched packages to the graph.
class or_tuple(tuple):
"""
Helper class for debug printing.
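            For example (hypothetical packages), str(or_tuple((a, b)))
            renders as "(a,b)".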
"""
def __str__(self):
return "(%s)" % ",".join(str(pkg) for pkg in self)
non_matching_forced = set()
for conflict in conflicts:
if debug:
writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1)
writemsg_level(
" root: %s\n" % conflict.root,
level=logging.DEBUG,
noiselevel=-1,
)
writemsg_level(
" atom: %s\n" % conflict.atom,
level=logging.DEBUG,
noiselevel=-1,
)
for pkg in conflict:
writemsg_level(
" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1
)
all_parent_atoms = set()
highest_pkg = None
inst_pkg = None
for pkg in conflict:
if pkg.installed:
inst_pkg = pkg
if highest_pkg is None or highest_pkg < pkg:
highest_pkg = pkg
all_parent_atoms.update(self._dynamic_config._parent_atoms.get(pkg, []))
for parent, atom in all_parent_atoms:
is_arg_parent = inst_pkg is not None and not self._want_installed_pkg(
inst_pkg
)
is_non_conflict_parent = (
parent not in conflict_pkgs and parent not in indirect_conflict_pkgs
)
if debug:
writemsg_level(
" parent: %s\n" % parent,
level=logging.DEBUG,
noiselevel=-1,
)
writemsg_level(
" arg, non-conflict: %s, %s\n"
% (is_arg_parent, is_non_conflict_parent),
level=logging.DEBUG,
noiselevel=-1,
)
writemsg_level(
" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1
)
if is_non_conflict_parent:
parent = non_conflict_node
matched = []
for pkg in conflict:
if atom.match(pkg.with_use(self._pkg_use_enabled(pkg))) and not (
is_arg_parent and pkg.installed
):
matched.append(pkg)
if debug:
for match in matched:
writemsg_level(
" match: %s\n" % match,
level=logging.DEBUG,
noiselevel=-1,
)
if len(matched) > 1:
# Even if all packages match, this parent must still
# be added to the conflict_graph. Otherwise, we risk
# removing all of these packages from the depgraph,
# which could cause a missed update (bug #522084).
conflict_graph.add(or_tuple(matched), parent)
elif len(matched) == 1:
conflict_graph.add(matched[0], parent)
else:
# This typically means that autounmask broke a
# USE-dep, but it could also be due to the slot
# not matching due to multislot (bug #220341).
# Either way, don't try to solve this conflict.
# Instead, force them all into the graph so that
# they are protected from removal.
non_matching_forced.update(conflict)
if debug:
for pkg in conflict:
writemsg_level(
" non-match: %s\n" % pkg,
level=logging.DEBUG,
noiselevel=-1,
)
for pkg in indirect_conflict_pkgs:
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
parent = non_conflict_node
conflict_graph.add(pkg, parent)
if debug:
writemsg_level(
"\n!!! Slot conflict graph:\n", level=logging.DEBUG, noiselevel=-1
)
conflict_graph.debug_print()
# Now select required packages. Collect them in the
# 'forced' set.
forced = {non_conflict_node}
forced |= non_matching_forced
unexplored = {non_conflict_node}
# or_tuples get special handling. We first explore
# all packages in the hope of having forced one of
# the packages in the tuple. This way we don't have
# to choose one.
unexplored_tuples = set()
explored_nodes = set()
while unexplored:
while True:
try:
node = unexplored.pop()
except KeyError:
break
for child in conflict_graph.child_nodes(node):
# Don't explore a node more than once, in order
# to avoid infinite recursion. The forced set
# cannot be used for this purpose, since it can
# contain unexplored nodes from non_matching_forced.
if child in explored_nodes:
continue
explored_nodes.add(child)
forced.add(child)
if isinstance(child, Package):
unexplored.add(child)
else:
unexplored_tuples.add(child)
# Now handle unexplored or_tuples. Move on with packages
# once we had to choose one.
while unexplored_tuples:
nodes = unexplored_tuples.pop()
if any(node in forced for node in nodes):
# At least one of the packages in the
# tuple is already forced, which means the
# dependency represented by this tuple
# is satisfied.
continue
                # We now have to choose one of the packages in the tuple.
                # In theory one could solve more conflicts if we were able
                # to try different choices here, but that has lots of other
                # problems. For now choose the package that was pulled in
                # first, as this should be the most desirable choice
                # (otherwise it wouldn't have been the first one).
forced.add(nodes[0])
unexplored.add(nodes[0])
break
# Remove 'non_conflict_node' and or_tuples from 'forced'.
forced = {pkg for pkg in forced if isinstance(pkg, Package)}
        # Add dependencies of forced packages.
stack = list(forced)
traversed = set()
while stack:
pkg = stack.pop()
traversed.add(pkg)
for child in conflict_graph.child_nodes(pkg):
if isinstance(child, Package) and child not in traversed:
forced.add(child)
stack.append(child)
non_forced = {pkg for pkg in conflict_pkgs if pkg not in forced}
if debug:
writemsg_level(
"\n!!! Slot conflict solution:\n", level=logging.DEBUG, noiselevel=-1
)
for conflict in conflicts:
writemsg_level(
" Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
level=logging.DEBUG,
noiselevel=-1,
)
for pkg in conflict:
if pkg in forced:
writemsg_level(
" keep: %s\n" % pkg,
level=logging.DEBUG,
noiselevel=-1,
)
else:
writemsg_level(
" remove: %s\n" % pkg,
level=logging.DEBUG,
noiselevel=-1,
)
broken_packages = set()
for pkg in non_forced:
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if isinstance(parent, Package) and parent not in non_forced:
# Non-forcing set args are expected to be a parent of all
# packages in the conflict.
broken_packages.add(parent)
self._remove_pkg(pkg)
        # Process the dependencies of chosen conflict packages
# again to properly account for blockers.
broken_packages |= forced
# Filter out broken packages which have been removed during
# recursive removal in self._remove_pkg.
        broken_packages = [
            pkg
            for pkg in broken_packages
            if self._dynamic_config._package_tracker.contains(pkg, installed=False)
        ]
self._dynamic_config._dep_stack.extend(broken_packages)
if broken_packages:
# Process dependencies. This cannot fail because we just ensured that
# the remaining packages satisfy all dependencies.
self._create_graph()
# Record missed updates.
for conflict in conflicts:
for pkg in conflict:
if pkg not in non_forced:
continue
for other in conflict:
if other is pkg:
continue
for parent, atom in self._dynamic_config._parent_atoms.get(
other, []
):
if not atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
self._dynamic_config._conflict_missed_update[
pkg
].setdefault("slot conflict", set())
self._dynamic_config._conflict_missed_update[pkg][
"slot conflict"
].add((parent, atom))
def _process_slot_conflicts(self):
"""
If there are any slot conflicts and backtracking is enabled,
_complete_graph should complete the graph before this method
is called, so that all relevant reverse dependencies are
available for use in backtracking decisions.
"""
self._solve_non_slot_operator_slot_conflicts()
if not self._validate_blockers():
# Blockers don't trigger the _skip_restart flag, since
# backtracking may solve blockers when it solves slot
# conflicts (or by blind luck).
raise self._unknown_internal_error()
# Both _process_slot_conflict and _slot_operator_trigger_reinstalls
# can call _slot_operator_update_probe, which requires that
# self._dynamic_config._blocked_pkgs has been initialized by a
# call to the _validate_blockers method.
for conflict in self._dynamic_config._package_tracker.slot_conflicts():
self._process_slot_conflict(conflict)
if self._dynamic_config._allow_backtracking:
self._slot_operator_trigger_reinstalls()
def _process_slot_conflict(self, conflict):
"""
Process slot conflict data to identify specific atoms which
lead to conflict. These atoms only match a subset of the
packages that have been pulled into a given slot.
"""
root = conflict.root
slot_atom = conflict.atom
slot_nodes = conflict.pkgs
debug = "--debug" in self._frozen_config.myopts
slot_parent_atoms = set()
for pkg in slot_nodes:
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if not parent_atoms:
continue
slot_parent_atoms.update(parent_atoms)
conflict_pkgs = []
conflict_atoms = {}
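# Maps each parent atom to the set of conflict packages that
# it fails to match.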
for pkg in slot_nodes:
if (
self._dynamic_config._allow_backtracking
and pkg in self._dynamic_config._runtime_pkg_mask
):
if debug:
writemsg_level(
"!!! backtracking loop detected: %s %s\n"
% (pkg, self._dynamic_config._runtime_pkg_mask[pkg]),
level=logging.DEBUG,
noiselevel=-1,
)
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if parent_atoms is None:
parent_atoms = set()
self._dynamic_config._parent_atoms[pkg] = parent_atoms
all_match = True
for parent_atom in slot_parent_atoms:
if parent_atom in parent_atoms:
continue
parent, atom = parent_atom
if atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
parent_atoms.add(parent_atom)
else:
all_match = False
conflict_atoms.setdefault(parent_atom, set()).add(pkg)
if not all_match:
conflict_pkgs.append(pkg)
if (
conflict_pkgs
and self._dynamic_config._allow_backtracking
and not self._accept_blocker_conflicts()
):
remaining = []
for pkg in conflict_pkgs:
if self._slot_conflict_backtrack_abi(pkg, slot_nodes, conflict_atoms):
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
config.setdefault("slot_conflict_abi", set()).add(pkg)
else:
remaining.append(pkg)
if remaining:
self._slot_confict_backtrack(
root, slot_atom, slot_parent_atoms, remaining
)
def _slot_confict_backtrack(self, root, slot_atom, all_parents, conflict_pkgs):
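"""
Record backtracking info that will mask one or more of the
conflicting packages on a subsequent pass. Lower versions are
masked first in order to avoid missed updates, and similar
missed updates are grouped into the same backtracking choice.
"""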
debug = "--debug" in self._frozen_config.myopts
existing_node = next(
self._dynamic_config._package_tracker.match(
root, slot_atom, installed=False
)
)
if existing_node not in conflict_pkgs:
# Even though all parent atoms match existing_node,
# consider masking it in order to avoid a missed update
# as in bug 692746.
conflict_pkgs.append(existing_node)
# In order to avoid a missed update, first mask lower versions
# that conflict with higher versions (the backtracker visits
# these in reverse order).
conflict_pkgs.sort(reverse=True)
backtrack_data = []
for to_be_masked in conflict_pkgs:
# For missed update messages, find out which parent
# atoms did not match to_be_masked.
parent_atoms = self._dynamic_config._parent_atoms.get(to_be_masked, set())
conflict_atoms = set(
parent_atom
for parent_atom in all_parents
if parent_atom not in parent_atoms
)
similar_pkgs = []
if conflict_atoms:
# If the conflict has been triggered by a missed update, then
# we can avoid excessive backtracking if we detect similar missed
# updates and mask them as part of the same backtracking choice.
for similar_pkg in self._iter_similar_available(
to_be_masked, slot_atom
):
if similar_pkg in conflict_pkgs:
continue
similar_conflict_atoms = []
for parent_atom in conflict_atoms:
parent, atom = parent_atom
if not atom.match(similar_pkg):
similar_conflict_atoms.append(parent_atom)
if similar_conflict_atoms:
similar_pkgs.append((similar_pkg, set(similar_conflict_atoms)))
similar_pkgs.append((to_be_masked, conflict_atoms))
backtrack_data.append(tuple(similar_pkgs))
# Prefer choices that minimize conflict atoms. This is intended
# to take precedence over the earlier package version sort. The
# package version sort is still needed or else choices for the
# testOverlapSlotConflict method of VirtualMinimizeChildrenTestCase
# become non-deterministic.
backtrack_data.sort(key=lambda similar_pkgs: len(similar_pkgs[-1][1]))
to_be_masked = [item[0] for item in backtrack_data[-1]]
self._dynamic_config._backtrack_infos.setdefault("slot conflict", []).append(
backtrack_data
)
self._dynamic_config._need_restart = True
if debug:
msg = [
"",
"",
"backtracking due to slot conflict:",
" first package: %s" % existing_node,
" package(s) to mask: %s" % str(to_be_masked),
" slot: %s" % slot_atom,
" parents: %s"
% ", ".join("(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents),
"",
]
writemsg_level(
"".join("%s\n" % l for l in msg), noiselevel=-1, level=logging.DEBUG
)
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
"""
If one or more conflict atoms have a slot/sub-slot dep that can be resolved
by rebuilding the parent package, then schedule the rebuild via
backtracking, and return True. Otherwise, return False.
"""
found_update = False
for parent_atom, conflict_pkgs in conflict_atoms.items():
parent, atom = parent_atom
if not isinstance(parent, Package):
continue
if not parent.built:
continue
if not atom.soname and not (atom.package and atom.slot_operator_built):
continue
for other_pkg in slot_nodes:
if other_pkg in conflict_pkgs:
continue
dep = Dependency(
atom=atom, child=other_pkg, parent=parent, root=pkg.root
)
new_dep = self._slot_operator_update_probe_slot_conflict(dep)
if new_dep is not None:
self._slot_operator_update_backtrack(dep, new_dep=new_dep)
found_update = True
return found_update
def _slot_change_probe(self, dep):
"""
@rtype: Package or None
@return: an unbuilt instance of dep.child with a changed
slot/sub-slot, indicating that dep.child should be rebuilt
(without revbump, as in bug #456208), otherwise None
"""
if not (
isinstance(dep.parent, Package) and not dep.parent.built and dep.child.built
):
return None
root_config = self._frozen_config.roots[dep.root]
matches = []
try:
matches.append(
self._pkg(dep.child.cpv, "ebuild", root_config, myrepo=dep.child.repo)
)
except PackageNotFound:
pass
for unbuilt_child in chain(
matches,
self._iter_match_pkgs(
root_config, "ebuild", Atom("=%s" % (dep.child.cpv,))
),
):
if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
continue
if self._frozen_config.excluded_pkgs.findAtomForPackage(
unbuilt_child, modified_use=self._pkg_use_enabled(unbuilt_child)
):
continue
if not self._pkg_visibility_check(unbuilt_child):
continue
break
else:
return None
if (
unbuilt_child.slot == dep.child.slot
and unbuilt_child.sub_slot == dep.child.sub_slot
):
return None
return unbuilt_child
def _slot_change_backtrack(self, dep, new_child_slot):
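"""
Record backtracking info that will mask the built instance of
dep.child, or trigger replacement of the installed instance, so
that the unbuilt instance found by _slot_change_probe can be
used instead.
"""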
child = dep.child
if "--debug" in self._frozen_config.myopts:
msg = [
"",
"",
"backtracking due to slot/sub-slot change:",
" child package: %s" % child,
" child slot: %s/%s" % (child.slot, child.sub_slot),
" new child: %s" % new_child_slot,
" new child slot: %s/%s"
% (new_child_slot.slot, new_child_slot.sub_slot),
" parent package: %s" % dep.parent,
" atom: %s" % dep.atom,
"",
]
writemsg_level("\n".join(msg), noiselevel=-1, level=logging.DEBUG)
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
# mask unwanted binary packages if necessary
masks = {}
if not child.installed:
masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
if masks:
config.setdefault("slot_operator_mask_built", {}).update(masks)
# trigger replacement of installed packages if necessary
reinstalls = set()
if child.installed:
replacement_atom = self._replace_installed_atom(child)
if replacement_atom is not None:
reinstalls.add((child.root, replacement_atom))
if reinstalls:
config.setdefault("slot_operator_replace_installed", set()).update(
reinstalls
)
self._dynamic_config._need_restart = True
def _slot_operator_update_backtrack(self, dep, new_child_slot=None, new_dep=None):
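"""
Record backtracking info that will mask built instances of the
child and/or parent packages, and trigger replacement of
installed instances, so that the update found by
_slot_operator_update_probe can be applied on the next pass.
"""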
if new_child_slot is None:
child = dep.child
else:
child = new_child_slot
if "--debug" in self._frozen_config.myopts:
msg = [
"",
"",
"backtracking due to missed slot abi update:",
" child package: %s" % child,
]
if new_child_slot is not None:
msg.append(" new child slot package: %s" % new_child_slot)
msg.append(" parent package: %s" % dep.parent)
if new_dep is not None:
msg.append(" new parent pkg: %s" % new_dep.parent)
msg.append(" atom: %s" % dep.atom)
msg.append("")
writemsg_level("\n".join(msg), noiselevel=-1, level=logging.DEBUG)
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
# mask unwanted binary packages if necessary
abi_masks = {}
if new_child_slot is None:
if not child.installed:
abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
if not dep.parent.installed:
abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
if abi_masks:
config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
# trigger replacement of installed packages if necessary
abi_reinstalls = set()
if dep.parent.installed:
if new_dep is not None:
replacement_atom = new_dep.parent.slot_atom
else:
replacement_atom = self._replace_installed_atom(dep.parent)
if replacement_atom is not None:
abi_reinstalls.add((dep.parent.root, replacement_atom))
if new_child_slot is None and child.installed:
replacement_atom = self._replace_installed_atom(child)
if replacement_atom is not None:
abi_reinstalls.add((child.root, replacement_atom))
if abi_reinstalls:
config.setdefault("slot_operator_replace_installed", set()).update(
abi_reinstalls
)
self._dynamic_config._need_restart = True
def _slot_operator_update_probe_slot_conflict(self, dep):
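"""
Wrapper around _slot_operator_update_probe for the slot conflict
case, retrying the probe with progressively more permissive
autounmask levels when autounmask is enabled.
"""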
new_dep = self._slot_operator_update_probe(dep, slot_conflict=True)
if new_dep is not None:
return new_dep
if self._dynamic_config._autounmask is True:
for autounmask_level in self._autounmask_levels():
new_dep = self._slot_operator_update_probe(
dep, slot_conflict=True, autounmask_level=autounmask_level
)
if new_dep is not None:
return new_dep
return None
def _slot_operator_update_probe(
self, dep, new_child_slot=False, slot_conflict=False, autounmask_level=None
):
"""
slot/sub-slot := operators tend to prevent updates from getting pulled in,
since installed packages pull in packages with the slot/sub-slot that they
were built against. Detect this case so that we can schedule rebuilds
and reinstalls when appropriate.
NOTE: This function only searches for updates that involve upgrades
to higher versions, since the logic required to detect when a
downgrade would be desirable is not implemented.
"""
if (
dep.child.installed
and self._frozen_config.excluded_pkgs.findAtomForPackage(
dep.child, modified_use=self._pkg_use_enabled(dep.child)
)
):
return None
if (
dep.parent.installed
and self._frozen_config.excluded_pkgs.findAtomForPackage(
dep.parent, modified_use=self._pkg_use_enabled(dep.parent)
)
):
return None
debug = "--debug" in self._frozen_config.myopts
selective = "selective" in self._dynamic_config.myparams
want_downgrade = None
want_downgrade_parent = None
def check_reverse_dependencies(
existing_pkg, candidate_pkg, replacement_parent=None
):
"""
Check if candidate_pkg satisfies all of existing_pkg's non-
slot operator parents.
"""
built_slot_operator_parents = set()
for parent, atom in self._dynamic_config._parent_atoms.get(
existing_pkg, []
):
if atom.soname or atom.slot_operator_built:
built_slot_operator_parents.add(parent)
for parent, atom in self._dynamic_config._parent_atoms.get(
existing_pkg, []
):
if isinstance(parent, Package):
if parent in built_slot_operator_parents:
if hasattr(atom, "_orig_atom"):
# If atom is the result of virtual expansion, then
# dereference it to _orig_atom so that it will be correctly
# handled as a built slot operator dependency when
# appropriate (see bug 764764).
atom = atom._orig_atom
# This parent may need to be rebuilt, therefore
# discard its soname and built slot operator
# dependency components which are not necessarily
# relevant.
if atom.soname:
continue
elif atom.package and atom.slot_operator_built:
# This discards the slot/subslot component.
atom = atom.with_slot("=")
if replacement_parent is not None and (
replacement_parent.slot_atom == parent.slot_atom
or replacement_parent.cpv == parent.cpv
):
# This parent is irrelevant because we intend to
# replace it with replacement_parent.
continue
if any(
pkg is not parent
and (pkg.slot_atom == parent.slot_atom or pkg.cpv == parent.cpv)
for pkg in self._dynamic_config._package_tracker.match(
parent.root, Atom(parent.cp)
)
):
# This parent may need to be eliminated due to a
# slot conflict, so its dependencies aren't
# necessarily relevant.
continue
if not self._too_deep(
parent.depth
) and not self._frozen_config.excluded_pkgs.findAtomForPackage(
parent, modified_use=self._pkg_use_enabled(parent)
):
# Check for common reasons that the parent's
# dependency might be irrelevant.
if self._upgrade_available(parent):
# This parent could be replaced by
# an upgrade (bug 584626).
continue
if parent.installed and self._in_blocker_conflict(parent):
# This parent could be uninstalled in order
# to solve a blocker conflict (bug 612772).
continue
if self._dynamic_config.digraph.has_edge(parent, existing_pkg):
# There is a direct circular dependency between
# parent and existing_pkg. This type of
# relationship tends to prevent updates
# of packages (bug 612874). Since candidate_pkg
# is available, we risk a missed update if we
# don't try to eliminate this parent from the
# graph. Therefore, we give candidate_pkg a
# chance, and assume that it will be masked
# by backtracking if necessary.
continue
atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
if not atom_set.findAtomForPackage(
candidate_pkg, modified_use=self._pkg_use_enabled(candidate_pkg)
):
if debug:
parent_atoms = []
for (
other_parent,
other_atom,
) in self._dynamic_config._parent_atoms.get(existing_pkg, []):
if other_parent is parent:
parent_atoms.append(other_atom)
msg = (
"",
"",
"check_reverse_dependencies:",
" candidate package does not match atom '%s': %s"
% (atom, candidate_pkg),
" parent: %s" % parent,
" parent atoms: %s" % " ".join(parent_atoms),
"",
)
writemsg_level(
"\n".join(msg), noiselevel=-1, level=logging.DEBUG
)
return False
return True
for replacement_parent in self._iter_similar_available(
dep.parent, dep.parent.slot_atom, autounmask_level=autounmask_level
):
if replacement_parent is dep.parent:
continue
if replacement_parent < dep.parent:
if want_downgrade_parent is None:
want_downgrade_parent = self._downgrade_probe(dep.parent)
if not want_downgrade_parent:
continue
if not check_reverse_dependencies(dep.parent, replacement_parent):
continue
selected_atoms = None
try:
atoms = self._flatten_atoms(
replacement_parent, self._pkg_use_enabled(replacement_parent)
)
except InvalidDependString:
continue
if replacement_parent.requires is not None:
atoms = list(atoms)
atoms.extend(replacement_parent.requires)
# List of lists of (child, atom) pairs, one list per atom.
replacement_candidates = []
# Set of all packages all atoms can agree on.
all_candidate_pkgs = None
for atom in atoms:
# The _select_atoms_probe method is expensive, so initialization
# of this variable is only performed on demand.
atom_not_selected = None
if not atom.package:
unevaluated_atom = None
if atom.match(dep.child):
# We are searching for a replacement_parent
# atom that will pull in a different child,
# so continue checking the rest of the atoms.
continue
else:
if atom.blocker or atom.cp != dep.child.cp:
continue
# Discard USE deps, we're only searching for an
# approximate pattern, and dealing with USE states
# is too complex for this purpose.
unevaluated_atom = atom.unevaluated_atom
atom = atom.without_use
if replacement_parent.built and portage.dep._match_slot(
atom, dep.child
):
# We are searching for a replacement_parent
# atom that will pull in a different child,
# so continue checking the rest of the atoms.
continue
candidate_pkg_atoms = []
candidate_pkgs = []
for pkg in self._iter_similar_available(dep.child, atom):
if (
dep.atom.package
and pkg.slot == dep.child.slot
and pkg.sub_slot == dep.child.sub_slot
):
# If slot/sub-slot is identical, then there's
# no point in updating.
continue
if new_child_slot:
if pkg.slot == dep.child.slot:
continue
if pkg < dep.child:
# the new slot only matters if the
# package version is higher
continue
else:
if pkg.slot != dep.child.slot:
continue
if pkg < dep.child:
if want_downgrade is None:
want_downgrade = self._downgrade_probe(dep.child)
# be careful not to trigger a rebuild when
# the only version available with a
# different slot_operator is an older version
if not want_downgrade:
continue
if pkg.version == dep.child.version and not dep.child.built:
continue
insignificant = False
if (
not slot_conflict
and selective
and dep.parent.installed
and dep.child.installed
and dep.parent >= replacement_parent
and dep.child.cpv == pkg.cpv
):
# This can happen if the child's sub-slot changed
# without a revision bump. The sub-slot change is
# considered insignificant until one of its parent
# packages needs to be rebuilt (which may trigger a
# slot conflict).
insignificant = True
if not insignificant and unevaluated_atom is not None:
# Evaluate USE conditionals and || deps, in order
# to see if this atom is really desirable, since
# otherwise we may trigger an undesirable rebuild
# as in bug #460304.
if selected_atoms is None:
selected_atoms = self._select_atoms_probe(
dep.child.root, replacement_parent
)
atom_not_selected = unevaluated_atom not in selected_atoms
if atom_not_selected:
break
if not insignificant and check_reverse_dependencies(
dep.child, pkg, replacement_parent=replacement_parent
):
candidate_pkg_atoms.append((pkg, unevaluated_atom or atom))
candidate_pkgs.append(pkg)
# When unevaluated_atom is None, it means that atom is
# an soname atom which is unconditionally selected, and
# _select_atoms_probe is not applicable.
if atom_not_selected is None and unevaluated_atom is not None:
if selected_atoms is None:
selected_atoms = self._select_atoms_probe(
dep.child.root, replacement_parent
)
atom_not_selected = unevaluated_atom not in selected_atoms
if atom_not_selected:
continue
replacement_candidates.append(candidate_pkg_atoms)
if all_candidate_pkgs is None:
all_candidate_pkgs = set(candidate_pkgs)
else:
all_candidate_pkgs.intersection_update(candidate_pkgs)
if not all_candidate_pkgs:
# If the atoms that connect parent and child can't agree on
# any replacement child, we can't do anything.
continue
# Now select one of the pkgs as replacement. This is as easy as
# selecting the highest version.
# The more complicated part is to choose an atom for the
# new Dependency object. Choose the one which ranked the
# selected package highest.
selected = None
for candidate_pkg_atoms in replacement_candidates:
for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
if pkg not in all_candidate_pkgs:
continue
if (
selected is None
or selected[0] < pkg
or (selected[0] is pkg and i < selected[2])
):
selected = (pkg, atom, i)
if debug:
msg = (
"",
"",
"slot_operator_update_probe:",
" existing child package: %s" % dep.child,
" existing parent package: %s" % dep.parent,
" new child package: %s" % selected[0],
" new parent package: %s" % replacement_parent,
"",
)
writemsg_level("\n".join(msg), noiselevel=-1, level=logging.DEBUG)
return Dependency(
parent=replacement_parent, child=selected[0], atom=selected[1]
)
if debug:
msg = (
"",
"",
"slot_operator_update_probe:",
" existing child package: %s" % dep.child,
" existing parent package: %s" % dep.parent,
" new child package: %s" % None,
" new parent package: %s" % None,
"",
)
writemsg_level("\n".join(msg), noiselevel=-1, level=logging.DEBUG)
return None
def _slot_operator_unsatisfied_probe(self, dep):
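"""
Check if an unsatisfied built slot-operator dependency could be
satisfied by replacing dep.parent with a similar package whose
corresponding slot := atom is satisfiable. Returns True if such
a replacement parent exists.
"""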
if (
dep.parent.installed
and self._frozen_config.excluded_pkgs.findAtomForPackage(
dep.parent, modified_use=self._pkg_use_enabled(dep.parent)
)
):
return False
debug = "--debug" in self._frozen_config.myopts
for replacement_parent in self._iter_similar_available(
dep.parent, dep.parent.slot_atom
):
for atom in replacement_parent.validated_atoms:
if (
atom.slot_operator != "="
or atom.blocker
or atom.cp != dep.atom.cp
):
continue
# Discard USE deps, we're only searching for an approximate
# pattern, and dealing with USE states is too complex for
# this purpose.
atom = atom.without_use
pkg, existing_node = self._select_package(
dep.root, atom, onlydeps=dep.onlydeps
)
if pkg is not None:
if debug:
msg = (
"",
"",
"slot_operator_unsatisfied_probe:",
" existing parent package: %s" % dep.parent,
" existing parent atom: %s" % dep.atom,
" new parent package: %s" % replacement_parent,
" new child package: %s" % pkg,
"",
)
writemsg_level(
"\n".join(msg), noiselevel=-1, level=logging.DEBUG
)
return True
if debug:
msg = (
"",
"",
"slot_operator_unsatisfied_probe:",
" existing parent package: %s" % dep.parent,
" existing parent atom: %s" % dep.atom,
" new parent package: %s" % None,
" new child package: %s" % None,
"",
)
writemsg_level("\n".join(msg), noiselevel=-1, level=logging.DEBUG)
return False
def _slot_operator_unsatisfied_backtrack(self, dep):
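"""
Record backtracking info that will mask the built instance of
dep.parent, or trigger replacement of the installed instance,
since its built slot-operator dependency cannot be satisfied.
"""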
parent = dep.parent
if "--debug" in self._frozen_config.myopts:
msg = (
"",
"",
"backtracking due to unsatisfied built slot-operator dep:",
" parent package: %s" % parent,
" atom: %s" % dep.atom,
"",
)
writemsg_level("\n".join(msg), noiselevel=-1, level=logging.DEBUG)
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
# mask unwanted binary packages if necessary
masks = {}
if not parent.installed:
masks.setdefault(parent, {})["slot_operator_mask_built"] = None
if masks:
config.setdefault("slot_operator_mask_built", {}).update(masks)
# trigger replacement of installed packages if necessary
reinstalls = set()
if parent.installed:
replacement_atom = self._replace_installed_atom(parent)
if replacement_atom is not None:
reinstalls.add((parent.root, replacement_atom))
if reinstalls:
config.setdefault("slot_operator_replace_installed", set()).update(
reinstalls
)
self._dynamic_config._need_restart = True
def _in_blocker_conflict(self, pkg):
"""
Check if pkg is involved in a blocker conflict. This method
only works after the _validate_blockers method has been called.
"""
if self._dynamic_config._blocked_pkgs is None and not self._validate_blockers():
raise self._unknown_internal_error()
if pkg in self._dynamic_config._blocked_pkgs:
return True
if pkg in self._dynamic_config._blocker_parents:
return True
return False
def _upgrade_available(self, pkg):
"""
Detect cases where an upgrade of the given package is available
within the same slot.
"""
for available_pkg in self._iter_similar_available(pkg, pkg.slot_atom):
if available_pkg > pkg:
return True
return False
def _downgrade_probe(self, pkg):
"""
Detect cases where a downgrade of the given package is considered
desirable due to the current version being masked or unavailable.
"""
available_pkg = None
for available_pkg in self._iter_similar_available(pkg, pkg.slot_atom):
if available_pkg >= pkg:
# There's an available package of the same or higher
# version, so downgrade seems undesirable.
return False
return available_pkg is not None
def _select_atoms_probe(self, root, pkg):
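"""
Evaluate USE conditionals and || preferences in all of the given
package's dependency strings, and return the frozenset of
unevaluated atoms that _select_atoms would choose.
"""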
selected_atoms = []
use = self._pkg_use_enabled(pkg)
for k in pkg._dep_keys:
v = pkg._metadata.get(k)
if not v:
continue
selected_atoms.extend(
self._select_atoms(root, v, myuse=use, parent=pkg)[pkg]
)
return frozenset(x.unevaluated_atom for x in selected_atoms)
def _flatten_atoms(self, pkg, use):
"""
Evaluate all dependency atoms of the given package, and return
them as a frozenset. For performance, results are cached.
@param pkg: a Package instance
@type pkg: Package
@param use: set of enabled USE flags
@type use: frozenset
@rtype: frozenset
@return: set of evaluated atoms
"""
cache_key = (pkg, use)
try:
return self._dynamic_config._flatten_atoms_cache[cache_key]
except KeyError:
pass
atoms = []
for dep_key in pkg._dep_keys:
dep_string = pkg._metadata[dep_key]
if not dep_string:
continue
dep_string = portage.dep.use_reduce(
dep_string,
uselist=use,
is_valid_flag=pkg.iuse.is_valid_flag,
flat=True,
token_class=Atom,
eapi=pkg.eapi,
)
atoms.extend(token for token in dep_string if isinstance(token, Atom))
atoms = frozenset(atoms)
self._dynamic_config._flatten_atoms_cache[cache_key] = atoms
return atoms
def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
"""
Given a package that's in the graph, do a rough check to
see if a similar package is available to install. The given
graph_pkg itself may be yielded only if it's not installed.
"""
usepkgonly = "--usepkgonly" in self._frozen_config.myopts
useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
use_ebuild_visibility = (
self._frozen_config.myopts.get("--use-ebuild-visibility", "n") != "n"
)
for pkg in self._iter_match_pkgs_any(graph_pkg.root_config, atom):
if pkg.cp != graph_pkg.cp:
# discard old-style virtual match
continue
if pkg.installed:
continue
if pkg in self._dynamic_config._runtime_pkg_mask:
continue
if self._frozen_config.excluded_pkgs.findAtomForPackage(
pkg, modified_use=self._pkg_use_enabled(pkg)
):
continue
if pkg.built:
if self._equiv_binary_installed(pkg):
continue
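# Unless ebuild visibility checks are bypassed for this
# package (--usepkgonly or a useoldpkg_atoms match, without
# --use-ebuild-visibility), discard built packages that lack
# an equivalent visible ebuild.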
if not (
not use_ebuild_visibility
and (
usepkgonly
or useoldpkg_atoms.findAtomForPackage(
pkg, modified_use=self._pkg_use_enabled(pkg)
)
)
) and not self._equiv_ebuild_visible(
pkg, autounmask_level=autounmask_level
):
continue
if not self._pkg_visibility_check(pkg, autounmask_level=autounmask_level):
continue
yield pkg
def _replace_installed_atom(self, inst_pkg):
"""
Given an installed package, generate an atom suitable for
slot_operator_replace_installed backtracking info. The replacement
SLOT may differ from the installed SLOT, so first search by cpv.
"""
built_pkgs = []
for pkg in self._iter_similar_available(inst_pkg, Atom("=%s" % inst_pkg.cpv)):
if not pkg.built:
return pkg.slot_atom
if not pkg.installed:
# avoid using SLOT from a built instance
built_pkgs.append(pkg)
for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
if not pkg.built:
return pkg.slot_atom
if not pkg.installed:
# avoid using SLOT from a built instance
built_pkgs.append(pkg)
if built_pkgs:
best_version = None
for pkg in built_pkgs:
if best_version is None or pkg > best_version:
best_version = pkg
return best_version.slot_atom
return None
def _slot_operator_trigger_reinstalls(self):
"""
Search for packages with slot-operator deps on older slots, and schedule
rebuilds if they can link to a newer slot that's in the graph.
"""
rebuild_if_new_slot = (
self._dynamic_config.myparams.get("rebuild_if_new_slot", "y") == "y"
)
for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
for dep in slot_info:
atom = dep.atom
if not (atom.soname or atom.slot_operator_built):
new_child_slot = self._slot_change_probe(dep)
if new_child_slot is not None:
self._slot_change_backtrack(dep, new_child_slot)
continue
if not (
dep.parent and isinstance(dep.parent, Package) and dep.parent.built
):
continue
# If the parent is not installed, check if it needs to be
# rebuilt against an installed instance, since otherwise
# it could trigger downgrade of an installed instance as
# in bug #652938.
want_update_probe = dep.want_update or not dep.parent.installed
# Check for slot update first, since we don't want to
# trigger reinstall of the child package when a newer
# slot will be used instead.
if rebuild_if_new_slot and want_update_probe:
new_dep = self._slot_operator_update_probe(dep, new_child_slot=True)
if new_dep is not None:
self._slot_operator_update_backtrack(
dep, new_child_slot=new_dep.child
)
if want_update_probe:
if self._slot_operator_update_probe(dep):
self._slot_operator_update_backtrack(dep)
def _reinstall_for_flags(
self, pkg, forced_flags, orig_use, orig_iuse, cur_use, cur_iuse
):
"""Return a set of flags that trigger reinstallation, or None if there
are no such flags."""
# binpkg_respect_use: Behave like newuse by default. If newuse is
# False and changed_use is True, then behave like changed_use.
binpkg_respect_use = pkg.built and self._dynamic_config.myparams.get(
"binpkg_respect_use"
) in ("y", "auto")
newuse = "--newuse" in self._frozen_config.myopts
changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
feature_flags = _get_feature_flags(_get_eapi_attrs(pkg.eapi))
if newuse or (binpkg_respect_use and not changed_use):
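# Flags which were added to or removed from IUSE (minus forced
# flags), plus flags whose effective enabled state differs
# between the original and current configuration (minus
# feature flags).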
flags = set(orig_iuse)
flags ^= cur_iuse
flags -= forced_flags
flags |= orig_iuse.intersection(orig_use) ^ cur_iuse.intersection(cur_use)
flags -= feature_flags
if flags:
return flags
elif changed_use or binpkg_respect_use:
flags = set(orig_iuse)
flags.intersection_update(orig_use)
flags ^= cur_iuse.intersection(cur_use)
flags -= feature_flags
if flags:
return flags
return None
def _changed_deps(self, pkg):
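"""
Return True if the dependencies of the given built or installed
package differ from those of the corresponding ebuild, after
slot/sub-slot components have been stripped via strip_slots. A
built dependency string that fails to parse is treated as
changed.
"""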
ebuild = None
try:
ebuild = self._pkg(pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
except PackageNotFound:
# Use first available instance of the same version.
for ebuild in self._iter_match_pkgs(
pkg.root_config, "ebuild", Atom("=" + pkg.cpv)
):
break
if ebuild is None:
changed = False
else:
if self._dynamic_config.myparams.get("bdeps") in ("y", "auto"):
depvars = Package._dep_keys
else:
depvars = Package._runtime_keys
# Use _raw_metadata, in order to avoid interaction
# with --dynamic-deps.
try:
built_deps = []
for k in depvars:
dep_struct = portage.dep.use_reduce(
pkg._raw_metadata[k],
uselist=pkg.use.enabled,
eapi=pkg.eapi,
token_class=Atom,
)
strip_slots(dep_struct)
built_deps.append(dep_struct)
except InvalidDependString:
changed = True
else:
unbuilt_deps = []
for k in depvars:
dep_struct = portage.dep.use_reduce(
ebuild._raw_metadata[k],
uselist=pkg.use.enabled,
eapi=ebuild.eapi,
token_class=Atom,
)
strip_slots(dep_struct)
unbuilt_deps.append(dep_struct)
changed = built_deps != unbuilt_deps
if (
changed
and pkg.installed
and self._dynamic_config.myparams.get("changed_deps_report")
):
self._dynamic_config._changed_deps_pkgs[pkg] = ebuild
return changed
def _changed_slot(self, pkg):
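"""
Return True if the equivalent ebuild of the given package has a
different slot/sub-slot.
"""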
ebuild = self._equiv_ebuild(pkg)
return ebuild is not None and (ebuild.slot, ebuild.sub_slot) != (
pkg.slot,
pkg.sub_slot,
)
def _create_graph(self, allow_unsatisfied=False):
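"""
Pop Dependency and Package instances from the dependency stacks
and process them until both stacks are empty. Returns 1 on
success, or 0 if a dependency could not be added.
"""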
dep_stack = self._dynamic_config._dep_stack
dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
while dep_stack or dep_disjunctive_stack:
self._spinner_update()
while dep_stack:
dep = dep_stack.pop()
if isinstance(dep, Package):
if not self._add_pkg_deps(dep, allow_unsatisfied=allow_unsatisfied):
return 0
continue
if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
return 0
if dep_disjunctive_stack:
if not self._pop_disjunction(allow_unsatisfied):
return 0
return 1
def _expand_set_args(self, input_args, add_to_digraph=False):
"""
Iterate over a list of DependencyArg instances and yield all
instances given in the input together with additional SetArg
instances that are generated from nested sets.
@param input_args: An iterable of DependencyArg instances
@type input_args: Iterable
@param add_to_digraph: If True then add SetArg instances
to the digraph, in order to record parent -> child
relationships from nested sets
@type add_to_digraph: Boolean
@rtype: Iterable
@return: All args given in the input together with additional
SetArg instances that are generated from nested sets
"""
traversed_set_args = set()
for arg in input_args:
if not isinstance(arg, SetArg):
yield arg
continue
root_config = arg.root_config
depgraph_sets = self._dynamic_config.sets[root_config.root]
arg_stack = [arg]
while arg_stack:
arg = arg_stack.pop()
if arg in traversed_set_args:
continue
# If a node with the same hash already exists in
# the digraph, preserve the existing instance which
# may have a different reset_depth attribute
# (distinguishes user arguments from sets added for
# another reason such as complete mode).
arg = self._dynamic_config.digraph.get(arg, arg)
traversed_set_args.add(arg)
if add_to_digraph:
self._dynamic_config.digraph.add(
arg, None, priority=BlockerDepPriority.instance
)
yield arg
# Traverse nested sets and add them to the stack
# if they're not already in the graph. Also, graph
# edges between parent and nested sets.
for token in sorted(arg.pset.getNonAtoms()):
if not token.startswith(SETPREFIX):
continue
s = token[len(SETPREFIX) :]
nested_set = depgraph_sets.sets.get(s)
if nested_set is None:
nested_set = root_config.sets.get(s)
if nested_set is not None:
# Propagate the reset_depth attribute from
# parent set to nested set.
nested_arg = SetArg(
arg=token,
pset=nested_set,
reset_depth=arg.reset_depth,
root_config=root_config,
)
# Preserve instances already in the graph (same
# reason as for the "arg" variable above).
nested_arg = self._dynamic_config.digraph.get(
nested_arg, nested_arg
)
arg_stack.append(nested_arg)
if add_to_digraph:
self._dynamic_config.digraph.add(
nested_arg, arg, priority=BlockerDepPriority.instance
)
depgraph_sets.sets[nested_arg.name] = nested_arg.pset
def _add_dep(self, dep, allow_unsatisfied=False):
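"""
Process a single Dependency: register blockers, select a package
that satisfies the dep atom, handle unsatisfied dependencies
(possibly scheduling backtracking), and add the selected package
to the graph. Returns 1 on success and 0 on failure.
"""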
debug = "--debug" in self._frozen_config.myopts
nodeps = "--nodeps" in self._frozen_config.myopts
if dep.blocker:
# Slot collision nodes are not allowed to block other packages since
# blocker validation is only able to account for one package per slot.
is_slot_conflict_parent = any(
dep.parent in conflict.pkgs[1:]
for conflict in self._dynamic_config._package_tracker.slot_conflicts()
)
if (
not nodeps
and not dep.collapsed_priority.ignored
and not dep.collapsed_priority.optional
and not is_slot_conflict_parent
):
if dep.parent.onlydeps:
# It's safe to ignore blockers if the
# parent is an --onlydeps node.
return 1
# The blocker applies to the root where
# the parent is or will be installed.
blocker = Blocker(
atom=dep.atom,
eapi=dep.parent.eapi,
priority=dep.priority,
root=dep.parent.root,
)
self._dynamic_config._blocker_parents.add(blocker, dep.parent)
return 1
if dep.child is None:
dep_pkg, existing_node = self._select_package(
dep.root, dep.atom, onlydeps=dep.onlydeps
)
else:
# The caller has selected a specific package
# via self._minimize_packages().
dep_pkg = dep.child
existing_node = next(
self._dynamic_config._package_tracker.match(
dep.root, dep_pkg.slot_atom, installed=False
),
None,
)
if not dep_pkg:
if dep.collapsed_priority.optional or dep.collapsed_priority.ignored:
# This is an unnecessary build-time dep.
return 1
# NOTE: For removal actions, allow_unsatisfied is always
# True since all existing removal actions traverse all
# installed deps deeply via the _complete_graph method,
# which calls _create_graph with allow_unsatisfied = True.
if allow_unsatisfied:
self._dynamic_config._unsatisfied_deps.append(dep)
return 1
# The following case occurs when
# _solve_non_slot_operator_slot_conflicts calls
# _create_graph. In this case, ignore unsatisfied deps for
# installed packages only if their depth is beyond the depth
# requested by the user and the dep was initially
# unsatisfied (not broken by a slot conflict in the current
# graph). See bug #520950.
# NOTE: The value of dep.parent.depth is guaranteed to be
# either an integer or _UNREACHABLE_DEPTH, where
# _UNREACHABLE_DEPTH indicates that the parent has been
# pulled in by the _complete_graph method (rather than by
# explicit arguments or their deep dependencies). These
# cases must be distinguished because depth is meaningless
# for packages that are not reachable as deep dependencies
# of arguments.
if (
self._dynamic_config._complete_mode
and isinstance(dep.parent, Package)
and dep.parent.installed
and (
dep.parent.depth is self._UNREACHABLE_DEPTH
or (
self._frozen_config.requested_depth is not True
and dep.parent.depth >= self._frozen_config.requested_depth
)
)
):
inst_pkg, in_graph = self._select_pkg_from_installed(dep.root, dep.atom)
if inst_pkg is None:
self._dynamic_config._initially_unsatisfied_deps.append(dep)
return 1
self._dynamic_config._unsatisfied_deps_for_display.append(
((dep.root, dep.atom), {"myparent": dep.parent})
)
# The parent node should not already be in
# runtime_pkg_mask, since that would trigger an
# infinite backtracking loop.
if self._dynamic_config._allow_backtracking:
if (
dep.parent not in self._dynamic_config._runtime_pkg_mask
and dep.atom.package
and dep.atom.slot_operator_built
and self._slot_operator_unsatisfied_probe(dep)
):
self._slot_operator_unsatisfied_backtrack(dep)
return 1
# This is for backward-compatibility with previous
# behavior, so that installed packages with unsatisfied
# dependencies trigger an error message but do not
# cause the dependency calculation to fail. Only do
# this if the parent is already in the runtime package
# mask, since otherwise we need to backtrack.
if (
dep.parent.installed
and dep.parent in self._dynamic_config._runtime_pkg_mask
and not any(
self._iter_match_pkgs_any(dep.parent.root_config, dep.atom)
)
):
self._dynamic_config._initially_unsatisfied_deps.append(dep)
return 1
# Do not backtrack if only USE flags have to be changed in
# order to satisfy the dependency. Note that when
# want_restart_for_use_change sets the need_restart
# flag, it causes _select_pkg_highest_available to
# return None, and eventually we come through here
# and skip the "missing dependency" backtracking path.
dep_pkg, existing_node = self._select_package(
dep.root,
dep.atom.without_use if dep.atom.package else dep.atom,
onlydeps=dep.onlydeps,
)
if dep_pkg is None:
# In order to suppress the sort of aggressive
# backtracking that can trigger undesirable downgrades
# as in bug 693836, do not backtrack if there's an
# available package which was involved in a slot
# conflict and satisfied all involved parent atoms.
for (
dep_pkg,
reasons,
) in self._dynamic_config._runtime_pkg_mask.items():
if (
dep.atom.match(dep_pkg)
and len(reasons) == 1
and not reasons.get("slot conflict", True)
):
self._dynamic_config._skip_restart = True
return 0
self._dynamic_config._backtrack_infos["missing dependency"] = dep
self._dynamic_config._need_restart = True
if debug:
msg = []
msg.append("")
msg.append("")
msg.append("backtracking due to unsatisfied dep:")
msg.append(" parent: %s" % dep.parent)
msg.append(" priority: %s" % dep.priority)
msg.append(" root: %s" % dep.root)
msg.append(" atom: %s" % dep.atom)
msg.append("")
writemsg_level(
"".join("%s\n" % l for l in msg),
noiselevel=-1,
level=logging.DEBUG,
)
return 0
self._rebuild.add(dep_pkg, dep)
ignore = (
dep.collapsed_priority.ignored
and not self._dynamic_config._traverse_ignored_deps
)
if not ignore and not self._add_pkg(dep_pkg, dep):
return 0
return 1
def _check_slot_conflict(self, pkg, atom):
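"""
Check for an existing non-installed node which occupies the same
slot as pkg. Returns a tuple (existing_node, matches), where
matches indicates whether the existing node can stand in for pkg
(identical cpv, or a match for the given atom).
"""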
existing_node = next(
self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom, installed=False
),
None,
)
matches = None
if existing_node:
matches = pkg.cpv == existing_node.cpv
if pkg != existing_node and atom is not None:
matches = atom.match(
existing_node.with_use(self._pkg_use_enabled(existing_node))
)
return (existing_node, matches)
def _add_pkg(self, pkg, dep):
"""
Adds a package to the depgraph, queues dependencies, and handles
slot conflicts.
"""
debug = "--debug" in self._frozen_config.myopts
myparent = None
priority = None
depth = 0
if dep is None:
dep = Dependency()
else:
myparent = dep.parent
priority = dep.priority
depth = dep.depth
if priority is None:
priority = DepPriority()
if debug:
writemsg_level(
"\n%s%s %s\n"
% (
"Child:".ljust(15),
pkg,
pkg_use_display(
pkg,
self._frozen_config.myopts,
modified_use=self._pkg_use_enabled(pkg),
),
),
level=logging.DEBUG,
noiselevel=-1,
)
if isinstance(myparent, (PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
writemsg_level(
"%s%s\n" % ("Parent Dep:".ljust(15), myparent),
level=logging.DEBUG,
noiselevel=-1,
)
else:
# Display the specific atom from SetArg or
# Package types.
uneval = ""
if (
dep.atom
and dep.atom.package
and dep.atom is not dep.atom.unevaluated_atom
):
uneval = " (%s)" % (dep.atom.unevaluated_atom,)
writemsg_level(
"%s%s%s required by %s\n"
% ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
level=logging.DEBUG,
noiselevel=-1,
)
# Ensure that the dependencies of the same package
# are never processed more than once.
previously_added = pkg in self._dynamic_config.digraph
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
arg_atoms = None
try:
arg_atoms = list(self._iter_atoms_for_pkg(pkg))
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
raise
del e
# NOTE: REQUIRED_USE checks are delayed until after
# package selection, since we want to prompt the user
# for USE adjustment rather than have REQUIRED_USE
# affect package selection and || dep choices.
if (
not pkg.built
and pkg._metadata.get("REQUIRED_USE")
and eapi_has_required_use(pkg.eapi)
):
required_use_is_sat = check_required_use(
pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
pkg.iuse.is_valid_flag,
eapi=pkg.eapi,
)
if not required_use_is_sat:
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
if arg_atoms:
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._add_parent_atom(pkg, parent_atom)
atom = dep.atom
if atom is None:
atom = Atom("=" + pkg.cpv)
self._dynamic_config._unsatisfied_deps_for_display.append(
((pkg.root, atom), {"myparent": dep.parent, "show_req_use": pkg})
)
self._dynamic_config._required_use_unsatisfied = True
self._dynamic_config._skip_restart = True
# Add pkg to digraph in order to enable autounmask messages
# for this package, which is useful when autounmask USE
# changes have violated REQUIRED_USE.
self._dynamic_config.digraph.add(pkg, dep.parent, priority=priority)
return 0
if not pkg.onlydeps:
existing_node, existing_node_matches = self._check_slot_conflict(
pkg, dep.atom
)
if existing_node:
if existing_node_matches:
# The existing node can be reused.
if pkg != existing_node:
pkg = existing_node
previously_added = True
try:
arg_atoms = list(self._iter_atoms_for_pkg(pkg))
except InvalidDependString as e:
if not pkg.installed:
# should have been masked before
# it was selected
raise
if debug:
writemsg_level(
"%s%s %s\n"
% (
"Re-used Child:".ljust(15),
pkg,
pkg_use_display(
pkg,
self._frozen_config.myopts,
modified_use=self._pkg_use_enabled(pkg),
),
),
level=logging.DEBUG,
noiselevel=-1,
)
elif (
pkg.installed
and isinstance(myparent, Package)
and pkg.root == myparent.root
and pkg.slot_atom == myparent.slot_atom
):
# If the parent package is replacing the child package then
# there's no slot conflict. Since the child will be replaced,
# do not add it to the graph. No attempt will be made to
# satisfy its dependencies, which is unsafe if it has any
# missing dependencies, as discussed in bug 199856.
if debug:
writemsg_level(
"%s%s %s\n"
% (
"Replace Child:".ljust(15),
pkg,
pkg_use_display(
pkg,
self._frozen_config.myopts,
modified_use=self._pkg_use_enabled(pkg),
),
),
level=logging.DEBUG,
noiselevel=-1,
)
return 1
else:
if debug:
writemsg_level(
"%s%s %s\n"
% (
"Slot Conflict:".ljust(15),
existing_node,
pkg_use_display(
existing_node,
self._frozen_config.myopts,
modified_use=self._pkg_use_enabled(existing_node),
),
),
level=logging.DEBUG,
noiselevel=-1,
)
if not previously_added:
self._dynamic_config._package_tracker.add_pkg(pkg)
self._dynamic_config._filtered_trees[pkg.root][
"porttree"
].dbapi._clear_cache()
self._check_masks(pkg)
self._prune_highest_pkg_cache(pkg)
if not pkg.installed:
# Allow this package to satisfy old-style virtuals in case it
# doesn't already. Any pre-existing providers will be preferred
# over this one.
try:
pkgsettings.setinst(pkg.cpv, pkg._metadata)
# For consistency, also update the global virtuals.
settings = self._frozen_config.roots[pkg.root].settings
settings.unlock()
settings.setinst(pkg.cpv, pkg._metadata)
settings.lock()
except portage.exception.InvalidDependString:
if not pkg.installed:
# should have been masked before it was selected
raise
if arg_atoms:
self._dynamic_config._set_nodes.add(pkg)
# Do this even for onlydeps, so that the
# parent/child relationship is always known in case
# self._show_slot_collision_notice() needs to be called later.
# If a direct circular dependency is not an unsatisfied
# buildtime dependency then drop it here since otherwise
# it can skew the merge order calculation in an unwanted
# way.
if pkg != dep.parent or (priority.buildtime and not priority.satisfied):
self._dynamic_config.digraph.add(pkg, dep.parent, priority=priority)
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
if arg_atoms:
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._dynamic_config.digraph.add(pkg, parent, priority=priority)
self._add_parent_atom(pkg, parent_atom)
# This section determines whether we go deeper into dependencies or not.
# We want to go deeper on a few occasions:
# Installing package A, we need to make sure package A's deps are met.
# emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
# If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
if arg_atoms and depth != 0:
for parent, atom in arg_atoms:
if parent.reset_depth:
depth = 0
break
if previously_added and depth != 0 and isinstance(pkg.depth, int):
# Use pkg.depth if it is less than depth.
if isinstance(depth, int):
depth = min(pkg.depth, depth)
else:
# depth is _UNREACHABLE_DEPTH and pkg.depth is
# an int, so use the int because it's considered
# to be less than _UNREACHABLE_DEPTH.
depth = pkg.depth
pkg.depth = depth
deep = self._dynamic_config.myparams.get("deep", 0)
update = "--update" in self._frozen_config.myopts
dep.want_update = (
not self._dynamic_config._complete_mode
and (arg_atoms or update)
and not self._too_deep(depth)
)
dep.child = pkg
if (
not pkg.onlydeps
and dep.atom
and (dep.atom.soname or dep.atom.slot_operator == "=")
):
self._add_slot_operator_dep(dep)
recurse = deep is True or not self._too_deep(self._depth_increment(depth, n=1))
dep_stack = self._dynamic_config._dep_stack
if "recurse" not in self._dynamic_config.myparams:
return 1
if pkg.installed and not recurse:
dep_stack = self._dynamic_config._ignored_deps
self._spinner_update()
if not previously_added:
dep_stack.append(pkg)
return 1
def _add_installed_sonames(self, pkg):
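"""
Index the soname atoms provided by an installed package, for use
in soname dependency resolution.
"""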
if self._frozen_config.soname_deps_enabled and pkg.provides is not None:
for atom in pkg.provides:
self._dynamic_config._installed_sonames[(pkg.root, atom)].append(pkg)
def _add_pkg_soname_deps(self, pkg, allow_unsatisfied=False):
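"""
Create and process a runtime Dependency for each soname atom
that pkg requires, skipping atoms satisfied by the profile's
soname_provided set. Returns False if any such dependency cannot
be added.
"""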
if self._frozen_config.soname_deps_enabled and pkg.requires is not None:
if isinstance(pkg.depth, int):
depth = pkg.depth + 1
else:
depth = pkg.depth
soname_provided = self._frozen_config.roots[
pkg.root
].settings.soname_provided
for atom in pkg.requires:
if atom in soname_provided:
continue
dep = Dependency(
atom=atom,
blocker=False,
depth=depth,
parent=pkg,
priority=self._priority(runtime=True),
root=pkg.root,
)
if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
return False
return True
def _remove_pkg(self, pkg):
"""
Remove a package, and recursively any digraph children that are
thereby left parentless, from all depgraph data structures.
"""
debug = "--debug" in self._frozen_config.myopts
if debug:
writemsg_level(
"Removing package: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1
)
try:
children = [
child
for child in self._dynamic_config.digraph.child_nodes(pkg)
if child is not pkg
]
self._dynamic_config.digraph.remove(pkg)
except KeyError:
children = []
self._dynamic_config._package_tracker.discard_pkg(pkg)
self._dynamic_config._parent_atoms.pop(pkg, None)
self._dynamic_config._set_nodes.discard(pkg)
for child in children:
try:
self._dynamic_config._parent_atoms[child] = set(
(parent, atom)
for (parent, atom) in self._dynamic_config._parent_atoms[child]
if parent is not pkg
)
except KeyError:
pass
# Remove slot operator dependencies.
slot_key = (pkg.root, pkg.slot_atom)
if slot_key in self._dynamic_config._slot_operator_deps:
self._dynamic_config._slot_operator_deps[slot_key] = [
dep
for dep in self._dynamic_config._slot_operator_deps[slot_key]
if dep.child is not pkg
]
if not self._dynamic_config._slot_operator_deps[slot_key]:
del self._dynamic_config._slot_operator_deps[slot_key]
# Remove blockers.
self._dynamic_config._blocker_parents.discard(pkg)
self._dynamic_config._irrelevant_blockers.discard(pkg)
self._dynamic_config._unsolvable_blockers.discard(pkg)
if self._dynamic_config._blocked_pkgs is not None:
self._dynamic_config._blocked_pkgs.discard(pkg)
self._dynamic_config._blocked_world_pkgs.pop(pkg, None)
for child in children:
if (
child in self._dynamic_config.digraph
and not self._dynamic_config.digraph.parent_nodes(child)
):
self._remove_pkg(child)
# Clear caches.
self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
self._dynamic_config._highest_pkg_cache.clear()
self._dynamic_config._highest_pkg_cache_cp_map.clear()
def _check_masks(self, pkg):
slot_key = (pkg.root, pkg.slot_atom)
# Check for upgrades in the same slot that are
# masked due to a LICENSE change in a newer
# version that is not masked for any other reason.
other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
if other_pkg is not None and pkg < other_pkg:
self._dynamic_config._masked_license_updates.add(other_pkg)
def _add_parent_atom(self, pkg, parent_atom):
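"""
Record a (parent, atom) pair in the set of parent atoms which
have pulled pkg into the graph.
"""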
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if parent_atoms is None:
parent_atoms = set()
self._dynamic_config._parent_atoms[pkg] = parent_atoms
parent_atoms.add(parent_atom)
def _add_slot_operator_dep(self, dep):
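"""
Index a slot-operator or soname dependency by the (root,
slot_atom) of its child, for later use by
_slot_operator_trigger_reinstalls.
"""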
slot_key = (dep.root, dep.child.slot_atom)
slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
if slot_info is None:
slot_info = []
self._dynamic_config._slot_operator_deps[slot_key] = slot_info
slot_info.append(dep)
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
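"""
Evaluate and queue all dependency classes of the given package
(DEPEND, BDEPEND, RDEPEND, IDEPEND and PDEPEND), applying the
appropriate root and priority to each class. Returns 1 on
success and 0 on failure.
"""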
if not self._add_pkg_soname_deps(pkg, allow_unsatisfied=allow_unsatisfied):
return False
myroot = pkg.root
metadata = pkg._metadata
removal_action = "remove" in self._dynamic_config.myparams
eapi_attrs = _get_eapi_attrs(pkg.eapi)
edepend = {}
for k in Package._dep_keys:
edepend[k] = metadata[k]
use_enabled = self._pkg_use_enabled(pkg)
with_test_deps = (
not removal_action
and "with_test_deps" in self._dynamic_config.myparams
and pkg.depth == 0
and "test" not in use_enabled
and pkg.iuse.is_valid_flag("test")
and self._is_argument(pkg)
)
if (
not pkg.built
and "--buildpkgonly" in self._frozen_config.myopts
and "deep" not in self._dynamic_config.myparams
):
edepend["RDEPEND"] = ""
edepend["PDEPEND"] = ""
edepend["IDEPEND"] = ""
if (
pkg.onlydeps
and self._frozen_config.myopts.get("--onlydeps-with-rdeps") == "n"
):
edepend["RDEPEND"] = ""
edepend["PDEPEND"] = ""
edepend["IDEPEND"] = ""
ignore_build_time_deps = False
if pkg.built and not removal_action:
if self._dynamic_config.myparams.get("bdeps") in ("y", "auto"):
# Pull in build time deps as requested, but mark them as
# "optional" since they are not strictly required. This allows
# more freedom in the merge order calculation for solving
# circular dependencies. Don't convert to PDEPEND since that
# could make --with-bdeps=y less effective if it is used to
# adjust merge order to prevent built_with_use() calls from
# failing.
pass
else:
ignore_build_time_deps = True
if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
# Removal actions never traverse ignored buildtime
# dependencies, so it's safe to discard them early.
edepend["DEPEND"] = ""
edepend["BDEPEND"] = ""
ignore_build_time_deps = True
ignore_depend_deps = ignore_build_time_deps
ignore_bdepend_deps = ignore_build_time_deps
if removal_action:
depend_root = myroot
else:
if eapi_attrs.bdepend:
depend_root = pkg.root_config.settings["ESYSROOT"]
else:
depend_root = self._frozen_config._running_root.root
root_deps = self._frozen_config.myopts.get("--root-deps")
if root_deps is not None:
if root_deps is True:
depend_root = myroot
elif root_deps == "rdeps":
ignore_depend_deps = True
# If rebuild mode is not enabled, it's safe to discard ignored
# build-time dependencies. If you want these deps to be traversed
# in "complete" mode then you need to specify --with-bdeps=y.
if not self._rebuild.rebuild:
if ignore_depend_deps:
edepend["DEPEND"] = ""
if ignore_bdepend_deps:
edepend["BDEPEND"] = ""
# Since build-time deps tend to be a superset of run-time deps, order
# dep processing such that build-time deps are popped from
# _dep_disjunctive_stack first, so that choices for build-time
# deps influence choices for run-time deps (bug 639346).
deps = (
(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
(
self._frozen_config._running_root.root,
edepend["IDEPEND"],
self._priority(runtime=True),
),
(myroot, edepend["PDEPEND"], self._priority(runtime_post=True)),
(
depend_root,
edepend["DEPEND"],
self._priority(
buildtime=True,
optional=(pkg.built or ignore_depend_deps),
ignored=ignore_depend_deps,
),
),
(
self._frozen_config._running_root.root,
edepend["BDEPEND"],
self._priority(
buildtime=True,
optional=(pkg.built or ignore_bdepend_deps),
ignored=ignore_bdepend_deps,
),
),
)
debug = "--debug" in self._frozen_config.myopts
for dep_root, dep_string, dep_priority in deps:
if not dep_string:
continue
if debug:
writemsg_level(
"\nParent: %s\n" % (pkg,), noiselevel=-1, level=logging.DEBUG
)
writemsg_level(
"Depstring: %s\n" % (dep_string,),
noiselevel=-1,
level=logging.DEBUG,
)
writemsg_level(
"Priority: %s\n" % (dep_priority,),
noiselevel=-1,
level=logging.DEBUG,
)
try:
if (
with_test_deps
and "test" not in use_enabled
and pkg.iuse.is_valid_flag("test")
):
test_deps = portage.dep.use_reduce(
dep_string,
uselist=use_enabled | {"test"},
is_valid_flag=pkg.iuse.is_valid_flag,
opconvert=True,
token_class=Atom,
eapi=pkg.eapi,
subset={"test"},
)
if test_deps:
test_deps = list(
self._queue_disjunctive_deps(
pkg,
dep_root,
self._priority(runtime_post=True),
test_deps,
)
)
if test_deps and not self._add_pkg_dep_string(
pkg,
dep_root,
self._priority(runtime_post=True),
test_deps,
allow_unsatisfied,
):
return 0
dep_string = portage.dep.use_reduce(
dep_string,
uselist=use_enabled,
is_valid_flag=pkg.iuse.is_valid_flag,
opconvert=True,
token_class=Atom,
eapi=pkg.eapi,
)
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
raise
del e
# Try again, but omit the is_valid_flag argument, since
# invalid USE conditionals are a common problem and it's
# practical to ignore this issue for installed packages.
try:
dep_string = portage.dep.use_reduce(
dep_string,
uselist=use_enabled,
opconvert=True,
token_class=Atom,
eapi=pkg.eapi,
)
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
del e
continue
try:
dep_string = list(
self._queue_disjunctive_deps(
pkg, dep_root, dep_priority, dep_string
)
)
except portage.exception.InvalidDependString as e:
if pkg.installed:
self._dynamic_config._masked_installed.add(pkg)
del e
continue
# should have been masked before it was selected
raise
if not dep_string:
continue
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string, allow_unsatisfied
):
return 0
self._dynamic_config._traversed_pkg_deps.add(pkg)
return 1
def _add_pkg_dep_string(
self, pkg, dep_root, dep_priority, dep_string, allow_unsatisfied
):
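"""
Wrapper for _wrapped_add_pkg_dep_string which temporarily
disables autounmask for optional or ignored dependencies.
"""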
_autounmask_backup = self._dynamic_config._autounmask
if dep_priority.optional or dep_priority.ignored:
# Temporarily disable autounmask for deps that
# don't necessarily need to be satisfied.
self._dynamic_config._autounmask = False
try:
return self._wrapped_add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string, allow_unsatisfied
)
finally:
self._dynamic_config._autounmask = _autounmask_backup
def _ignore_dependency(self, atom, pkg, child, dep, mypriority, recurse_satisfied):
"""
In some cases, dep_check will return deps that shouldn't
be processed any further, so they are identified and
discarded here. Try to discard as few as possible since
discarded dependencies reduce the amount of information
available for optimization of merge order.
Don't ignore dependencies if pkg has a slot operator dependency on the child
and the child has changed slot/sub_slot.
"""
if not mypriority.satisfied:
return False
slot_operator_rebuild = False
if (
atom.slot_operator == "="
and (pkg.root, pkg.slot_atom)
in self._dynamic_config._slot_operator_replace_installed
and mypriority.satisfied is not child
and mypriority.satisfied.installed
and child
and not child.installed
and (
child.slot != mypriority.satisfied.slot
or child.sub_slot != mypriority.satisfied.sub_slot
)
):
slot_operator_rebuild = True
return (
not atom.blocker
and not recurse_satisfied
and mypriority.satisfied.visible
and dep.child is not None
and not dep.child.installed
and not any(
self._dynamic_config._package_tracker.match(
dep.child.root, dep.child.slot_atom, installed=False
)
)
and not slot_operator_rebuild
)
def _wrapped_add_pkg_dep_string(
self, pkg, dep_root, dep_priority, dep_string, allow_unsatisfied
):
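"""
Select atoms from an already use-reduced dependency structure,
create a Dependency for each (atom, child) pair, and process it
unless _ignore_dependency identifies it as discardable. Returns
1 on success and 0 on failure.
"""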
if isinstance(pkg.depth, int):
depth = pkg.depth + 1
else:
depth = pkg.depth
deep = self._dynamic_config.myparams.get("deep", 0)
recurse_satisfied = deep is True or depth <= deep
debug = "--debug" in self._frozen_config.myopts
strict = pkg.type_name != "installed"
if debug:
writemsg_level(
"\nParent: %s\n" % (pkg,), noiselevel=-1, level=logging.DEBUG
)
dep_repr = portage.dep.paren_enclose(
dep_string, unevaluated_atom=True, opconvert=True
)
writemsg_level(
"Depstring: %s\n" % (dep_repr,), noiselevel=-1, level=logging.DEBUG
)
writemsg_level(
"Priority: %s\n" % (dep_priority,), noiselevel=-1, level=logging.DEBUG
)
try:
selected_atoms = self._select_atoms(
dep_root,
dep_string,
myuse=self._pkg_use_enabled(pkg),
parent=pkg,
strict=strict,
priority=dep_priority,
)
except portage.exception.InvalidDependString:
if pkg.installed:
self._dynamic_config._masked_installed.add(pkg)
return 1
# should have been masked before it was selected
raise
if debug:
writemsg_level(
"Candidates: %s\n" % ([str(x) for x in selected_atoms[pkg]],),
noiselevel=-1,
level=logging.DEBUG,
)
root_config = self._frozen_config.roots[dep_root]
vardb = root_config.trees["vartree"].dbapi
traversed_virt_pkgs = set()
reinstall_atoms = self._frozen_config.reinstall_atoms
for atom, child in self._minimize_children(
pkg, dep_priority, root_config, selected_atoms[pkg]
):
# If this was a specially generated virtual atom
# from dep_check, map it back to the original, in
# order to avoid distortion in places like display
# or conflict resolution code.
is_virt = hasattr(atom, "_orig_atom")
atom = getattr(atom, "_orig_atom", atom)
if atom.blocker and (dep_priority.optional or dep_priority.ignored):
# For --with-bdeps, ignore build-time only blockers
# that originate from built packages.
continue
mypriority = dep_priority.copy()
if not atom.blocker:
if atom.slot_operator == "=":
if mypriority.buildtime:
mypriority.buildtime_slot_op = True
if mypriority.runtime:
mypriority.runtime_slot_op = True
inst_pkgs = [
inst_pkg
for inst_pkg in reversed(vardb.match_pkgs(atom))
if not reinstall_atoms.findAtomForPackage(
inst_pkg, modified_use=self._pkg_use_enabled(inst_pkg)
)
]
if inst_pkgs:
for inst_pkg in inst_pkgs:
if self._pkg_visibility_check(inst_pkg):
# highest visible
mypriority.satisfied = inst_pkg
break
if not mypriority.satisfied:
# none visible, so use highest
mypriority.satisfied = inst_pkgs[0]
dep = Dependency(
atom=atom,
blocker=atom.blocker,
child=child,
depth=depth,
parent=pkg,
priority=mypriority,
root=dep_root,
)
# Identify and discard deps that shouldn't be processed any
# further (see _ignore_dependency for details).
ignored = False
if self._ignore_dependency(
atom, pkg, child, dep, mypriority, recurse_satisfied
):
myarg = None
try:
myarg = next(self._iter_atoms_for_pkg(dep.child), None)
except InvalidDependString:
if not dep.child.installed:
raise
if myarg is None:
# Existing child selection may not be valid unless
# it's added to the graph immediately, since "complete"
# mode may select a different child later.
ignored = True
dep.child = None
self._dynamic_config._ignored_deps.append(dep)
if not ignored:
if (
dep_priority.ignored
and not self._dynamic_config._traverse_ignored_deps
):
if is_virt and dep.child is not None:
traversed_virt_pkgs.add(dep.child)
dep.child = None
self._dynamic_config._ignored_deps.append(dep)
else:
if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
return 0
if is_virt and dep.child is not None:
traversed_virt_pkgs.add(dep.child)
selected_atoms.pop(pkg)
# Add selected indirect virtual deps to the graph. This
# takes advantage of circular dependency avoidance that's done
# by dep_zapdeps. We preserve actual parent/child relationships