blob: 63e905cdbb484417e8582a65ff11230fefe9be7d [file] [log] [blame]
# Copyright 1999-2018 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
import collections
import errno
import functools
import io
import logging
import stat
import sys
import textwrap
import warnings
from collections import deque
from itertools import chain
import portage
from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi.DummyTree import DummyTree
from portage.dbapi.IndexedPortdb import IndexedPortdb
from portage.dbapi._similar_name_search import similar_name_search
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
check_required_use, human_readable_required_use, match_from_list, \
from portage.dep._slot_operator import (ignore_built_slot_operator_deps,
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
PackageNotFound, PortageException)
from portage.localization import _
from portage.output import colorize, create_color_func, \
darkgreen, green
bad = create_color_func("BAD")
from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ConfigProtect, shlex_split, new_protect_filename
from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
from portage.util._async.TaskScheduler import TaskScheduler
from portage.util._eventloop.EventLoop import EventLoop
from portage.util._eventloop.global_event_loop import global_event_loop
from portage.versions import _pkg_str, catpkgsplit
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
from .chk_updated_cfg_files import chk_updated_cfg_files
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import insert_category_into_atom, \
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from _emerge.RootConfig import RootConfig
from import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.UserQuery import UserQuery
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
from _emerge.resolver.DbapiProvidesIndex import DbapiProvidesIndex
from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
from _emerge.resolver.output import Display, format_unmatched_atom
# Python 2/3 compatibility aliases: this module supports both runtimes.
# On py3, map the removed py2 builtins onto their py3 equivalents; on
# py2, keep the native text type.  As extracted, the 'else:' line was
# missing, so '_unicode = unicode' ran unconditionally — a NameError on
# Python 3.
if sys.hexversion >= 0x3000000:
    basestring = str
    long = int
    _unicode = str
else:
    _unicode = unicode
class _scheduler_graph_config(object):
def __init__(self, trees, pkg_cache, graph, mergelist):
self.trees = trees
self.pkg_cache = pkg_cache
self.graph = graph
self.mergelist = mergelist
def _wildcard_set(atoms):
    """
    Compile an iterable of atom strings into an InternalPackageSet that
    allows wildcards.  A string that is not a valid atom on its own
    (e.g. a bare package name like "foo") is retried with a "*/" category
    wildcard prefix.

    NOTE(review): as extracted, this function lacked the 'try:' line
    (leaving a dangling 'except') and never added atoms to 'pkgs', so it
    always returned an empty set; both are restored here.
    """
    pkgs = InternalPackageSet(allow_wildcard=True)
    for x in atoms:
        try:
            x = Atom(x, allow_wildcard=True, allow_repo=False)
        except portage.exception.InvalidAtom:
            # Retry as "*/<name>" so bare package names are accepted.
            x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
        pkgs.add(x)
    return pkgs
class _frozen_depgraph_config(object):
    """
    Depgraph state computed once and shared by every depgraph instance,
    including backtracking runs: settings, trees, options/params and the
    derived exclusion/rebuild package sets.

    NOTE(review): this block appears truncated in extraction — several
    statements below are syntactically incomplete (unterminated calls,
    dangling line continuations, a missing else branch).  Compare with
    upstream before relying on the marked spots.
    """

    def __init__(self, settings, trees, myopts, params, spinner):
        self.settings = settings
        self.target_root = settings["EROOT"]
        self.myopts = myopts
        self.edebug = 0
        if settings.get("PORTAGE_DEBUG", "") == "1":
            self.edebug = 1
        self.spinner = spinner
        self.requested_depth = params.get("deep", 0)
        self._running_root = trees[trees._running_eroot]["root_config"]
        self.pkgsettings = {}
        self.trees = {}
        self._trees_orig = trees
        self.roots = {}
        # All Package instances
        self._pkg_cache = {}
        self._highest_license_masked = {}
        # We can't know that an soname dep is unsatisfied if there are
        # any unbuilt ebuilds in the graph, since unbuilt ebuilds have
        # no soname data. Therefore, only enable soname dependency
        # resolution if --usepkgonly is enabled, or for removal actions.
        self.soname_deps_enabled = (
            ("--usepkgonly" in myopts or "remove" in params) and
            params.get("ignore_soname_deps") != "y")
        dynamic_deps = "dynamic_deps" in params
        ignore_built_slot_operator_deps = myopts.get(
            "--ignore-built-slot-operator-deps", "n") == "y"
        for myroot in trees:
            self.trees[myroot] = {}
            # Create a RootConfig instance that references
            # the FakeVartree instead of the real one.
            # NOTE(review): call truncated — arguments missing.
            self.roots[myroot] = RootConfig(
            for tree in ("porttree", "bintree"):
                self.trees[myroot][tree] = trees[myroot][tree]
            # NOTE(review): right-hand sides truncated below (dangling
            # continuation and unterminated portage.config(...) call).
            self.trees[myroot]["vartree"] = \
            self.pkgsettings[myroot] = portage.config(
            if self.soname_deps_enabled and "remove" not in params:
                # NOTE(review): DummyTree(...) call truncated.
                self.trees[myroot]["bintree"] = DummyTree(
        if params.get("ignore_world", False):
            self._required_set_names = set()
            # NOTE(review): an 'else:' appears to be missing here — as
            # written this unconditionally overwrites the empty set.
            self._required_set_names = set(["world"])
        # Each option below takes whitespace-separated atom lists which
        # are compiled into wildcard-capable package sets.
        atoms = ' '.join(myopts.get("--exclude", [])).split()
        self.excluded_pkgs = _wildcard_set(atoms)
        atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
        self.reinstall_atoms = _wildcard_set(atoms)
        atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
        self.usepkg_exclude = _wildcard_set(atoms)
        atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
        self.useoldpkg_atoms = _wildcard_set(atoms)
        atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
        self.rebuild_exclude = _wildcard_set(atoms)
        atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
        self.rebuild_ignore = _wildcard_set(atoms)
        self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
        self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
        self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets(object):
    """
    Per-root bookkeeping for package sets pulled into the depgraph.
    """

    def __init__(self):
        # Maps each atom back to the argument(s) that pulled it in.
        self.atom_arg_map = {}
        # Every set added to the graph, keyed by set name.  Plain atoms
        # given as command-line arguments are collected under the
        # synthetic '__non_set_args__' set.
        self.sets = {'__non_set_args__': InternalPackageSet(allow_repo=True)}
        # Union of the atoms from all sets above, including the atoms
        # given as arguments.
        self.atoms = InternalPackageSet(allow_repo=True)
class _rebuild_config(object):
    """
    Tracks which packages must be rebuilt because one of their
    build-time+run-time dependencies is being updated (the
    --rebuild-if-new-rev / --rebuild-if-new-ver / --rebuild-if-unbuilt
    options).

    NOTE(review): parts of this class appear truncated in extraction
    (unterminated expressions, missing statements); hedged comments mark
    the suspect spots.
    """

    def __init__(self, frozen_config, backtrack_parameters):
        # Directed graph of buildtime dependency edges, filled by add().
        self._graph = digraph()
        self._frozen_config = frozen_config
        self.rebuild_list = backtrack_parameters.rebuild_list.copy()
        self.orig_rebuild_list = self.rebuild_list.copy()
        self.reinstall_list = backtrack_parameters.reinstall_list.copy()
        self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
        self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
        self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
        # NOTE(review): expression truncated — the final operand (and
        # closing paren) is missing.
        self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or

    def add(self, dep_pkg, dep):
        # Record a buildtime dependency edge (dep_pkg -> parent) so that
        # trigger_rebuilds() can later walk it bottom-up.  Edges for
        # excluded/ignored packages never enter the graph.
        parent = dep.collapsed_parent
        priority = dep.collapsed_priority
        rebuild_exclude = self._frozen_config.rebuild_exclude
        rebuild_ignore = self._frozen_config.rebuild_ignore
        if (self.rebuild and isinstance(parent, Package) and
            parent.built and priority.buildtime and
            isinstance(dep_pkg, Package) and
            not rebuild_exclude.findAtomForPackage(parent) and
            not rebuild_ignore.findAtomForPackage(dep_pkg)):
            self._graph.add(dep_pkg, parent, priority)

    def _needs_rebuild(self, dep_pkg):
        """Check whether packages that depend on dep_pkg need to be rebuilt."""
        dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
        if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
            return False

        if self.rebuild_if_unbuilt:
            # dep_pkg is being installed from source, so binary
            # packages for parents are invalid. Force rebuild
            return True

        trees = self._frozen_config.trees
        vardb = trees[dep_pkg.root]["vartree"].dbapi
        if self.rebuild_if_new_rev:
            # Parent packages are valid if a package with the same
            # cpv is already installed.
            return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

        # Otherwise, parent packages are valid if a package with the same
        # version (excluding revision) is already installed.
        assert self.rebuild_if_new_ver
        cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
        for inst_cpv in vardb.match(dep_pkg.slot_atom):
            inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
            if inst_cpv_norev == cpv_norev:
                return False

        return True

    def _trigger_rebuild(self, parent, build_deps):
        # Decide whether 'parent' must be rebuilt (return True) or only
        # reinstalled from a newer binary, given its buildtime deps.
        root_slot = (parent.root, parent.slot_atom)
        if root_slot in self.rebuild_list:
            return False
        trees = self._frozen_config.trees
        reinstall = False
        for slot_atom, dep_pkg in build_deps.items():
            dep_root_slot = (dep_pkg.root, slot_atom)
            if self._needs_rebuild(dep_pkg):
                # NOTE(review): recording root_slot in self.rebuild_list
                # before returning appears to be truncated here.
                return True
            elif ("--usepkg" in self._frozen_config.myopts and
                (dep_root_slot in self.reinstall_list or
                dep_root_slot in self.rebuild_list or
                not dep_pkg.installed)):
                # A direct rebuild dependency is being installed. We
                # should update the parent as well to the latest binary,
                # if that binary is valid.
                #
                # To validate the binary, we check whether all of the
                # rebuild dependencies are present on the same binhost.
                #
                # 1) If parent is present on the binhost, but one of its
                #    rebuild dependencies is not, then the parent should
                #    be rebuilt from source.
                # 2) Otherwise, the parent binary is assumed to be valid,
                #    because all of its rebuild dependencies are
                #    consistent.
                bintree = trees[parent.root]["bintree"]
                uri = bintree.get_pkgindex_uri(parent.cpv)
                dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
                bindb = bintree.dbapi
                if self.rebuild_if_new_ver and uri and uri != dep_uri:
                    cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
                    for cpv in bindb.match(dep_pkg.slot_atom):
                        if cpv_norev == catpkgsplit(cpv)[:-1]:
                            dep_uri = bintree.get_pkgindex_uri(cpv)
                            # NOTE(review): suite truncated below (the
                            # loop exit on a matching uri is missing).
                            if uri == dep_uri:
                if uri and uri != dep_uri:
                    # 1) Remote binary package is invalid because it was
                    #    built without dep_pkg. Force rebuild.
                    return True
                elif (parent.installed and
                    root_slot not in self.reinstall_list):
                    # NOTE(review): truncated — the 'try:' line and the
                    # aux_get key-list argument are missing, and the
                    # except suite is empty.
                    bin_build_time, = bindb.aux_get(parent.cpv,
                    except KeyError:
                    if bin_build_time != _unicode(parent.build_time):
                        # 2) Remote binary package is valid, and local package
                        #    is not up to date. Force reinstall.
                        reinstall = True
        if reinstall:
            # NOTE(review): recording root_slot in self.reinstall_list
            # appears to be truncated here.
            return reinstall

    def trigger_rebuilds(self):
        """
        Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
        depends on pkgA at both build-time and run-time, pkgB needs to be
        """
        need_restart = False
        graph = self._graph
        build_deps = {}

        leaf_nodes = deque(graph.leaf_nodes())

        # Trigger rebuilds bottom-up (starting with the leaves) so that parents
        # will always know which children are being rebuilt.
        while graph:
            if not leaf_nodes:
                # We'll have to drop an edge. This should be quite rare.
                # NOTE(review): the edge-dropping code is missing here.
            node = leaf_nodes.popleft()
            if node not in graph:
                # This can be triggered by circular dependencies.
                # NOTE(review): a 'continue' appears to be missing here.
            slot_atom = node.slot_atom

            # Remove our leaf node from the graph, keeping track of deps.
            parents = graph.parent_nodes(node)
            # NOTE(review): graph.remove(node) appears to be missing.
            node_build_deps = build_deps.get(node, {})
            for parent in parents:
                if parent == node:
                    # Ignore a direct cycle.
                    # NOTE(review): a 'continue' appears to be missing.
                parent_bdeps = build_deps.setdefault(parent, {})
                parent_bdeps[slot_atom] = node
                if not graph.child_nodes(parent):
                    # NOTE(review): appending parent to leaf_nodes
                    # appears to be missing here.

            # Trigger rebuilds for our leaf node. Because all of our children
            # have been processed, the build_deps will be completely filled in,
            # and self.rebuild_list / self.reinstall_list will tell us whether
            # any of our children need to be rebuilt or reinstalled.
            if self._trigger_rebuild(node, node_build_deps):
                need_restart = True

        return need_restart
class _use_changes(tuple):
def __new__(cls, new_use, new_changes, required_use_satisfied=True):
obj = tuple.__new__(cls, [new_use, new_changes])
obj.required_use_satisfied = required_use_satisfied
return obj
class _dynamic_depgraph_config(object):
    """
    ``dynamic_depgraph_config`` is an object that is used to collect settings and important data structures that are
    used in calculating Portage dependencies. Each depgraph created by the code gets its own
    ``dynamic_depgraph_config``, whereas ``frozen_depgraph_config`` is shared among all depgraphs.

    Of particular importance is the instance variable ``self.digraph``, which is an instance of
    ``portage.util.digraph``, a directed graph data structure. ``portage.util.digraph`` is used for a variety of
    purposes in the Portage codebase, but in this particular scenario as ``self.digraph``, it is used to create a
    dependency tree of Portage packages. So for ``self.digraph``, each *node* of the directed graph is a ``Package``,
    while *edges* connect nodes and each edge can have a Priority. The Priority setting is used to help resolve
    circular dependencies, and should be interpreted in the direction of parent to child.

    Conceptually, think of ``self.digraph`` as containing user-specified packages or sets at the very top, with
    dependencies hanging down as children, and dependencies of those children as children of children, etc. The depgraph
    is intended to model dependency relationships, not the order that packages should be installed.

    **resolving the digraph**

    To convert a digraph to an ordered list of packages to merge in an order where all dependencies are properly
    satisfied, we would first start by looking at leaf nodes, which are nodes that have no dependencies of their own. We
    could then traverse the digraph upwards from the leaf nodes, towards the parents. Along the way, depending on emerge
    options, we could make decisions what packages should be installed or rebuilt. This is how ``self.digraph`` is used
    in the code.

    **digraph creation**

    The ``depgraph.py`` code creates the digraph by first adding emerge arguments to the digraph as the main parents,
    so if ``@world`` is specified, then the world set is added as the main parents. Then, ``emerge`` will determine
    the dependencies of these packages, and depending on what options are passed to ``emerge``, will look at installed
    packages, binary packages and available ebuilds that could be merged to satisfy dependencies, and these will be
    added as children in the digraph. Children of children will be added as dependencies as needed, depending on the
    depth setting used by ``emerge``.

    As the digraph is created, it is perfectly fine for Packages to be added to the digraph that conflict with one
    another. After the digraph has been fully populated to the necessary depth, code within ``depgraph.py`` will
    identify any conflicts that are modeled within the digraph and determine the best way to handle them.

    NOTE(review): this block appears truncated in extraction; several
    statements below are syntactically incomplete and are marked.
    """

    def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
        self.myparams = myparams.copy()
        self._vdb_loaded = False
        self._allow_backtracking = allow_backtracking
        # Maps nodes to the reasons they were selected for reinstallation.
        self._reinstall_nodes = {}
        # Contains a filtered view of preferred packages that are selected
        # from available repositories.
        self._filtered_trees = {}
        # Contains installed packages and new packages that have been added
        # to the graph.
        self._graph_trees = {}
        # Caches visible packages returned from _select_package, for use in
        # depgraph._iter_atoms_for_pkg() SLOT logic.
        self._visible_pkgs = {}
        #contains the args created by select_files
        self._initial_arg_list = []
        self.digraph = portage.digraph()
        # manages sets added to the graph
        self.sets = {}
        # contains all nodes pulled in by self.sets
        self._set_nodes = set()
        # Contains only Blocker -> Uninstall edges
        self._blocker_uninstalls = digraph()
        # Contains only Package -> Blocker edges
        self._blocker_parents = digraph()
        # Contains only irrelevant Package -> Blocker edges
        self._irrelevant_blockers = digraph()
        # Contains only unsolvable Package -> Blocker edges
        self._unsolvable_blockers = digraph()
        # Contains all Blocker -> Blocked Package edges
        # Do not initialize this until the depgraph _validate_blockers
        # method is called, so that the _in_blocker_conflict method can
        # assert that _validate_blockers has been called first.
        self._blocked_pkgs = None
        # Contains world packages that have been protected from
        # uninstallation but may not have been added to the graph
        # if the graph is not complete yet.
        self._blocked_world_pkgs = {}
        # Contains packages whose dependencies have been traversed.
        # This use used to check if we have accounted for blockers
        # relevant to a package.
        self._traversed_pkg_deps = set()
        self._parent_atoms = {}
        self._slot_conflict_handler = None
        self._circular_dependency_handler = None
        self._serialized_tasks_cache = None
        self._scheduler_graph = None
        self._displayed_list = None
        self._pprovided_args = []
        self._missing_args = []
        self._masked_installed = set()
        self._masked_license_updates = set()
        self._unsatisfied_deps_for_display = []
        self._unsatisfied_blockers_for_display = None
        self._circular_deps_for_display = None
        self._dep_stack = []
        self._dep_disjunctive_stack = []
        self._unsatisfied_deps = []
        self._initially_unsatisfied_deps = []
        self._ignored_deps = []
        self._highest_pkg_cache = {}
        self._highest_pkg_cache_cp_map = {}
        self._flatten_atoms_cache = {}
        self._changed_deps_pkgs = {}
        # Binary packages that have been rejected because their USE
        # didn't match the user's config. It maps packages to a set
        # of flags causing the rejection.
        self.ignored_binaries = {}
        # State carried across backtracking runs:
        self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
        self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
        self._needed_license_changes = backtrack_parameters.needed_license_changes
        self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
        self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
        self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
        self._prune_rebuilds = backtrack_parameters.prune_rebuilds
        self._need_restart = False
        self._need_config_reload = False
        # For conditions that always require user intervention, such as
        # unsatisfied REQUIRED_USE (currently has no autounmask support).
        self._skip_restart = False
        self._backtrack_infos = {}
        self._buildpkgonly_deps_unsatisfied = False
        self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
        self._displayed_autounmask = False
        self._success_without_autounmask = False
        self._autounmask_backtrack_disabled = False
        self._required_use_unsatisfied = False
        self._traverse_ignored_deps = False
        self._complete_mode = False
        self._slot_operator_deps = {}
        self._installed_sonames = collections.defaultdict(list)
        # NOTE(review): constructor call truncated (arguments missing).
        self._package_tracker = PackageTracker(
        # Track missed updates caused by solved conflicts.
        self._conflict_missed_update = collections.defaultdict(dict)

        for myroot in depgraph._frozen_config.trees:
            self.sets[myroot] = _depgraph_sets()
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            # This dbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)

            def graph_tree():
                # NOTE(review): function body appears truncated (empty).
            graph_tree.dbapi = fakedb
            self._graph_trees[myroot] = {}
            self._filtered_trees[myroot] = {}
            # Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # have already been made.
            self._graph_trees[myroot]["porttree"] = graph_tree
            self._graph_trees[myroot]["vartree"] = graph_tree
            self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._graph_trees[myroot]["graph"] = self.digraph
            self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
            self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

            def filtered_tree():
                # NOTE(review): function body appears truncated (empty).
            filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
            self._filtered_trees[myroot]["porttree"] = filtered_tree
            self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

            # Passing in graph_tree as the vartree here could lead to better
            # atom selections in some cases by causing atoms for packages that
            # have been added to the graph to be preferred over other choices.
            # However, it can trigger atom selections that result in
            # unresolvable direct circular dependencies. For example, this
            # happens with gwydion-dylan which depends on either itself or
            # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid a
            # an unresolvable direct circular dependency.
            #
            # To solve the problem described above, pass in "graph_db" so that
            # packages that have been added to the graph are distinguishable
            # from other available packages and installed packages. Also, pass
            # the parent package into self._select_atoms() calls so that
            # unresolvable direct circular dependencies can be detected and
            # avoided when possible.
            self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._filtered_trees[myroot]["graph"] = self.digraph
            # NOTE(review): right-hand side truncated (dangling
            # continuation).
            self._filtered_trees[myroot]["vartree"] = \
            self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
            self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

            dbs = []
            # (db, pkg_type, built, installed, db_keys)
            if "remove" in self.myparams:
                # For removal operations, use _dep_check_composite_db
                # for availability and visibility checks. This provides
                # consistency with install operations, so we don't
                # get install/uninstall cycles like in bug #332719.
                self._graph_trees[myroot]["porttree"] = filtered_tree
            if "--usepkgonly" not in depgraph._frozen_config.myopts:
                portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
                db_keys = list(portdb._aux_cache_keys)
                dbs.append((portdb, "ebuild", False, False, db_keys))
            if "--usepkg" in depgraph._frozen_config.myopts:
                bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
                db_keys = list(bindb._aux_cache_keys)
                dbs.append((bindb, "binary", True, False, db_keys))
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            # NOTE(review): expression truncated (unterminated subscript).
            db_keys = list(depgraph._frozen_config._trees_orig[myroot
            dbs.append((vardb, "installed", True, True, db_keys))
            self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
# Represents the depth of a node that is unreachable from explicit
# user arguments (or their deep dependencies). Such nodes are pulled
# in by the _complete_graph method.
pkg_tree_map = RootConfig.pkg_tree_map
def __init__(self, settings, trees, myopts, myparams, spinner,
frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
if frozen_config is None:
frozen_config = _frozen_depgraph_config(settings, trees,
myopts, myparams, spinner)
self._frozen_config = frozen_config
self._dynamic_config = _dynamic_depgraph_config(self, myparams,
allow_backtracking, backtrack_parameters)
self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
self._select_atoms = self._select_atoms_highest_available
self._select_package = self._select_pkg_highest_available
self._event_loop = (portage._internal_caller and
global_event_loop() or EventLoop(main=False))
self._select_atoms_parent = None
self.query = UserQuery(myopts).query
    def _index_binpkgs(self):
        # Populate the binary-package provides index for each configured
        # root, so soname dependency resolution can query it later.
        for root in self._frozen_config.trees:
            bindb = self._frozen_config.trees[root]["bintree"].dbapi
            if bindb._provides_index:
                # don't repeat this when backtracking
                # NOTE(review): a 'continue' appears to be missing here.
            root_config = self._frozen_config.roots[root]
            # NOTE(review): truncated — the loop header's subscript is
            # unterminated and the call wrapping self._pkg() is missing.
            for cpv in self._frozen_config._trees_orig[
                self._pkg(cpv, "binary", root_config))
    def _load_vdb(self):
        """
        Load installed package metadata if appropriate. This used to be called
        from the constructor, but that wasn't very nice since this procedure
        is slow and it generates spinner output. So, now it's called on-demand
        by various methods when necessary.
        """
        if self._dynamic_config._vdb_loaded:
            # NOTE(review): an early 'return' appears to be missing here.

        for myroot in self._frozen_config.trees:

            dynamic_deps = "dynamic_deps" in self._dynamic_config.myparams
            preload_installed_pkgs = \
                "--nodeps" not in self._frozen_config.myopts

            fake_vartree = self._frozen_config.trees[myroot]["vartree"]
            if not fake_vartree.dbapi:
                # This needs to be called for the first depgraph, but not for
                # backtracking depgraphs that share the same frozen_config.
                # NOTE(review): the sync call appears to be missing here.

                # FakeVartree.sync() populates virtuals, and we want
                # self.pkgsettings to have them populated too.
                # NOTE(review): right-hand side truncated (dangling
                # continuation).
                self._frozen_config.pkgsettings[myroot] = \

            if preload_installed_pkgs:
                vardb = fake_vartree.dbapi

                if not dynamic_deps:
                    for pkg in vardb:
                        # NOTE(review): loop body appears to be missing.
                # NOTE(review): the branch that builds and runs the
                # metadata-preload scheduler appears truncated below
                # (TaskScheduler call is unterminated).
                max_jobs = self._frozen_config.myopts.get("--jobs")
                max_load = self._frozen_config.myopts.get("--load-average")
                scheduler = TaskScheduler(

        self._dynamic_config._vdb_loaded = True
    def _dynamic_deps_preload(self, fake_vartree):
        # Generator: for each installed package, preload dynamic deps
        # from valid ebuild metadata cache when possible; otherwise
        # yield an EbuildMetadataPhase task to regenerate the metadata.
        portdb = fake_vartree._portdb
        for pkg in fake_vartree.dbapi:
            ebuild_path, repo_path = \
                portdb.findname2(pkg.cpv, myrepo=pkg.repo)
            if ebuild_path is None:
                # No matching ebuild: fall back to installed metadata.
                fake_vartree.dynamic_deps_preload(pkg, None)
                # NOTE(review): a 'continue' appears to be missing here.
            metadata, ebuild_hash = portdb._pull_valid_cache(
                pkg.cpv, ebuild_path, repo_path)
            if metadata is not None:
                fake_vartree.dynamic_deps_preload(pkg, metadata)
                # NOTE(review): a 'continue' appears to be missing here.

            # NOTE(review): call truncated — keyword arguments between
            # repo_path and the exit-listener expression are missing.
            proc = EbuildMetadataPhase(cpv=pkg.cpv,
                portdb=portdb, repo_path=repo_path,
                self._dynamic_deps_proc_exit(pkg, fake_vartree))
            yield proc
class _dynamic_deps_proc_exit(object):
__slots__ = ('_pkg', '_fake_vartree')
def __init__(self, pkg, fake_vartree):
self._pkg = pkg
self._fake_vartree = fake_vartree
def __call__(self, proc):
metadata = None
if proc.returncode == os.EX_OK:
metadata = proc.metadata
self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
def _spinner_update(self):
if self._frozen_config.spinner:
    def _compute_abi_rebuild_info(self):
        """
        Fill self._forced_rebuilds with packages that cause rebuilds.

        NOTE(review): this method appears truncated in extraction —
        several call expressions are unterminated and a number of loop
        control statements appear to be missing; hedged comments mark
        the suspect spots.
        """
        debug = "--debug" in self._frozen_config.myopts
        installed_sonames = self._dynamic_config._installed_sonames
        package_tracker = self._dynamic_config._package_tracker

        # Get all atoms that might have caused a forced rebuild.
        atoms = {}
        for s in self._dynamic_config._initial_arg_list:
            if s.force_reinstall:
                root = s.root_config.root
                atoms.setdefault(root, set()).update(s.pset)

        if debug:
            writemsg_level("forced reinstall atoms:\n",
                level=logging.DEBUG, noiselevel=-1)

            for root in atoms:
                writemsg_level(" root: %s\n" % root,
                    level=logging.DEBUG, noiselevel=-1)
                for atom in atoms[root]:
                    writemsg_level(" atom: %s\n" % atom,
                        level=logging.DEBUG, noiselevel=-1)
            # NOTE(review): the call these arguments belong to is missing.
                level=logging.DEBUG, noiselevel=-1)

        # Go through all slot operator deps and check if one of these deps
        # has a parent that is matched by one of the atoms from above.
        forced_rebuilds = {}

        for root, rebuild_atoms in atoms.items():
            for slot_atom in rebuild_atoms:
                inst_pkg, reinst_pkg = \
                    self._select_pkg_from_installed(root, slot_atom)

                if inst_pkg is reinst_pkg or reinst_pkg is None:
                    # NOTE(review): a 'continue' appears to be missing.

                if (inst_pkg is not None and
                    inst_pkg.requires is not None):
                    for atom in inst_pkg.requires:
                        initial_providers = installed_sonames.get(
                            (root, atom))
                        if initial_providers is None:
                            # NOTE(review): a 'continue' appears missing.
                        # NOTE(review): next(...) call truncated (default
                        # argument / closing paren missing).
                        final_provider = next(
                            package_tracker.match(root, atom),
                        if final_provider:
                            # NOTE(review): a 'continue' appears missing.
                        for provider in initial_providers:
                            # Find the replacement child.
                            # NOTE(review): the generator expression is
                            # truncated (the match() call line and the
                            # forced_rebuilds.setdefault( line are
                            # missing below).
                            child = next((pkg for pkg in
                                root, provider.slot_atom)
                                if not pkg.installed), None)
                            if child is None:
                                root, {}).setdefault(
                                child, set()).add(inst_pkg)

                # Generate pseudo-deps for any slot-operator deps of
                # inst_pkg. Its deps aren't in _slot_operator_deps
                # because it hasn't been added to the graph, but we
                # are interested in any rebuilds that it triggered.
                built_slot_op_atoms = []
                if inst_pkg is not None:
                    selected_atoms = self._select_atoms_probe(
                        inst_pkg.root, inst_pkg)
                    for atom in selected_atoms:
                        if atom.slot_operator_built:
                            # NOTE(review): append appears to be missing.
                    if not built_slot_op_atoms:
                        # NOTE(review): a 'continue' appears missing.

                # Use a cloned list, since we may append to it below.
                deps = self._dynamic_config._slot_operator_deps.get(
                    (root, slot_atom), [])[:]

                if built_slot_op_atoms and reinst_pkg is not None:
                    # NOTE(review): child_nodes(...) call truncated.
                    for child in self._dynamic_config.digraph.child_nodes(
                        if child.installed:
                            # NOTE(review): 'continue' appears missing.
                        for atom in built_slot_op_atoms:
                            # NOTE: Since atom comes from inst_pkg, and
                            # reinst_pkg is the replacement parent, there's
                            # no guarantee that atom will completely match
                            # child. So, simply use atom.cp and atom.slot
                            # for matching.
                            if atom.cp != child.cp:
                                # NOTE(review): 'continue' appears missing.
                            if atom.slot and atom.slot != child.slot:
                                # NOTE(review): 'continue' appears missing.
                            deps.append(Dependency(atom=atom, child=child,
                                root=child.root, parent=reinst_pkg))

                for dep in deps:
                    if dep.child.installed:
                        # Find the replacement child.
                        # NOTE(review): generator truncated (match() call
                        # line missing) and the else-branch separating
                        # L644 from L645 appears to be missing.
                        child = next((pkg for pkg in
                            dep.root, dep.child.slot_atom)
                            if not pkg.installed), None)
                        if child is None:
                            inst_child = dep.child
                        child = dep.child
                        inst_child = self._select_pkg_from_installed(
                            child.root, child.slot_atom)[0]

                    # Make sure the child's slot/subslot has changed. If it
                    # hasn't, then another child has forced this rebuild.
                    if inst_child and inst_child.slot == child.slot and \
                        inst_child.sub_slot == child.sub_slot:
                        # NOTE(review): a 'continue' appears missing.

                    if dep.parent.installed:
                        # Find the replacement parent.
                        # NOTE(review): generator truncated as above.
                        parent = next((pkg for pkg in
                            dep.parent.root, dep.parent.slot_atom)
                            if not pkg.installed), None)
                        if parent is None:
                            parent = dep.parent

                    # The child has forced a rebuild of the parent
                    forced_rebuilds.setdefault(root, {}
                        ).setdefault(child, set()).add(parent)

        if debug:
            writemsg_level("slot operator dependencies:\n",
                level=logging.DEBUG, noiselevel=-1)

            for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
                writemsg_level(" (%s, %s)\n" % \
                    (root, slot_atom), level=logging.DEBUG, noiselevel=-1)
                for dep in deps:
                    writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
                    writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)
            # NOTE(review): the call these arguments belong to is missing.
                level=logging.DEBUG, noiselevel=-1)

            writemsg_level("forced rebuilds:\n",
                level=logging.DEBUG, noiselevel=-1)

            for root in forced_rebuilds:
                writemsg_level(" root: %s\n" % root,
                    level=logging.DEBUG, noiselevel=-1)
                for child in forced_rebuilds[root]:
                    writemsg_level(" child: %s\n" % child,
                        level=logging.DEBUG, noiselevel=-1)
                    for parent in forced_rebuilds[root][child]:
                        writemsg_level(" parent: %s\n" % parent,
                            level=logging.DEBUG, noiselevel=-1)
            # NOTE(review): the call these arguments belong to is missing.
                level=logging.DEBUG, noiselevel=-1)

        self._forced_rebuilds = forced_rebuilds
def _show_abi_rebuild_info(self):
if not self._forced_rebuilds:
writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)
for root in self._forced_rebuilds:
for child in self._forced_rebuilds[root]:
writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
for parent in self._forced_rebuilds[root][child]:
writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
    def _eliminate_ignored_binaries(self):
        """
        Eliminate any package from self._dynamic_config.ignored_binaries
        for which a more optimal alternative exists.

        NOTE(review): the suites of the three conditionals below appear
        truncated (the statements that actually remove entries from
        ignored_binaries are missing).
        """
        for pkg in list(self._dynamic_config.ignored_binaries):

            for selected_pkg in self._dynamic_config._package_tracker.match(
                pkg.root, pkg.slot_atom):

                if selected_pkg > pkg:
                    # NOTE(review): removal/break appears to be missing.

                # NOTE: The Package.__ge__ implementation accounts for
                # differences in build_time, so the warning about "ignored"
                # packages will be triggered if both packages are the same
                # version and selected_pkg is not the most recent build.
                if (selected_pkg.type_name == "binary" and
                    selected_pkg >= pkg):
                    # NOTE(review): suite appears to be missing.

                if selected_pkg.installed and \
                    selected_pkg.cpv == pkg.cpv and \
                    selected_pkg.build_time == pkg.build_time:
                    # We don't care about ignored binaries when an
                    # identical installed instance is selected to
                    # fill the slot.
                    # NOTE(review): removal/break appears to be missing.
    def _ignored_binaries_autounmask_backtrack(self):
        """
        Check if there are ignored binaries that would have been
        accepted with the current autounmask USE changes.

        @rtype: bool
        @return: True if there are unnecessary rebuilds that
            can be avoided by backtracking
        """
        # NOTE(review): the condition list of this all(...) call is
        # missing (unterminated list literal).
        if not all([
            return False

        # _eliminate_ignored_binaries may have eliminated
        # all of the ignored binaries
        if not self._dynamic_config.ignored_binaries:
            return False

        # Index the autounmask USE changes of packages in the graph by
        # (root, slot) for quick lookup below.
        use_changes = collections.defaultdict(
            functools.partial(collections.defaultdict, dict))
        for pkg, (new_use, changes) in self._dynamic_config._needed_use_config_changes.items():
            if pkg in self._dynamic_config.digraph:
                use_changes[pkg.root][pkg.slot_atom] = (pkg, new_use)

        for pkg in self._dynamic_config.ignored_binaries:
            selected_pkg, new_use = use_changes[pkg.root].get(
                pkg.slot_atom, (None, None))
            if new_use is None:
                # NOTE(review): a 'continue' appears to be missing here.
            if new_use != pkg.use.enabled:
                # NOTE(review): a 'continue' appears to be missing here.
            if selected_pkg > pkg:
                return True

        return False
# Warn about ebuilds whose dependencies changed since the installed
# instance was built (without a revision bump). Silent when
# --changed-deps/--dynamic-deps is active or no affected package is in
# the dependency graph.
# NOTE(review): mangled extract -- early "return" statements, the msg
# list contents and the writemsg noiselevel argument lines appear to be
# missing in several places below.
def _changed_deps_report(self):
Report ebuilds for which the ebuild dependencies have
changed since the installed instance was built. This is
completely silent in the following cases:
* --changed-deps or --dynamic-deps is enabled
* none of the packages with changed deps are in the graph
if (self._dynamic_config.myparams.get("changed_deps", "n") == "y" or
"dynamic_deps" in self._dynamic_config.myparams):
report_pkgs = []
for pkg, ebuild in self._dynamic_config._changed_deps_pkgs.items():
# Skip pairs from different repos; only same-repo changes are
# reported as missing revision bumps.
if pkg.repo != ebuild.repo:
report_pkgs.append((pkg, ebuild))
if not report_pkgs:
# TODO: Detect and report various issues:
# - packages with unsatisfiable dependencies
# - packages involved directly in slot or blocker conflicts
# - direct parents of conflict packages
# - packages that prevent upgrade of dependencies to latest versions
graph = self._dynamic_config.digraph
in_graph = False
for pkg, ebuild in report_pkgs:
if pkg in graph:
in_graph = True
# Packages with changed deps are harmless if they're not in the
# graph, so it's safe to silently ignore them. This suppresses
# noise for the unaffected user, even though some of the changed
# dependencies might be worthy of revision bumps.
if not in_graph:
writemsg("\n%s\n\n" % colorize("WARN",
"!!! Detected ebuild dependency change(s) without revision bump:"),
for pkg, ebuild in report_pkgs:
writemsg(" %s::%s" % (pkg.cpv, pkg.repo), noiselevel=-1)
# Only mention the root when it is not the default "/".
if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
msg = []
if '--quiet' not in self._frozen_config.myopts:
"NOTE: Refer to the following page for more information about dependency",
" change(s) without revision bump:",
" In order to suppress reports about dependency changes, add",
" --changed-deps-report=n to the EMERGE_DEFAULT_OPTS variable in",
" '/etc/portage/make.conf'.",
# Include this message for --quiet mode, since the user may be experiencing
# problems that are solvable by using --changed-deps.
"HINT: In order to avoid problems involving changed dependencies, use the",
" --changed-deps option to automatically trigger rebuilds when changed",
" dependencies are detected. Refer to the emerge man page for more",
" information about this option.",
for line in msg:
if line:
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
# Report binary packages that were ignored, grouped by reason
# ("respect_use", "changed_deps"), unless --quiet is set or the
# corresponding binpkg_* parameter explicitly silences the category.
# NOTE(review): mangled extract -- the inner "for reason, info in"
# header at the top of the grouping loop is truncated, and the calls to
# the per-reason display helpers at the bottom are missing.
def _show_ignored_binaries(self):
Show binaries that have been ignored because their USE didn't
match the user's config.
if not self._dynamic_config.ignored_binaries \
or '--quiet' in self._frozen_config.myopts:
ignored_binaries = {}
for pkg in self._dynamic_config.ignored_binaries:
for reason, info in self._dynamic_config.\
ignored_binaries.setdefault(reason, {})[pkg] = info
# An explicit y/n setting for these params suppresses the report
# for the matching category.
if self._dynamic_config.myparams.get(
"binpkg_respect_use") in ("y", "n"):
ignored_binaries.pop("respect_use", None)
if self._dynamic_config.myparams.get(
"binpkg_changed_deps") in ("y", "n"):
ignored_binaries.pop("changed_deps", None)
if not ignored_binaries:
if ignored_binaries.get("respect_use"):
if ignored_binaries.get("changed_deps"):
# Display binaries ignored for USE mismatch, one package.use-pasteable
# line per package, followed by a hint about --binpkg-respect-use.
# NOTE(review): mangled extract -- the statement appending each flag to
# flag_display inside the sorting loop is missing, and the msg list is
# missing its closing bracket.
def _show_ignored_binaries_respect_use(self, respect_use):
writemsg("\n!!! The following binary packages have been ignored " + \
"due to non matching USE:\n\n", noiselevel=-1)
for pkg, flags in respect_use.items():
flag_display = []
for flag in sorted(flags):
# Prefix disabled flags with "-" so the line is valid
# package.use syntax.
if flag not in pkg.use.enabled:
flag = "-" + flag
flag_display = " ".join(flag_display)
# The user can paste this line into package.use
writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
msg = [
"NOTE: The --binpkg-respect-use=n option will prevent emerge",
" from ignoring these binary packages if possible.",
" Using --binpkg-respect-use=y will silence this warning."
for line in msg:
if line:
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
# Display binaries ignored because of changed dependencies, then hint
# about --binpkg-changed-deps.
# NOTE(review): mangled extract -- the first writemsg call is missing
# its closing "noiselevel=-1)" argument line, and the msg list is
# missing its closing bracket.
def _show_ignored_binaries_changed_deps(self, changed_deps):
writemsg("\n!!! The following binary packages have been "
"ignored due to changed dependencies:\n\n",
for pkg in changed_deps:
msg = " %s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
if pkg.root_config.settings["ROOT"] != "/":
msg += " for %s" % pkg.root
writemsg("%s\n" % msg, noiselevel=-1)
msg = [
"NOTE: The --binpkg-changed-deps=n option will prevent emerge",
" from ignoring these binary packages if possible.",
" Using --binpkg-changed-deps=y will silence this warning."
for line in msg:
if line:
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
# Build and return a dict keyed by (root, slot_atom) holding the
# highest-version missed update per slot, as
# (pkg, mask_type, parent_atoms) tuples.
# NOTE(review): mangled extract -- the "for pkg, mask_reasons in"
# header at L849 is missing its iterable and colon, and several
# continue/break statements inside the loops appear to be lost.
def _get_missed_updates(self):
# In order to minimize noise, show only the highest
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in \
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
missed_update = True
any_selected = False
for chosen_pkg in self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom):
any_selected = True
# Not a missed update if something at least as new (or an
# equal-version rebuild) was chosen for this slot.
if chosen_pkg > pkg or (not chosen_pkg.installed and \
chosen_pkg.version == pkg.version):
missed_update = False
if any_selected and missed_update:
k = (pkg.root, pkg.slot_atom)
if k in missed_updates:
other_pkg, mask_type, parent_atoms = missed_updates[k]
# Keep only the highest version per slot.
if other_pkg > pkg:
for mask_type, parent_atoms in mask_reasons.items():
if not parent_atoms:
missed_updates[k] = (pkg, mask_type, parent_atoms)
return missed_updates
# Group missed updates by mask type and dispatch to the specific
# display helpers (slot conflicts, missing dependencies).
# NOTE(review): mangled extract -- the setdefault call feeding
# missed_update_types is truncated at the top of the loop, and the
# display-helper calls at the bottom have lost their function names.
def _show_missed_update(self):
missed_updates = self._get_missed_updates()
if not missed_updates:
missed_update_types = {}
for pkg, mask_type, parent_atoms in missed_updates.values():
[]).append((pkg, parent_atoms))
# Under --quiet (without --debug), suppress both report types.
if '--quiet' in self._frozen_config.myopts and \
'--debug' not in self._frozen_config.myopts:
missed_update_types.pop("slot conflict", None)
missed_update_types.pop("missing dependency", None)
missed_update_types.get("slot conflict"))
missed_update_types.get("missing dependency"))
# Report updates skipped because of unsatisfied dependencies, showing
# the full unsatisfied-dep output per package, and an abbreviated list
# for updates masked by backtracking (bug #285832).
# NOTE(review): mangled extract -- the "try:" that pairs with the
# "except self._backtrack_mask:" is missing, as is the keyword argument
# line of the first _show_unsatisfied_dep call.
def _show_missed_update_unsatisfied_dep(self, missed_updates):
if not missed_updates:
backtrack_masked = []
for pkg, parent_atoms in missed_updates:
for parent, root, atom in parent_atoms:
self._show_unsatisfied_dep(root, atom, myparent=parent,
except self._backtrack_mask:
# This is displayed below in abbreviated form.
backtrack_masked.append((pkg, parent_atoms))
writemsg("\n!!! The following update has been skipped " + \
"due to unsatisfied dependencies:\n\n", noiselevel=-1)
writemsg(str(pkg.slot_atom), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n\n", noiselevel=-1)
# The package that actually filled this slot, if any.
selected_pkg = next(self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom), None)
writemsg(" selected: %s\n" % (selected_pkg,), noiselevel=-1)
writemsg(" skipped: %s (see unsatisfied dependency below)\n"
% (pkg,), noiselevel=-1)
for parent, root, atom in parent_atoms:
self._show_unsatisfied_dep(root, atom, myparent=parent)
writemsg("\n", noiselevel=-1)
if backtrack_masked:
# These are shown in abbreviated form, in order to avoid terminal
# flooding from mask messages as reported in bug #285832.
writemsg("\n!!! The following update(s) have been skipped " + \
"due to unsatisfied dependencies\n" + \
"!!! triggered by backtracking:\n\n", noiselevel=-1)
for pkg, parent_atoms in backtrack_masked:
writemsg(str(pkg.slot_atom), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
# Warn about updates/rebuilds skipped due to slot conflicts, printing
# each conflicting parent atom with USE-mismatch highlighting.
# NOTE(review): mangled extract -- the msg.append that prints the
# skipped package itself (before "conflicts with") is missing, as are
# the branch bodies of the isinstance check.
def _show_missed_update_slot_conflicts(self, missed_updates):
if not missed_updates:
msg = []
msg.append("\nWARNING: One or more updates/rebuilds have been " + \
"skipped due to a dependency conflict:\n\n")
indent = " "
for pkg, parent_atoms in missed_updates:
if pkg.root_config.settings["ROOT"] != "/":
msg.append(" for %s" % (pkg.root,))
msg.append(" conflicts with\n")
for parent, atom in parent_atoms:
if isinstance(parent,
(PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
# Display the specific atom from SetArg or
# Package types.
atom, marker = format_unmatched_atom(
pkg, atom, self._pkg_use_enabled)
msg.append("%s required by %s\n" % (atom, parent))
writemsg("".join(msg), noiselevel=-1)
# Print the slot-conflict report produced by slot_conflict_handler,
# followed by general advice (package.mask, --backtrack) unless --quiet.
# NOTE(review): mangled extract -- the docstring's closing triple quote
# never appears, early "return" statements after the guards are gone,
# and the --backtrack advice sentence is missing its final append.
def _show_slot_collision_notice(self):
"""Show an informational message advising the user to mask one of the
the packages. In some cases it may be possible to resolve this
automatically, but support for backtracking (removal nodes that have
already been selected) will be required in order to handle all possible
if not any(self._dynamic_config._package_tracker.slot_conflicts()):
# Lazily construct and cache the conflict handler.
if self._dynamic_config._slot_conflict_handler is None:
self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
handler = self._dynamic_config._slot_conflict_handler
conflict = handler.get_conflict()
writemsg(conflict, noiselevel=-1)
explanation = handler.get_explanation()
if explanation:
writemsg(explanation, noiselevel=-1)
if "--quiet" in self._frozen_config.myopts:
msg = []
msg.append("It may be possible to solve this problem ")
msg.append("by using package.mask to prevent one of ")
msg.append("those packages from being selected. ")
msg.append("However, it is also possible that conflicting ")
msg.append("dependencies exist such that they are impossible to ")
msg.append("satisfy simultaneously. If such a conflict exists in ")
msg.append("the dependencies of two different packages, then those ")
msg.append("packages can not be installed simultaneously.")
backtrack_opt = self._frozen_config.myopts.get('--backtrack')
# Only suggest a larger --backtrack value when backtracking is
# disabled and the user has not already set a large value.
if not self._dynamic_config._allow_backtracking and \
(backtrack_opt is None or \
(backtrack_opt > 0 and backtrack_opt < 30)):
msg.append(" You may want to try a larger value of the ")
msg.append("--backtrack option, such as --backtrack=30, ")
msg.append("in order to see if that will solve this conflict ")
for line in textwrap.wrap(''.join(msg), 70):
writemsg(line + '\n', noiselevel=-1)
writemsg('\n', noiselevel=-1)
msg = []
msg.append("For more information, see MASKED PACKAGES ")
msg.append("section in the emerge man page or refer ")
msg.append("to the Gentoo Handbook.")
for line in textwrap.wrap(''.join(msg), 70):
writemsg(line + '\n', noiselevel=-1)
writemsg('\n', noiselevel=-1)
# Attempt to solve slot conflicts (those not involving slot operator
# rebuilds) by building a conflict graph, forcing the packages that
# must stay, and removing the rest. Records "slot conflict" missed
# updates for removed packages.
# NOTE(review): mangled extract -- this large method has lost many
# statements (appends to conflicts/conflict_pkgs, indentation, several
# writemsg_level call heads, removal and dependency re-processing
# calls). The comments below describe intent only; restore the body
# from upstream portage before relying on it.
def _solve_non_slot_operator_slot_conflicts(self):
This function solves slot conflicts which can
be solved by simply choosing one of the conflicting
and removing all the other ones.
It is able to solve somewhat more complex cases where
conflicts can only be solved simultaniously.
debug = "--debug" in self._frozen_config.myopts
# List all conflicts. Ignore those that involve slot operator rebuilds
# as the logic there needs special slot conflict behavior which isn't
# provided by this function.
conflicts = []
for conflict in self._dynamic_config._package_tracker.slot_conflicts():
slot_key = conflict.root, conflict.atom
if slot_key not in self._dynamic_config._slot_operator_replace_installed:
if not conflicts:
if debug:
"\n!!! Slot conflict handler started.\n",
level=logging.DEBUG, noiselevel=-1)
# Get a set of all conflicting packages.
conflict_pkgs = set()
for conflict in conflicts:
# Get the list of other packages which are only
# required by conflict packages.
indirect_conflict_candidates = set()
for pkg in conflict_pkgs:
indirect_conflict_pkgs = set()
while indirect_conflict_candidates:
pkg = indirect_conflict_candidates.pop()
only_conflict_parents = True
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
only_conflict_parents = False
if not only_conflict_parents:
for child in self._dynamic_config.digraph.child_nodes(pkg):
if child in conflict_pkgs or child in indirect_conflict_pkgs:
# Create a graph containing the conflict packages
# and a special 'non_conflict_node' that represents
# all non-conflict packages.
conflict_graph = digraph()
non_conflict_node = "(non-conflict package)"
conflict_graph.add(non_conflict_node, None)
for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
conflict_graph.add(pkg, None)
# Add parent->child edges for each conflict package.
# Parents, which aren't conflict packages are represented
# by 'non_conflict_node'.
# If several conflicting packages are matched, but not all,
# add a tuple with the matched packages to the graph.
class or_tuple(tuple):
Helper class for debug printing.
def __str__(self):
return "(%s)" % ",".join(str(pkg) for pkg in self)
non_matching_forced = set()
for conflict in conflicts:
if debug:
writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1)
writemsg_level(" root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1)
writemsg_level(" atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1)
for pkg in conflict:
writemsg_level(" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1)
all_parent_atoms = set()
highest_pkg = None
inst_pkg = None
for pkg in conflict:
if pkg.installed:
inst_pkg = pkg
if highest_pkg is None or highest_pkg < pkg:
highest_pkg = pkg
self._dynamic_config._parent_atoms.get(pkg, []))
for parent, atom in all_parent_atoms:
is_arg_parent = (inst_pkg is not None and
not self._want_installed_pkg(inst_pkg))
is_non_conflict_parent = parent not in conflict_pkgs and \
parent not in indirect_conflict_pkgs
if debug:
writemsg_level(" parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1)
writemsg_level(" arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent),
level=logging.DEBUG, noiselevel=-1)
writemsg_level(" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1)
if is_non_conflict_parent:
parent = non_conflict_node
matched = []
for pkg in conflict:
if (pkg is highest_pkg and
not highest_pkg.installed and
inst_pkg is not None and
inst_pkg.sub_slot != highest_pkg.sub_slot and
not self._downgrade_probe(highest_pkg)):
# If an upgrade is desired, force the highest
# version into the graph (bug #531656).
if atom.match(pkg.with_use(
self._pkg_use_enabled(pkg))) and \
not (is_arg_parent and pkg.installed):
if debug:
for match in matched:
writemsg_level(" match: %s\n" % match, level=logging.DEBUG, noiselevel=-1)
if len(matched) > 1:
# Even if all packages match, this parent must still
# be added to the conflict_graph. Otherwise, we risk
# removing all of these packages from the depgraph,
# which could cause a missed update (bug #522084).
conflict_graph.add(or_tuple(matched), parent)
elif len(matched) == 1:
conflict_graph.add(matched[0], parent)
# This typically means that autounmask broke a
# USE-dep, but it could also be due to the slot
# not matching due to multislot (bug #220341).
# Either way, don't try to solve this conflict.
# Instead, force them all into the graph so that
# they are protected from removal.
if debug:
for pkg in conflict:
writemsg_level(" non-match: %s\n" % pkg,
level=logging.DEBUG, noiselevel=-1)
for pkg in indirect_conflict_pkgs:
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if parent not in conflict_pkgs and \
parent not in indirect_conflict_pkgs:
parent = non_conflict_node
conflict_graph.add(pkg, parent)
if debug:
"\n!!! Slot conflict graph:\n",
level=logging.DEBUG, noiselevel=-1)
# Now select required packages. Collect them in the
# 'forced' set.
forced = set([non_conflict_node])
unexplored = set([non_conflict_node])
# or_tuples get special handling. We first explore
# all packages in the hope of having forced one of
# the packages in the tuple. This way we don't have
# to choose one.
unexplored_tuples = set()
explored_nodes = set()
while unexplored:
while True:
node = unexplored.pop()
except KeyError:
for child in conflict_graph.child_nodes(node):
# Don't explore a node more than once, in order
# to avoid infinite recursion. The forced set
# cannot be used for this purpose, since it can
# contain unexplored nodes from non_matching_forced.
if child in explored_nodes:
if isinstance(child, Package):
# Now handle unexplored or_tuples. Move on with packages
# once we had to choose one.
while unexplored_tuples:
nodes = unexplored_tuples.pop()
if any(node in forced for node in nodes):
# At least one of the packages in the
# tuple is already forced, which means the
# dependency represented by this tuple
# is satisfied.
# We now have to choose one of packages in the tuple.
# In theory one could solve more conflicts if we'd be
# able to try different choices here, but that has lots
# of other problems. For now choose the package that was
# pulled first, as this should be the most desirable choice
# (otherwise it wouldn't have been the first one).
# Remove 'non_conflict_node' and or_tuples from 'forced'.
forced = set(pkg for pkg in forced if isinstance(pkg, Package))
# Add dependendencies of forced packages.
stack = list(forced)
traversed = set()
while stack:
pkg = stack.pop()
for child in conflict_graph.child_nodes(pkg):
if (isinstance(child, Package) and
child not in traversed):
non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)
if debug:
"\n!!! Slot conflict solution:\n",
level=logging.DEBUG, noiselevel=-1)
for conflict in conflicts:
" Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
level=logging.DEBUG, noiselevel=-1)
for pkg in conflict:
if pkg in forced:
" keep: %s\n" % pkg,
level=logging.DEBUG, noiselevel=-1)
" remove: %s\n" % pkg,
level=logging.DEBUG, noiselevel=-1)
broken_packages = set()
for pkg in non_forced:
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if isinstance(parent, Package) and parent not in non_forced:
# Non-forcing set args are expected to be a parent of all
# packages in the conflict.
# Process the dependencies of choosen conflict packages
# again to properly account for blockers.
# Filter out broken packages which have been removed during
# recursive removal in self._remove_pkg.
broken_packages = list(pkg for pkg in broken_packages if pkg in broken_packages \
if self._dynamic_config._package_tracker.contains(pkg, installed=False))
if broken_packages:
# Process dependencies. This cannot fail because we just ensured that
# the remaining packages satisfy all dependencies.
# Record missed updates.
for conflict in conflicts:
if not any(pkg in non_forced for pkg in conflict):
for pkg in conflict:
if pkg not in non_forced:
for other in conflict:
if other is pkg:
for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
if not atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
"slot conflict", set())
self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
(parent, atom))
# Validate blockers, then process every remaining slot conflict,
# optionally triggering slot-operator reinstalls when backtracking is
# allowed.
# NOTE(review): mangled extract -- the calls to
# _solve_non_slot_operator_slot_conflicts / _process_slot_conflict /
# _slot_operator_trigger_reinstalls inside the loop bodies are missing.
def _process_slot_conflicts(self):
If there are any slot conflicts and backtracking is enabled,
_complete_graph should complete the graph before this method
is called, so that all relevant reverse dependencies are
available for use in backtracking decisions.
if not self._validate_blockers():
# Blockers don't trigger the _skip_restart flag, since
# backtracking may solve blockers when it solves slot
# conflicts (or by blind luck).
raise self._unknown_internal_error()
# Both _process_slot_conflict and _slot_operator_trigger_reinstalls
# can call _slot_operator_update_probe, which requires that
# self._dynamic_config._blocked_pkgs has been initialized by a
# call to the _validate_blockers method.
for conflict in self._dynamic_config._package_tracker.slot_conflicts():
if self._dynamic_config._allow_backtracking:
# Identify the parent atoms that only match a subset of the packages
# pulled into one slot, and schedule ABI or plain slot-conflict
# backtracking for the packages they exclude.
# NOTE(review): mangled extract -- parent-atom collection into
# slot_parent_atoms, the backtracking-loop raise, appends to
# conflict_pkgs/remaining, and several continue statements are missing.
def _process_slot_conflict(self, conflict):
Process slot conflict data to identify specific atoms which
lead to conflict. These atoms only match a subset of the
packages that have been pulled into a given slot.
root = conflict.root
slot_atom = conflict.atom
slot_nodes = conflict.pkgs
debug = "--debug" in self._frozen_config.myopts
slot_parent_atoms = set()
for pkg in slot_nodes:
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if not parent_atoms:
conflict_pkgs = []
conflict_atoms = {}
for pkg in slot_nodes:
# A package that is already runtime-masked while backtracking
# is allowed indicates a backtracking loop.
if self._dynamic_config._allow_backtracking and \
pkg in self._dynamic_config._runtime_pkg_mask:
if debug:
"!!! backtracking loop detected: %s %s\n" % \
level=logging.DEBUG, noiselevel=-1)
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if parent_atoms is None:
parent_atoms = set()
self._dynamic_config._parent_atoms[pkg] = parent_atoms
all_match = True
for parent_atom in slot_parent_atoms:
if parent_atom in parent_atoms:
parent, atom = parent_atom
if atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
all_match = False
conflict_atoms.setdefault(parent_atom, set()).add(pkg)
if not all_match:
if conflict_pkgs and \
self._dynamic_config._allow_backtracking and \
not self._accept_blocker_conflicts():
remaining = []
for pkg in conflict_pkgs:
# Prefer an ABI rebuild of the parent over masking the
# conflicting package outright.
if self._slot_conflict_backtrack_abi(pkg,
slot_nodes, conflict_atoms):
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
config.setdefault("slot_conflict_abi", set()).add(pkg)
if remaining:
self._slot_confict_backtrack(root, slot_atom,
slot_parent_atoms, remaining)
# Record slot-conflict backtracking data (packages to mask plus the
# parent atoms they fail) and request a depgraph restart.
# NOTE(review): upstream method name contains the historical typo
# "confict"; keep it -- callers use this exact name. The extract is
# also missing the backtrack_infos setdefault line that the
# "slot conflict" append at the end belongs to.
def _slot_confict_backtrack(self, root, slot_atom,
all_parents, conflict_pkgs):
debug = "--debug" in self._frozen_config.myopts
existing_node = next(self._dynamic_config._package_tracker.match(
root, slot_atom, installed=False))
# In order to avoid a missed update, first mask lower versions
# that conflict with higher versions (the backtracker visits
# these in reverse order).
backtrack_data = []
for to_be_masked in conflict_pkgs:
# For missed update messages, find out which
# atoms matched to_be_selected that did not
# match to_be_masked.
parent_atoms = \
self._dynamic_config._parent_atoms.get(to_be_masked, set())
conflict_atoms = set(parent_atom for parent_atom in all_parents \
if parent_atom not in parent_atoms)
backtrack_data.append((to_be_masked, conflict_atoms))
to_be_masked = backtrack_data[-1][0]
"slot conflict", []).append(backtrack_data)
self._dynamic_config._need_restart = True
if debug:
msg = []
msg.append("backtracking due to slot conflict:")
msg.append(" first package: %s" % existing_node)
msg.append(" package to mask: %s" % to_be_masked)
msg.append(" slot: %s" % slot_atom)
msg.append(" parents: %s" % ", ".join( \
"(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
writemsg_level("".join("%s\n" % l for l in msg),
noiselevel=-1, level=logging.DEBUG)
# Try to resolve conflict atoms with slot/sub-slot deps by scheduling a
# rebuild of the (built) parent package via backtracking; returns True
# when at least one such rebuild was scheduled.
# NOTE(review): mangled extract -- the continue statements after the
# guards, the right-hand side of "new_dep =", and the
# _slot_operator_update_backtrack call are missing.
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
If one or more conflict atoms have a slot/sub-slot dep that can be resolved
by rebuilding the parent package, then schedule the rebuild via
backtracking, and return True. Otherwise, return False.
found_update = False
for parent_atom, conflict_pkgs in conflict_atoms.items():
parent, atom = parent_atom
if not isinstance(parent, Package):
if not parent.built:
# Only soname deps and built slot operator deps can be
# fixed by rebuilding the parent.
if not atom.soname and not (
atom.package and atom.slot_operator_built):
for other_pkg in slot_nodes:
if other_pkg in conflict_pkgs:
dep = Dependency(atom=atom, child=other_pkg,
parent=parent, root=pkg.root)
new_dep = \
if new_dep is not None:
found_update = True
return found_update
# Probe for an unbuilt ebuild of dep.child with a different
# slot/sub-slot (a sub-slot change without revbump, bug #456208);
# returns the unbuilt replacement, or None.
# NOTE(review): mangled extract -- the "try:" paired with "except
# PackageNotFound:" and several continue statements after the guards
# are missing.
def _slot_change_probe(self, dep):
@rtype: bool
@return: True if dep.child should be rebuilt due to a change
in sub-slot (without revbump, as in bug #456208).
# Only relevant when an unbuilt parent depends on a built child.
if not (isinstance(dep.parent, Package) and \
not dep.parent.built and dep.child.built):
return None
root_config = self._frozen_config.roots[dep.root]
matches = []
matches.append(self._pkg(dep.child.cpv, "ebuild",
root_config, myrepo=dep.child.repo))
except PackageNotFound:
for unbuilt_child in chain(matches,
self._iter_match_pkgs(root_config, "ebuild",
Atom("=%s" % (dep.child.cpv,)))):
if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
if self._frozen_config.excluded_pkgs.findAtomForPackage(
if not self._pkg_visibility_check(unbuilt_child):
return None
# Same slot and sub-slot means no rebuild is needed.
if unbuilt_child.slot == dep.child.slot and \
unbuilt_child.sub_slot == dep.child.sub_slot:
return None
return unbuilt_child
# Record backtracking data for a slot/sub-slot change of dep.child:
# mask the unwanted binary or schedule replacement of the installed
# instance, then request a restart.
# NOTE(review): mangled extract -- the writemsg_level call that emits
# the debug msg lines and the config.setdefault recording the
# reinstalls set are missing.
def _slot_change_backtrack(self, dep, new_child_slot):
child = dep.child
if "--debug" in self._frozen_config.myopts:
msg = []
msg.append("backtracking due to slot/sub-slot change:")
msg.append(" child package: %s" % child)
msg.append(" child slot: %s/%s" %
(child.slot, child.sub_slot))
msg.append(" new child: %s" % new_child_slot)
msg.append(" new child slot: %s/%s" %
(new_child_slot.slot, new_child_slot.sub_slot))
msg.append(" parent package: %s" % dep.parent)
msg.append(" atom: %s" % dep.atom)
noiselevel=-1, level=logging.DEBUG)
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
# mask unwanted binary packages if necessary
masks = {}
if not child.installed:
masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
if masks:
config.setdefault("slot_operator_mask_built", {}).update(masks)
# trigger replacement of installed packages if necessary
reinstalls = set()
if child.installed:
replacement_atom = self._replace_installed_atom(child)
if replacement_atom is not None:
reinstalls.add((child.root, replacement_atom))
if reinstalls:
self._dynamic_config._need_restart = True
# Record backtracking data for a missed slot/ABI update: mask unwanted
# built packages and/or schedule replacement of installed ones, then
# request a restart.
# NOTE(review): mangled extract -- the signature at the def line is
# truncated (the trailing "new_dep=None):" parameter is missing), as
# are the debug writemsg_level call and the "slot_operator_replace_
# installed" config recording.
def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
if new_child_slot is None:
child = dep.child
child = new_child_slot
if "--debug" in self._frozen_config.myopts:
msg = []
msg.append("backtracking due to missed slot abi update:")
msg.append(" child package: %s" % child)
if new_child_slot is not None:
msg.append(" new child slot package: %s" % new_child_slot)
msg.append(" parent package: %s" % dep.parent)
if new_dep is not None:
msg.append(" new parent pkg: %s" % new_dep.parent)
msg.append(" atom: %s" % dep.atom)
noiselevel=-1, level=logging.DEBUG)
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
# mask unwanted binary packages if necessary
abi_masks = {}
if new_child_slot is None:
if not child.installed:
abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
if not dep.parent.installed:
abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
if abi_masks:
config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
# trigger replacement of installed packages if necessary
abi_reinstalls = set()
if dep.parent.installed:
if new_dep is not None:
replacement_atom = new_dep.parent.slot_atom
replacement_atom = self._replace_installed_atom(dep.parent)
if replacement_atom is not None:
abi_reinstalls.add((dep.parent.root, replacement_atom))
if new_child_slot is None and child.installed:
replacement_atom = self._replace_installed_atom(child)
if replacement_atom is not None:
abi_reinstalls.add((child.root, replacement_atom))
if abi_reinstalls:
self._dynamic_config._need_restart = True
def _slot_operator_update_probe_slot_conflict(self, dep):
	# Probe for a slot operator update in a slot-conflict context.
	# First try without autounmask; if that fails and autounmask is
	# enabled, retry the probe once per autounmask level until a
	# replacement dep is found. Returns the new dep, or None.
	result = self._slot_operator_update_probe(dep, slot_conflict=True)

	if result is None and self._dynamic_config._autounmask is True:
		# Escalate through the autounmask levels; stop at the
		# first level that yields a usable replacement.
		for level in self._autounmask_levels():
			result = self._slot_operator_update_probe(
				dep, slot_conflict=True, autounmask_level=level)
			if result is not None:
				break

	return result
def _slot_operator_update_probe(self, dep, new_child_slot=False,
slot_conflict=False, autounmask_level=None):
# Probe for a (replacement_parent, replacement_child) pair that would
# let this built slot/sub-slot := dep move to a newer slot/sub-slot;
# returns a Dependency describing the replacement, or None.
# NOTE(review): this extract has lost many physical lines -- docstring
# quote delimiters, `continue` statements, a `try:` header, and the
# tails of backslash/paren continuations (e.g. the conditions after
# `and \` below). All remaining code lines are preserved byte-for-byte;
# do not treat this text as runnable as-is.
slot/sub-slot := operators tend to prevent updates from getting pulled in,
since installed packages pull in packages with the slot/sub-slot that they
were built against. Detect this case so that we can schedule rebuilds
and reinstalls when appropriate.
NOTE: This function only searches for updates that involve upgrades
to higher versions, since the logic required to detect when a
downgrade would be desirable is not implemented.
# Excluded child/parent packages are never updated (the excluded_pkgs
# conditions were on the truncated continuation lines -- TODO confirm).
if dep.child.installed and \
return None
if dep.parent.installed and \
return None
debug = "--debug" in self._frozen_config.myopts
selective = "selective" in self._dynamic_config.myparams
# Lazily-computed downgrade decisions, shared across loop iterations.
want_downgrade = None
want_downgrade_parent = None
# Local helper: verify that candidate_pkg still satisfies every
# relevant non-slot-operator parent atom of existing_pkg.
def check_reverse_dependencies(existing_pkg, candidate_pkg,
Check if candidate_pkg satisfies all of existing_pkg's non-
slot operator parents.
built_slot_operator_parents = set()
for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
if atom.soname or atom.slot_operator_built:
for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
if isinstance(parent, Package):
if parent in built_slot_operator_parents:
# This parent may need to be rebuilt, so its
# dependencies aren't necessarily relevant.
if replacement_parent is not None and \
(replacement_parent.slot_atom == parent.slot_atom
or replacement_parent.cpv == parent.cpv):
# This parent is irrelevant because we intend to
# replace it with replacement_parent.
if any(pkg is not parent and
(pkg.slot_atom == parent.slot_atom or
pkg.cpv == parent.cpv) for pkg in
parent.root, Atom(parent.cp))):
# This parent may need to be eliminated due to a
# slot conflict, so its dependencies aren't
# necessarily relevant.
if (not self._too_deep(parent.depth) and
not self._frozen_config.excluded_pkgs.
# Check for common reasons that the parent's
# dependency might be irrelevant.
if self._upgrade_available(parent):
# This parent could be replaced by
# an upgrade (bug 584626).
if parent.installed and self._in_blocker_conflict(parent):
# This parent could be uninstalled in order
# to solve a blocker conflict (bug 612772).
if self._dynamic_config.digraph.has_edge(parent,
# There is a direct circular dependency between
# parent and existing_pkg. This type of
# relationship tends to prevent updates
# of packages (bug 612874). Since candidate_pkg
# is available, we risk a missed update if we
# don't try to eliminate this parent from the
# graph. Therefore, we give candidate_pkg a
# chance, and assume that it will be masked
# by backtracking if necessary.
atom_set = InternalPackageSet(initial_atoms=(atom,),
if not atom_set.findAtomForPackage(candidate_pkg,
return False
return True
# Iterate over available packages matching the parent's slot atom,
# looking for one whose dependencies would pull in an updated child.
for replacement_parent in self._iter_similar_available(dep.parent,
dep.parent.slot_atom, autounmask_level=autounmask_level):
if replacement_parent is dep.parent:
if replacement_parent < dep.parent:
if want_downgrade_parent is None:
want_downgrade_parent = self._downgrade_probe(
if not want_downgrade_parent:
if not check_reverse_dependencies(dep.parent, replacement_parent):
selected_atoms = None
atoms = self._flatten_atoms(replacement_parent,
except InvalidDependString:
if replacement_parent.requires is not None:
atoms = list(atoms)
# List of list of child,atom pairs for each atom.
replacement_candidates = []
# Set of all packages all atoms can agree on.
all_candidate_pkgs = None
for atom in atoms:
# The _select_atoms_probe method is expensive, so initialization
# of this variable is only performed on demand.
atom_not_selected = None
if not atom.package:
unevaluated_atom = None
if atom.match(dep.child):
# We are searching for a replacement_parent
# atom that will pull in a different child,
# so continue checking the rest of the atoms.
if atom.blocker or \
atom.cp != dep.child.cp:
# Discard USE deps, we're only searching for an
# approximate pattern, and dealing with USE states
# is too complex for this purpose.
unevaluated_atom = atom.unevaluated_atom
atom = atom.without_use
if replacement_parent.built and \
portage.dep._match_slot(atom, dep.child):
# We are searching for a replacement_parent
# atom that will pull in a different child,
# so continue checking the rest of the atoms.
candidate_pkg_atoms = []
candidate_pkgs = []
# Collect the candidate replacement children matched by this atom.
for pkg in self._iter_similar_available(
dep.child, atom):
if (dep.atom.package and
pkg.slot == dep.child.slot and
pkg.sub_slot == dep.child.sub_slot):
# If slot/sub-slot is identical, then there's
# no point in updating.
if new_child_slot:
if pkg.slot == dep.child.slot:
if pkg < dep.child:
# the new slot only matters if the
# package version is higher
if pkg.slot != dep.child.slot:
if pkg < dep.child:
if want_downgrade is None:
want_downgrade = self._downgrade_probe(dep.child)
# be careful not to trigger a rebuild when
# the only version available with a
# different slot_operator is an older version
if not want_downgrade:
if pkg.version == dep.child.version and not dep.child.built:
insignificant = False
if not slot_conflict and \
selective and \
dep.parent.installed and \
dep.child.installed and \
dep.parent >= replacement_parent and \
dep.child.cpv == pkg.cpv:
# This can happen if the child's sub-slot changed
# without a revision bump. The sub-slot change is
# considered insignificant until one of its parent
# packages needs to be rebuilt (which may trigger a
# slot conflict).
insignificant = True
if (not insignificant and
unevaluated_atom is not None):
# Evaluate USE conditionals and || deps, in order
# to see if this atom is really desirable, since
# otherwise we may trigger an undesirable rebuild
# as in bug #460304.
if selected_atoms is None:
selected_atoms = self._select_atoms_probe(
dep.child.root, replacement_parent)
atom_not_selected = unevaluated_atom not in selected_atoms
if atom_not_selected:
if not insignificant and \
check_reverse_dependencies(dep.child, pkg,
(pkg, unevaluated_atom or atom))
# When unevaluated_atom is None, it means that atom is
# an soname atom which is unconditionally selected, and
# _select_atoms_probe is not applicable.
if atom_not_selected is None and unevaluated_atom is not None:
if selected_atoms is None:
selected_atoms = self._select_atoms_probe(
dep.child.root, replacement_parent)
atom_not_selected = unevaluated_atom not in selected_atoms
if atom_not_selected:
if all_candidate_pkgs is None:
all_candidate_pkgs = set(candidate_pkgs)
if not all_candidate_pkgs:
# If the atoms that connect parent and child can't agree on
# any replacement child, we can't do anything.
# Now select one of the pkgs as replacement. This is as easy as
# selecting the highest version.
# The more complicated part is to choose an atom for the
# new Dependency object. Choose the one which ranked the selected
# parent highest.
selected = None
for candidate_pkg_atoms in replacement_candidates:
for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
if pkg not in all_candidate_pkgs:
if selected is None or \
selected[0] < pkg or \
(selected[0] is pkg and i < selected[2]):
selected = (pkg, atom, i)
if debug:
msg = []
msg.append(" existing child package: %s" % dep.child)
msg.append(" existing parent package: %s" % dep.parent)
msg.append(" new child package: %s" % selected[0])
msg.append(" new parent package: %s" % replacement_parent)
noiselevel=-1, level=logging.DEBUG)
return Dependency(parent=replacement_parent,
child=selected[0], atom=selected[1])
# No replacement found for any similar parent: give up.
if debug:
msg = []
msg.append(" existing child package: %s" % dep.child)
msg.append(" existing parent package: %s" % dep.parent)
msg.append(" new child package: %s" % None)
msg.append(" new parent package: %s" % None)
noiselevel=-1, level=logging.DEBUG)
return None
def _slot_operator_unsatisfied_probe(self, dep):
# Return True when some similar available parent package carries a
# built := atom (same cp as the failed atom) that _select_package can
# satisfy, meaning backtracking on this unsatisfied dep is worthwhile.
# NOTE(review): continuation lines after the backslashes (e.g. the
# excluded_pkgs condition, the _iter_similar_available and
# _select_package argument tails) and several `continue` statements
# are missing from this extract; code lines are preserved
# byte-for-byte.
if dep.parent.installed and \
return False
debug = "--debug" in self._frozen_config.myopts
for replacement_parent in self._iter_similar_available(dep.parent,
for atom in replacement_parent.validated_atoms:
# Only consider built := slot-operator atoms on the same cp
# as the failed atom.
if not atom.slot_operator == "=" or \
atom.blocker or \
atom.cp != dep.atom.cp:
# Discard USE deps, we're only searching for an approximate
# pattern, and dealing with USE states is too complex for
# this purpose.
atom = atom.without_use
pkg, existing_node = self._select_package(dep.root, atom,
if pkg is not None:
if debug:
msg = []
msg.append(" existing parent package: %s" % dep.parent)
msg.append(" existing parent atom: %s" % dep.atom)
msg.append(" new parent package: %s" % replacement_parent)
msg.append(" new child package: %s" % pkg)
noiselevel=-1, level=logging.DEBUG)
return True
# No replacement parent could satisfy the dep.
if debug:
msg = []
msg.append(" existing parent package: %s" % dep.parent)
msg.append(" existing parent atom: %s" % dep.atom)
msg.append(" new parent package: %s" % None)
msg.append(" new child package: %s" % None)
noiselevel=-1, level=logging.DEBUG)
return False
def _slot_operator_unsatisfied_backtrack(self, dep):
parent = dep.parent
if "--debug" in self._frozen_config.myopts:
msg = []
msg.append("backtracking due to unsatisfied "
"built slot-operator dep:")
msg.append(" parent package: %s" % parent)
msg.append(" atom: %s" % dep.atom)
noiselevel=-1, level=logging.DEBUG)
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
# mask unwanted binary packages if necessary
masks = {}
if not parent.installed:
masks.setdefault(parent, {})["slot_operator_mask_built"] = None
if masks:
config.setdefault("slot_operator_mask_built", {}).update(masks)
# trigger replacement of installed packages if necessary
reinstalls = set()
if parent.installed:
replacement_atom = self._replace_installed_atom(parent)
if replacement_atom is not None:
reinstalls.add((parent.root, replacement_atom))
if reinstalls:
self._dynamic_config._need_restart = True
def _in_blocker_conflict(self, pkg):
Check if pkg is involved in a blocker conflict. This method
only works after the _validate_blockers method has been called.
if (self._dynamic_config._blocked_pkgs is None
and not self._validate_blockers()):
raise self._unknown_internal_error()
if pkg in self._dynamic_config._blocked_pkgs:
return True
if pkg in self._dynamic_config._blocker_parents:
return True
return False
def _upgrade_available(self, pkg):
Detect cases where an upgrade of the given package is available
within the same slot.
for available_pkg in self._iter_similar_available(pkg,
if available_pkg > pkg:
return True
return False
def _downgrade_probe(self, pkg):
Detect cases where a downgrade of the given package is considered
desirable due to the current version being masked or unavailable.
available_pkg = None
for available_pkg in self._iter_similar_available(pkg,
if available_pkg >= pkg:
# There's an available package of the same or higher
# version, so downgrade seems undesirable.
return False
return available_pkg is not None
def _select_atoms_probe(self, root, pkg):
selected_atoms = []
use = self._pkg_use_enabled(pkg)
for k in pkg._dep_keys:
v = pkg._metadata.get(k)
if not v:
root, v, myuse=use, parent=pkg)[pkg])
return frozenset(x.unevaluated_atom for
x in selected_atoms)
def _flatten_atoms(self, pkg, use):
Evaluate all dependency atoms of the given package, and return
them as a frozenset. For performance, results are cached.
@param pkg: a Package instance
@type pkg: Package
@param pkg: set of enabled USE flags
@type pkg: frozenset
@rtype: frozenset
@return: set of evaluated atoms
cache_key = (pkg, use)
return self._dynamic_config._flatten_atoms_cache[cache_key]
except KeyError:
atoms = []
for dep_key in pkg._dep_keys:
dep_string = pkg._metadata[dep_key]
if not dep_string:
dep_string = portage.dep.use_reduce(
dep_string, uselist=use,
flat=True, token_class=Atom, eapi=pkg.eapi)
atoms.extend(token for token in dep_string
if isinstance(token, Atom))
atoms = frozenset(atoms)
self._dynamic_config._flatten_atoms_cache[cache_key] = atoms
return atoms
def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
# Generator: yields available packages matching `atom` that look like
# viable alternatives to graph_pkg (same cp, not runtime-masked, not
# excluded, visible).
# NOTE(review): the `continue` bodies of the filter conditions and
# several continuation lines (excluded_pkgs / _equiv_ebuild_visible /
# _pkg_visibility_check argument tails) are missing from this extract;
# code lines are preserved byte-for-byte.
Given a package that's in the graph, do a rough check to
see if a similar package is available to install. The given
graph_pkg itself may be yielded only if it's not installed.
usepkgonly = "--usepkgonly" in self._frozen_config.myopts
useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
use_ebuild_visibility = self._frozen_config.myopts.get(
'--use-ebuild-visibility', 'n') != 'n'
for pkg in self._iter_match_pkgs_any(
graph_pkg.root_config, atom):
if pkg.cp != graph_pkg.cp:
# discard old-style virtual match
if pkg.installed:
if pkg in self._dynamic_config._runtime_pkg_mask:
if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
if pkg.built:
if self._equiv_binary_installed(pkg):
# For built packages, require an equivalent visible ebuild
# unless binary-only options make that check inapplicable.
if not (not use_ebuild_visibility and
(usepkgonly or useoldpkg_atoms.findAtomForPackage(
pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
not self._equiv_ebuild_visible(pkg,
if not self._pkg_visibility_check(pkg,
yield pkg
def _replace_installed_atom(self, inst_pkg):
	"""
	Given an installed package, generate an atom suitable for
	slot_operator_replace_installed backtracking info. The replacement
	SLOT may differ from the installed SLOT, so first search by cpv.

	@param inst_pkg: an installed Package instance
	@rtype: Atom or None
	@return: slot atom of a suitable replacement, or None
	"""
	# NOTE(review): the built_pkgs.append(pkg) loop bodies were
	# truncated in this copy and have been restored; built instances
	# are collected only as a fallback, since their recorded SLOT may
	# be stale.
	built_pkgs = []

	# First pass: exact cpv match.
	for pkg in self._iter_similar_available(inst_pkg,
		Atom("=%s" % inst_pkg.cpv)):
		if not pkg.built:
			return pkg.slot_atom
		elif not pkg.installed:
			# avoid using SLOT from a built instance
			built_pkgs.append(pkg)

	# Second pass: any version in the installed slot.
	for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
		if not pkg.built:
			return pkg.slot_atom
		elif not pkg.installed:
			# avoid using SLOT from a built instance
			built_pkgs.append(pkg)

	# Fall back to the highest-version built candidate, if any.
	if built_pkgs:
		best_version = None
		for pkg in built_pkgs:
			if best_version is None or pkg > best_version:
				best_version = pkg
		return best_version.slot_atom

	return None
def _slot_operator_trigger_reinstalls(self):
# Walk all recorded slot-operator deps and schedule backtracking for
# slot changes or slot-operator updates where appropriate.
# NOTE(review): several `continue` statements and continuation lines
# (e.g. the keyword arguments of _slot_operator_update_probe and the
# backtrack calls at the bottom) are missing from this extract; code
# lines are preserved byte-for-byte.
Search for packages with slot-operator deps on older slots, and schedule
rebuilds if they can link to a newer slot that's in the graph.
rebuild_if_new_slot = self._dynamic_config.myparams.get(
"rebuild_if_new_slot", "y") == "y"
for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
for dep in slot_info:
atom = dep.atom
# Only built slot-operator or soname deps are relevant here.
if not (atom.soname or atom.slot_operator_built):
new_child_slot = self._slot_change_probe(dep)
if new_child_slot is not None:
self._slot_change_backtrack(dep, new_child_slot)
if not (dep.parent and
isinstance(dep.parent, Package) and dep.parent.built):
# If the parent is not installed, check if it needs to be
# rebuilt against an installed instance, since otherwise
# it could trigger downgrade of an installed instance as
# in bug #652938.
want_update_probe = dep.want_update or not dep.parent.installed
# Check for slot update first, since we don't want to
# trigger reinstall of the child package when a newer
# slot will be used instead.
if rebuild_if_new_slot and want_update_probe:
new_dep = self._slot_operator_update_probe(dep,
if new_dep is not None:
if want_update_probe:
if self._slot_operator_update_probe(dep):
def _reinstall_for_flags(self, pkg, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
"""Return a set of flags that trigger reinstallation, or None if there
are no such flags."""
# NOTE(review): several continuation lines are missing from this
# extract (the middle term of the binpkg_respect_use expression, the
# _get_feature_flags argument, and the second argument of each
# symmetric_difference call); code lines are preserved byte-for-byte.
# binpkg_respect_use: Behave like newuse by default. If newuse is
# False and changed_use is True, then behave like changed_use.
binpkg_respect_use = (pkg.built and
in ("y", "auto"))
newuse = "--newuse" in self._frozen_config.myopts
changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
feature_flags = _get_feature_flags(
# newuse mode: compare the full IUSE/USE state.
if newuse or (binpkg_respect_use and not changed_use):
flags = set(orig_iuse.symmetric_difference(
if flags:
return flags
# changed-use mode: compare only flags enabled within IUSE.
elif changed_use or binpkg_respect_use:
flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
if flags:
return flags
return None
def _changed_deps(self, pkg):
ebuild = None
ebuild = self._pkg(pkg.cpv, "ebuild",
pkg.root_config, myrepo=pkg.repo)
except PackageNotFound:
# Use first available instance of the same version.
for ebuild in self._iter_match_pkgs(
pkg.root_config, "ebuild", Atom("=" + pkg.cpv)):
if ebuild is None:
changed = False
if self._dynamic_config.myparams.get("bdeps") in ("y", "auto"):
depvars = Package._dep_keys
depvars = Package._runtime_keys
# Use _raw_metadata, in order to avoid interaction
# with --dynamic-deps.
built_deps = []
for k in depvars:
dep_struct = portage.dep.use_reduce(
pkg._raw_metadata[k], uselist=pkg.</