blob: cb12b0534ef390b4d3719ef14435eff3469addb1 [file] [log] [blame]
# Copyright 1999-2016 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
import collections
import errno
import io
import logging
import stat
import sys
import textwrap
import warnings
from collections import deque
from itertools import chain
import portage
from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi.DummyTree import DummyTree
from portage.dbapi.IndexedPortdb import IndexedPortdb
from portage.dbapi._similar_name_search import similar_name_search
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
check_required_use, human_readable_required_use, match_from_list, \
_repo_separator
from portage.dep._slot_operator import (ignore_built_slot_operator_deps,
strip_slots)
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
_get_eapi_attrs
from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
PackageNotFound, PortageException)
from portage.localization import _
from portage.output import colorize, create_color_func, \
darkgreen, green
# Color helper for rendering "bad" (error/blocked) output; defined between
# the imports it depends on and the imports that follow.
bad = create_color_func("BAD")
from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ConfigProtect, shlex_split, new_protect_filename
from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
from portage.util._async.TaskScheduler import TaskScheduler
from portage.util._eventloop.EventLoop import EventLoop
from portage.util._eventloop.global_event_loop import global_event_loop
from portage.versions import catpkgsplit
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
from .chk_updated_cfg_files import chk_updated_cfg_files
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import insert_category_into_atom, \
is_valid_package_atom
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from _emerge.RootConfig import RootConfig
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.UserQuery import UserQuery
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
from _emerge.resolver.DbapiProvidesIndex import DbapiProvidesIndex
from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
from _emerge.resolver.output import Display, format_unmatched_atom
# Python 2/3 compatibility aliases: on Python 3 (hexversion >= 0x3000000)
# the basestring/long/unicode builtins no longer exist, so map them to their
# Python 3 equivalents. _unicode is used throughout this module to coerce
# values to text in a version-independent way.
if sys.hexversion >= 0x3000000:
    basestring = str
    long = int
    _unicode = str
else:
    _unicode = unicode
class _scheduler_graph_config(object):
def __init__(self, trees, pkg_cache, graph, mergelist):
self.trees = trees
self.pkg_cache = pkg_cache
self.graph = graph
self.mergelist = mergelist
def _wildcard_set(atoms):
    """Parse an iterable of atom strings into an InternalPackageSet.

    Each token is parsed as a wildcard atom; tokens lacking a category
    are retried with a "*/" wildcard-category prefix.
    """
    result = InternalPackageSet(allow_wildcard=True)
    for token in atoms:
        try:
            atom = Atom(token, allow_wildcard=True, allow_repo=False)
        except portage.exception.InvalidAtom:
            # No valid category given; qualify with a wildcard category.
            atom = Atom("*/" + token, allow_wildcard=True, allow_repo=False)
        result.add(atom)
    return result
class _frozen_depgraph_config(object):
    """Depgraph configuration that stays constant across backtracking runs.

    Instances are shared between successive depgraph objects (see
    depgraph.__init__), so everything stored here must be independent of
    any single resolution attempt.
    """

    def __init__(self, settings, trees, myopts, params, spinner):
        self.settings = settings
        self.target_root = settings["EROOT"]
        self.myopts = myopts
        self.edebug = 0
        if settings.get("PORTAGE_DEBUG", "") == "1":
            self.edebug = 1
        self.spinner = spinner
        self.requested_depth = params.get("deep", 0)
        self._running_root = trees[trees._running_eroot]["root_config"]
        self.pkgsettings = {}
        self.trees = {}
        self._trees_orig = trees
        self.roots = {}
        # All Package instances
        self._pkg_cache = {}
        self._highest_license_masked = {}
        # We can't know that an soname dep is unsatisfied if there are
        # any unbuilt ebuilds in the graph, since unbuilt ebuilds have
        # no soname data. Therefore, only enable soname dependency
        # resolution if --usepkgonly is enabled, or for removal actions.
        self.soname_deps_enabled = (
            ("--usepkgonly" in myopts or "remove" in params) and
            params.get("ignore_soname_deps") != "y")
        dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
        # NOTE: renamed local (was ignore_built_slot_operator_deps) to
        # avoid shadowing the function of the same name imported from
        # portage.dep._slot_operator.
        ignore_built_slot_op_deps = myopts.get(
            "--ignore-built-slot-operator-deps", "n") == "y"
        for myroot in trees:
            self.trees[myroot] = {}
            # Create a RootConfig instance that references
            # the FakeVartree instead of the real one.
            self.roots[myroot] = RootConfig(
                trees[myroot]["vartree"].settings,
                self.trees[myroot],
                trees[myroot]["root_config"].setconfig)
            for tree in ("porttree", "bintree"):
                self.trees[myroot][tree] = trees[myroot][tree]
            self.trees[myroot]["vartree"] = \
                FakeVartree(trees[myroot]["root_config"],
                    pkg_cache=self._pkg_cache,
                    pkg_root_config=self.roots[myroot],
                    dynamic_deps=dynamic_deps,
                    ignore_built_slot_operator_deps=ignore_built_slot_op_deps,
                    soname_deps=self.soname_deps_enabled)
            self.pkgsettings[myroot] = portage.config(
                clone=self.trees[myroot]["vartree"].settings)
            if self.soname_deps_enabled and "remove" not in params:
                self.trees[myroot]["bintree"] = DummyTree(
                    DbapiProvidesIndex(trees[myroot]["bintree"].dbapi))

        self._required_set_names = set(["world"])

        # Build the wildcard atom sets from their corresponding command
        # line options with one data-driven loop, instead of repeating
        # the same join/split/parse pattern six times.
        for attr_name, opt in (
                ("excluded_pkgs", "--exclude"),
                ("reinstall_atoms", "--reinstall-atoms"),
                ("usepkg_exclude", "--usepkg-exclude"),
                ("useoldpkg_atoms", "--useoldpkg-atoms"),
                ("rebuild_exclude", "--rebuild-exclude"),
                ("rebuild_ignore", "--rebuild-ignore")):
            atoms = ' '.join(myopts.get(opt, [])).split()
            setattr(self, attr_name, _wildcard_set(atoms))

        self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
        self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
        self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets(object):
    """Tracks the package sets and atoms pulled into the graph for one root."""

    def __init__(self):
        # All sets added to the graph, keyed by set name.
        self.sets = {}
        # Atoms given directly as arguments (not via a named set) are
        # collected under a reserved pseudo-set name.
        self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
        # Union of the atoms from every set above, including argument atoms.
        self.atoms = InternalPackageSet(allow_repo=True)
        # Maps atoms back to the argument(s) that pulled them in.
        self.atom_arg_map = {}
class _rebuild_config(object):
    """Decides which packages must be rebuilt or reinstalled in response
    to updated build-time dependencies (the --rebuild-if-new-rev,
    --rebuild-if-new-ver and --rebuild-if-unbuilt options).

    Build-time dependency edges are accumulated via add(), and
    trigger_rebuilds() then walks them bottom-up to populate
    rebuild_list / reinstall_list.
    """

    def __init__(self, frozen_config, backtrack_parameters):
        # Graph of build-time dependency edges (child -> parent).
        self._graph = digraph()
        self._frozen_config = frozen_config
        # Copies are taken so that backtrack_parameters is not mutated.
        self.rebuild_list = backtrack_parameters.rebuild_list.copy()
        self.orig_rebuild_list = self.rebuild_list.copy()
        self.reinstall_list = backtrack_parameters.reinstall_list.copy()
        self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
        self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
        self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
        # True when any rebuild-trigger option is active at all.
        self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
            self.rebuild_if_unbuilt)

    def add(self, dep_pkg, dep):
        """Record the build-time edge dep_pkg -> dep's collapsed parent,
        unless rebuild triggers are off or an exclude/ignore atom matches."""
        parent = dep.collapsed_parent
        priority = dep.collapsed_priority
        rebuild_exclude = self._frozen_config.rebuild_exclude
        rebuild_ignore = self._frozen_config.rebuild_ignore
        # Only built parents with a build-time dep on another Package are
        # rebuild candidates; --rebuild-exclude filters parents and
        # --rebuild-ignore filters children.
        if (self.rebuild and isinstance(parent, Package) and
            parent.built and priority.buildtime and
            isinstance(dep_pkg, Package) and
            not rebuild_exclude.findAtomForPackage(parent) and
            not rebuild_ignore.findAtomForPackage(dep_pkg)):
            self._graph.add(dep_pkg, parent, priority)

    def _needs_rebuild(self, dep_pkg):
        """Check whether packages that depend on dep_pkg need to be rebuilt."""
        dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
        # Already-built deps, or slots already scheduled when this config
        # was created, never trigger a new rebuild here.
        if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
            return False

        if self.rebuild_if_unbuilt:
            # dep_pkg is being installed from source, so binary
            # packages for parents are invalid. Force rebuild
            return True

        trees = self._frozen_config.trees
        vardb = trees[dep_pkg.root]["vartree"].dbapi
        if self.rebuild_if_new_rev:
            # Parent packages are valid if a package with the same
            # cpv is already installed.
            return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

        # Otherwise, parent packages are valid if a package with the same
        # version (excluding revision) is already installed.
        assert self.rebuild_if_new_ver
        cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
        for inst_cpv in vardb.match(dep_pkg.slot_atom):
            inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
            if inst_cpv_norev == cpv_norev:
                return False

        return True

    def _trigger_rebuild(self, parent, build_deps):
        """Decide whether parent must be rebuilt or reinstalled given its
        build-time deps (a slot_atom -> dep_pkg mapping).

        Returns True when a rebuild was scheduled (parent added to
        rebuild_list); may instead add parent to reinstall_list and
        return whether a reinstall was scheduled.
        """
        root_slot = (parent.root, parent.slot_atom)
        if root_slot in self.rebuild_list:
            # Already scheduled for rebuild; nothing more to decide.
            return False
        trees = self._frozen_config.trees
        reinstall = False
        for slot_atom, dep_pkg in build_deps.items():
            dep_root_slot = (dep_pkg.root, slot_atom)
            if self._needs_rebuild(dep_pkg):
                self.rebuild_list.add(root_slot)
                return True
            elif ("--usepkg" in self._frozen_config.myopts and
                (dep_root_slot in self.reinstall_list or
                dep_root_slot in self.rebuild_list or
                not dep_pkg.installed)):

                # A direct rebuild dependency is being installed. We
                # should update the parent as well to the latest binary,
                # if that binary is valid.
                #
                # To validate the binary, we check whether all of the
                # rebuild dependencies are present on the same binhost.
                #
                # 1) If parent is present on the binhost, but one of its
                #    rebuild dependencies is not, then the parent should
                #    be rebuilt from source.
                # 2) Otherwise, the parent binary is assumed to be valid,
                #    because all of its rebuild dependencies are
                #    consistent.
                bintree = trees[parent.root]["bintree"]
                uri = bintree.get_pkgindex_uri(parent.cpv)
                dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
                bindb = bintree.dbapi
                if self.rebuild_if_new_ver and uri and uri != dep_uri:
                    # With --rebuild-if-new-ver, any same-version
                    # (revision ignored) binary on the parent's binhost
                    # is acceptable as the dep.
                    cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
                    for cpv in bindb.match(dep_pkg.slot_atom):
                        if cpv_norev == catpkgsplit(cpv)[:-1]:
                            dep_uri = bintree.get_pkgindex_uri(cpv)
                            if uri == dep_uri:
                                break

                if uri and uri != dep_uri:
                    # 1) Remote binary package is invalid because it was
                    #    built without dep_pkg. Force rebuild.
                    self.rebuild_list.add(root_slot)
                    return True
                elif (parent.installed and
                    root_slot not in self.reinstall_list):
                    try:
                        bin_build_time, = bindb.aux_get(parent.cpv,
                            ["BUILD_TIME"])
                    except KeyError:
                        continue
                    if bin_build_time != _unicode(parent.build_time):
                        # 2) Remote binary package is valid, and local package
                        #    is not up to date. Force reinstall.
                        reinstall = True
        if reinstall:
            self.reinstall_list.add(root_slot)
        return reinstall

    def trigger_rebuilds(self):
        """
        Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
        depends on pkgA at both build-time and run-time, pkgB needs to be
        rebuilt.
        """
        need_restart = False
        graph = self._graph
        # Maps each parent node to {slot_atom: child} of its build deps.
        build_deps = {}

        leaf_nodes = deque(graph.leaf_nodes())

        # Trigger rebuilds bottom-up (starting with the leaves) so that parents
        # will always know which children are being rebuilt.
        while graph:
            if not leaf_nodes:
                # We'll have to drop an edge. This should be quite rare.
                leaf_nodes.append(graph.order[-1])

            node = leaf_nodes.popleft()
            if node not in graph:
                # This can be triggered by circular dependencies.
                continue
            slot_atom = node.slot_atom

            # Remove our leaf node from the graph, keeping track of deps.
            parents = graph.parent_nodes(node)
            graph.remove(node)
            node_build_deps = build_deps.get(node, {})
            for parent in parents:
                if parent == node:
                    # Ignore a direct cycle.
                    continue
                parent_bdeps = build_deps.setdefault(parent, {})
                parent_bdeps[slot_atom] = node
                if not graph.child_nodes(parent):
                    leaf_nodes.append(parent)

            # Trigger rebuilds for our leaf node. Because all of our children
            # have been processed, the build_deps will be completely filled in,
            # and self.rebuild_list / self.reinstall_list will tell us whether
            # any of our children need to be rebuilt or reinstalled.
            if self._trigger_rebuild(node, node_build_deps):
                need_restart = True

        return need_restart
class _dynamic_depgraph_config(object):
    """Mutable depgraph state, rebuilt for each resolution attempt.

    In contrast to _frozen_depgraph_config, everything here is discarded
    and reconstructed when the resolver backtracks (the relevant
    backtrack_parameters are carried over explicitly below).
    """

    def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
        self.myparams = myparams.copy()
        self._vdb_loaded = False
        self._allow_backtracking = allow_backtracking
        # Maps nodes to the reasons they were selected for reinstallation.
        self._reinstall_nodes = {}
        # Contains a filtered view of preferred packages that are selected
        # from available repositories.
        self._filtered_trees = {}
        # Contains installed packages and new packages that have been added
        # to the graph.
        self._graph_trees = {}
        # Caches visible packages returned from _select_package, for use in
        # depgraph._iter_atoms_for_pkg() SLOT logic.
        self._visible_pkgs = {}
        # Contains the args created by select_files.
        self._initial_arg_list = []
        self.digraph = portage.digraph()
        # Manages sets added to the graph (per root).
        self.sets = {}
        # Contains all nodes pulled in by self.sets.
        self._set_nodes = set()
        # Contains only Blocker -> Uninstall edges
        self._blocker_uninstalls = digraph()
        # Contains only Package -> Blocker edges
        self._blocker_parents = digraph()
        # Contains only irrelevant Package -> Blocker edges
        self._irrelevant_blockers = digraph()
        # Contains only unsolvable Package -> Blocker edges
        self._unsolvable_blockers = digraph()
        # Contains all Blocker -> Blocked Package edges
        self._blocked_pkgs = digraph()
        # Contains world packages that have been protected from
        # uninstallation but may not have been added to the graph
        # if the graph is not complete yet.
        self._blocked_world_pkgs = {}
        # Contains packages whose dependencies have been traversed.
        # This is used to check if we have accounted for blockers
        # relevant to a package.
        self._traversed_pkg_deps = set()
        self._parent_atoms = {}
        self._slot_conflict_handler = None
        self._circular_dependency_handler = None
        self._serialized_tasks_cache = None
        self._scheduler_graph = None
        self._displayed_list = None
        self._pprovided_args = []
        self._missing_args = []
        self._masked_installed = set()
        self._masked_license_updates = set()
        self._unsatisfied_deps_for_display = []
        self._unsatisfied_blockers_for_display = None
        self._circular_deps_for_display = None
        self._dep_stack = []
        self._dep_disjunctive_stack = []
        self._unsatisfied_deps = []
        self._initially_unsatisfied_deps = []
        self._ignored_deps = []
        self._highest_pkg_cache = {}
        self._highest_pkg_cache_cp_map = {}
        self._flatten_atoms_cache = {}
        # Binary packages that have been rejected because their USE
        # didn't match the user's config. It maps packages to a set
        # of flags causing the rejection.
        self.ignored_binaries = {}

        # State carried over from previous backtracking attempts.
        self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
        self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
        self._needed_license_changes = backtrack_parameters.needed_license_changes
        self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
        self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
        self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
        self._prune_rebuilds = backtrack_parameters.prune_rebuilds
        self._need_restart = False
        self._need_config_reload = False
        # For conditions that always require user intervention, such as
        # unsatisfied REQUIRED_USE (currently has no autounmask support).
        self._skip_restart = False
        self._backtrack_infos = {}
        self._buildpkgonly_deps_unsatisfied = False
        self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
        self._displayed_autounmask = False
        self._success_without_autounmask = False
        self._required_use_unsatisfied = False
        self._traverse_ignored_deps = False
        self._complete_mode = False
        self._slot_operator_deps = {}
        # Maps (root, soname atom) to installed packages providing it.
        self._installed_sonames = collections.defaultdict(list)
        self._package_tracker = PackageTracker(
            soname_deps=depgraph._frozen_config.soname_deps_enabled)
        # Track missed updates caused by solved conflicts.
        self._conflict_missed_update = collections.defaultdict(dict)

        for myroot in depgraph._frozen_config.trees:
            self.sets[myroot] = _depgraph_sets()
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            # This dbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)

            # Minimal tree-like stand-in object; only its dbapi attribute
            # is consumed by dep_check().
            def graph_tree():
                pass
            graph_tree.dbapi = fakedb
            self._graph_trees[myroot] = {}
            self._filtered_trees[myroot] = {}
            # Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # that have already been made.
            self._graph_trees[myroot]["porttree"] = graph_tree
            self._graph_trees[myroot]["vartree"] = graph_tree
            self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._graph_trees[myroot]["graph"] = self.digraph
            self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
            self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

            def filtered_tree():
                pass
            filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
            self._filtered_trees[myroot]["porttree"] = filtered_tree
            self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

            # Passing in graph_tree as the vartree here could lead to better
            # atom selections in some cases by causing atoms for packages that
            # have been added to the graph to be preferred over other choices.
            # However, it can trigger atom selections that result in
            # unresolvable direct circular dependencies. For example, this
            # happens with gwydion-dylan which depends on either itself or
            # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid
            # an unresolvable direct circular dependency.
            #
            # To solve the problem described above, pass in "graph_db" so that
            # packages that have been added to the graph are distinguishable
            # from other available packages and installed packages. Also, pass
            # the parent package into self._select_atoms() calls so that
            # unresolvable direct circular dependencies can be detected and
            # avoided when possible.
            self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._filtered_trees[myroot]["graph"] = self.digraph
            self._filtered_trees[myroot]["vartree"] = \
                depgraph._frozen_config.trees[myroot]["vartree"]
            self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
            self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

            dbs = []
            # Each entry is (db, pkg_type, built, installed, db_keys).
            if "remove" in self.myparams:
                # For removal operations, use _dep_check_composite_db
                # for availability and visibility checks. This provides
                # consistency with install operations, so we don't
                # get install/uninstall cycles like in bug #332719.
                self._graph_trees[myroot]["porttree"] = filtered_tree
            else:
                if "--usepkgonly" not in depgraph._frozen_config.myopts:
                    portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
                    db_keys = list(portdb._aux_cache_keys)
                    dbs.append((portdb, "ebuild", False, False, db_keys))

                if "--usepkg" in depgraph._frozen_config.myopts:
                    bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
                    db_keys = list(bindb._aux_cache_keys)
                    dbs.append((bindb, "binary", True, False, db_keys))

            # The installed-package db is always consulted last.
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            db_keys = list(depgraph._frozen_config._trees_orig[myroot
                ]["vartree"].dbapi._aux_cache_keys)
            dbs.append((vardb, "installed", True, True, db_keys))
            self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
    """The core emerge dependency resolver: builds, validates and
    serializes the package dependency graph."""

    # Represents the depth of a node that is unreachable from explicit
    # user arguments (or their deep dependencies). Such nodes are pulled
    # in by the _complete_graph method.
    _UNREACHABLE_DEPTH = object()

    # Shared mapping reused from RootConfig (package type -> tree name).
    pkg_tree_map = RootConfig.pkg_tree_map
def __init__(self, settings, trees, myopts, myparams, spinner,
    frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
    """Initialize the resolver.

    @param frozen_config: optional _frozen_depgraph_config shared across
        backtracking runs; created on demand when None.
    @param backtrack_parameters: state carried over from a previous,
        failed resolution attempt.

    NOTE(review): the BacktrackParameter() default is a single shared
    instance (evaluated once at def time); this is safe only as long as
    BacktrackParameter instances are never mutated in place.
    """
    if frozen_config is None:
        frozen_config = _frozen_depgraph_config(settings, trees,
            myopts, myparams, spinner)
    self._frozen_config = frozen_config
    self._dynamic_config = _dynamic_depgraph_config(self, myparams,
        allow_backtracking, backtrack_parameters)
    self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)

    self._select_atoms = self._select_atoms_highest_available
    self._select_package = self._select_pkg_highest_available

    # Use a conditional expression instead of the fragile
    # "cond and a or b" idiom, which silently picks the wrong branch
    # whenever the first alternative evaluates falsy.
    self._event_loop = (global_event_loop()
        if portage._internal_caller else EventLoop(main=False))

    self._select_atoms_parent = None

    self.query = UserQuery(myopts).query
def _index_binpkgs(self):
    """Populate each root's binary-package soname provides index.

    Roots whose index is already populated are skipped, so this is
    cheap to call again when backtracking.
    """
    for root in self._frozen_config.trees:
        bindb = self._frozen_config.trees[root]["bintree"].dbapi
        if bindb._provides_index:
            # Already indexed; don't repeat this when backtracking.
            continue
        root_config = self._frozen_config.roots[root]
        orig_bindb = self._frozen_config._trees_orig[root]["bintree"].dbapi
        for cpv in orig_bindb.cpv_all():
            bindb._provides_inject(
                self._pkg(cpv, "binary", root_config))
def _load_vdb(self):
    """
    Load installed package metadata if appropriate. This used to be called
    from the constructor, but that wasn't very nice since this procedure
    is slow and it generates spinner output. So, now it's called on-demand
    by various methods when necessary.
    """

    if self._dynamic_config._vdb_loaded:
        return

    # These settings do not vary per root, so compute them once instead
    # of recomputing them on every loop iteration.
    dynamic_deps = self._dynamic_config.myparams.get(
        "dynamic_deps", "y") != "n"
    preload_installed_pkgs = \
        "--nodeps" not in self._frozen_config.myopts

    for myroot in self._frozen_config.trees:
        fake_vartree = self._frozen_config.trees[myroot]["vartree"]
        if not fake_vartree.dbapi:
            # This needs to be called for the first depgraph, but not for
            # backtracking depgraphs that share the same frozen_config.
            fake_vartree.sync()

            # FakeVartree.sync() populates virtuals, and we want
            # self.pkgsettings to have them populated too.
            self._frozen_config.pkgsettings[myroot] = \
                portage.config(clone=fake_vartree.settings)

        if preload_installed_pkgs:
            vardb = fake_vartree.dbapi

            if not dynamic_deps:
                # Static deps: register installed packages directly.
                for pkg in vardb:
                    self._dynamic_config._package_tracker.add_installed_pkg(pkg)
                    self._add_installed_sonames(pkg)
            else:
                # Dynamic deps: regenerate metadata asynchronously.
                max_jobs = self._frozen_config.myopts.get("--jobs")
                max_load = self._frozen_config.myopts.get("--load-average")
                scheduler = TaskScheduler(
                    self._dynamic_deps_preload(fake_vartree),
                    max_jobs=max_jobs,
                    max_load=max_load,
                    event_loop=fake_vartree._portdb._event_loop)
                scheduler.start()
                scheduler.wait()

    self._dynamic_config._vdb_loaded = True
def _dynamic_deps_preload(self, fake_vartree):
    """Register installed packages and yield metadata-regeneration tasks.

    Packages whose ebuild metadata cache is valid (or which have no
    matching ebuild) are preloaded into fake_vartree immediately; for
    the rest, an EbuildMetadataPhase task is yielded whose exit
    listener feeds the regenerated metadata back into fake_vartree.
    """
    portdb = fake_vartree._portdb
    for pkg in fake_vartree.dbapi:
        self._spinner_update()
        self._dynamic_config._package_tracker.add_installed_pkg(pkg)
        self._add_installed_sonames(pkg)
        ebuild_path, repo_path = \
            portdb.findname2(pkg.cpv, myrepo=pkg.repo)
        if ebuild_path is None:
            # No matching ebuild; fall back to installed metadata.
            fake_vartree.dynamic_deps_preload(pkg, None)
            continue
        metadata, ebuild_hash = portdb._pull_valid_cache(
            pkg.cpv, ebuild_path, repo_path)
        if metadata is not None:
            fake_vartree.dynamic_deps_preload(pkg, metadata)
        else:
            # Cache miss: spawn an async metadata regeneration phase.
            proc = EbuildMetadataPhase(cpv=pkg.cpv,
                ebuild_hash=ebuild_hash,
                portdb=portdb, repo_path=repo_path,
                settings=portdb.doebuild_settings)
            proc.addExitListener(
                self._dynamic_deps_proc_exit(pkg, fake_vartree))
            yield proc
class _dynamic_deps_proc_exit(object):
__slots__ = ('_pkg', '_fake_vartree')
def __init__(self, pkg, fake_vartree):
self._pkg = pkg
self._fake_vartree = fake_vartree
def __call__(self, proc):
metadata = None
if proc.returncode == os.EX_OK:
metadata = proc.metadata
self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
def _compute_abi_rebuild_info(self):
    """
    Fill self._forced_rebuilds with packages that cause rebuilds.

    The result maps root -> {causing package -> set of rebuilt parents},
    derived from slot-operator and soname dependencies of packages that
    were force-reinstalled.
    """

    debug = "--debug" in self._frozen_config.myopts
    installed_sonames = self._dynamic_config._installed_sonames
    package_tracker = self._dynamic_config._package_tracker

    # Get all atoms that might have caused a forced rebuild.
    atoms = {}
    for s in self._dynamic_config._initial_arg_list:
        if s.force_reinstall:
            root = s.root_config.root
            atoms.setdefault(root, set()).update(s.pset)

    if debug:
        writemsg_level("forced reinstall atoms:\n",
            level=logging.DEBUG, noiselevel=-1)

        for root in atoms:
            writemsg_level(" root: %s\n" % root,
                level=logging.DEBUG, noiselevel=-1)
            for atom in atoms[root]:
                writemsg_level(" atom: %s\n" % atom,
                    level=logging.DEBUG, noiselevel=-1)
        writemsg_level("\n\n",
            level=logging.DEBUG, noiselevel=-1)

    # Go through all slot operator deps and check if one of these deps
    # has a parent that is matched by one of the atoms from above.
    forced_rebuilds = {}

    for root, rebuild_atoms in atoms.items():
        for slot_atom in rebuild_atoms:
            inst_pkg, reinst_pkg = \
                self._select_pkg_from_installed(root, slot_atom)

            if inst_pkg is reinst_pkg or reinst_pkg is None:
                continue

            # Soname-based triggers: an installed package requiring a
            # soname whose original provider is gone must have been
            # rebuilt because of the replacement provider.
            if (inst_pkg is not None and
                inst_pkg.requires is not None):
                for atom in inst_pkg.requires:
                    initial_providers = installed_sonames.get(
                        (root, atom))
                    if initial_providers is None:
                        continue
                    final_provider = next(
                        package_tracker.match(root, atom),
                        None)
                    if final_provider:
                        continue
                    for provider in initial_providers:
                        # Find the replacement child.
                        child = next((pkg for pkg in
                            package_tracker.match(
                            root, provider.slot_atom)
                            if not pkg.installed), None)

                        if child is None:
                            continue

                        forced_rebuilds.setdefault(
                            root, {}).setdefault(
                            child, set()).add(inst_pkg)

            # Generate pseudo-deps for any slot-operator deps of
            # inst_pkg. Its deps aren't in _slot_operator_deps
            # because it hasn't been added to the graph, but we
            # are interested in any rebuilds that it triggered.
            built_slot_op_atoms = []
            if inst_pkg is not None:
                selected_atoms = self._select_atoms_probe(
                    inst_pkg.root, inst_pkg)
                for atom in selected_atoms:
                    if atom.slot_operator_built:
                        built_slot_op_atoms.append(atom)

                if not built_slot_op_atoms:
                    continue

            # Use a cloned list, since we may append to it below.
            deps = self._dynamic_config._slot_operator_deps.get(
                (root, slot_atom), [])[:]

            if built_slot_op_atoms and reinst_pkg is not None:
                for child in self._dynamic_config.digraph.child_nodes(
                    reinst_pkg):

                    if child.installed:
                        continue

                    for atom in built_slot_op_atoms:
                        # NOTE: Since atom comes from inst_pkg, and
                        # reinst_pkg is the replacement parent, there's
                        # no guarantee that atom will completely match
                        # child. So, simply use atom.cp and atom.slot
                        # for matching.
                        if atom.cp != child.cp:
                            continue
                        if atom.slot and atom.slot != child.slot:
                            continue
                        deps.append(Dependency(atom=atom, child=child,
                            root=child.root, parent=reinst_pkg))

            for dep in deps:
                if dep.child.installed:
                    # Find the replacement child.
                    child = next((pkg for pkg in
                        self._dynamic_config._package_tracker.match(
                        dep.root, dep.child.slot_atom)
                        if not pkg.installed), None)

                    if child is None:
                        continue

                    inst_child = dep.child

                else:
                    child = dep.child
                    inst_child = self._select_pkg_from_installed(
                        child.root, child.slot_atom)[0]

                # Make sure the child's slot/subslot has changed. If it
                # hasn't, then another child has forced this rebuild.
                if inst_child and inst_child.slot == child.slot and \
                    inst_child.sub_slot == child.sub_slot:
                    continue

                if dep.parent.installed:
                    # Find the replacement parent.
                    parent = next((pkg for pkg in
                        self._dynamic_config._package_tracker.match(
                        dep.parent.root, dep.parent.slot_atom)
                        if not pkg.installed), None)

                    if parent is None:
                        continue

                else:
                    parent = dep.parent

                # The child has forced a rebuild of the parent
                forced_rebuilds.setdefault(root, {}
                    ).setdefault(child, set()).add(parent)

    if debug:
        writemsg_level("slot operator dependencies:\n",
            level=logging.DEBUG, noiselevel=-1)

        for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
            writemsg_level(" (%s, %s)\n" % \
                (root, slot_atom), level=logging.DEBUG, noiselevel=-1)
            for dep in deps:
                writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
                writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)

        writemsg_level("\n\n",
            level=logging.DEBUG, noiselevel=-1)

        writemsg_level("forced rebuilds:\n",
            level=logging.DEBUG, noiselevel=-1)

        for root in forced_rebuilds:
            writemsg_level(" root: %s\n" % root,
                level=logging.DEBUG, noiselevel=-1)
            for child in forced_rebuilds[root]:
                writemsg_level(" child: %s\n" % child,
                    level=logging.DEBUG, noiselevel=-1)
                for parent in forced_rebuilds[root][child]:
                    writemsg_level(" parent: %s\n" % parent,
                        level=logging.DEBUG, noiselevel=-1)
        writemsg_level("\n\n",
            level=logging.DEBUG, noiselevel=-1)

    self._forced_rebuilds = forced_rebuilds
def _show_abi_rebuild_info(self):
    """Print the forced-rebuild summary computed by
    _compute_abi_rebuild_info(); silent when there is nothing to show."""
    if not self._forced_rebuilds:
        return

    writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)

    for root, child_map in self._forced_rebuilds.items():
        for child, parents in child_map.items():
            writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
            for parent in parents:
                writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
def _show_ignored_binaries(self):
    """
    Show binaries that have been ignored because their USE didn't
    match the user's config.
    """
    if not self._dynamic_config.ignored_binaries \
        or '--quiet' in self._frozen_config.myopts:
        return

    # Re-grouped view: reason -> {pkg: info}.
    ignored_binaries = {}

    # Iterate over a list copy because entries are popped from
    # self._dynamic_config.ignored_binaries inside the loop.
    for pkg in list(self._dynamic_config.ignored_binaries):

        for selected_pkg in self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom):

            if selected_pkg > pkg:
                self._dynamic_config.ignored_binaries.pop(pkg)
                break

            if selected_pkg.installed and \
                selected_pkg.cpv == pkg.cpv and \
                selected_pkg.build_time == pkg.build_time:
                # We don't care about ignored binaries when an
                # identical installed instance is selected to
                # fill the slot.
                self._dynamic_config.ignored_binaries.pop(pkg)
                break

        else:
            # for-else: no selected package superseded this binary,
            # so keep it in the report, grouped by rejection reason.
            for reason, info in self._dynamic_config.\
                ignored_binaries[pkg].items():
                ignored_binaries.setdefault(reason, {})[pkg] = info

    # Suppress categories the user has explicitly decided on via
    # the corresponding binpkg_* parameter.
    if self._dynamic_config.myparams.get(
        "binpkg_respect_use") in ("y", "n"):
        ignored_binaries.pop("respect_use", None)

    if self._dynamic_config.myparams.get(
        "binpkg_changed_deps") in ("y", "n"):
        ignored_binaries.pop("changed_deps", None)

    if not ignored_binaries:
        return

    self._show_merge_list()

    if ignored_binaries.get("respect_use"):
        self._show_ignored_binaries_respect_use(
            ignored_binaries["respect_use"])

    if ignored_binaries.get("changed_deps"):
        self._show_ignored_binaries_changed_deps(
            ignored_binaries["changed_deps"])
def _show_ignored_binaries_respect_use(self, respect_use):
    """Warn about binary packages skipped because their USE settings do
    not match the user's configuration (maps pkg -> mismatched flags)."""
    writemsg("\n!!! The following binary packages have been ignored " + \
        "due to non matching USE:\n\n", noiselevel=-1)

    for pkg, flags in respect_use.items():
        # Prefix flags that are disabled in the binary with "-".
        flag_display = " ".join(
            flag if flag in pkg.use.enabled else "-" + flag
            for flag in sorted(flags))
        # The user can paste this line into package.use
        writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
        if pkg.root_config.settings["ROOT"] != "/":
            writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n", noiselevel=-1)

    note_lines = (
        "",
        "NOTE: The --binpkg-respect-use=n option will prevent emerge",
        " from ignoring these binary packages if possible.",
        " Using --binpkg-respect-use=y will silence this warning.",
    )
    for note in note_lines:
        # Blank separator lines are not colorized.
        writemsg((colorize("INFORM", note) if note else note) + "\n",
            noiselevel=-1)
def _show_ignored_binaries_changed_deps(self, changed_deps):
	"""
	Report binary packages that were ignored because their
	dependencies changed relative to the corresponding ebuild.
	"""
	writemsg("\n!!! The following binary packages have been "
		"ignored due to changed dependencies:\n\n",
		noiselevel=-1)
	for pkg in changed_deps:
		line = " %s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
		if pkg.root_config.settings["ROOT"] != "/":
			line += " for %s" % pkg.root
		writemsg("%s\n" % line, noiselevel=-1)
	note_lines = [
		"",
		"NOTE: The --binpkg-changed-deps=n option will prevent emerge",
		" from ignoring these binary packages if possible.",
		" Using --binpkg-changed-deps=y will silence this warning."
	]
	for note in note_lines:
		# Highlight non-empty lines; blank separators stay plain.
		writemsg((colorize("INFORM", note) if note else note) + "\n",
			noiselevel=-1)
def _get_missed_updates(self):
# In order to minimize noise, show only the highest
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in \
chain(self._dynamic_config._runtime_pkg_mask.items(),
self._dynamic_config._conflict_missed_update.items()):
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
continue
missed_update = True
any_selected = False
for chosen_pkg in self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom):
any_selected = True
if chosen_pkg > pkg or (not chosen_pkg.installed and \
chosen_pkg.version == pkg.version):
missed_update = False
break
if any_selected and missed_update:
k = (pkg.root, pkg.slot_atom)
if k in missed_updates:
other_pkg, mask_type, parent_atoms = missed_updates[k]
if other_pkg > pkg:
continue
for mask_type, parent_atoms in mask_reasons.items():
if not parent_atoms:
continue
missed_updates[k] = (pkg, mask_type, parent_atoms)
break
return missed_updates
def _show_missed_update(self):
missed_updates = self._get_missed_updates()
if not missed_updates:
return
missed_update_types = {}
for pkg, mask_type, parent_atoms in missed_updates.values():
missed_update_types.setdefault(mask_type,
[]).append((pkg, parent_atoms))
if '--quiet' in self._frozen_config.myopts and \
'--debug' not in self._frozen_config.myopts:
missed_update_types.pop("slot conflict", None)
missed_update_types.pop("missing dependency", None)
self._show_missed_update_slot_conflicts(
missed_update_types.get("slot conflict"))
self._show_missed_update_unsatisfied_dep(
missed_update_types.get("missing dependency"))
def _show_missed_update_unsatisfied_dep(self, missed_updates):
	"""
	Report updates skipped because a dependency could not be
	satisfied. Updates masked by backtracking are summarized in
	abbreviated form at the end.
	"""
	if not missed_updates:
		return

	def _write_slot_line(pkg):
		# One line identifying the skipped slot (plus ROOT when
		# it is not "/").
		writemsg(str(pkg.slot_atom), noiselevel=-1)
		if pkg.root_config.settings["ROOT"] != "/":
			writemsg(" for %s" % (pkg.root,), noiselevel=-1)
		writemsg("\n", noiselevel=-1)

	self._show_merge_list()
	masked_by_backtrack = []
	for pkg, parent_atoms in missed_updates:
		try:
			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent,
					check_backtrack=True)
		except self._backtrack_mask:
			# This is displayed below in abbreviated form.
			masked_by_backtrack.append((pkg, parent_atoms))
			continue
		writemsg("\n!!! The following update has been skipped " + \
			"due to unsatisfied dependencies:\n\n", noiselevel=-1)
		_write_slot_line(pkg)
		for parent, root, atom in parent_atoms:
			self._show_unsatisfied_dep(root, atom, myparent=parent)
			writemsg("\n", noiselevel=-1)
	if masked_by_backtrack:
		# These are shown in abbreviated form, in order to avoid terminal
		# flooding from mask messages as reported in bug #285832.
		writemsg("\n!!! The following update(s) have been skipped " + \
			"due to unsatisfied dependencies\n" + \
			"!!! triggered by backtracking:\n\n", noiselevel=-1)
		for pkg, parent_atoms in masked_by_backtrack:
			_write_slot_line(pkg)
def _show_missed_update_slot_conflicts(self, missed_updates):
	"""
	Warn about updates/rebuilds that were skipped because of a
	dependency (slot) conflict, listing the conflicting parent
	atoms for each skipped package.
	"""
	if not missed_updates:
		return
	self._show_merge_list()
	indent = " "
	out = []
	out.append("\nWARNING: One or more updates/rebuilds have been " + \
		"skipped due to a dependency conflict:\n\n")
	for pkg, parent_atoms in missed_updates:
		out.append(str(pkg.slot_atom))
		if pkg.root_config.settings["ROOT"] != "/":
			out.append(" for %s" % (pkg.root,))
		out.append("\n\n")
		out.append(indent + str(pkg))
		out.append(" conflicts with\n")
		for parent, atom in parent_atoms:
			if isinstance(parent, (PackageArg, AtomArg)):
				# For PackageArg and AtomArg types, it's
				# redundant to display the atom attribute.
				out.append(2 * indent + str(parent) + "\n")
			else:
				# Display the specific atom from SetArg or
				# Package types.
				atom, marker = format_unmatched_atom(
					pkg, atom, self._pkg_use_enabled)
				out.append(2 * indent + "%s required by %s\n" % (atom, parent))
				out.append(2 * indent + marker + "\n")
		out.append("\n")
	writemsg("".join(out), noiselevel=-1)
def _show_slot_collision_notice(self):
	"""Show an informational message advising the user to mask one of
	the packages. In some cases it may be possible to resolve this
	automatically, but support for backtracking (removal nodes that have
	already been selected) will be required in order to handle all
	possible cases.
	"""
	if not any(self._dynamic_config._package_tracker.slot_conflicts()):
		return
	self._show_merge_list()
	self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
	handler = self._dynamic_config._slot_conflict_handler
	writemsg(handler.get_conflict(), noiselevel=-1)
	explanation = handler.get_explanation()
	if explanation:
		# The handler produced a concrete explanation; show it and
		# skip the generic advice below.
		writemsg(explanation, noiselevel=-1)
		return
	if "--quiet" in self._frozen_config.myopts:
		return
	advice = (
		"It may be possible to solve this problem "
		"by using package.mask to prevent one of "
		"those packages from being selected. "
		"However, it is also possible that conflicting "
		"dependencies exist such that they are impossible to "
		"satisfy simultaneously. If such a conflict exists in "
		"the dependencies of two different packages, then those "
		"packages can not be installed simultaneously.")
	backtrack_opt = self._frozen_config.myopts.get('--backtrack')
	if not self._dynamic_config._allow_backtracking and \
		(backtrack_opt is None or \
		(backtrack_opt > 0 and backtrack_opt < 30)):
		# Backtracking is disabled or limited; suggest raising it.
		advice += (
			" You may want to try a larger value of the "
			"--backtrack option, such as --backtrack=30, "
			"in order to see if that will solve this conflict "
			"automatically.")
	for line in textwrap.wrap(advice, 70):
		writemsg(line + '\n', noiselevel=-1)
	writemsg('\n', noiselevel=-1)
	pointer = (
		"For more information, see MASKED PACKAGES "
		"section in the emerge man page or refer "
		"to the Gentoo Handbook.")
	for line in textwrap.wrap(pointer, 70):
		writemsg(line + '\n', noiselevel=-1)
	writemsg('\n', noiselevel=-1)
def _solve_non_slot_operator_slot_conflicts(self):
	"""
	This function solves slot conflicts which can
	be solved by simply choosing one of the conflicting
	packages and removing all the other ones.
	It is able to solve somewhat more complex cases where
	conflicts can only be solved simultaneously.
	"""
	debug = "--debug" in self._frozen_config.myopts

	# List all conflicts. Ignore those that involve slot operator rebuilds
	# as the logic there needs special slot conflict behavior which isn't
	# provided by this function.
	conflicts = []
	for conflict in self._dynamic_config._package_tracker.slot_conflicts():
		slot_key = conflict.root, conflict.atom
		if slot_key not in self._dynamic_config._slot_operator_replace_installed:
			conflicts.append(conflict)

	if not conflicts:
		return

	if debug:
		writemsg_level(
			"\n!!! Slot conflict handler started.\n",
			level=logging.DEBUG, noiselevel=-1)

	# Get a set of all conflicting packages.
	conflict_pkgs = set()
	for conflict in conflicts:
		conflict_pkgs.update(conflict)

	# Get the list of other packages which are only
	# required by conflict packages.
	indirect_conflict_candidates = set()
	for pkg in conflict_pkgs:
		indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg))
	indirect_conflict_candidates.difference_update(conflict_pkgs)

	indirect_conflict_pkgs = set()
	while indirect_conflict_candidates:
		pkg = indirect_conflict_candidates.pop()

		only_conflict_parents = True
		for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
			if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
				only_conflict_parents = False
				break
		if not only_conflict_parents:
			continue

		indirect_conflict_pkgs.add(pkg)
		for child in self._dynamic_config.digraph.child_nodes(pkg):
			if child in conflict_pkgs or child in indirect_conflict_pkgs:
				continue
			indirect_conflict_candidates.add(child)

	# Create a graph containing the conflict packages
	# and a special 'non_conflict_node' that represents
	# all non-conflict packages.
	conflict_graph = digraph()
	non_conflict_node = "(non-conflict package)"
	conflict_graph.add(non_conflict_node, None)

	for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
		conflict_graph.add(pkg, None)

	# Add parent->child edges for each conflict package.
	# Parents, which aren't conflict packages are represented
	# by 'non_conflict_node'.
	# If several conflicting packages are matched, but not all,
	# add a tuple with the matched packages to the graph.
	class or_tuple(tuple):
		"""
		Helper class for debug printing.
		"""
		def __str__(self):
			return "(%s)" % ",".join(str(pkg) for pkg in self)

	non_matching_forced = set()
	for conflict in conflicts:
		if debug:
			writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1)
			writemsg_level(" root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1)
			writemsg_level(" atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1)
			for pkg in conflict:
				writemsg_level(" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1)

		all_parent_atoms = set()
		highest_pkg = None
		inst_pkg = None
		for pkg in conflict:
			if pkg.installed:
				inst_pkg = pkg
			if highest_pkg is None or highest_pkg < pkg:
				highest_pkg = pkg
			all_parent_atoms.update(
				self._dynamic_config._parent_atoms.get(pkg, []))

		for parent, atom in all_parent_atoms:
			is_arg_parent = isinstance(parent, AtomArg)
			is_non_conflict_parent = parent not in conflict_pkgs and \
				parent not in indirect_conflict_pkgs

			if debug:
				writemsg_level(" parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1)
				writemsg_level(" arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent),
					level=logging.DEBUG, noiselevel=-1)
				writemsg_level(" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1)

			if is_non_conflict_parent:
				parent = non_conflict_node

			matched = []
			for pkg in conflict:
				if (pkg is highest_pkg and
					not highest_pkg.installed and
					inst_pkg is not None and
					inst_pkg.sub_slot != highest_pkg.sub_slot and
					not self._downgrade_probe(highest_pkg)):
					# If an upgrade is desired, force the highest
					# version into the graph (bug #531656).
					non_matching_forced.add(highest_pkg)
				if atom.match(pkg.with_use(
					self._pkg_use_enabled(pkg))) and \
					not (is_arg_parent and pkg.installed):
					matched.append(pkg)

			if debug:
				for match in matched:
					writemsg_level(" match: %s\n" % match, level=logging.DEBUG, noiselevel=-1)

			if len(matched) > 1:
				# Even if all packages match, this parent must still
				# be added to the conflict_graph. Otherwise, we risk
				# removing all of these packages from the depgraph,
				# which could cause a missed update (bug #522084).
				conflict_graph.add(or_tuple(matched), parent)
			elif len(matched) == 1:
				conflict_graph.add(matched[0], parent)
			else:
				# This typically means that autounmask broke a
				# USE-dep, but it could also be due to the slot
				# not matching due to multislot (bug #220341).
				# Either way, don't try to solve this conflict.
				# Instead, force them all into the graph so that
				# they are protected from removal.
				non_matching_forced.update(conflict)
				if debug:
					for pkg in conflict:
						writemsg_level(" non-match: %s\n" % pkg,
							level=logging.DEBUG, noiselevel=-1)

	for pkg in indirect_conflict_pkgs:
		for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
			if parent not in conflict_pkgs and \
				parent not in indirect_conflict_pkgs:
				parent = non_conflict_node
			conflict_graph.add(pkg, parent)

	if debug:
		writemsg_level(
			"\n!!! Slot conflict graph:\n",
			level=logging.DEBUG, noiselevel=-1)
		conflict_graph.debug_print()

	# Now select required packages. Collect them in the
	# 'forced' set.
	forced = set([non_conflict_node])
	forced.update(non_matching_forced)
	unexplored = set([non_conflict_node])
	# or_tuples get special handling. We first explore
	# all packages in the hope of having forced one of
	# the packages in the tuple. This way we don't have
	# to choose one.
	unexplored_tuples = set()
	explored_nodes = set()

	while unexplored:
		# Handle all unexplored packages.
		while unexplored:
			node = unexplored.pop()
			for child in conflict_graph.child_nodes(node):
				# Don't explore a node more than once, in order
				# to avoid infinite recursion. The forced set
				# cannot be used for this purpose, since it can
				# contain unexplored nodes from non_matching_forced.
				if child in explored_nodes:
					continue
				explored_nodes.add(child)
				forced.add(child)
				if isinstance(child, Package):
					unexplored.add(child)
				else:
					unexplored_tuples.add(child)

		# Now handle unexplored or_tuples. Move on with packages
		# once we had to choose one.
		while unexplored_tuples:
			nodes = unexplored_tuples.pop()
			if any(node in forced for node in nodes):
				# At least one of the packages in the
				# tuple is already forced, which means the
				# dependency represented by this tuple
				# is satisfied.
				continue

			# We now have to choose one of packages in the tuple.
			# In theory one could solve more conflicts if we'd be
			# able to try different choices here, but that has lots
			# of other problems. For now choose the package that was
			# pulled first, as this should be the most desirable choice
			# (otherwise it wouldn't have been the first one).
			forced.add(nodes[0])
			unexplored.add(nodes[0])
			break

	# Remove 'non_conflict_node' and or_tuples from 'forced'.
	forced = set(pkg for pkg in forced if isinstance(pkg, Package))
	non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)

	if debug:
		writemsg_level(
			"\n!!! Slot conflict solution:\n",
			level=logging.DEBUG, noiselevel=-1)
		for conflict in conflicts:
			writemsg_level(
				" Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
				level=logging.DEBUG, noiselevel=-1)
			for pkg in conflict:
				if pkg in forced:
					writemsg_level(
						" keep: %s\n" % pkg,
						level=logging.DEBUG, noiselevel=-1)
				else:
					writemsg_level(
						" remove: %s\n" % pkg,
						level=logging.DEBUG, noiselevel=-1)

	broken_packages = set()
	for pkg in non_forced:
		for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
			if isinstance(parent, Package) and parent not in non_forced:
				# Non-forcing set args are expected to be a parent of all
				# packages in the conflict.
				broken_packages.add(parent)
		self._remove_pkg(pkg)

	# Process the dependencies of chosen conflict packages
	# again to properly account for blockers.
	broken_packages.update(forced)

	# Filter out broken packages which have been removed during
	# recursive removal in self._remove_pkg. (A redundant
	# "if pkg in broken_packages" clause, which was always true and
	# cost an O(n) scan per element, has been dropped here.)
	broken_packages = [pkg for pkg in broken_packages
		if self._dynamic_config._package_tracker.contains(pkg, installed=False)]

	self._dynamic_config._dep_stack.extend(broken_packages)

	if broken_packages:
		# Process dependencies. This cannot fail because we just ensured that
		# the remaining packages satisfy all dependencies.
		self._create_graph()

	# Record missed updates.
	for conflict in conflicts:
		if not any(pkg in non_forced for pkg in conflict):
			continue
		for pkg in conflict:
			if pkg not in non_forced:
				continue
			for other in conflict:
				if other is pkg:
					continue

				for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
					atom_set = InternalPackageSet(
						initial_atoms=(atom,), allow_repo=True)
					if not atom_set.findAtomForPackage(pkg,
						modified_use=self._pkg_use_enabled(pkg)):
						self._dynamic_config._conflict_missed_update[pkg].setdefault(
							"slot conflict", set())
						self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
							(parent, atom))
def _process_slot_conflicts(self):
"""
If there are any slot conflicts and backtracking is enabled,
_complete_graph should complete the graph before this method
is called, so that all relevant reverse dependencies are
available for use in backtracking decisions.
"""
self._solve_non_slot_operator_slot_conflicts()
for conflict in self._dynamic_config._package_tracker.slot_conflicts():
self._process_slot_conflict(conflict)
def _process_slot_conflict(self, conflict):
	"""
	Process slot conflict data to identify specific atoms which
	lead to conflict. These atoms only match a subset of the
	packages that have been pulled into a given slot.

	@param conflict: a slot conflict record exposing root, atom and
		pkgs attributes
	"""
	root = conflict.root
	slot_atom = conflict.atom
	slot_nodes = conflict.pkgs

	debug = "--debug" in self._frozen_config.myopts

	# Union of parent atoms over all packages pulled into this slot.
	slot_parent_atoms = set()
	for pkg in slot_nodes:
		parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
		if not parent_atoms:
			continue
		slot_parent_atoms.update(parent_atoms)

	conflict_pkgs = []
	conflict_atoms = {}
	for pkg in slot_nodes:

		if self._dynamic_config._allow_backtracking and \
			pkg in self._dynamic_config._runtime_pkg_mask:
			# Encountering an already runtime-masked package here
			# indicates the backtracker is looping; only reported
			# in debug mode.
			if debug:
				writemsg_level(
					"!!! backtracking loop detected: %s %s\n" % \
					(pkg,
					self._dynamic_config._runtime_pkg_mask[pkg]),
					level=logging.DEBUG, noiselevel=-1)

		parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
		if parent_atoms is None:
			# Register an (initially empty) parent-atom set for pkg
			# so matches found below are recorded globally.
			parent_atoms = set()
			self._dynamic_config._parent_atoms[pkg] = parent_atoms

		# Determine which of the slot's parent atoms match this
		# package; atoms that don't match are the conflict atoms.
		all_match = True
		for parent_atom in slot_parent_atoms:
			if parent_atom in parent_atoms:
				continue
			parent, atom = parent_atom
			if atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
				parent_atoms.add(parent_atom)
			else:
				all_match = False
				conflict_atoms.setdefault(parent_atom, set()).add(pkg)

		if not all_match:
			conflict_pkgs.append(pkg)

	if conflict_pkgs and \
		self._dynamic_config._allow_backtracking and \
		not self._accept_blocker_conflicts():
		remaining = []
		for pkg in conflict_pkgs:
			if self._slot_conflict_backtrack_abi(pkg,
				slot_nodes, conflict_atoms):
				# A parent rebuild was scheduled via backtracking.
				backtrack_infos = self._dynamic_config._backtrack_infos
				config = backtrack_infos.setdefault("config", {})
				config.setdefault("slot_conflict_abi", set()).add(pkg)
			else:
				remaining.append(pkg)
		if remaining:
			# Fall back to plain slot-conflict backtracking for the
			# packages that couldn't be handled by an ABI rebuild.
			self._slot_confict_backtrack(root, slot_atom,
				slot_parent_atoms, remaining)
def _slot_confict_backtrack(self, root, slot_atom,
all_parents, conflict_pkgs):
debug = "--debug" in self._frozen_config.myopts
existing_node = next(self._dynamic_config._package_tracker.match(
root, slot_atom, installed=False))
# In order to avoid a missed update, first mask lower versions
# that conflict with higher versions (the backtracker visits
# these in reverse order).
conflict_pkgs.sort(reverse=True)
backtrack_data = []
for to_be_masked in conflict_pkgs:
# For missed update messages, find out which
# atoms matched to_be_selected that did not
# match to_be_masked.
parent_atoms = \
self._dynamic_config._parent_atoms.get(to_be_masked, set())
conflict_atoms = set(parent_atom for parent_atom in all_parents \
if parent_atom not in parent_atoms)
backtrack_data.append((to_be_masked, conflict_atoms))
to_be_masked = backtrack_data[-1][0]
self._dynamic_config._backtrack_infos.setdefault(
"slot conflict", []).append(backtrack_data)
self._dynamic_config._need_restart = True
if debug:
msg = []
msg.append("")
msg.append("")
msg.append("backtracking due to slot conflict:")
msg.append(" first package: %s" % existing_node)
msg.append(" package to mask: %s" % to_be_masked)
msg.append(" slot: %s" % slot_atom)
msg.append(" parents: %s" % ", ".join( \
"(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
msg.append("")
writemsg_level("".join("%s\n" % l for l in msg),
noiselevel=-1, level=logging.DEBUG)
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
"""
If one or more conflict atoms have a slot/sub-slot dep that can be resolved
by rebuilding the parent package, then schedule the rebuild via
backtracking, and return True. Otherwise, return False.
"""
found_update = False
for parent_atom, conflict_pkgs in conflict_atoms.items():
parent, atom = parent_atom
if not isinstance(parent, Package):
continue
if not parent.built:
continue
if not atom.soname and not (
atom.package and atom.slot_operator_built):
continue
if pkg not in conflict_pkgs:
continue
for other_pkg in slot_nodes:
if other_pkg in conflict_pkgs:
continue
dep = Dependency(atom=atom, child=other_pkg,
parent=parent, root=pkg.root)
new_dep = \
self._slot_operator_update_probe_slot_conflict(dep)
if new_dep is not None:
self._slot_operator_update_backtrack(dep,
new_dep=new_dep)
found_update = True
return found_update
def _slot_change_probe(self, dep):
	"""
	Detect whether dep.child (a built package with an unbuilt
	Package parent) should be rebuilt because the matching visible
	ebuild of the same version now has a different slot/sub-slot —
	i.e. a sub-slot change without a revbump, as in bug #456208.

	@param dep: a Dependency whose built child may need a rebuild
	@rtype: Package or None
	@return: the replacement ebuild package when the slot/sub-slot
		differs from dep.child's, otherwise None
	"""
	if not (isinstance(dep.parent, Package) and \
		not dep.parent.built and dep.child.built):
		return None
	root_config = self._frozen_config.roots[dep.root]
	matches = []
	try:
		# Prefer the ebuild from the child's own repo, if present.
		matches.append(self._pkg(dep.child.cpv, "ebuild",
			root_config, myrepo=dep.child.repo))
	except PackageNotFound:
		pass
	# Consider the same-repo match first, then any ebuild of the
	# same version from other repos.
	for unbuilt_child in chain(matches,
		self._iter_match_pkgs(root_config, "ebuild",
		Atom("=%s" % (dep.child.cpv,)))):
		if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
			continue
		if self._frozen_config.excluded_pkgs.findAtomForPackage(
			unbuilt_child,
			modified_use=self._pkg_use_enabled(unbuilt_child)):
			continue
		if not self._pkg_visibility_check(unbuilt_child):
			continue
		break
	else:
		# No acceptable replacement ebuild found.
		return None
	if unbuilt_child.slot == dep.child.slot and \
		unbuilt_child.sub_slot == dep.child.sub_slot:
		# Identical slot/sub-slot: no rebuild is needed.
		return None
	return unbuilt_child
def _slot_change_backtrack(self, dep, new_child_slot):
child = dep.child
if "--debug" in self._frozen_config.myopts:
msg = []
msg.append("")
msg.append("")
msg.append("backtracking due to slot/sub-slot change:")
msg.append(" child package: %s" % child)
msg.append(" child slot: %s/%s" %
(child.slot, child.sub_slot))
msg.append(" new child: %s" % new_child_slot)
msg.append(" new child slot: %s/%s" %
(new_child_slot.slot, new_child_slot.sub_slot))
msg.append(" parent package: %s" % dep.parent)
msg.append(" atom: %s" % dep.atom)
msg.append("")
writemsg_level("\n".join(msg),
noiselevel=-1, level=logging.DEBUG)
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
# mask unwanted binary packages if necessary
masks = {}
if not child.installed:
masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
if masks:
config.setdefault("slot_operator_mask_built", {}).update(masks)
# trigger replacement of installed packages if necessary
reinstalls = set()
if child.installed:
replacement_atom = self._replace_installed_atom(child)
if replacement_atom is not None:
reinstalls.add((child.root, replacement_atom))
if reinstalls:
config.setdefault("slot_operator_replace_installed",
set()).update(reinstalls)
self._dynamic_config._need_restart = True
def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
new_dep=None):
if new_child_slot is None:
child = dep.child
else:
child = new_child_slot
if "--debug" in self._frozen_config.myopts:
msg = []
msg.append("")
msg.append("")
msg.append("backtracking due to missed slot abi update:")
msg.append(" child package: %s" % child)
if new_child_slot is not None:
msg.append(" new child slot package: %s" % new_child_slot)
msg.append(" parent package: %s" % dep.parent)
if new_dep is not None:
msg.append(" new parent pkg: %s" % new_dep.parent)
msg.append(" atom: %s" % dep.atom)
msg.append("")
writemsg_level("\n".join(msg),
noiselevel=-1, level=logging.DEBUG)
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
# mask unwanted binary packages if necessary
abi_masks = {}
if new_child_slot is None:
if not child.installed:
abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
if not dep.parent.installed:
abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
if abi_masks:
config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
# trigger replacement of installed packages if necessary
abi_reinstalls = set()
if dep.parent.installed:
if new_dep is not None:
replacement_atom = new_dep.parent.slot_atom
else:
replacement_atom = self._replace_installed_atom(dep.parent)
if replacement_atom is not None:
abi_reinstalls.add((dep.parent.root, replacement_atom))
if new_child_slot is None and child.installed:
replacement_atom = self._replace_installed_atom(child)
if replacement_atom is not None:
abi_reinstalls.add((child.root, replacement_atom))
if abi_reinstalls:
config.setdefault("slot_operator_replace_installed",
set()).update(abi_reinstalls)
self._dynamic_config._need_restart = True
def _slot_operator_update_probe_slot_conflict(self, dep):
new_dep = self._slot_operator_update_probe(dep, slot_conflict=True)
if new_dep is not None:
return new_dep
if self._dynamic_config._autounmask is True:
for autounmask_level in self._autounmask_levels():
new_dep = self._slot_operator_update_probe(dep,
slot_conflict=True, autounmask_level=autounmask_level)
if new_dep is not None:
return new_dep
return None
def _slot_operator_update_probe(self, dep, new_child_slot=False,
	slot_conflict=False, autounmask_level=None):
	"""
	slot/sub-slot := operators tend to prevent updates from getting pulled in,
	since installed packages pull in packages with the slot/sub-slot that they
	were built against. Detect this case so that we can schedule rebuilds
	and reinstalls when appropriate.
	NOTE: This function only searches for updates that involve upgrades
		to higher versions, since the logic required to detect when a
		downgrade would be desirable is not implemented.

	@param dep: the built slot-operator dependency to probe
	@param new_child_slot: if True, search for a child in a new slot
	@param slot_conflict: True when probing in a slot-conflict context
	@param autounmask_level: optional autounmask level for the parent search
	@rtype: Dependency or None
	@return: a replacement Dependency (new parent/child/atom), or None
	"""
	if dep.child.installed and \
		self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
		modified_use=self._pkg_use_enabled(dep.child)):
		return None

	if dep.parent.installed and \
		self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
		modified_use=self._pkg_use_enabled(dep.parent)):
		return None

	debug = "--debug" in self._frozen_config.myopts
	selective = "selective" in self._dynamic_config.myparams
	# Lazily computed (via _downgrade_probe) and cached for this call.
	want_downgrade = None
	want_downgrade_parent = None

	def check_reverse_dependencies(existing_pkg, candidate_pkg,
		replacement_parent=None):
		"""
		Check if candidate_pkg satisfies all of existing_pkg's non-
		slot operator parents.
		"""
		built_slot_operator_parents = set()
		for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
			if atom.soname or atom.slot_operator_built:
				built_slot_operator_parents.add(parent)

		for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
			if isinstance(parent, Package):
				if parent in built_slot_operator_parents:
					# This parent may need to be rebuilt, so its
					# dependencies aren't necessarily relevant.
					continue

				if replacement_parent is not None and \
					(replacement_parent.slot_atom == parent.slot_atom
					or replacement_parent.cpv == parent.cpv):
					# This parent is irrelevant because we intend to
					# replace it with replacement_parent.
					continue

				if any(pkg is not parent and
					(pkg.slot_atom == parent.slot_atom or
					pkg.cpv == parent.cpv) for pkg in
					self._dynamic_config._package_tracker.match(
					parent.root, Atom(parent.cp))):
					# This parent may need to be eliminated due to a
					# slot conflict, so its dependencies aren't
					# necessarily relevant.
					continue

				if (not self._too_deep(parent.depth) and
					not self._frozen_config.excluded_pkgs.
					findAtomForPackage(parent,
					modified_use=self._pkg_use_enabled(parent)) and
					self._upgrade_available(parent)):
					# This parent may be irrelevant, since an
					# update is available (see bug 584626).
					continue

			atom_set = InternalPackageSet(initial_atoms=(atom,),
				allow_repo=True)
			if not atom_set.findAtomForPackage(candidate_pkg,
				modified_use=self._pkg_use_enabled(candidate_pkg)):
				return False
		return True

	# Consider each available package similar to dep.parent as a
	# potential replacement parent.
	for replacement_parent in self._iter_similar_available(dep.parent,
		dep.parent.slot_atom, autounmask_level=autounmask_level):

		if replacement_parent is dep.parent:
			continue

		if replacement_parent < dep.parent:
			if want_downgrade_parent is None:
				want_downgrade_parent = self._downgrade_probe(
					dep.parent)
			if not want_downgrade_parent:
				continue

		if not check_reverse_dependencies(dep.parent, replacement_parent):
			continue

		selected_atoms = None

		try:
			atoms = self._flatten_atoms(replacement_parent,
				self._pkg_use_enabled(replacement_parent))
		except InvalidDependString:
			continue

		if replacement_parent.requires is not None:
			atoms = list(atoms)
			atoms.extend(replacement_parent.requires)

		# List of list of child,atom pairs for each atom.
		replacement_candidates = []

		# Set of all packages all atoms can agree on.
		all_candidate_pkgs = None

		for atom in atoms:
			atom_not_selected = False

			if not atom.package:
				unevaluated_atom = None
				if atom.match(dep.child):
					# We are searching for a replacement_parent
					# atom that will pull in a different child,
					# so continue checking the rest of the atoms.
					continue
			else:

				if atom.blocker or \
					atom.cp != dep.child.cp:
					continue

				# Discard USE deps, we're only searching for an
				# approximate pattern, and dealing with USE states
				# is too complex for this purpose.
				unevaluated_atom = atom.unevaluated_atom
				atom = atom.without_use

				if replacement_parent.built and \
					portage.dep._match_slot(atom, dep.child):
					# We are searching for a replacement_parent
					# atom that will pull in a different child,
					# so continue checking the rest of the atoms.
					continue

			candidate_pkg_atoms = []
			candidate_pkgs = []
			for pkg in self._iter_similar_available(
				dep.child, atom):
				if (dep.atom.package and
					pkg.slot == dep.child.slot and
					pkg.sub_slot == dep.child.sub_slot):
					# If slot/sub-slot is identical, then there's
					# no point in updating.
					continue
				if new_child_slot:
					if pkg.slot == dep.child.slot:
						continue
					if pkg < dep.child:
						# the new slot only matters if the
						# package version is higher
						continue
				else:
					if pkg.slot != dep.child.slot:
						continue
					if pkg < dep.child:
						if want_downgrade is None:
							want_downgrade = self._downgrade_probe(dep.child)
						# be careful not to trigger a rebuild when
						# the only version available with a
						# different slot_operator is an older version
						if not want_downgrade:
							continue
					if pkg.version == dep.child.version and not dep.child.built:
						continue

				insignificant = False
				if not slot_conflict and \
					selective and \
					dep.parent.installed and \
					dep.child.installed and \
					dep.parent >= replacement_parent and \
					dep.child.cpv == pkg.cpv:
					# Then can happen if the child's sub-slot changed
					# without a revision bump. The sub-slot change is
					# considered insignificant until one of its parent
					# packages needs to be rebuilt (which may trigger a
					# slot conflict).
					insignificant = True

				if (not insignificant and
					unevaluated_atom is not None):
					# Evaluate USE conditionals and || deps, in order
					# to see if this atom is really desirable, since
					# otherwise we may trigger an undesirable rebuild
					# as in bug #460304.
					if selected_atoms is None:
						selected_atoms = self._select_atoms_probe(
							dep.child.root, replacement_parent)
					if unevaluated_atom not in selected_atoms:
						atom_not_selected = True
						break

				if not insignificant and \
					check_reverse_dependencies(dep.child, pkg,
					replacement_parent=replacement_parent):

					candidate_pkg_atoms.append(
						(pkg, unevaluated_atom or atom))
					candidate_pkgs.append(pkg)

			if atom_not_selected:
				continue

			replacement_candidates.append(candidate_pkg_atoms)
			if all_candidate_pkgs is None:
				all_candidate_pkgs = set(candidate_pkgs)
			else:
				all_candidate_pkgs.intersection_update(candidate_pkgs)

		if not all_candidate_pkgs:
			# If the atoms that connect parent and child can't agree on
			# any replacement child, we can't do anything.
			continue

		# Now select one of the pkgs as replacement. This is as easy as
		# selecting the highest version.
		# The more complicated part is to choose an atom for the
		# new Dependency object. Choose the one which ranked the selected
		# parent highest.
		selected = None
		for candidate_pkg_atoms in replacement_candidates:
			for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
				if pkg not in all_candidate_pkgs:
					continue
				if selected is None or \
					selected[0] < pkg or \
					(selected[0] is pkg and i < selected[2]):
					selected = (pkg, atom, i)

		if debug:
			msg = []
			msg.append("")
			msg.append("")
			msg.append("slot_operator_update_probe:")
			msg.append(" existing child package: %s" % dep.child)
			msg.append(" existing parent package: %s" % dep.parent)
			msg.append(" new child package: %s" % selected[0])
			msg.append(" new parent package: %s" % replacement_parent)
			msg.append("")
			writemsg_level("\n".join(msg),
				noiselevel=-1, level=logging.DEBUG)

		return Dependency(parent=replacement_parent,
			child=selected[0], atom=selected[1])

	if debug:
		msg = []
		msg.append("")
		msg.append("")
		msg.append("slot_operator_update_probe:")
		msg.append(" existing child package: %s" % dep.child)
		msg.append(" existing parent package: %s" % dep.parent)
		msg.append(" new child package: %s" % None)
		msg.append(" new parent package: %s" % None)
		msg.append("")
		writemsg_level("\n".join(msg),
			noiselevel=-1, level=logging.DEBUG)

	return None
def _slot_operator_unsatisfied_probe(self, dep):
    """
    Check whether an unsatisfied built slot-operator dep could be
    satisfied by replacing the parent with a similar available package
    whose corresponding "=" slot-operator atom matches an available
    child package.

    @param dep: an unsatisfied Dependency whose atom carries built
        slot-operator info
    @type dep: Dependency
    @rtype: bool
    @return: True if a replacement parent/child pair was found,
        False otherwise
    """
    if dep.parent.installed and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
        modified_use=self._pkg_use_enabled(dep.parent)):
        # The parent is excluded from replacement, so there is
        # nothing to probe for.
        return False

    debug = "--debug" in self._frozen_config.myopts

    def emit_debug(new_parent, new_child):
        # Shared debug reporting for both the success and failure
        # paths (deduplicated from two identical message blocks).
        writemsg_level("\n".join((
            "",
            "",
            "slot_operator_unsatisfied_probe:",
            "   existing parent package: %s" % dep.parent,
            "   existing parent atom: %s" % dep.atom,
            "   new parent package: %s" % new_parent,
            "   new child package: %s" % new_child,
            "")),
            noiselevel=-1, level=logging.DEBUG)

    for replacement_parent in self._iter_similar_available(dep.parent,
        dep.parent.slot_atom):

        for atom in replacement_parent.validated_atoms:
            # Only consider "=" slot-operator, non-blocker atoms
            # for the same package as the unsatisfied dep.
            if atom.slot_operator != "=" or \
                atom.blocker or \
                atom.cp != dep.atom.cp:
                continue

            # Discard USE deps, we're only searching for an approximate
            # pattern, and dealing with USE states is too complex for
            # this purpose.
            atom = atom.without_use

            pkg, existing_node = self._select_package(dep.root, atom,
                onlydeps=dep.onlydeps)

            if pkg is not None:
                if debug:
                    emit_debug(replacement_parent, pkg)
                return True

    if debug:
        emit_debug(None, None)

    return False
def _slot_operator_unsatisfied_backtrack(self, dep):
    """
    Record backtracking information for an unsatisfied built
    slot-operator dep: mask the parent if it is a (non-installed)
    binary package, or schedule replacement of the parent if it is
    installed, then flag that a restart is needed.

    @param dep: the unsatisfied Dependency
    @type dep: Dependency
    """
    parent = dep.parent

    if "--debug" in self._frozen_config.myopts:
        writemsg_level("\n".join([
            "",
            "",
            "backtracking due to unsatisfied "
            "built slot-operator dep:",
            "   parent package: %s" % parent,
            "   atom: %s" % dep.atom,
            "",
        ]), noiselevel=-1, level=logging.DEBUG)

    config = self._dynamic_config._backtrack_infos.setdefault(
        "config", {})

    if not parent.installed:
        # mask unwanted binary packages if necessary
        config.setdefault("slot_operator_mask_built", {})[parent] = \
            {"slot_operator_mask_built": None}
    else:
        # trigger replacement of installed packages if necessary
        replacement_atom = self._replace_installed_atom(parent)
        if replacement_atom is not None:
            config.setdefault("slot_operator_replace_installed",
                set()).add((parent.root, replacement_atom))

    self._dynamic_config._need_restart = True
def _upgrade_available(self, pkg):
    """
    Detect cases where an upgrade of the given package is available
    within the same slot.

    @rtype: bool
    @return: True if a higher version is available in pkg's slot
    """
    return any(candidate > pkg for candidate in
        self._iter_similar_available(pkg, pkg.slot_atom))
def _downgrade_probe(self, pkg):
    """
    Detect cases where a downgrade of the given package is considered
    desirable due to the current version being masked or unavailable.

    @rtype: bool
    @return: True if at least one strictly older version is available
        and no equal-or-newer version is available
    """
    found_older = False
    for candidate in self._iter_similar_available(pkg, pkg.slot_atom):
        if candidate >= pkg:
            # There's an available package of the same or higher
            # version, so downgrade seems undesirable.
            return False
        found_older = True

    # Only desirable if some (strictly older) version exists at all.
    return found_older
def _select_atoms_probe(self, root, pkg):
    """
    Evaluate the package's dependency strings and return the set of
    unevaluated atoms that would be selected for it.

    @rtype: frozenset
    @return: unevaluated atoms selected from pkg's dep variables
    """
    use = self._pkg_use_enabled(pkg)
    result = set()
    for dep_key in pkg._dep_keys:
        dep_string = pkg._metadata.get(dep_key)
        if not dep_string:
            continue
        result.update(atom.unevaluated_atom for atom in
            self._select_atoms(root, dep_string,
            myuse=use, parent=pkg)[pkg])
    return frozenset(result)
def _flatten_atoms(self, pkg, use):
    """
    Evaluate all dependency atoms of the given package, and return
    them as a frozenset. For performance, results are cached.
    @param pkg: a Package instance
    @type pkg: Package
    @param use: set of enabled USE flags
    @type use: frozenset
    @rtype: frozenset
    @return: set of evaluated atoms
    """
    cache_key = (pkg, use)
    try:
        return self._dynamic_config._flatten_atoms_cache[cache_key]
    except KeyError:
        pass
    atoms = []
    for dep_key in pkg._dep_keys:
        dep_string = pkg._metadata[dep_key]
        if not dep_string:
            continue
        # Flatten the dep string into a single token list; non-Atom
        # tokens (operators like "||") are discarded below.
        dep_string = portage.dep.use_reduce(
            dep_string, uselist=use,
            is_valid_flag=pkg.iuse.is_valid_flag,
            flat=True, token_class=Atom, eapi=pkg.eapi)
        atoms.extend(token for token in dep_string
            if isinstance(token, Atom))
    atoms = frozenset(atoms)
    self._dynamic_config._flatten_atoms_cache[cache_key] = atoms
    return atoms
def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
    """
    Given a package that's in the graph, do a rough check to
    see if a similar package is available to install. The given
    graph_pkg itself may be yielded only if it's not installed.
    """
    usepkgonly = "--usepkgonly" in self._frozen_config.myopts
    useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
    use_ebuild_visibility = self._frozen_config.myopts.get(
        '--use-ebuild-visibility', 'n') != 'n'

    for pkg in self._iter_match_pkgs_any(
        graph_pkg.root_config, atom):
        if pkg.cp != graph_pkg.cp:
            # discard old-style virtual match
            continue
        if pkg.installed:
            continue
        if pkg in self._dynamic_config._runtime_pkg_mask:
            # Masked by a previous backtracking decision.
            continue
        if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
            modified_use=self._pkg_use_enabled(pkg)):
            continue
        if pkg.built:
            if self._equiv_binary_installed(pkg):
                continue
            # The ebuild-visibility check for a binary package is
            # skipped only when --use-ebuild-visibility is off AND
            # the binary is explicitly preferred (--usepkgonly or a
            # --useoldpkg-atoms match); otherwise an equivalent
            # visible ebuild must exist.
            if not (not use_ebuild_visibility and
                (usepkgonly or useoldpkg_atoms.findAtomForPackage(
                pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
                not self._equiv_ebuild_visible(pkg,
                autounmask_level=autounmask_level):
                continue
        if not self._pkg_visibility_check(pkg,
            autounmask_level=autounmask_level):
            continue
        yield pkg
def _replace_installed_atom(self, inst_pkg):
    """
    Given an installed package, generate an atom suitable for
    slot_operator_replace_installed backtracking info. The replacement
    SLOT may differ from the installed SLOT, so first search by cpv.

    @rtype: Atom or None
    @return: slot atom of the best replacement, or None if none found
    """
    built_pkgs = []
    # Search by exact cpv first, then by the installed slot atom.
    for search_atom in (Atom("=%s" % inst_pkg.cpv), inst_pkg.slot_atom):
        for pkg in self._iter_similar_available(inst_pkg, search_atom):
            if not pkg.built:
                # An ebuild is preferred: use its SLOT directly.
                return pkg.slot_atom
            if not pkg.installed:
                # avoid using SLOT from a built instance
                built_pkgs.append(pkg)

    if built_pkgs:
        # Fall back to the SLOT of the highest built version.
        return max(built_pkgs).slot_atom

    return None
def _slot_operator_trigger_reinstalls(self):
    """
    Search for packages with slot-operator deps on older slots, and schedule
    rebuilds if they can link to a newer slot that's in the graph.
    """
    rebuild_if_new_slot = self._dynamic_config.myparams.get(
        "rebuild_if_new_slot", "y") == "y"

    for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():

        for dep in slot_info:

            atom = dep.atom

            if not (atom.soname or atom.slot_operator_built):
                # Plain slot-operator dep (no built soname/sub-slot
                # info): only probe for a slot change.
                new_child_slot = self._slot_change_probe(dep)
                if new_child_slot is not None:
                    self._slot_change_backtrack(dep, new_child_slot)
                continue

            # Built slot-operator deps only make sense for built
            # Package parents.
            if not (dep.parent and
                isinstance(dep.parent, Package) and dep.parent.built):
                continue

            # Check for slot update first, since we don't want to
            # trigger reinstall of the child package when a newer
            # slot will be used instead.
            if rebuild_if_new_slot:
                new_dep = self._slot_operator_update_probe(dep,
                    new_child_slot=True)
                if new_dep is not None:
                    self._slot_operator_update_backtrack(dep,
                        new_child_slot=new_dep.child)

            if dep.want_update:
                if self._slot_operator_update_probe(dep):
                    self._slot_operator_update_backtrack(dep)
def _reinstall_for_flags(self, pkg, forced_flags,
    orig_use, orig_iuse, cur_use, cur_iuse):
    """Return a set of flags that trigger reinstallation, or None if there
    are no such flags."""

    # binpkg_respect_use: Behave like newuse by default. If newuse is
    # False and changed_use is True, then behave like changed_use.
    binpkg_respect_use = (pkg.built and
        self._dynamic_config.myparams.get("binpkg_respect_use")
        in ("y", "auto"))
    newuse = "--newuse" in self._frozen_config.myopts
    changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
    feature_flags = _get_feature_flags(
        _get_eapi_attrs(pkg.eapi))

    # Flags whose enabled state differs between the original and
    # current builds (considering only flags in each IUSE).
    changed_state = orig_iuse.intersection(orig_use).symmetric_difference(
        cur_iuse.intersection(cur_use))

    if newuse or (binpkg_respect_use and not changed_use):
        # Also count flags that were added to or removed from IUSE,
        # except those forced by the profile.
        flags = set(orig_iuse.symmetric_difference(
            cur_iuse).difference(forced_flags))
        flags.update(changed_state)
    elif changed_use or binpkg_respect_use:
        flags = set(changed_state)
    else:
        return None

    # Feature flags are managed internally and never trigger reinstall.
    flags.difference_update(feature_flags)
    return flags if flags else None
def _changed_deps(self, pkg):
    """
    Compare the dependencies of the given built package with those of
    the corresponding ebuild (slot/sub-slot info stripped).

    @rtype: bool
    @return: True if the dependencies differ (or the built deps are
        invalid), False if they match or no ebuild is available
    """
    ebuild = None
    try:
        ebuild = self._pkg(pkg.cpv, "ebuild",
            pkg.root_config, myrepo=pkg.repo)
    except PackageNotFound:
        # Use first available instance of the same version.
        for ebuild in self._iter_match_pkgs(
            pkg.root_config, "ebuild", Atom("=" + pkg.cpv)):
            break

    if ebuild is None:
        return False

    if self._dynamic_config.myparams.get("bdeps", "n") == "y":
        depvars = Package._dep_keys
    else:
        depvars = Package._runtime_keys

    def normalized_deps(metadata, eapi):
        # Parse each dep variable and strip slot/sub-slot info, so
        # that only the atoms themselves are compared.
        deps = []
        for k in depvars:
            dep_struct = portage.dep.use_reduce(
                metadata[k], uselist=pkg.use.enabled,
                eapi=eapi, token_class=Atom)
            strip_slots(dep_struct)
            deps.append(dep_struct)
        return deps

    # Use _raw_metadata, in order to avoid interaction
    # with --dynamic-deps.
    try:
        built_deps = normalized_deps(pkg._raw_metadata, pkg.eapi)
    except InvalidDependString:
        return True

    return built_deps != normalized_deps(
        ebuild._raw_metadata, ebuild.eapi)
def _create_graph(self, allow_unsatisfied=False):
    """
    Drain the dependency stacks, adding packages and deps to the
    graph until both stacks are empty.

    @rtype: int
    @return: 1 on success, 0 if any dep could not be added
    """
    dep_stack = self._dynamic_config._dep_stack
    dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack

    while dep_stack or dep_disjunctive_stack:
        self._spinner_update()

        while dep_stack:
            item = dep_stack.pop()
            # Package instances get their deps expanded; everything
            # else is a Dependency to be added directly.
            if isinstance(item, Package):
                ok = self._add_pkg_deps(item,
                    allow_unsatisfied=allow_unsatisfied)
            else:
                ok = self._add_dep(item,
                    allow_unsatisfied=allow_unsatisfied)
            if not ok:
                return 0

        if dep_disjunctive_stack:
            if not self._pop_disjunction(allow_unsatisfied):
                return 0

    return 1
def _expand_set_args(self, input_args, add_to_digraph=False):
    """
    Iterate over a list of DependencyArg instances and yield all
    instances given in the input together with additional SetArg
    instances that are generated from nested sets.
    @param input_args: An iterable of DependencyArg instances
    @type input_args: Iterable
    @param add_to_digraph: If True then add SetArg instances
        to the digraph, in order to record parent -> child
        relationships from nested sets
    @type add_to_digraph: Boolean
    @rtype: Iterable
    @return: All args given in the input together with additional
        SetArg instances that are generated from nested sets
    """

    traversed_set_args = set()

    for arg in input_args:
        if not isinstance(arg, SetArg):
            # Non-set args pass through unmodified.
            yield arg
            continue

        root_config = arg.root_config
        depgraph_sets = self._dynamic_config.sets[root_config.root]
        arg_stack = [arg]
        # Depth-first traversal of nested sets.
        while arg_stack:
            arg = arg_stack.pop()
            if arg in traversed_set_args:
                continue

            # If a node with the same hash already exists in
            # the digraph, preserve the existing instance which
            # may have a different reset_depth attribute
            # (distinguishes user arguments from sets added for
            # another reason such as complete mode).
            arg = self._dynamic_config.digraph.get(arg, arg)
            traversed_set_args.add(arg)

            if add_to_digraph:
                self._dynamic_config.digraph.add(arg, None,
                    priority=BlockerDepPriority.instance)

            yield arg

            # Traverse nested sets and add them to the stack
            # if they're not already in the graph. Also, graph
            # edges between parent and nested sets.
            for token in arg.pset.getNonAtoms():
                if not token.startswith(SETPREFIX):
                    continue
                s = token[len(SETPREFIX):]
                # Prefer a set already registered for this root's
                # depgraph, falling back to the root's own sets.
                nested_set = depgraph_sets.sets.get(s)
                if nested_set is None:
                    nested_set = root_config.sets.get(s)
                if nested_set is not None:
                    # Propagate the reset_depth attribute from
                    # parent set to nested set.
                    nested_arg = SetArg(arg=token, pset=nested_set,
                        reset_depth=arg.reset_depth,
                        root_config=root_config)

                    # Preserve instances already in the graph (same
                    # reason as for the "arg" variable above).
                    nested_arg = self._dynamic_config.digraph.get(
                        nested_arg, nested_arg)
                    arg_stack.append(nested_arg)
                    if add_to_digraph:
                        self._dynamic_config.digraph.add(nested_arg, arg,
                            priority=BlockerDepPriority.instance)
                        depgraph_sets.sets[nested_arg.name] = nested_arg.pset
def _add_dep(self, dep, allow_unsatisfied=False):
    """
    Add a single Dependency to the graph: register blockers, select a
    package to satisfy the dep, and record backtracking info when the
    dep cannot be satisfied.

    @param dep: the dependency to add
    @type dep: Dependency
    @param allow_unsatisfied: if True, queue unsatisfiable deps in
        _unsatisfied_deps instead of failing or backtracking
    @type allow_unsatisfied: bool
    @rtype: int
    @return: 1 on success (or benign skip), 0 on failure
    """
    debug = "--debug" in self._frozen_config.myopts
    buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
    nodeps = "--nodeps" in self._frozen_config.myopts

    if dep.blocker:

        # Slot collision nodes are not allowed to block other packages since
        # blocker validation is only able to account for one package per slot.
        is_slot_conflict_parent = any(dep.parent in conflict.pkgs[1:] for conflict in \
            self._dynamic_config._package_tracker.slot_conflicts())
        if not buildpkgonly and \
            not nodeps and \
            not dep.collapsed_priority.ignored and \
            not dep.collapsed_priority.optional and \
            not is_slot_conflict_parent:
            if dep.parent.onlydeps:
                # It's safe to ignore blockers if the
                # parent is an --onlydeps node.
                return 1
            # The blocker applies to the root where
            # the parent is or will be installed.
            blocker = Blocker(atom=dep.atom,
                eapi=dep.parent.eapi,
                priority=dep.priority, root=dep.parent.root)
            self._dynamic_config._blocker_parents.add(blocker, dep.parent)
        # Blockers are resolved later; nothing more to do here.
        return 1

    if dep.child is None:
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)
    else:
        # The caller has selected a specific package
        # via self._minimize_packages().
        dep_pkg = dep.child
        existing_node = next(self._dynamic_config._package_tracker.match(
            dep.root, dep_pkg.slot_atom, installed=False), None)

    if not dep_pkg:
        if (dep.collapsed_priority.optional or
            dep.collapsed_priority.ignored):
            # This is an unnecessary build-time dep.
            return 1

        # NOTE: For removal actions, allow_unsatisfied is always
        # True since all existing removal actions traverse all
        # installed deps deeply via the _complete_graph method,
        # which calls _create_graph with allow_unsatisfied = True.
        if allow_unsatisfied:
            self._dynamic_config._unsatisfied_deps.append(dep)
            return 1

        # The following case occurs when
        # _solve_non_slot_operator_slot_conflicts calls
        # _create_graph. In this case, ignore unsatisfied deps for
        # installed packages only if their depth is beyond the depth
        # requested by the user and the dep was initially
        # unsatisfied (not broken by a slot conflict in the current
        # graph). See bug #520950.
        # NOTE: The value of dep.parent.depth is guaranteed to be
        # either an integer or _UNREACHABLE_DEPTH, where
        # _UNREACHABLE_DEPTH indicates that the parent has been
        # pulled in by the _complete_graph method (rather than by
        # explicit arguments or their deep dependencies). These
        # cases must be distinguished because depth is meaningless
        # for packages that are not reachable as deep dependencies
        # of arguments.
        if (self._dynamic_config._complete_mode and
            isinstance(dep.parent, Package) and
            dep.parent.installed and
            (dep.parent.depth is self._UNREACHABLE_DEPTH or
            (self._frozen_config.requested_depth is not True and
            dep.parent.depth >= self._frozen_config.requested_depth))):
            inst_pkg, in_graph = \
                self._select_pkg_from_installed(dep.root, dep.atom)
            if inst_pkg is None:
                self._dynamic_config._initially_unsatisfied_deps.append(dep)
                return 1

        # Queue the failure for display to the user later.
        self._dynamic_config._unsatisfied_deps_for_display.append(
            ((dep.root, dep.atom), {"myparent":dep.parent}))

        # The parent node should not already be in
        # runtime_pkg_mask, since that would trigger an
        # infinite backtracking loop.
        if self._dynamic_config._allow_backtracking:
            if dep.parent in self._dynamic_config._runtime_pkg_mask:
                if debug:
                    writemsg(
                        "!!! backtracking loop detected: %s %s\n" % \
                        (dep.parent,
                        self._dynamic_config._runtime_pkg_mask[
                        dep.parent]), noiselevel=-1)
            elif dep.atom.package and dep.atom.slot_operator_built and \
                self._slot_operator_unsatisfied_probe(dep):
                # A replacement parent exists for this built
                # slot-operator dep, so backtrack toward it.
                self._slot_operator_unsatisfied_backtrack(dep)
                return 1
            else:
                # Do not backtrack if only USE have to be changed in
                # order to satisfy the dependency. Note that when
                # want_restart_for_use_change sets the need_restart
                # flag, it causes _select_pkg_highest_available to
                # return None, and eventually we come through here
                # and skip the "missing dependency" backtracking path.
                dep_pkg, existing_node = \
                    self._select_package(dep.root,
                        dep.atom.without_use if dep.atom.package
                        else dep.atom, onlydeps=dep.onlydeps)
                if dep_pkg is None:
                    self._dynamic_config._backtrack_infos["missing dependency"] = dep
                    self._dynamic_config._need_restart = True
                    if debug:
                        msg = []
                        msg.append("")
                        msg.append("")
                        msg.append("backtracking due to unsatisfied dep:")
                        msg.append("    parent: %s" % dep.parent)
                        msg.append("    priority: %s" % dep.priority)
                        msg.append("    root: %s" % dep.root)
                        msg.append("    atom: %s" % dep.atom)
                        msg.append("")
                        writemsg_level("".join("%s\n" % l for l in msg),
                            noiselevel=-1, level=logging.DEBUG)

        return 0

    self._rebuild.add(dep_pkg, dep)

    ignore = dep.collapsed_priority.ignored and \
        not self._dynamic_config._traverse_ignored_deps
    if not ignore and not self._add_pkg(dep_pkg, dep):
        return 0
    return 1
def _check_slot_conflict(self, pkg, atom):
existing_node = next(self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom, installed=False), None)
matches = None
if existing_node:
matches = pkg.cpv == existing_node.cpv
if pkg != existing_node and \
atom is not None:
matches = atom.match(existing_node.with_use(