blob: 2a8956deb2471a0f5d444ba47794121957902697 [file] [log] [blame]
# Copyright 1999-2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import gc
import logging
import re
import sys
import textwrap
from itertools import chain
import portage
from portage import os
from portage import _unicode_decode
from portage.const import PORTAGE_PACKAGE_ATOM
from portage.dbapi import dbapi
from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
from portage.exception import InvalidAtom
from portage.output import colorize, create_color_func, \
darkgreen, green
bad = create_color_func("BAD")
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import writemsg_level
from portage.util.digraph import digraph
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import is_valid_package_atom
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from _emerge.RootConfig import RootConfig
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
from _emerge.resolver.output import display, filter_iuse_defaults
# Python 3 removed the "basestring" and "long" builtins; alias them to
# their Python 3 equivalents so code below can keep using the Python 2
# names unconditionally.
if sys.hexversion >= 0x3000000:
    basestring = str
    long = int
class _scheduler_graph_config(object):
def __init__(self, trees, pkg_cache, graph, mergelist):
self.trees = trees
self.pkg_cache = pkg_cache
self.graph = graph
self.mergelist = mergelist
class _frozen_depgraph_config(object):
    """
    Depgraph state computed once and shared across backtracking runs:
    global settings, per-root (Fake)vartrees and pkgsettings, and the
    user's --exclude atom set.
    """

    def __init__(self, settings, trees, myopts, spinner):
        self.settings = settings
        # ROOT being operated on; may differ from the running root "/".
        self.target_root = settings["ROOT"]
        self.myopts = myopts
        self.edebug = 0
        if settings.get("PORTAGE_DEBUG", "") == "1":
            self.edebug = 1
        self.spinner = spinner
        self._running_root = trees["/"]["root_config"]
        # Options under which no merge is performed, so emerge never has
        # to restart itself mid-run.
        self._opts_no_restart = frozenset(["--buildpkgonly",
            "--fetchonly", "--fetch-all-uri", "--pretend"])
        self.pkgsettings = {}
        self.trees = {}
        self._trees_orig = trees
        self.roots = {}
        # All Package instances
        self._pkg_cache = {}
        self._highest_license_masked = {}
        for myroot in trees:
            self.trees[myroot] = {}
            # Create a RootConfig instance that references
            # the FakeVartree instead of the real one.
            self.roots[myroot] = RootConfig(
                trees[myroot]["vartree"].settings,
                self.trees[myroot],
                trees[myroot]["root_config"].setconfig)
            for tree in ("porttree", "bintree"):
                self.trees[myroot][tree] = trees[myroot][tree]
            # The FakeVartree lets the resolver model vdb state without
            # mutating the real vartree.
            self.trees[myroot]["vartree"] = \
                FakeVartree(trees[myroot]["root_config"],
                    pkg_cache=self._pkg_cache,
                    pkg_root_config=self.roots[myroot])
            self.pkgsettings[myroot] = portage.config(
                clone=self.trees[myroot]["vartree"].settings)
        self._required_set_names = set(["world"])
        # Atoms given via --exclude; a bare name is widened to */name.
        self.excluded_pkgs = InternalPackageSet(allow_wildcard=True)
        for x in ' '.join(myopts.get("--exclude", [])).split():
            try:
                x = Atom(x, allow_wildcard=True)
            except portage.exception.InvalidAtom:
                x = Atom("*/" + x, allow_wildcard=True)
            self.excluded_pkgs.add(x)
class _depgraph_sets(object):
    """Per-root bookkeeping of package sets and atoms added to the graph."""

    def __init__(self):
        # All sets added to the graph, keyed by set name. Non-set
        # command-line atoms are collected under a pseudo-set.
        self.sets = {
            '__non_set_args__': InternalPackageSet(allow_repo=True),
        }
        # Every atom from every set above, including atoms given
        # directly as arguments.
        self.atoms = InternalPackageSet(allow_repo=True)
        # Maps an atom back to the argument(s) that supplied it.
        self.atom_arg_map = {}
class _dynamic_depgraph_config(object):
    """
    Depgraph state that is rebuilt for each backtracking run: the digraph
    itself, per-root fake dbapis, blocker/conflict bookkeeping, and the
    queues of messages to display.
    """

    def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
        self.myparams = myparams.copy()
        self._vdb_loaded = False
        self._allow_backtracking = allow_backtracking
        # Maps slot atom to package for each Package added to the graph.
        self._slot_pkg_map = {}
        # Maps nodes to the reasons they were selected for reinstallation.
        self._reinstall_nodes = {}
        self.mydbapi = {}
        # Contains a filtered view of preferred packages that are selected
        # from available repositories.
        self._filtered_trees = {}
        # Contains installed packages and new packages that have been added
        # to the graph.
        self._graph_trees = {}
        # Caches visible packages returned from _select_package, for use in
        # depgraph._iter_atoms_for_pkg() SLOT logic.
        self._visible_pkgs = {}
        # Contains the args created by select_files.
        self._initial_arg_list = []
        self.digraph = portage.digraph()
        # Manages sets added to the graph.
        self.sets = {}
        # Contains all nodes pulled in by self.sets.
        self._set_nodes = set()
        # Contains only Blocker -> Uninstall edges
        self._blocker_uninstalls = digraph()
        # Contains only Package -> Blocker edges
        self._blocker_parents = digraph()
        # Contains only irrelevant Package -> Blocker edges
        self._irrelevant_blockers = digraph()
        # Contains only unsolvable Package -> Blocker edges
        self._unsolvable_blockers = digraph()
        # Contains all Blocker -> Blocked Package edges
        self._blocked_pkgs = digraph()
        # Contains world packages that have been protected from
        # uninstallation but may not have been added to the graph
        # if the graph is not complete yet.
        self._blocked_world_pkgs = {}
        # Contains packages whose dependencies have been traversed.
        # This is used to check if we have accounted for blockers
        # relevant to a package.
        self._traversed_pkg_deps = set()
        self._slot_collision_info = {}
        # Slot collision nodes are not allowed to block other packages since
        # blocker validation is only able to account for one package per slot.
        self._slot_collision_nodes = set()
        self._parent_atoms = {}
        self._slot_conflict_parent_atoms = set()
        self._slot_conflict_handler = None
        self._circular_dependency_handler = None
        self._serialized_tasks_cache = None
        self._scheduler_graph = None
        self._displayed_list = None
        self._pprovided_args = []
        self._missing_args = []
        self._masked_installed = set()
        self._masked_license_updates = set()
        self._unsatisfied_deps_for_display = []
        self._unsatisfied_blockers_for_display = None
        self._circular_deps_for_display = None
        self._dep_stack = []
        self._dep_disjunctive_stack = []
        self._unsatisfied_deps = []
        self._initially_unsatisfied_deps = []
        self._ignored_deps = []
        self._highest_pkg_cache = {}
        # State carried over from a previous (failed) run by the backtracker.
        self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
        self._needed_license_changes = backtrack_parameters.needed_license_changes
        self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
        self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
        self._need_restart = False
        # Data handed to the backtracker when a restart is required.
        self._backtrack_infos = {}

        self._autounmask = depgraph._frozen_config.myopts.get('--autounmask', 'n') == True
        self._success_without_autounmask = False

        for myroot in depgraph._frozen_config.trees:
            self.sets[myroot] = _depgraph_sets()
            self._slot_pkg_map[myroot] = {}
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            # This dbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = PackageVirtualDbapi(vardb.settings)
            self.mydbapi[myroot] = fakedb

            def graph_tree():
                pass
            graph_tree.dbapi = fakedb
            self._graph_trees[myroot] = {}
            self._filtered_trees[myroot] = {}
            # Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # that have already been made.
            self._graph_trees[myroot]["porttree"] = graph_tree
            self._graph_trees[myroot]["vartree"] = graph_tree

            def filtered_tree():
                pass
            filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
            self._filtered_trees[myroot]["porttree"] = filtered_tree
            self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

            # Passing in graph_tree as the vartree here could lead to better
            # atom selections in some cases by causing atoms for packages that
            # have been added to the graph to be preferred over other choices.
            # However, it can trigger atom selections that result in
            # unresolvable direct circular dependencies. For example, this
            # happens with gwydion-dylan which depends on either itself or
            # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid
            # an unresolvable direct circular dependency.
            #
            # To solve the problem described above, pass in "graph_db" so that
            # packages that have been added to the graph are distinguishable
            # from other available packages and installed packages. Also, pass
            # the parent package into self._select_atoms() calls so that
            # unresolvable direct circular dependencies can be detected and
            # avoided when possible.
            self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._filtered_trees[myroot]["vartree"] = \
                depgraph._frozen_config.trees[myroot]["vartree"]

            dbs = []
            # (db, pkg_type, built, installed, db_keys)
            if "remove" in self.myparams:
                # For removal operations, use _dep_check_composite_db
                # for availability and visibility checks. This provides
                # consistency with install operations, so we don't
                # get install/uninstall cycles like in bug #332719.
                self._graph_trees[myroot]["porttree"] = filtered_tree
            else:
                if "--usepkgonly" not in depgraph._frozen_config.myopts:
                    portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
                    db_keys = list(portdb._aux_cache_keys)
                    dbs.append((portdb, "ebuild", False, False, db_keys))
                if "--usepkg" in depgraph._frozen_config.myopts:
                    bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
                    db_keys = list(bindb._aux_cache_keys)
                    dbs.append((bindb, "binary", True, False, db_keys))
            # The installed-package db is always consulted last.
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            db_keys = list(depgraph._frozen_config._trees_orig[myroot
                ]["vartree"].dbapi._aux_cache_keys)
            dbs.append((vardb, "installed", True, True, db_keys))
            self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
    """Dependency graph resolver: builds the merge digraph from package
    selections, records parent atoms, and handles slot conflicts and
    blockers (class body continues below)."""

    # Tree-name mapping shared with RootConfig.
    pkg_tree_map = RootConfig.pkg_tree_map

    # Metadata keys that carry dependency strings.
    _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
def __init__(self, settings, trees, myopts, myparams, spinner,
    frozen_config=None, backtrack_parameters=None, allow_backtracking=False):
    """
    Create a resolver instance.

    @param settings: global portage config
    @param trees: per-root tree mapping
    @param myopts: parsed emerge options
    @param myparams: depgraph parameters (e.g. "deep", "empty", "recurse")
    @param spinner: progress spinner, or None
    @param frozen_config: shared _frozen_depgraph_config to reuse across
        backtracking runs; constructed on demand when None
    @param backtrack_parameters: BacktrackParameter from the backtracker;
        a fresh instance is created when None
    @param allow_backtracking: whether a failed resolution may trigger a
        backtracking restart
    """
    # Use a None sentinel instead of a "BacktrackParameter()" default
    # argument: a default instance is evaluated once and shared by every
    # depgraph, and its sets are referenced (and potentially mutated)
    # through _dynamic_depgraph_config, which would leak state between
    # instances.
    if backtrack_parameters is None:
        backtrack_parameters = BacktrackParameter()
    if frozen_config is None:
        frozen_config = _frozen_depgraph_config(settings, trees,
            myopts, spinner)
    self._frozen_config = frozen_config
    self._dynamic_config = _dynamic_depgraph_config(self, myparams,
        allow_backtracking, backtrack_parameters)

    # Strategy hooks: both currently resolve to the
    # "highest available" selection policy.
    self._select_atoms = self._select_atoms_highest_available
    self._select_package = self._select_pkg_highest_available
def _load_vdb(self):
    """
    Load installed package metadata if appropriate. This used to be called
    from the constructor, but that wasn't very nice since this procedure
    is slow and it generates spinner output. So, now it's called on-demand
    by various methods when necessary.
    """

    if self._dynamic_config._vdb_loaded:
        return

    for myroot in self._frozen_config.trees:

        # With --nodeps there is no need to preload the whole vdb.
        preload_installed_pkgs = \
            "--nodeps" not in self._frozen_config.myopts

        fake_vartree = self._frozen_config.trees[myroot]["vartree"]
        if not fake_vartree.dbapi:
            # This needs to be called for the first depgraph, but not for
            # backtracking depgraphs that share the same frozen_config.
            fake_vartree.sync()

            # FakeVartree.sync() populates virtuals, and we want
            # self.pkgsettings to have them populated too.
            self._frozen_config.pkgsettings[myroot] = \
                portage.config(clone=fake_vartree.settings)

        if preload_installed_pkgs:
            vardb = fake_vartree.dbapi
            fakedb = self._dynamic_config._graph_trees[
                myroot]["vartree"].dbapi

            for pkg in vardb:
                self._spinner_update()
                # This triggers metadata updates via FakeVartree.
                vardb.aux_get(pkg.cpv, [])
                fakedb.cpv_inject(pkg)

            # Now that the vardb state is cached in our FakeVartree,
            # we won't be needing the real vartree cache for awhile.
            # To make some room on the heap, clear the vardbapi
            # caches.
            self._frozen_config._trees_orig[myroot
                ]["vartree"].dbapi._clear_cache()
            gc.collect()

    self._dynamic_config._vdb_loaded = True
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
def _show_missed_update(self):
    """Report available updates that were skipped, grouped by the mask
    reason ("slot conflict" or "missing dependency") recorded in
    _runtime_pkg_mask."""

    # Stay silent in --quiet mode unless --debug is also enabled.
    if '--quiet' in self._frozen_config.myopts and \
        '--debug' not in self._frozen_config.myopts:
        return

    # In order to minimize noise, show only the highest
    # missed update from each SLOT.
    missed_updates = {}
    for pkg, mask_reasons in \
        self._dynamic_config._runtime_pkg_mask.items():
        if pkg.installed:
            # Exclude installed here since we only
            # want to show available updates.
            continue
        k = (pkg.root, pkg.slot_atom)
        if k in missed_updates:
            other_pkg, mask_type, parent_atoms = missed_updates[k]
            if other_pkg > pkg:
                # A higher version is already recorded for this slot.
                continue
        for mask_type, parent_atoms in mask_reasons.items():
            if not parent_atoms:
                continue
            # Record the first mask reason that has parent atoms.
            missed_updates[k] = (pkg, mask_type, parent_atoms)
            break

    if not missed_updates:
        return

    # Group by mask type so each category is rendered by its
    # specialized display method below.
    missed_update_types = {}
    for pkg, mask_type, parent_atoms in missed_updates.values():
        missed_update_types.setdefault(mask_type,
            []).append((pkg, parent_atoms))

    self._show_missed_update_slot_conflicts(
        missed_update_types.get("slot conflict"))

    self._show_missed_update_unsatisfied_dep(
        missed_update_types.get("missing dependency"))
def _show_missed_update_unsatisfied_dep(self, missed_updates):
    """Display updates skipped due to unsatisfied dependencies. Updates
    masked by backtracking are shown in abbreviated form at the end
    (see bug #285832)."""

    if not missed_updates:
        return

    backtrack_masked = []

    for pkg, parent_atoms in missed_updates:

        try:
            # First pass probes for a backtrack mask without output.
            for parent, root, atom in parent_atoms:
                self._show_unsatisfied_dep(root, atom, myparent=parent,
                    check_backtrack=True)
        except self._backtrack_mask:
            # This is displayed below in abbreviated form.
            backtrack_masked.append((pkg, parent_atoms))
            continue

        writemsg("\n!!! The following update has been skipped " + \
            "due to unsatisfied dependencies:\n\n", noiselevel=-1)

        writemsg(str(pkg.slot_atom), noiselevel=-1)
        if pkg.root != '/':
            writemsg(" for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n", noiselevel=-1)

        for parent, root, atom in parent_atoms:
            self._show_unsatisfied_dep(root, atom, myparent=parent)
            writemsg("\n", noiselevel=-1)

    if backtrack_masked:
        # These are shown in abbreviated form, in order to avoid terminal
        # flooding from mask messages as reported in bug #285832.
        writemsg("\n!!! The following update(s) have been skipped " + \
            "due to unsatisfied dependencies\n" + \
            "!!! triggered by backtracking:\n\n", noiselevel=-1)
        for pkg, parent_atoms in backtrack_masked:
            writemsg(str(pkg.slot_atom), noiselevel=-1)
            if pkg.root != '/':
                writemsg(" for %s" % (pkg.root,), noiselevel=-1)
            writemsg("\n", noiselevel=-1)
def _show_missed_update_slot_conflicts(self, missed_updates):
    """Print the updates that were skipped because of a dependency
    (slot) conflict, along with each conflicting parent atom."""
    if not missed_updates:
        return

    indent = " "
    out = []
    out.append("\n!!! One or more updates have been skipped due to " + \
        "a dependency conflict:\n\n")
    for pkg, parent_atoms in missed_updates:
        out.append(str(pkg.slot_atom))
        if pkg.root != '/':
            out.append(" for %s" % (pkg.root,))
        out.append("\n\n")

        for parent, atom in parent_atoms:
            out.append(indent + str(pkg) + " conflicts with\n")
            out.append(indent * 2)
            if isinstance(parent, (PackageArg, AtomArg)):
                # For PackageArg and AtomArg parents the atom attribute
                # is redundant, so show only the parent itself.
                out.append(str(parent))
            else:
                # SetArg or Package parents: show the specific atom too.
                out.append("%s required by %s" % (atom, parent))
            out.append("\n")
        out.append("\n")

    writemsg("".join(out), noiselevel=-1)
def _show_slot_collision_notice(self):
    """Show an informational message advising the user to mask one of
    the packages. In some cases it may be possible to resolve this
    automatically, but support for backtracking (removal nodes that have
    already been selected) will be required in order to handle all possible
    cases.
    """

    if not self._dynamic_config._slot_collision_info:
        return

    self._show_merge_list()

    self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
    handler = self._dynamic_config._slot_conflict_handler

    conflict = handler.get_conflict()
    writemsg(conflict, noiselevel=-1)

    explanation = handler.get_explanation()
    if explanation:
        # The handler produced a concrete suggestion; skip the
        # generic advice below.
        writemsg(explanation, noiselevel=-1)
        return

    if "--quiet" in self._frozen_config.myopts:
        return

    msg = []
    msg.append("It may be possible to solve this problem ")
    msg.append("by using package.mask to prevent one of ")
    msg.append("those packages from being selected. ")
    msg.append("However, it is also possible that conflicting ")
    msg.append("dependencies exist such that they are impossible to ")
    msg.append("satisfy simultaneously.  If such a conflict exists in ")
    msg.append("the dependencies of two different packages, then those ")
    msg.append("packages can not be installed simultaneously.")
    backtrack_opt = self._frozen_config.myopts.get('--backtrack')
    if not self._dynamic_config._allow_backtracking and \
        (backtrack_opt is None or \
        (backtrack_opt > 0 and backtrack_opt < 30)):
        # Suggest a larger --backtrack value when backtracking was
        # disabled or limited to a small number of tries.
        msg.append(" You may want to try a larger value of the ")
        msg.append("--backtrack option, such as --backtrack=30, ")
        msg.append("in order to see if that will solve this conflict ")
        msg.append("automatically.")

    for line in textwrap.wrap(''.join(msg), 70):
        writemsg(line + '\n', noiselevel=-1)
    writemsg('\n', noiselevel=-1)

    msg = []
    msg.append("For more information, see MASKED PACKAGES ")
    msg.append("section in the emerge man page or refer ")
    msg.append("to the Gentoo Handbook.")
    for line in textwrap.wrap(''.join(msg), 70):
        writemsg(line + '\n', noiselevel=-1)
    writemsg('\n', noiselevel=-1)
def _process_slot_conflicts(self):
    """
    Process slot conflict data to identify specific atoms which
    lead to conflict. These atoms only match a subset of the
    packages that have been pulled into a given slot.
    """
    for (slot_atom, root), slot_nodes \
        in self._dynamic_config._slot_collision_info.items():

        # Union of the parent atoms of every package in this slot.
        all_parent_atoms = set()
        for pkg in slot_nodes:
            parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
            if not parent_atoms:
                continue
            all_parent_atoms.update(parent_atoms)

        for pkg in slot_nodes:
            parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
            if parent_atoms is None:
                parent_atoms = set()
                self._dynamic_config._parent_atoms[pkg] = parent_atoms
            for parent_atom in all_parent_atoms:
                if parent_atom in parent_atoms:
                    continue
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                parent, atom = parent_atom
                atom_set = InternalPackageSet(
                    initial_atoms=(atom,), allow_repo=True)
                if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
                    parent_atoms.add(parent_atom)
                else:
                    # This atom matches some but not all packages in the
                    # slot, so it is a specific cause of the conflict.
                    self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
def _reinstall_for_flags(self, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
"""Return a set of flags that trigger reinstallation, or None if there
are no such flags."""
if "--newuse" in self._frozen_config.myopts or \
"--binpkg-respect-use" in self._frozen_config.myopts:
flags = set(orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags))
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
if flags:
return flags
elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
flags = orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use))
if flags:
return flags
return None
def _create_graph(self, allow_unsatisfied=False):
    """Drain the dependency stacks, adding packages and their deps to
    the graph. Returns 1 on success, 0 on an unresolvable failure."""
    dynamic = self._dynamic_config
    while dynamic._dep_stack or dynamic._dep_disjunctive_stack:
        self._spinner_update()
        # Plain dependencies are processed depth-first before any
        # disjunctive (|| ...) choices are expanded.
        while dynamic._dep_stack:
            dep = dynamic._dep_stack.pop()
            if isinstance(dep, Package):
                if not self._add_pkg_deps(dep,
                    allow_unsatisfied=allow_unsatisfied):
                    return 0
            elif not self._add_dep(dep,
                allow_unsatisfied=allow_unsatisfied):
                return 0
        if dynamic._dep_disjunctive_stack:
            if not self._pop_disjunction(allow_unsatisfied):
                return 0
    return 1
def _expand_set_args(self, input_args, add_to_digraph=False):
    """
    Iterate over a list of DependencyArg instances and yield all
    instances given in the input together with additional SetArg
    instances that are generated from nested sets.

    @param input_args: An iterable of DependencyArg instances
    @type input_args: Iterable
    @param add_to_digraph: If True then add SetArg instances
        to the digraph, in order to record parent -> child
        relationships from nested sets
    @type add_to_digraph: Boolean
    @rtype: Iterable
    @returns: All args given in the input together with additional
        SetArg instances that are generated from nested sets
    """

    # Guards against cycles between mutually-nested sets.
    traversed_set_args = set()

    for arg in input_args:
        if not isinstance(arg, SetArg):
            yield arg
            continue

        root_config = arg.root_config
        depgraph_sets = self._dynamic_config.sets[root_config.root]
        arg_stack = [arg]
        while arg_stack:
            arg = arg_stack.pop()
            if arg in traversed_set_args:
                continue
            traversed_set_args.add(arg)
            yield arg

            # Traverse nested sets and add them to the stack
            # if they're not already in the graph. Also, graph
            # edges between parent and nested sets.
            for token in arg.pset.getNonAtoms():
                if not token.startswith(SETPREFIX):
                    continue
                s = token[len(SETPREFIX):]
                nested_set = depgraph_sets.sets.get(s)
                if nested_set is None:
                    # Fall back to the root's configured sets.
                    nested_set = root_config.sets.get(s)
                if nested_set is not None:
                    nested_arg = SetArg(arg=token, pset=nested_set,
                        root_config=root_config)
                    arg_stack.append(nested_arg)
                    if add_to_digraph:
                        self._dynamic_config.digraph.add(nested_arg, arg,
                            priority=BlockerDepPriority.instance)
                        depgraph_sets.sets[nested_arg.name] = nested_arg.pset
def _add_dep(self, dep, allow_unsatisfied=False):
    """Resolve a single Dependency: register blockers, select a package
    for the atom, and add it to the graph via _add_pkg. Returns 1 on
    success (or acceptable skip) and 0 on failure.

    @param dep: the Dependency instance to resolve
    @param allow_unsatisfied: queue unsatisfiable deps instead of failing
    """
    debug = "--debug" in self._frozen_config.myopts
    buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
    nodeps = "--nodeps" in self._frozen_config.myopts
    empty = "empty" in self._dynamic_config.myparams
    deep = self._dynamic_config.myparams.get("deep", 0)
    recurse = empty or deep is True or dep.depth <= deep
    if dep.blocker:
        if not buildpkgonly and \
            not nodeps and \
            dep.parent not in self._dynamic_config._slot_collision_nodes:
            if dep.parent.onlydeps:
                # It's safe to ignore blockers if the
                # parent is an --onlydeps node.
                return 1
            # The blocker applies to the root where
            # the parent is or will be installed.
            blocker = Blocker(atom=dep.atom,
                eapi=dep.parent.metadata["EAPI"],
                priority=dep.priority, root=dep.parent.root)
            self._dynamic_config._blocker_parents.add(blocker, dep.parent)
        return 1

    if dep.child is None:
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)
    else:
        # The caller has selected a specific package
        # via self._minimize_packages().
        dep_pkg = dep.child
        existing_node = self._dynamic_config._slot_pkg_map[
            dep.root].get(dep_pkg.slot_atom)
        if existing_node is not dep_pkg:
            existing_node = None

    if not dep_pkg:
        if dep.priority.optional:
            # This could be an unnecessary build-time dep
            # pulled in by --with-bdeps=y.
            return 1
        if allow_unsatisfied:
            self._dynamic_config._unsatisfied_deps.append(dep)
            return 1
        self._dynamic_config._unsatisfied_deps_for_display.append(
            ((dep.root, dep.atom), {"myparent":dep.parent}))

        # The parent node should not already be in
        # runtime_pkg_mask, since that would trigger an
        # infinite backtracking loop.
        if self._dynamic_config._allow_backtracking:
            if dep.parent in self._dynamic_config._runtime_pkg_mask:
                if "--debug" in self._frozen_config.myopts:
                    writemsg(
                        "!!! backtracking loop detected: %s %s\n" % \
                        (dep.parent,
                        self._dynamic_config._runtime_pkg_mask[
                        dep.parent]), noiselevel=-1)
            elif not self.need_restart():
                # Do not backtrack if only USE have to be changed in
                # order to satisfy the dependency.
                dep_pkg, existing_node = \
                    self._select_package(dep.root, dep.atom.without_use,
                        onlydeps=dep.onlydeps)
                if dep_pkg is None:
                    self._dynamic_config._backtrack_infos["missing dependency"] = dep
                    self._dynamic_config._need_restart = True
                    if "--debug" in self._frozen_config.myopts:
                        msg = []
                        msg.append("")
                        msg.append("")
                        msg.append("backtracking due to unsatisfied dep:")
                        msg.append(" parent: %s" % dep.parent)
                        msg.append(" priority: %s" % dep.priority)
                        msg.append(" root: %s" % dep.root)
                        msg.append(" atom: %s" % dep.atom)
                        msg.append("")
                        writemsg_level("".join("%s\n" % l for l in msg),
                            noiselevel=-1, level=logging.DEBUG)

        return 0

    # In some cases, dep_check will return deps that shouldn't
    # be proccessed any further, so they are identified and
    # discarded here. Try to discard as few as possible since
    # discarded dependencies reduce the amount of information
    # available for optimization of merge order.
    if dep.priority.satisfied and \
        dep.priority.satisfied.visible and \
        not dep_pkg.installed and \
        not (existing_node or recurse):
        myarg = None
        if dep.root == self._frozen_config.target_root:
            try:
                myarg = next(self._iter_atoms_for_pkg(dep_pkg))
            except StopIteration:
                pass
            except portage.exception.InvalidDependString:
                if not dep_pkg.installed:
                    # This shouldn't happen since the package
                    # should have been masked.
                    raise

        if not myarg:
            # Existing child selection may not be valid unless
            # it's added to the graph immediately, since "complete"
            # mode may select a different child later.
            dep.child = None
            self._dynamic_config._ignored_deps.append(dep)
            return 1

    if not self._add_pkg(dep_pkg, dep):
        return 0
    return 1
def _add_pkg(self, pkg, dep):
    """Add a Package node to the digraph, wiring parent/child edges,
    recording parent atoms, and handling slot conflicts. Returns 1 on
    success and 0 when a slot conflict forces a backtracking restart.

    @param pkg: the Package being added
    @param dep: the Dependency that pulled pkg in, or None for arguments
    """
    myparent = None
    priority = None
    depth = 0
    if dep is None:
        dep = Dependency()
    else:
        myparent = dep.parent
        priority = dep.priority
        depth = dep.depth
    if priority is None:
        priority = DepPriority()
    """
    Fills the digraph with nodes comprised of packages to merge.
    mybigkey is the package spec of the package to merge.
    myparent is the package depending on mybigkey ( or None )
    addme = Should we add this package to the digraph or are we just looking at it's deps?
    Think --onlydeps, we need to ignore packages in that case.
    #stuff to add:
    #SLOT-aware emerge
    #IUSE-aware emerge -> USE DEP aware depgraph
    #"no downgrade" emerge
    """
    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self._dynamic_config.digraph

    # select the correct /var database that we'll be checking against
    vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
    pkgsettings = self._frozen_config.pkgsettings[pkg.root]

    arg_atoms = None
    # NOTE(review): "if True:" looks like a historical indentation
    # guard; kept as-is since removing it would change the diff only.
    if True:
        try:
            arg_atoms = list(self._iter_atoms_for_pkg(pkg))
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected
                raise
            del e

    if not pkg.onlydeps:
        if not pkg.installed and \
            "empty" not in self._dynamic_config.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True

        existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
        if existing_node:
            existing_node_matches = pkg.cpv == existing_node.cpv
            if existing_node_matches and \
                pkg != existing_node and \
                dep.atom is not None:
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                atom_set = InternalPackageSet(initial_atoms=[dep.atom],
                    allow_repo=True)
                if not atom_set.findAtomForPackage(existing_node, \
                    modified_use=self._pkg_use_enabled(existing_node)):
                    existing_node_matches = False
            if existing_node_matches:
                # The existing node can be reused.
                if arg_atoms:
                    for parent_atom in arg_atoms:
                        parent, atom = parent_atom
                        self._dynamic_config.digraph.add(existing_node, parent,
                            priority=priority)
                        self._add_parent_atom(existing_node, parent_atom)
                # If a direct circular dependency is not an unsatisfied
                # buildtime dependency then drop it here since otherwise
                # it can skew the merge order calculation in an unwanted
                # way.
                if existing_node != myparent or \
                    (priority.buildtime and not priority.satisfied):
                    self._dynamic_config.digraph.addnode(existing_node, myparent,
                        priority=priority)
                    if dep.atom is not None and dep.parent is not None:
                        self._add_parent_atom(existing_node,
                            (dep.parent, dep.atom))
                return 1
            else:
                # A slot conflict has occurred.
                # The existing node should not already be in
                # runtime_pkg_mask, since that would trigger an
                # infinite backtracking loop.
                if self._dynamic_config._allow_backtracking and \
                    existing_node in \
                    self._dynamic_config._runtime_pkg_mask:
                    if "--debug" in self._frozen_config.myopts:
                        writemsg(
                            "!!! backtracking loop detected: %s %s\n" % \
                            (existing_node,
                            self._dynamic_config._runtime_pkg_mask[
                            existing_node]), noiselevel=-1)
                elif self._dynamic_config._allow_backtracking and \
                    not self._accept_blocker_conflicts() and \
                    not self.need_restart():
                    self._add_slot_conflict(pkg)
                    if dep.atom is not None and dep.parent is not None:
                        self._add_parent_atom(pkg, (dep.parent, dep.atom))
                    if arg_atoms:
                        for parent_atom in arg_atoms:
                            parent, atom = parent_atom
                            self._add_parent_atom(pkg, parent_atom)
                    self._process_slot_conflicts()

                    backtrack_data = []
                    fallback_data = []
                    all_parents = set()
                    # The ordering of backtrack_data can make
                    # a difference here, because both mask actions may lead
                    # to valid, but different, solutions and the one with
                    # 'existing_node' masked is usually the better one. Because
                    # of that, we choose an order such that
                    # the backtracker will first explore the choice with
                    # existing_node masked. The backtracker reverses the
                    # order, so the order it uses is the reverse of the
                    # order shown here. See bug #339606.
                    for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
                        # For missed update messages, find out which
                        # atoms matched to_be_selected that did not
                        # match to_be_masked.
                        parent_atoms = \
                            self._dynamic_config._parent_atoms.get(to_be_selected, set())
                        if parent_atoms:
                            conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
                            if conflict_atoms:
                                parent_atoms = conflict_atoms

                        all_parents.update(parent_atoms)

                        all_match = True
                        for parent, atom in parent_atoms:
                            i = InternalPackageSet(initial_atoms=(atom,),
                                allow_repo=True)
                            if not i.findAtomForPackage(to_be_masked):
                                all_match = False
                                break

                        if to_be_selected >= to_be_masked:
                            # We only care about the parent atoms
                            # when they trigger a downgrade.
                            parent_atoms = set()

                        fallback_data.append((to_be_masked, parent_atoms))

                        if all_match:
                            # 'to_be_masked' does not violate any parent atom, which means
                            # there is no point in masking it.
                            pass
                        else:
                            backtrack_data.append((to_be_masked, parent_atoms))

                    if not backtrack_data:
                        # This shouldn't happen, but fall back to the old
                        # behavior if this gets triggered somehow.
                        backtrack_data = fallback_data

                    if len(backtrack_data) > 1:
                        # NOTE: Generally, we prefer to mask the higher
                        # version since this solves common cases in which a
                        # lower version is needed so that all dependencies
                        # will be satisfied (bug #337178). However, if
                        # existing_node happens to be installed then we
                        # mask that since this is a common case that is
                        # triggered when --update is not enabled.
                        if existing_node.installed:
                            pass
                        elif pkg > existing_node:
                            backtrack_data.reverse()

                    to_be_masked = backtrack_data[-1][0]

                    self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
                    self._dynamic_config._need_restart = True
                    if "--debug" in self._frozen_config.myopts:
                        msg = []
                        msg.append("")
                        msg.append("")
                        msg.append("backtracking due to slot conflict:")
                        if backtrack_data is fallback_data:
                            msg.append("!!! backtrack_data fallback")
                        msg.append(" first package: %s" % existing_node)
                        msg.append(" second package: %s" % pkg)
                        msg.append(" package to mask: %s" % to_be_masked)
                        msg.append(" slot: %s" % pkg.slot_atom)
                        msg.append(" parents: %s" % ", ".join( \
                            "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
                        msg.append("")
                        writemsg_level("".join("%s\n" % l for l in msg),
                            noiselevel=-1, level=logging.DEBUG)

                    return 0

                # A slot collision has occurred. Sometimes this coincides
                # with unresolvable blockers, so the slot collision will be
                # shown later if there are no unresolvable blockers.
                self._add_slot_conflict(pkg)
                slot_collision = True

        if slot_collision:
            # Now add this node to the graph so that self.display()
            # can show use flags and --tree portage.output. This node is
            # only being partially added to the graph. It must not be
            # allowed to interfere with the other nodes that have been
            # added. Do not overwrite data for existing nodes in
            # self._dynamic_config.mydbapi since that data will be used for blocker
            # validation.
            # Even though the graph is now invalid, continue to process
            # dependencies so that things like --fetchonly can still
            # function despite collisions.
            pass
        elif not previously_added:
            self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
            self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
            self._dynamic_config._highest_pkg_cache.clear()
            self._check_masks(pkg)

        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            # over this one.
            try:
                pkgsettings.setinst(pkg.cpv, pkg.metadata)
                # For consistency, also update the global virtuals.
                settings = self._frozen_config.roots[pkg.root].settings
                settings.unlock()
                settings.setinst(pkg.cpv, pkg.metadata)
                settings.lock()
            except portage.exception.InvalidDependString as e:
                if not pkg.installed:
                    # should have been masked before it was selected
                    raise

        if arg_atoms:
            self._dynamic_config._set_nodes.add(pkg)

        # Do this even when addme is False (--onlydeps) so that the
        # parent/child relationship is always known in case
        # self._show_slot_collision_notice() needs to be called later.
        self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
        if dep.atom is not None and dep.parent is not None:
            self._add_parent_atom(pkg, (dep.parent, dep.atom))

        if arg_atoms:
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._dynamic_config.digraph.add(pkg, parent, priority=priority)
                self._add_parent_atom(pkg, parent_atom)

    """ This section determines whether we go deeper into dependencies or not.
    We want to go deeper on a few occasions:
    Installing package A, we need to make sure package A's deps are met.
    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    """
    if arg_atoms:
        # Packages given as arguments restart the depth counter.
        depth = 0
    pkg.depth = depth
    deep = self._dynamic_config.myparams.get("deep", 0)
    empty = "empty" in self._dynamic_config.myparams
    recurse = empty or deep is True or depth + 1 <= deep
    dep_stack = self._dynamic_config._dep_stack
    if "recurse" not in self._dynamic_config.myparams:
        return 1
    elif pkg.installed and not recurse:
        # Deps of already-installed packages outside the recursion
        # budget are parked on the ignored-deps list.
        dep_stack = self._dynamic_config._ignored_deps

    self._spinner_update()

    if not previously_added:
        dep_stack.append(pkg)
    return 1
def _check_masks(self, pkg):
slot_key = (pkg.root, pkg.slot_atom)
# Check for upgrades in the same slot that are
# masked due to a LICENSE change in a newer
# version that is not masked for any other reason.
other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
if other_pkg is not None and pkg < other_pkg:
self._dynamic_config._masked_license_updates.add(other_pkg)
def _add_parent_atom(self, pkg, parent_atom):
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if parent_atoms is None:
parent_atoms = set()
self._dynamic_config._parent_atoms[pkg] = parent_atoms
parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
self._dynamic_config._slot_collision_nodes.add(pkg)
slot_key = (pkg.slot_atom, pkg.root)
slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
if slot_nodes is None:
slot_nodes = set()
slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
slot_nodes.add(pkg)
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
mytype = pkg.type_name
myroot = pkg.root
mykey = pkg.cpv
metadata = pkg.metadata
myuse = self._pkg_use_enabled(pkg)
jbigkey = pkg
depth = pkg.depth + 1
removal_action = "remove" in self._dynamic_config.myparams
edepend={}
depkeys = ["DEPEND","RDEPEND","PDEPEND"]
for k in depkeys:
edepend[k] = metadata[k]
if not pkg.built and \
"--buildpkgonly" in self._frozen_config.myopts and \
"deep" not in self._dynamic_config.myparams and \
"empty" not in self._dynamic_config.myparams:
edepend["RDEPEND"] = ""
edepend["PDEPEND"] = ""
if pkg.built and not removal_action:
if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
# Pull in build time deps as requested, but marked them as
# "optional" since they are not strictly required. This allows
# more freedom in the merge order calculation for solving
# circular dependencies. Don't convert to PDEPEND since that
# could make --with-bdeps=y less effective if it is used to
# adjust merge order to prevent built_with_use() calls from
# failing.
pass
else:
# built packages do not have build time dependencies.
edepend["DEPEND"] = ""
if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
edepend["DEPEND"] = ""
if removal_action:
depend_root = myroot
else:
depend_root = "/"
root_deps = self._frozen_config.myopts.get("--root-deps")
if root_deps is not None:
if root_deps is True:
depend_root = myroot
elif root_deps == "rdeps":
edepend["DEPEND"] = ""
deps = (
(depend_root, edepend["DEPEND"],
self._priority(buildtime=(not pkg.built),
optional=pkg.built),
pkg.built),
(myroot, edepend["RDEPEND"],
self._priority(runtime=True),
False),
(myroot, edepend["PDEPEND"],
self._priority(runtime_post=True),
False)
)
debug = "--debug" in self._frozen_config.myopts
strict = mytype != "installed"
try:
for dep_root, dep_string, dep_priority, ignore_blockers in deps:
if not dep_string:
continue
if debug:
writemsg_level("\nParent: %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Depstring: %s\n" % (dep_string,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Priority: %s\n" % (dep_priority,),
noiselevel=-1, level=logging.DEBUG)
try:
dep_string = portage.dep.use_reduce(dep_string,
uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
raise
del e
# Try again, but omit the is_valid_flag argument, since
# invalid USE conditionals are a common problem and it's
# practical to ignore this issue for installed packages.
try:
dep_string = portage.dep.use_reduce(dep_string,
uselist=self._pkg_use_enabled(pkg))
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
del e
continue
try:
dep_string = list(self._queue_disjunctive_deps(
pkg, dep_root, dep_priority, dep_string))
except portage.exception.InvalidDependString as e:
if pkg.installed:
self._dynamic_config._masked_installed.add(pkg)
del e
continue
# should have been masked before it was selected
raise
if not dep_string:
continue
dep_string = portage.dep.paren_enclose(dep_string)
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string,
allow_unsatisfied, ignore_blockers=ignore_blockers):
return 0
except portage.exception.AmbiguousPackageName as e:
pkgs = e.args[0]
portage.writemsg("\n\n!!! An atom in the dependencies " + \
"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
for cpv in pkgs:
portage.writemsg(" %s\n" % cpv, noiselevel=-1)
portage.writemsg("\n", noiselevel=-1)
if mytype == "binary":
portage.writemsg(
"!!! This binary package cannot be installed: '%s'\n" % \
mykey, noiselevel=-1)
elif mytype == "ebuild":
portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
myebuild, mylocation = portdb.findname2(mykey, myrepo = pkg.repo)
portage.writemsg("!!! This ebuild cannot be installed: " + \
"'%s'\n" % myebuild, noiselevel=-1)
portage.writemsg("!!! Please notify the package maintainer " + \
"that atoms must be fully-qualified.\n", noiselevel=-1)
return 0
self._dynamic_config._traversed_pkg_deps.add(pkg)
return 1
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
allow_unsatisfied, ignore_blockers=False):
depth = pkg.depth + 1
debug = "--debug" in self._frozen_config.myopts
strict = pkg.type_name != "installed"
if debug:
writemsg_level("\nParent: %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Depstring: %s\n" % (dep_string,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Priority: %s\n" % (dep_priority,),
noiselevel=-1, level=logging.DEBUG)
try:
selected_atoms = self._select_atoms(dep_root,
dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
strict=strict, priority=dep_priority)
except portage.exception.InvalidDependString as e:
if pkg.installed:
self._dynamic_config._masked_installed.add(pkg)
return 1
# should have been masked before it was selected
raise
if debug:
writemsg_level("Candidates: %s\n" % \
([str(x) for x in selected_atoms[pkg]],),
noiselevel=-1, level=logging.DEBUG)
root_config = self._frozen_config.roots[dep_root]
vardb = root_config.trees["vartree"].dbapi
for atom, child in self._minimize_children(
pkg, dep_priority, root_config, selected_atoms[pkg]):
if ignore_blockers and atom.blocker:
# For --with-bdeps, ignore build-time only blockers
# that originate from built packages.
continue
mypriority = dep_priority.copy()
if not atom.blocker:
inst_pkgs = vardb.match_pkgs(atom)
if inst_pkgs:
for inst_pkg in inst_pkgs:
if self._pkg_visibility_check(inst_pkg):
# highest visible
mypriority.satisfied = inst_pkg
break
if not mypriority.satisfied:
# none visible, so use highest
mypriority.satisfied = inst_pkgs[0]
if not self._add_dep(Dependency(atom=atom,
blocker=atom.blocker, child=child, depth=depth, parent=pkg,
priority=mypriority, root=dep_root),
allow_unsatisfied=allow_unsatisfied):
return 0
selected_atoms.pop(pkg)
# Add selected indirect virtual deps to the graph. This
# takes advantage of circular dependency avoidance that's done
# by dep_zapdeps. We preserve actual parent/child relationships
# here in order to avoid distorting the dependency graph like
# <=portage-2.1.6.x did.
for virt_pkg, atoms in selected_atoms.items():
if debug:
writemsg_level("Candidates: %s: %s\n" % \
(virt_pkg.cpv, [str(x) for x in atoms]),
noiselevel=-1, level=logging.DEBUG)
# Just assume depth + 1 here for now, though it's not entirely
# accurate since multilple levels of indirect virtual deps may
# have been traversed. The _add_pkg call will reset the depth to
# 0 if this package happens to match an argument.
if not self._add_pkg(virt_pkg,
Dependency(atom=Atom('=' + virt_pkg.cpv),
depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),
root=dep_root)):
return 0
for atom, child in self._minimize_children(
pkg, self._priority(runtime=True), root_config, atoms):
# This is a GLEP 37 virtual, so its deps are all runtime.
mypriority = self._priority(runtime=True)
if not atom.blocker:
inst_pkgs = vardb.match_pkgs(atom)
if inst_pkgs:
for inst_pkg in inst_pkgs:
if self._pkg_visibility_check(inst_pkg):
# highest visible
mypriority.satisfied = inst_pkg
break
if not mypriority.satisfied:
# none visible, so use highest
mypriority.satisfied = inst_pkgs[0]
if not self._add_dep(Dependency(atom=atom,
blocker=atom.blocker, child=child, depth=virt_pkg.depth,
parent=virt_pkg, priority=mypriority, root=dep_root),
allow_unsatisfied=allow_unsatisfied):
return 0
if debug:
writemsg_level("Exiting... %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
return 1
def _minimize_children(self, parent, priority, root_config, atoms):
"""
Selects packages to satisfy the given atoms, and minimizes the
number of selected packages. This serves to identify and eliminate
redundant package selections when multiple atoms happen to specify
a version range.
"""
atom_pkg_map = {}
for atom in atoms:
if atom.blocker:
yield (atom, None)
continue
dep_pkg, existing_node = self._select_package(
root_config.root, atom)
if dep_pkg is None:
yield (atom, None)
continue
atom_pkg_map[atom] = dep_pkg
if len(atom_pkg_map) < 2:
for item in atom_pkg_map.items():
yield item
return
cp_pkg_map = {}
pkg_atom_map = {}
for atom, pkg in atom_pkg_map.items():
pkg_atom_map.setdefault(pkg, set()).add(atom)
cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
for cp, pkgs in cp_pkg_map.items():
if len(pkgs) < 2:
for pkg in pkgs:
for atom in pkg_atom_map[pkg]:
yield (atom, pkg)
continue
# Use a digraph to identify and eliminate any
# redundant package selections.
atom_pkg_graph = digraph()
cp_atoms = set()
for pkg1 in pkgs:
for atom in pkg_atom_map[pkg1]:
cp_atoms.add(atom)
atom_pkg_graph.add(pkg1, atom)
atom_set = InternalPackageSet(initial_atoms=(atom,),
allow_repo=True)
for pkg2 in pkgs:
if pkg2 is pkg1:
continue
if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
atom_pkg_graph.add(pkg2, atom)
for pkg in pkgs:
eliminate_pkg = True
for atom in atom_pkg_graph.parent_nodes(pkg):
if len(atom_pkg_graph.child_nodes(atom)) < 2:
eliminate_pkg = False
break
if eliminate_pkg:
atom_pkg_graph.remove(pkg)
# Yield < and <= atoms first, since those are more likely to
# cause slot conflicts, and we want those atoms to be displayed
# in the resulting slot conflict message (see bug #291142).
less_than = []
not_less_than = []
for atom in cp_atoms:
if atom.operator in ('<', '<='):
less_than.append(atom)
else:
not_less_than.append(atom)
for atom in chain(less_than, not_less_than):
child_pkgs = atom_pkg_graph.child_nodes(atom)
yield (atom, child_pkgs[0])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
"""
Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
Yields non-disjunctive deps. Raises InvalidDependString when
necessary.
"""
i = 0
while i < len(dep_struct):
x = dep_struct[i]
if isinstance(x, list):
for y in self._queue_disjunctive_deps(
pkg, dep_root, dep_priority, x):
yield y
elif x == "||":
self._queue_disjunction(pkg, dep_root, dep_priority,
[ x, dep_struct[ i + 1 ] ] )
i += 1
else:
try:
x = portage.dep.Atom(x)
except portage.exception.InvalidAtom:
if not pkg.installed:
raise portage.exception.InvalidDependString(
"invalid atom: '%s'" % x)
else:
# Note: Eventually this will check for PROPERTIES=virtual
# or whatever other metadata gets implemented for this
# purpose.
if x.cp.startswith('virtual/'):
self._queue_disjunction( pkg, dep_root,
dep_priority, [ str(x) ] )
else:
yield str(x)
i += 1
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
self._dynamic_config._dep_disjunctive_stack.append(
(pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
"""
Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
populate self._dynamic_config._dep_stack.
"""
pkg, dep_root, dep_priority, dep_struct = \
self._dynamic_config._dep_disjunctive_stack.pop()
dep_string = portage.dep.paren_enclose(dep_struct)
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
return 0
return 1
def _priority(self, **kwargs):
if "remove" in self._dynamic_config.myparams:
priority_constructor = UnmergeDepPriority
else:
priority_constructor = DepPriority
return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
"""
@param root_config: a root config instance
@type root_config: RootConfig
@param atom_without_category: an atom without a category component
@type atom_without_category: String
@rtype: list
@returns: a list of atoms containing categories (possibly empty)
"""
null_cp = portage.dep_getkey(insert_category_into_atom(
atom_without_category, "null"))
cat, atom_pn = portage.catsplit(null_cp)
dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
categories = set()
for db, pkg_type, built, installed, db_keys in dbs:
for cat in db.categories:
if db.cp_list("%s/%s" % (cat, atom_pn)):
categories.add(cat)
deps = []
for cat in categories:
deps.append(Atom(insert_category_into_atom(
atom_without_category, cat), allow_repo=True))
return deps
def _have_new_virt(self, root, atom_cp):
ret = False
for db, pkg_type, built, installed, db_keys in \
self._dynamic_config._filtered_trees[root]["dbs"]:
if db.cp_list(atom_cp):
ret = True
break
return ret
def _iter_atoms_for_pkg(self, pkg):
depgraph_sets = self._dynamic_config.sets[pkg.root]
atom_arg_map = depgraph_sets.atom_arg_map
root_config = self._frozen_config.roots[pkg.root]
for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
if atom.cp != pkg.cp and \
self._have_new_virt(pkg.root, atom.cp):
continue
visible_pkgs = \
self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
visible_pkgs.reverse() # descending order
higher_slot = None
for visible_pkg in visible_pkgs:
if visible_pkg.cp != atom.cp:
continue
if pkg >= visible_pkg:
# This is descending order, and we're not
# interested in any versions <= pkg given.
break
if pkg.slot_atom != visible_pkg.slot_atom:
higher_slot = visible_pkg
break
if higher_slot is not None:
continue
for arg in atom_arg_map[(atom, pkg.root)]:
if isinstance(arg, PackageArg) and \
arg.package != pkg:
continue
yield arg, atom
def select_files(self, myfiles):
"""Given a list of .tbz2s, .ebuilds sets, and deps, populate
self._dynamic_config._initial_arg_list and call self._resolve to create the
appropriate depgraph and return a favorite list."""
self._load_vdb()
debug = "--debug" in self._frozen_config.myopts
root_config = self._frozen_config.roots[self._frozen_config.target_root]
sets = root_config.sets
depgraph_sets = self._dynamic_config.sets[root_config.root]
myfavorites=[]
myroot = self._frozen_config.target_root
dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[myroot]
args = []
onlydeps = "--onlydeps" in self._frozen_config.myopts
lookup_owners = []
for x in myfiles:
ext = os.path.splitext(x)[1]
if ext==".tbz2":
if not os.path.exists(x):
if os.path.exists(
os.path.join(pkgsettings["PKGDIR"], "All", x)):
x = os.path.join(pkgsettings["PKGDIR"], "All", x)
elif os.path.exists(
os.path.join(pkgsettings["PKGDIR"], x)):
x = os.path.join(pkgsettings["PKGDIR"], x)
else:
writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
return 0, myfavorites
mytbz2=portage.xpak.tbz2(x)
mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
if os.path.realpath(x) != \
os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
return 0, myfavorites
pkg = self._pkg(mykey, "binary", root_config,
onlydeps=onlydeps)
args.append(PackageArg(arg=x, package=pkg,
root_config=root_config))
elif ext==".ebuild":
ebuild_path = portage.util.normalize_path(os.path.abspath(x))
pkgdir = os.path.dirname(ebuild_path)
tree_root = os.path.dirname(os.path.dirname(pkgdir))
cp = pkgdir[len(tree_root)+1:]
e = portage.exception.PackageNotFound(
("%s is not in a valid portage tree " + \
"hierarchy or does not exist") % x)
if not portage.isvalidatom(cp):
raise e
cat = portage.catsplit(cp)[0]
mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
if not portage.isvalidatom("="+mykey):
raise e
ebuild_path = portdb.findname(mykey)
if ebuild_path:
if ebuild_path != os.path.join(os.path.realpath(tree_root),
cp, os.path.basename(ebuild_path)):
writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
return 0, myfavorites
if mykey not in portdb.xmatch(
"match-visible", portage.cpv_getkey(mykey)):
writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
"Continuing...")
else:
raise portage.exception.PackageNotFound(
"%s is not in a valid portage tree hierarchy or does not exist" % x)
pkg = self._pkg(mykey, "ebuild", root_config,
onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
args.append(PackageArg(arg=x, package=pkg,
root_config=root_config))
elif x.startswith(os.path.sep):
if not x.startswith(myroot):
portage.writemsg(("\n\n!!! '%s' does not start with" + \
" $ROOT.\n") % x, noiselevel=-1)
return 0, []
# Queue these up since it's most efficient to handle
# multiple files in a single iter_owners() call.
lookup_owners.append(x)
else:
if x in ("system", "world"):
x = SETPREFIX + x
if x.startswith(SETPREFIX):
s = x[len(SETPREFIX):]
if s not in sets:
raise portage.exception.PackageSetNotFound(s)
if s in depgraph_sets.sets:
continue
pset = sets[s]
depgraph_sets.sets[s] = pset
args.append(SetArg(arg=x, pset=pset,
root_config=root_config))
continue
if not is_valid_package_atom(x, allow_repo=True):
portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
noiselevel=-1)
portage.writemsg("!!! Please check ebuild(5) for full details.\n")
portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
return (0,[])
# Don't expand categories or old-style virtuals here unless
# necessary. Expansion of old-style virtuals here causes at
# least the following problems:
# 1) It's more difficult to determine which set(s) an atom
# came from, if any.
# 2) It takes away freedom from the resolver to choose other
# possible expansions when necessary.
if "/" in x:
args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
root_config=root_config))
continue
expanded_atoms = self._dep_expand(root_config, x)
installed_cp_set = set()
for atom in expanded_atoms:
if vardb.cp_list(atom.cp):
installed_cp_set.add(atom.cp)
if len(installed_cp_set) > 1:
non_virtual_cps = set()
for atom_cp in installed_cp_set:
if not atom_cp.startswith("virtual/"):
non_virtual_cps.add(atom_cp)
if len(non_virtual_cps) == 1:
installed_cp_set = non_virtual_cps
if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
installed_cp = next(iter(installed_cp_set))
for atom in expanded_atoms:
if atom.cp == installed_cp:
available = False
for pkg in self._iter_match_pkgs_any(
root_config, atom.without_use,
onlydeps=onlydeps):
if not pkg.installed:
available = True
break
if available:
expanded_atoms = [atom]
break
# If a non-virtual package and one or more virtual packages
# are in expanded_atoms, use the non-virtual package.
if len(expanded_atoms) > 1:
number_of_virtuals = 0
for expanded_atom in expanded_atoms:
if expanded_atom.cp.startswith("virtual/"):
number_of_virtuals += 1
else:
candidate = expanded_atom
if len(expanded_atoms) - number_of_virtuals == 1:
expanded_atoms = [ candidate ]
if len(expanded_atoms) > 1:
writemsg("\n\n", noiselevel=-1)
ambiguous_package_name(x, expanded_atoms, root_config,
self._frozen_config.spinner, self._frozen_config.myopts)
return False, myfavorites
if expanded_atoms:
atom = expanded_atoms[0]
else:
null_atom = Atom(insert_category_into_atom(x, "null"),
allow_repo=True)
cat, atom_pn = portage.catsplit(null_atom.cp)
virts_p = root_config.settings.get_virts_p().get(atom_pn)
if virts_p:
# Allow the depgraph to choose which virtual.
atom = Atom(null_atom.replace('null/', 'virtual/', 1),
allow_repo=True)
else:
atom = null_atom
args.append(AtomArg(arg=x, atom=atom,
root_config=root_config))
if lookup_owners:
relative_paths = []
search_for_multiple = False
if len(lookup_owners) > 1:
search_for_multiple = True
for x in lookup_owners:
if not search_for_multiple and os.path.isdir(x):
search_for_multiple = True
relative_paths.append(x[len(myroot)-1:])
owners = set()
for pkg, relative_path in \
real_vardb._owners.iter_owners(relative_paths):
owners.add(pkg.mycpv)
if not search_for_multiple:
break
if not owners:
portage.writemsg(("\n\n!!! '%s' is not claimed " + \
"by any package.\n") % lookup_owners[0], noiselevel=-1)
return 0, []
for cpv in owners:
slot = vardb.aux_get(cpv, ["SLOT"])[0]
if not slot:
# portage now masks packages with missing slot, but it's
# possible that one was installed by an older version
atom = Atom(portage.cpv_getkey(cpv))
else:
atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
args.append(AtomArg(arg=atom, atom=atom,
root_config=root_config))
if "--update" in self._frozen_config.myopts:
# In some cases, the greedy slots behavior can pull in a slot that
# the user would want to uninstall due to it being blocked by a
# newer version in a different slot. Therefore, it's necessary to
# detect and discard any that should be uninstalled. Each time
# that arguments are updated, package selections are repeated in
# order to ensure consistency with the current arguments:
#
# 1) Initialize args
# 2) Select packages and generate initial greedy atoms
# 3) Update args with greedy atoms
# 4) Select packages and generate greedy atoms again, while
# accounting for any blockers between selected packages
# 5) Update args with revised greedy atoms
self._set_args(args)
greedy_args = []
for arg in args:
greedy_args.append(arg)
if not isinstance(arg, AtomArg):
continue
for atom in self._greedy_slots(arg.root_config, arg.atom):
greedy_args.append(
AtomArg(arg=arg.arg, atom=atom,
root_config=arg.root_config))
self._set_args(greedy_args)
del greedy_args
# Revise greedy atoms, accounting for any blockers
# between selected packages.
revised_greedy_args = []
for arg in args:
revised_greedy_args.append(arg)
if not isinstance(arg, AtomArg):
continue
for atom in self._greedy_slots(arg.root_config, arg.atom,
blocker_lookahead=True):
revised_greedy_args.append(
AtomArg(arg=arg.arg, atom=atom,
root_config=arg.root_config))
args = revised_greedy_args
del revised_greedy_args
self._set_args(args)
myfavorites = set(myfavorites)
for arg in args:
if isinstance(arg, (AtomArg, PackageArg)):
myfavorites.add(arg.atom)
elif isinstance(arg, SetArg):
myfavorites.add(arg.arg)
myfavorites = list(myfavorites)
if debug:
portage.writemsg("\n", noiselevel=-1)
# Order needs to be preserved since a feature of --nodeps
# is to allow the user to force a specific merge order.
self._dynamic_config._initial_arg_list = args[:]
return self._resolve(myfavorites)
def _resolve(self, myfavorites):
"""Given self._dynamic_config._initial_arg_list, pull in the root nodes,
call self._creategraph to process theier deps and return
a favorite list."""
debug = "--debug" in self._frozen_config.myopts
onlydeps = "--onlydeps" in self._frozen_config.myopts
myroot = self._frozen_config.target_root
pkgsettings = self._frozen_config.pkgsettings[myroot]
pprovideddict = pkgsettings.pprovideddict
virtuals = pkgsettings.getvirtuals()
for arg in self._expand_set_args(
self._dynamic_config._initial_arg_list,
add_to_digraph=True):
for atom in arg.pset.getAtoms():
self._spinner_update()
dep = Dependency(atom=atom, onlydeps=onlydeps,
root=myroot, parent=arg)
try:
pprovided = pprovideddict.get(atom.cp)
if pprovided and portage.match_from_list(atom, pprovided):
# A provided package has been specified on the command line.
self._dynamic_config._pprovided_args.append((arg, atom))
continue
if isinstance(arg, PackageArg):
if not self._add_pkg(arg.package, dep) or \
not self._create_graph():
if not self._dynamic_config._need_restart:
sys.stderr.write(("\n\n!!! Problem " + \
"resolving dependencies for %s\n") % \
arg.arg)
return 0, myfavorites
continue
if debug:
portage.writemsg(" Arg: %s\n Atom: %s\n" % \
(arg, atom), noiselevel=-1)
pkg, existing_node = self._select_package(
myroot, atom, onlydeps=onlydeps)
if not pkg:
pprovided_match = False
for virt_choice in virtuals.get(atom.cp, []):
expanded_atom = portage.dep.Atom(
atom.replace(atom.cp, virt_choice.cp, 1))
pprovided = pprovideddict.get(expanded_atom.cp)
if pprovided and \
portage.match_from_list(expanded_atom, pprovided):
# A provided package has been
# specified on the command line.
self._dynamic_config._pprovided_args.append((arg, atom))
pprovided_match = True
break
if pprovided_match:
continue
if not (isinstance(arg, SetArg) and \
arg.name in ("selected", "system", "world")):
self._dynamic_config._unsatisfied_deps_for_display.append(
((myroot, atom), {"myparent" : arg}))
return 0, myfavorites
self._dynamic_config._missing_args.append((arg, atom))
continue
if atom.cp != pkg.cp:
# For old-style virtuals, we need to repeat the
# package.provided check against the selected package.
expanded_atom = atom.replace(atom.cp, pkg.cp)
pprovided = pprovideddict.get(pkg.cp)
if pprovided and \
portage.match_from_list(expanded_atom, pprovided):
# A provided package has been
# specified on the command line.
self._dynamic_config._pprovided_args.append((arg, atom))
continue
if pkg.installed and "selective" not in self._dynamic_config.myparams:
self._dynamic_config._unsatisfied_deps_for_display.append(
((myroot, atom), {"myparent" : arg}))
# Previous behavior was to bail out in this case, but
# since the dep is satisfied by the installed package,
# it's more friendly to continue building the graph
# and just show a warning message. Therefore, only bail
# out here if the atom is not from either the system or
# world set.
if not (isinstance(arg, SetArg) and \
arg.name in ("selected", "system", "world")):
return 0, myfavorites
# Add the selected package to the graph as soon as possible
# so that later dep_check() calls can use it as feedback
# for making more consistent atom selections.
if not self._add_pkg(pkg, dep):
if self._dynamic_config._need_restart:
pass
elif isinstance(arg, SetArg):
sys.stderr.write(("\n\n!!! Problem resolving " + \
"dependencies for %s from %s\n") % \
(atom, arg.arg))
else:
sys.stderr.write(("\n\n!!! Problem resolving " + \
"dependencies for %s\n") % atom)
return 0, myfavorites
except portage.exception.MissingSignature as e:
portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
return 0, myfavorites
except portage.exception.InvalidSignature as e:
portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
return 0, myfavorites
except SystemExit as e:
raise # Needed else can't exit
except Exception as e:
writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
raise
# Now that the root packages have been added to the graph,
# process the dependencies.
if not self._create_graph():
return 0, myfavorites
try:
self.altlist()
except self._unknown_internal_error:
return False, myfavorites
if set(self._dynamic_config.digraph).intersection( \
self._dynamic_config._needed_unstable_keywords) or \
set(self._dynamic_config.digraph).intersection( \
self._dynamic_config._needed_use_config_changes) or \
set(self._dynamic_config.digraph).intersection( \
self._dynamic_config._needed_license_changes) :
#We failed if the user needs to change the configuration
self._dynamic_config._success_without_autounmask = True
return False, myfavorites
# We're true here unless we are missing binaries.
return (True, myfavorites)
def _set_args(self, args):
"""
Create the "__non_set_args__" package set from atoms and packages given as
arguments. This method can be called multiple times if necessary.
The package selection cache is automatically invalidated, since
arguments influence package selections.
"""
set_atoms = {}
non_set_atoms = {}
for root in self._dynamic_config.sets:
depgraph_sets = self._dynamic_config.sets[root]
depgraph_sets.sets.setdefault('__non_set_args__',
InternalPackageSet(allow_repo=True)).clear()
depgraph_sets.atoms.clear()
depgraph_sets.atom_arg_map.clear()
set_atoms[root] = []
non_set_atoms[root] = []
# We don't add set args to the digraph here since that
# happens at a later stage and we don't want to make
# any state changes here that aren't reversed by a
# another call to this method.
for arg in self._expand_set_args(args, add_to_digraph=False):
atom_arg_map = self._dynamic_config.sets[
arg.root_config.root].atom_arg_map
if isinstance(arg, SetArg):
atom_group = set_atoms[arg.root_config.root]
else:
atom_group = non_set_atoms[arg.root_config.root]
for atom in arg.pset.getAtoms():
atom_group.append(atom)
atom_key = (atom, arg.root_config.root)
refs = atom_arg_map.get(atom_key)
if refs is None:
refs = []
atom_arg_map[atom_key] = refs
if arg not in refs:
refs.append(arg)
for root in self._dynamic_config.sets:
depgraph_sets = self._dynamic_config.sets[root]
depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
non_set_atoms.get(root, [])))
depgraph_sets.sets['__non_set_args__'].update(
non_set_atoms.get(root, []))
# Invalidate the package selection cache, since
# arguments influence package selections.
self._dynamic_config._highest_pkg_cache.clear()
for trees in self._dynamic_config._filtered_trees.values():
trees["porttree"].dbapi._clear_cache()
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
	"""
	Return a list of slot atoms corresponding to installed slots that
	differ from the slot of the highest visible match. When
	blocker_lookahead is True, slot atoms that would trigger a blocker
	conflict are automatically discarded, potentially allowing automatic
	uninstallation of older slots when appropriate.
	"""
	highest_pkg, in_graph = self._select_package(root_config.root, atom)
	if highest_pkg is None:
		return []
	vardb = root_config.trees["vartree"].dbapi
	# Collect the installed slots for this cp, plus the slot of the
	# highest visible match.
	slots = set([highest_pkg.metadata["SLOT"]])
	for cpv in vardb.match(atom):
		# don't mix new virtuals with old virtuals
		if portage.cpv_getkey(cpv) == highest_pkg.cp:
			slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
	if len(slots) == 1:
		return []
	slots.discard(highest_pkg.metadata["SLOT"])
	greedy_pkgs = []
	for slot in slots:
		slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
		pkg, in_graph = self._select_package(root_config.root, slot_atom)
		if pkg is not None and \
			pkg.cp == highest_pkg.cp and pkg < highest_pkg:
			greedy_pkgs.append(pkg)
	if not greedy_pkgs:
		return []
	if not blocker_lookahead:
		return [pkg.slot_atom for pkg in greedy_pkgs]
	# Compute the blocker atoms declared by each candidate (and by the
	# highest visible match) so conflicting slots can be discarded.
	blockers = {}
	dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
	for pkg in greedy_pkgs + [highest_pkg]:
		dep_str = " ".join(pkg.metadata[k] for k in dep_keys)
		try:
			selected_atoms = self._select_atoms(
				pkg.root, dep_str, self._pkg_use_enabled(pkg),
				parent=pkg, strict=True)
		except portage.exception.InvalidDependString:
			# Packages with unparseable deps get no blockers entry and
			# are filtered out below.
			continue
		blocker_atoms = [x for atoms in selected_atoms.values()
			for x in atoms if x.blocker]
		blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
	if highest_pkg not in blockers:
		return []

	def _conflicts(pkg_a, pkg_b):
		# True if either package declares a blocker matching the other.
		return bool(
			blockers[pkg_a].findAtomForPackage(pkg_b,
				modified_use=self._pkg_use_enabled(pkg_b)) or
			blockers[pkg_b].findAtomForPackage(pkg_a,
				modified_use=self._pkg_use_enabled(pkg_a)))

	# Drop packages with invalid deps, then those that conflict with
	# the highest visible match.
	greedy_pkgs = [pkg for pkg in greedy_pkgs
		if pkg in blockers and not _conflicts(pkg, highest_pkg)]
	if not greedy_pkgs:
		return []
	# If two packages conflict, discard the lower version.
	discard_pkgs = set()
	greedy_pkgs.sort(reverse=True)
	for i, pkg1 in enumerate(greedy_pkgs):
		if pkg1 in discard_pkgs:
			continue
		for pkg2 in greedy_pkgs[i + 1:]:
			if pkg2 in discard_pkgs:
				continue
			if _conflicts(pkg1, pkg2):
				# pkg1 > pkg2
				discard_pkgs.add(pkg2)
	return [pkg.slot_atom for pkg in greedy_pkgs
		if pkg not in discard_pkgs]
def _select_atoms_from_graph(self, *pargs, **kwargs):
	"""
	Prefer atoms matching packages that have already been
	added to the graph or those that are installed and have
	not been scheduled for replacement.
	"""
	# Same as _select_atoms_highest_available, but matching against
	# the graph trees instead of the filtered trees.
	graph_kwargs = dict(kwargs)
	graph_kwargs["trees"] = self._dynamic_config._graph_trees
	return self._select_atoms_highest_available(*pargs, **graph_kwargs)
def _select_atoms_highest_available(self, root, depstring,
	myuse=None, parent=None, strict=True, trees=None, priority=None):
	"""
	Evaluate a dependency string and select the atoms to satisfy,
	preferring the highest available matches.

	This will raise InvalidDependString if necessary. If trees is
	None then self._dynamic_config._filtered_trees is used.

	@param root: root path whose package settings and trees are used
	@param depstring: dependency string to evaluate
	@param myuse: enabled USE flags used to evaluate USE conditionals
	@param parent: the Package that requested the dependencies, if any
	@param strict: passed through via dep_check's trees state (unused
		here directly; kept for interface compatibility)
	@param trees: optional trees mapping overriding the default
	@param priority: optional priority recorded in the trees mapping
		for the duration of the dep_check call
	@return: if parent is None, the flat atom list from dep_check;
		otherwise a dict mapping each package (the parent, or the
		provider of a chosen virtual) to the atoms it contributed
	"""
	pkgsettings = self._frozen_config.pkgsettings[root]
	if trees is None:
		trees = self._dynamic_config._filtered_trees
	mytrees = trees[root]
	atom_graph = digraph()
	# Temporarily disable autounmask so that || preferences
	# account for masking and USE settings.
	_autounmask_backup = self._dynamic_config._autounmask
	self._dynamic_config._autounmask = False
	mytrees["pkg_use_enabled"] = self._pkg_use_enabled
	try:
		# dep_check communicates through keys stashed in the trees
		# mapping; they must be removed again afterwards.
		if parent is not None:
			mytrees["parent"] = parent
			mytrees["atom_graph"] = atom_graph
		if priority is not None:
			mytrees["priority"] = priority
		mycheck = portage.dep_check(depstring, None,
			pkgsettings, myuse=myuse,
			myroot=root, trees=trees)
	finally:
		# Always restore global/shared state, even if dep_check raises.
		self._dynamic_config._autounmask = _autounmask_backup
		del mytrees["pkg_use_enabled"]
		if parent is not None:
			mytrees.pop("parent")
			mytrees.pop("atom_graph")
		if priority is not None:
			mytrees.pop("priority")
	if not mycheck[0]:
		raise portage.exception.InvalidDependString(mycheck[1])
	if parent is None:
		selected_atoms = mycheck[1]
	else:
		chosen_atoms = frozenset(mycheck[1])
		selected_atoms = {parent : []}
		for node in atom_graph:
			if isinstance(node, Atom):
				continue
			if node is parent:
				pkg = parent
			else:
				pkg, virt_atom = node
				if virt_atom not in chosen_atoms:
					continue
				if not portage.match_from_list(virt_atom, [pkg]):
					# Typically this means that the atom
					# specifies USE deps that are unsatisfied
					# by the selected package. The caller will
					# record this as an unsatisfied dependency
					# when necessary.
					continue
			selected_atoms[pkg] = [atom for atom in \
				atom_graph.child_nodes(node) if atom in chosen_atoms]
	return selected_atoms
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
	check_backtrack=False):
	"""
	Display (on stdout) why no ebuild could satisfy the given atom:
	USE flag problems, masked packages, and the chain of parent
	nodes that pulled the dependency in.

	When check_backtrack=True, no output is produced and
	the method either returns or raises _backtrack_mask if
	a matching package has been masked by backtracking.

	@param root: root path whose trees and settings are searched
	@param atom: the unsatisfied dependency atom
	@param myparent: the node that requested the atom, if any
	@param arg: command line argument string, used only for display
	@param check_backtrack: probe-only mode described above
	"""
	backtrack_mask = False
	atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
		allow_repo=True)
	# xinfo is the display label: the argument string if given,
	# otherwise the unevaluated atom (or the AtomArg parent).
	xinfo = '"%s"' % atom.unevaluated_atom
	if arg:
		xinfo='"%s"' % arg
	if isinstance(myparent, AtomArg):
		xinfo = _unicode_decode('"%s"') % (myparent,)
	# Discard null/ from failed cpv_expand category expansion.
	xinfo = xinfo.replace("null/", "")
	masked_packages = []
	missing_use = []
	masked_pkg_instances = set()
	# NOTE(review): missing_licenses and portdb are initialized but
	# never referenced in this method body — possibly leftovers; verify.
	missing_licenses = []
	have_eapi_mask = False
	pkgsettings = self._frozen_config.pkgsettings[root]
	root_config = self._frozen_config.roots[root]
	portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
	dbs = self._dynamic_config._filtered_trees[root]["dbs"]
	# Pass 1: scan each non-installed db for packages matching the atom
	# (ignoring USE deps) and collect masking reasons per candidate.
	for db, pkg_type, built, installed, db_keys in dbs:
		if installed:
			continue
		match = db.match  # NOTE(review): unused local
		if hasattr(db, "xmatch"):
			cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
		else:
			cpv_list = db.match(atom.without_use)
		if atom.repo is None and hasattr(db, "getRepositories"):
			repo_list = db.getRepositories()
		else:
			repo_list = [atom.repo]
		# descending order
		cpv_list.reverse()
		for cpv in cpv_list:
			for repo in repo_list:
				if not db.cpv_exists(cpv, myrepo=repo):
					continue
				metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
					built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
				if metadata is not None:
					if not repo:
						repo = metadata.get('repository')
					pkg = self._pkg(cpv, pkg_type, root_config,
						installed=installed, myrepo=repo)
					if not atom_set.findAtomForPackage(pkg,
						modified_use=self._pkg_use_enabled(pkg)):
						continue
					# pkg.metadata contains calculated USE for ebuilds,
					# required later for getMissingLicenses.
					metadata = pkg.metadata
					if pkg in self._dynamic_config._runtime_pkg_mask:
						backtrack_reasons = \
							self._dynamic_config._runtime_pkg_mask[pkg]
						mreasons.append('backtracking: %s' % \
							', '.join(sorted(backtrack_reasons)))
						backtrack_mask = True
					if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
						modified_use=self._pkg_use_enabled(pkg)):
						mreasons = ["exclude option"]
					if mreasons:
						masked_pkg_instances.add(pkg)
					if atom.unevaluated_atom.use:
						try:
							# Candidate fails only on USE deps (flags
							# missing from IUSE or set wrong).
							if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
								or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
								missing_use.append(pkg)
								if not mreasons:
									continue
						except InvalidAtom:
							writemsg("violated_conditionals raised " + \
								"InvalidAtom: '%s' parent: %s" % \
								(atom, myparent), noiselevel=-1)
							raise
					if pkg.built and not mreasons:
						mreasons = ["use flag configuration mismatch"]
				# Recorded even when metadata is None (mask info only).
				masked_packages.append(
					(root_config, pkgsettings, cpv, repo, metadata, mreasons))
	if check_backtrack:
		# Probe-only mode: signal backtrack masking via exception,
		# produce no output.
		if backtrack_mask:
			raise self._backtrack_mask()
		else:
			return
	# Pass 2: for candidates that failed on USE deps, build
	# human-readable suggestions (missing IUSE vs. USE changes).
	missing_use_reasons = []
	missing_iuse_reasons = []
	for pkg in missing_use:
		use = self._pkg_use_enabled(pkg)
		missing_iuse = []
		#Use the unevaluated atom here, because some flags might have gone
		#lost during evaluation.
		required_flags = atom.unevaluated_atom.use.required
		missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
		mreasons = []
		if missing_iuse:
			mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
			missing_iuse_reasons.append((pkg, mreasons))
		else:
			need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
			need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
			pkgsettings = self._frozen_config.pkgsettings[pkg.root]
			pkgsettings.setcpv(pkg)
			untouchable_flags = \
				frozenset(chain(pkgsettings.usemask, pkgsettings.useforce))
			# Never suggest changing masked or forced flags.
			if untouchable_flags.intersection(
				chain(need_enable, need_disable)):
				continue
			required_use = pkg.metadata["REQUIRED_USE"]
			required_use_warning = ""
			if required_use:
				old_use = self._pkg_use_enabled(pkg)
				new_use = set(self._pkg_use_enabled(pkg))
				for flag in need_enable:
					new_use.add(flag)
				for flag in need_disable:
					new_use.discard(flag)
				# Warn only when the suggested change would newly
				# violate REQUIRED_USE (satisfied before the change).
				if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
					not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
					required_use_warning = ", this change violates use flag constraints " + \
						"defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
			if need_enable or need_disable:
				changes = []
				changes.extend(colorize("red", "+" + x) \
					for x in need_enable)
				changes.extend(colorize("blue", "-" + x) \
					for x in need_disable)
				mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
				missing_use_reasons.append((pkg, mreasons))
		if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
			# Lets see if the violated use deps are conditional.
			# If so, suggest to change them on the parent.
			# If the child package is masked then a change to
			# parent USE is not a valid solution (a normal mask
			# message should be displayed instead).
			if pkg in masked_pkg_instances:
				continue
			mreasons = []
			violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
				pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
			if not (violated_atom.use.enabled or violated_atom.use.disabled):
				#all violated use deps are conditional
				changes = []
				conditional = violated_atom.use.conditional
				involved_flags = set(chain(conditional.equal, conditional.not_equal, \
					conditional.enabled, conditional.disabled))
				pkgsettings = self._frozen_config.pkgsettings[myparent.root]
				pkgsettings.setcpv(myparent)
				untouchable_flags = \
					frozenset(chain(pkgsettings.usemask, pkgsettings.useforce))
				if untouchable_flags.intersection(involved_flags):
					continue
				required_use = myparent.metadata["REQUIRED_USE"]
				required_use_warning = ""
				if required_use:
					old_use = self._pkg_use_enabled(myparent)
					new_use = set(self._pkg_use_enabled(myparent))
					# Toggling each involved flag on the parent flips
					# the outcome of the conditional USE dep.
					for flag in involved_flags:
						if flag in old_use:
							new_use.discard(flag)
						else:
							new_use.add(flag)
					if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
						not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
						required_use_warning = ", this change violates use flag constraints " + \
							"defined by %s: '%s'" % (myparent.cpv, \
							human_readable_required_use(required_use))
				for flag in involved_flags:
					if flag in self._pkg_use_enabled(myparent):
						changes.append(colorize("blue", "-" + flag))
					else:
						changes.append(colorize("red", "+" + flag))
				mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
				if (myparent, mreasons) not in missing_use_reasons:
					missing_use_reasons.append((myparent, mreasons))
	# Only suggest USE changes for candidates that are not also masked.
	unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
		in missing_use_reasons if pkg not in masked_pkg_instances]
	unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
		in missing_iuse_reasons if pkg not in masked_pkg_instances]
	show_missing_use = False
	if unmasked_use_reasons:
		# Only show the latest version.
		show_missing_use = []
		pkg_reason = None
		parent_reason = None
		for pkg, mreasons in unmasked_use_reasons:
			if pkg is myparent:
				if parent_reason is None:
					#This happens if a use change on the parent
					#leads to a satisfied conditional use dep.
					parent_reason = (pkg, mreasons)
			elif pkg_reason is None:
				#Don't rely on the first pkg in unmasked_use_reasons,
				#being the highest version of the dependency.
				pkg_reason = (pkg, mreasons)
		if pkg_reason:
			show_missing_use.append(pkg_reason)
		if parent_reason:
			show_missing_use.append(parent_reason)
	elif unmasked_iuse_reasons:
		masked_with_iuse = False
		for pkg in masked_pkg_instances:
			#Use atom.unevaluated here, because some flags might have gone
			#lost during evaluation.
			if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
				# Package(s) with required IUSE are masked,
				# so display a normal masking message.
				masked_with_iuse = True
				break
		if not masked_with_iuse:
			show_missing_use = unmasked_iuse_reasons
	mask_docs = False
	# Emit the main diagnostic: USE suggestions, mask report, or a
	# plain "no ebuilds" message.
	if show_missing_use:
		writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
		writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
		for pkg, mreasons in show_missing_use:
			writemsg_stdout("- "+pkg.cpv+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
	elif masked_packages:
		writemsg_stdout("\n!!! " + \
			colorize("BAD", "All ebuilds that could satisfy ") + \
			colorize("INFORM", xinfo) + \
			colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
		writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
		have_eapi_mask = show_masked_packages(masked_packages)
		if have_eapi_mask:
			writemsg_stdout("\n", noiselevel=-1)
			msg = ("The current version of portage supports " + \
				"EAPI '%s'. You must upgrade to a newer version" + \
				" of portage before EAPI masked packages can" + \
				" be installed.") % portage.const.EAPI
			writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
		writemsg_stdout("\n", noiselevel=-1)
		mask_docs = True
	else:
		writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
	# Show parent nodes and the argument that pulled them in.
	traversed_nodes = set()
	# NOTE(review): this assignment is immediately overwritten by the
	# if/else below — dead store.
	node = myparent
	if isinstance(myparent, AtomArg):
		# It's redundant to show parent for AtomArg since
		# it's the same as 'xinfo' displayed above.
		node = None
	else:
		node = myparent
	msg = []
	while node is not None:
		traversed_nodes.add(node)
		if isinstance(node, DependencyArg):
			msg.append('(dependency required by "%s")' % \
				colorize('INFORM', _unicode_decode("%s") % (node,)))
		else:
			msg.append('(dependency required by "%s" [%s])' % \
				(colorize('INFORM', _unicode_decode("%s") % \
				(node.cpv,)), node.type_name))
		if node not in self._dynamic_config.digraph:
			# The parent is not in the graph due to backtracking.
			break
		# When traversing to parents, prefer arguments over packages
		# since arguments are root nodes. Never traverse the same
		# package twice, in order to prevent an infinite loop.
		selected_parent = None
		for parent in self._dynamic_config.digraph.parent_nodes(node):
			if parent in traversed_nodes:
				continue
			if isinstance(parent, DependencyArg):
				if self._dynamic_config.digraph.parent_nodes(parent):
					selected_parent = parent
				else:
					msg.append('(dependency required by "%s" [argument])' % \
						colorize('INFORM', _unicode_decode("%s") % (parent,)))
					selected_parent = None
				break
			else:
				selected_parent = parent
		node = selected_parent
	if msg:
		writemsg_stdout("\n".join(msg), noiselevel=-1)
		writemsg_stdout("\n", noiselevel=-1)
	if mask_docs:
		show_mask_docs()
		writemsg_stdout("\n", noiselevel=-1)
def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
	"""
	Yield Package matches for the given atom from every database
	configured for root_config's root, in database order.
	"""
	dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
	for db, pkg_type, built, installed, db_keys in dbs:
		for pkg in self._iter_match_pkgs(root_config, pkg_type,
			atom, onlydeps=onlydeps):
			yield pkg
def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
"""
Iterate over Package instances of pkg_type matching the given atom.
This does not check visibility and it also does not match USE for
unbuilt ebuilds since USE are lazily calculated after visibility
checks (to avoid the expense when possible).
"""
db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
if hasattr(db, "xmatch"):
# For portdbapi we match only against the cpv, in order
# to bypass unnecessary cache access for things like IUSE
# and SLOT. Later, we cache the metadata in a Package
# instance, and use that for further matching. This
# optimization is especially relevant since
# pordbapi.aux_get() does not cache calls that have
# myrepo or mytree arguments.
cpv_list = db.xmatch("match-all-cpv-only", atom)
else:
cpv_list = db.match(atom)
# USE=multislot can make an installed package appear as if
# it doesn't satisfy a slot dependency. Rebuilding the ebuild
# won't do any good as long as USE=multislot is enabled since
# the newly built package still won't have the expected slot.
# Therefore, assume that such SLOT dependencies are already
# satisfied rather than forcing a rebuild.
installed = pkg_type == 'installed'
if installed and not cpv_list and atom.slot:
for cpv in db.match(atom.cp):
slot_available = False
for other_db, other_type, other_built, \
other_installed, other_keys in \
self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
try:
if atom.slot == \
other_db.aux_get(cpv, ["SLOT"])[0]:
slot_available = True
break
except KeyError:
pass
if not slot_available:
continue
inst_pkg = self._pkg(cpv, "installed",
root_config, installed=installed, myrepo = atom.repo)
# Remove the slot from the atom and verify that
# the package matches the resulting atom.
if portage.match_from_list