# Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id$
from __future__ import print_function
import gc
import logging
import re
import sys
import textwrap
from itertools import chain
import portage
from portage import os
from portage import digraph
from portage.dep import Atom
from portage.output import bold, blue, colorize, create_color_func, darkblue, \
darkgreen, green, nc_len, red, teal, turquoise, yellow
bad = create_color_func("BAD")
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import writemsg_level
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
from _emerge.changelog import calc_changelog
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.format_size import format_size
from _emerge.is_valid_package_atom import is_valid_package_atom
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
from _emerge.PackageCounters import PackageCounters
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from _emerge.RepoDisplay import RepoDisplay
from _emerge.RootConfig import RootConfig
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.visible import visible
if sys.hexversion >= 0x3000000:
basestring = str
long = int
class _frozen_depgraph_config(object):
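	"""
	Configuration shared by all depgraph instances in a backtracking
	series (settings, trees, command line options and package settings).
	It can be passed back in via the frozen_config parameter of
	depgraph.__init__ so that backtracking runs do not have to rebuild
	this state.
	"""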
def __init__(self, settings, trees, myopts, spinner):
self.settings = settings
self.target_root = settings["ROOT"]
self.myopts = myopts
self.edebug = 0
if settings.get("PORTAGE_DEBUG", "") == "1":
self.edebug = 1
self.spinner = spinner
self._running_root = trees["/"]["root_config"]
self._opts_no_restart = frozenset(["--buildpkgonly",
"--fetchonly", "--fetch-all-uri", "--pretend"])
self.pkgsettings = {}
self.trees = {}
self._trees_orig = trees
self.roots = {}
# All Package instances
self._pkg_cache = {}
for myroot in trees:
self.trees[myroot] = {}
# Create a RootConfig instance that references
# the FakeVartree instead of the real one.
self.roots[myroot] = RootConfig(
trees[myroot]["vartree"].settings,
self.trees[myroot],
trees[myroot]["root_config"].setconfig)
for tree in ("porttree", "bintree"):
self.trees[myroot][tree] = trees[myroot][tree]
self.trees[myroot]["vartree"] = \
FakeVartree(trees[myroot]["root_config"],
pkg_cache=self._pkg_cache)
self.pkgsettings[myroot] = portage.config(
clone=self.trees[myroot]["vartree"].settings)
self._required_set_names = set(["world"])
class _dynamic_depgraph_config(object):
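	"""
	Per-run depgraph state: the digraph itself, blocker data, slot
	conflict bookkeeping, the runtime package mask and so on. A fresh
	instance is created for every depgraph, including each backtracking
	attempt, while the _frozen_depgraph_config is reused.
	"""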
def __init__(self, depgraph, myparams, allow_backtracking,
runtime_pkg_mask):
self.myparams = myparams.copy()
self._vdb_loaded = False
self._allow_backtracking = allow_backtracking
# Maps slot atom to package for each Package added to the graph.
self._slot_pkg_map = {}
# Maps nodes to the reasons they were selected for reinstallation.
self._reinstall_nodes = {}
self.mydbapi = {}
# Contains a filtered view of preferred packages that are selected
# from available repositories.
self._filtered_trees = {}
# Contains installed packages and new packages that have been added
# to the graph.
self._graph_trees = {}
# Caches visible packages returned from _select_package, for use in
# depgraph._iter_atoms_for_pkg() SLOT logic.
self._visible_pkgs = {}
#contains the args created by select_files
self._initial_arg_list = []
self.digraph = portage.digraph()
# contains all sets added to the graph
self._sets = {}
# contains atoms given as arguments
self._sets["args"] = InternalPackageSet()
# contains all atoms from all sets added to the graph, including
# atoms given as arguments
self._set_atoms = InternalPackageSet()
self._atom_arg_map = {}
# contains all nodes pulled in by self._set_atoms
self._set_nodes = set()
# Contains only Blocker -> Uninstall edges
self._blocker_uninstalls = digraph()
# Contains only Package -> Blocker edges
self._blocker_parents = digraph()
# Contains only irrelevant Package -> Blocker edges
self._irrelevant_blockers = digraph()
# Contains only unsolvable Package -> Blocker edges
self._unsolvable_blockers = digraph()
# Contains all Blocker -> Blocked Package edges
self._blocked_pkgs = digraph()
# Contains world packages that have been protected from
# uninstallation but may not have been added to the graph
# if the graph is not complete yet.
self._blocked_world_pkgs = {}
self._slot_collision_info = {}
# Slot collision nodes are not allowed to block other packages since
# blocker validation is only able to account for one package per slot.
self._slot_collision_nodes = set()
self._parent_atoms = {}
self._slot_conflict_parent_atoms = set()
self._serialized_tasks_cache = None
self._scheduler_graph = None
self._displayed_list = None
self._pprovided_args = []
self._missing_args = []
self._masked_installed = set()
self._unsatisfied_deps_for_display = []
self._unsatisfied_blockers_for_display = None
self._circular_deps_for_display = None
self._dep_stack = []
self._dep_disjunctive_stack = []
self._unsatisfied_deps = []
self._initially_unsatisfied_deps = []
self._ignored_deps = []
self._highest_pkg_cache = {}
if runtime_pkg_mask is None:
runtime_pkg_mask = {}
else:
runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
runtime_pkg_mask.items())
self._runtime_pkg_mask = runtime_pkg_mask
self._need_restart = False
for myroot in depgraph._frozen_config.trees:
self._slot_pkg_map[myroot] = {}
vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
# This dbapi instance will model the state that the vdb will
# have after new packages have been installed.
fakedb = PackageVirtualDbapi(vardb.settings)
self.mydbapi[myroot] = fakedb
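			# graph_tree below is a lightweight stand-in for a tree
			# object whose dbapi attribute points at the fake vdb above.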
def graph_tree():
pass
graph_tree.dbapi = fakedb
self._graph_trees[myroot] = {}
self._filtered_trees[myroot] = {}
# Substitute the graph tree for the vartree in dep_check() since we
# want atom selections to be consistent with package selections
			# that have already been made.
self._graph_trees[myroot]["porttree"] = graph_tree
self._graph_trees[myroot]["vartree"] = graph_tree
def filtered_tree():
pass
filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
self._filtered_trees[myroot]["porttree"] = filtered_tree
self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
# Passing in graph_tree as the vartree here could lead to better
# atom selections in some cases by causing atoms for packages that
# have been added to the graph to be preferred over other choices.
# However, it can trigger atom selections that result in
# unresolvable direct circular dependencies. For example, this
# happens with gwydion-dylan which depends on either itself or
# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid an
			# unresolvable direct circular dependency.
#
# To solve the problem described above, pass in "graph_db" so that
# packages that have been added to the graph are distinguishable
# from other available packages and installed packages. Also, pass
# the parent package into self._select_atoms() calls so that
# unresolvable direct circular dependencies can be detected and
# avoided when possible.
self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
self._filtered_trees[myroot]["vartree"] = \
depgraph._frozen_config.trees[myroot]["vartree"]
dbs = []
portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
# (db, pkg_type, built, installed, db_keys)
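			# For example, with default options this typically yields
			# [(portdb, "ebuild", ...), (vardb, "installed", ...)], and
			# --usepkg inserts (bindb, "binary", ...) before the
			# installed entry.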
if "--usepkgonly" not in depgraph._frozen_config.myopts:
db_keys = list(portdb._aux_cache_keys)
dbs.append((portdb, "ebuild", False, False, db_keys))
if "--usepkg" in depgraph._frozen_config.myopts:
db_keys = list(bindb._aux_cache_keys)
dbs.append((bindb, "binary", True, False, db_keys))
db_keys = list(depgraph._frozen_config._trees_orig[myroot
]["vartree"].dbapi._aux_cache_keys)
dbs.append((vardb, "installed", True, True, db_keys))
self._filtered_trees[myroot]["dbs"] = dbs
if "--usepkg" in depgraph._frozen_config.myopts:
depgraph._frozen_config._trees_orig[myroot
]["bintree"].populate(
"--getbinpkg" in depgraph._frozen_config.myopts,
"--getbinpkgonly" in depgraph._frozen_config.myopts)
class depgraph(object):
pkg_tree_map = RootConfig.pkg_tree_map
_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
def __init__(self, settings, trees, myopts, myparams, spinner,
frozen_config=None, runtime_pkg_mask=None, allow_backtracking=False):
if frozen_config is None:
frozen_config = _frozen_depgraph_config(settings, trees,
myopts, spinner)
self._frozen_config = frozen_config
self._dynamic_config = _dynamic_depgraph_config(self, myparams,
allow_backtracking, runtime_pkg_mask)
self._select_atoms = self._select_atoms_highest_available
self._select_package = self._select_pkg_highest_available
def _load_vdb(self):
"""
Load installed package metadata if appropriate. This used to be called
from the constructor, but that wasn't very nice since this procedure
is slow and it generates spinner output. So, now it's called on-demand
by various methods when necessary.
"""
if self._dynamic_config._vdb_loaded:
return
for myroot in self._frozen_config.trees:
preload_installed_pkgs = \
"--nodeps" not in self._frozen_config.myopts and \
"--buildpkgonly" not in self._frozen_config.myopts
fake_vartree = self._frozen_config.trees[myroot]["vartree"]
if not fake_vartree.dbapi:
# This needs to be called for the first depgraph, but not for
# backtracking depgraphs that share the same frozen_config.
fake_vartree.sync()
if preload_installed_pkgs:
vardb = fake_vartree.dbapi
fakedb = self._dynamic_config._graph_trees[
myroot]["vartree"].dbapi
for pkg in vardb:
self._spinner_update()
# This triggers metadata updates via FakeVartree.
vardb.aux_get(pkg.cpv, [])
fakedb.cpv_inject(pkg)
# Now that the vardb state is cached in our FakeVartree,
				# we won't be needing the real vartree cache for a while.
# To make some room on the heap, clear the vardbapi
# caches.
self._frozen_config._trees_orig[myroot
]["vartree"].dbapi._clear_cache()
gc.collect()
self._dynamic_config._vdb_loaded = True
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
def _show_missed_update(self):
if '--quiet' in self._frozen_config.myopts and \
'--debug' not in self._frozen_config.myopts:
return
# In order to minimize noise, show only the highest
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in \
self._dynamic_config._runtime_pkg_mask.items():
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
continue
k = (pkg.root, pkg.slot_atom)
if k in missed_updates:
other_pkg, mask_type, parent_atoms = missed_updates[k]
if other_pkg > pkg:
continue
for mask_type, parent_atoms in mask_reasons.items():
if not parent_atoms:
continue
missed_updates[k] = (pkg, mask_type, parent_atoms)
break
if not missed_updates:
return
missed_update_types = {}
for pkg, mask_type, parent_atoms in missed_updates.values():
missed_update_types.setdefault(mask_type,
[]).append((pkg, parent_atoms))
self._show_missed_update_slot_conflicts(
missed_update_types.get("slot conflict"))
self._show_missed_update_unsatisfied_dep(
missed_update_types.get("missing dependency"))
def _show_missed_update_unsatisfied_dep(self, missed_updates):
if not missed_updates:
return
write = sys.stderr.write
backtrack_masked = []
for pkg, parent_atoms in missed_updates:
try:
for parent, root, atom in parent_atoms:
self._show_unsatisfied_dep(root, atom, myparent=parent,
check_backtrack=True)
except self._backtrack_mask:
# This is displayed below in abbreviated form.
backtrack_masked.append((pkg, parent_atoms))
continue
write("\n!!! The following update has been skipped " + \
"due to unsatisfied dependencies:\n\n")
write(str(pkg.slot_atom))
if pkg.root != '/':
write(" for %s" % (pkg.root,))
write("\n")
for parent, root, atom in parent_atoms:
self._show_unsatisfied_dep(root, atom, myparent=parent)
write("\n")
if backtrack_masked:
# These are shown in abbreviated form, in order to avoid terminal
# flooding from mask messages as reported in bug #285832.
write("\n!!! The following update(s) have been skipped " + \
"due to unsatisfied dependencies\n" + \
"!!! triggered by backtracking:\n\n")
for pkg, parent_atoms in backtrack_masked:
write(str(pkg.slot_atom))
if pkg.root != '/':
write(" for %s" % (pkg.root,))
write("\n")
sys.stderr.flush()
def _show_missed_update_slot_conflicts(self, missed_updates):
if not missed_updates:
return
msg = []
msg.append("\n!!! One or more updates have been skipped due to " + \
"a dependency conflict:\n\n")
indent = " "
for pkg, parent_atoms in missed_updates:
msg.append(str(pkg.slot_atom))
if pkg.root != '/':
msg.append(" for %s" % (pkg.root,))
msg.append("\n\n")
for parent, atom in parent_atoms:
msg.append(indent)
msg.append(str(pkg))
msg.append(" conflicts with\n")
msg.append(2*indent)
if isinstance(parent,
(PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
msg.append(str(parent))
else:
# Display the specific atom from SetArg or
# Package types.
msg.append("%s required by %s" % (atom, parent))
msg.append("\n")
msg.append("\n")
sys.stderr.write("".join(msg))
sys.stderr.flush()
def _show_slot_collision_notice(self):
"""Show an informational message advising the user to mask one of the
the packages. In some cases it may be possible to resolve this
automatically, but support for backtracking (removal nodes that have
already been selected) will be required in order to handle all possible
cases.
"""
if not self._dynamic_config._slot_collision_info:
return
self._show_merge_list()
msg = []
msg.append("\n!!! Multiple package instances within a single " + \
"package slot have been pulled\n")
msg.append("!!! into the dependency graph, resulting" + \
" in a slot conflict:\n\n")
indent = " "
# Max number of parents shown, to avoid flooding the display.
max_parents = 3
explanation_columns = 70
explanations = 0
for (slot_atom, root), slot_nodes \
in self._dynamic_config._slot_collision_info.items():
msg.append(str(slot_atom))
if root != '/':
msg.append(" for %s" % (root,))
msg.append("\n\n")
for node in slot_nodes:
msg.append(indent)
msg.append(str(node))
parent_atoms = self._dynamic_config._parent_atoms.get(node)
if parent_atoms:
pruned_list = set()
# Prefer conflict atoms over others.
for parent_atom in parent_atoms:
if len(pruned_list) >= max_parents:
break
if parent_atom in self._dynamic_config._slot_conflict_parent_atoms:
pruned_list.add(parent_atom)
# If this package was pulled in by conflict atoms then
# show those alone since those are the most interesting.
if not pruned_list:
# When generating the pruned list, prefer instances
# of DependencyArg over instances of Package.
for parent_atom in parent_atoms:
if len(pruned_list) >= max_parents:
break
parent, atom = parent_atom
if isinstance(parent, DependencyArg):
pruned_list.add(parent_atom)
						# Prefer Package instances that themselves have been
# pulled into collision slots.
for parent_atom in parent_atoms:
if len(pruned_list) >= max_parents:
break
parent, atom = parent_atom
if isinstance(parent, Package) and \
(parent.slot_atom, parent.root) \
in self._dynamic_config._slot_collision_info:
pruned_list.add(parent_atom)
for parent_atom in parent_atoms:
if len(pruned_list) >= max_parents:
break
pruned_list.add(parent_atom)
omitted_parents = len(parent_atoms) - len(pruned_list)
parent_atoms = pruned_list
msg.append(" pulled in by\n")
for parent_atom in parent_atoms:
parent, atom = parent_atom
msg.append(2*indent)
if isinstance(parent,
(PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
msg.append(str(parent))
else:
# Display the specific atom from SetArg or
# Package types.
msg.append("%s required by %s" % (atom, parent))
msg.append("\n")
if omitted_parents:
msg.append(2*indent)
msg.append("(and %d more)\n" % omitted_parents)
else:
msg.append(" (no parents)\n")
msg.append("\n")
explanation = self._slot_conflict_explanation(slot_nodes)
if explanation:
explanations += 1
msg.append(indent + "Explanation:\n\n")
for line in textwrap.wrap(explanation, explanation_columns):
msg.append(2*indent + line + "\n")
msg.append("\n")
msg.append("\n")
sys.stderr.write("".join(msg))
sys.stderr.flush()
explanations_for_all = explanations == len(self._dynamic_config._slot_collision_info)
if explanations_for_all or "--quiet" in self._frozen_config.myopts:
return
msg = []
msg.append("It may be possible to solve this problem ")
msg.append("by using package.mask to prevent one of ")
msg.append("those packages from being selected. ")
msg.append("However, it is also possible that conflicting ")
msg.append("dependencies exist such that they are impossible to ")
msg.append("satisfy simultaneously. If such a conflict exists in ")
msg.append("the dependencies of two different packages, then those ")
msg.append("packages can not be installed simultaneously.")
from formatter import AbstractFormatter, DumbWriter
f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
for x in msg:
f.add_flowing_data(x)
f.end_paragraph(1)
msg = []
msg.append("For more information, see MASKED PACKAGES ")
msg.append("section in the emerge man page or refer ")
msg.append("to the Gentoo Handbook.")
for x in msg:
f.add_flowing_data(x)
f.end_paragraph(1)
f.writer.flush()
def _slot_conflict_explanation(self, slot_nodes):
"""
When a slot conflict occurs due to USE deps, there are a few
different cases to consider:
1) New USE are correctly set but --newuse wasn't requested so an
installed package with incorrect USE happened to get pulled
into graph before the new one.
2) New USE are incorrectly set but an installed package has correct
USE so it got pulled into the graph, and a new instance also got
pulled in due to --newuse or an upgrade.
3) Multiple USE deps exist that can't be satisfied simultaneously,
and multiple package instances got pulled into the same slot to
satisfy the conflicting deps.
Currently, explanations and suggested courses of action are generated
for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
"""
if len(slot_nodes) != 2:
# Suggestions are only implemented for
# conflicts between two packages.
return None
all_conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms
matched_node = None
matched_atoms = None
unmatched_node = None
for node in slot_nodes:
parent_atoms = self._dynamic_config._parent_atoms.get(node)
if not parent_atoms:
# Normally, there are always parent atoms. If there are
# none then something unexpected is happening and there's
# currently no suggestion for this case.
return None
conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
for parent_atom in conflict_atoms:
parent, atom = parent_atom
if not atom.use:
# Suggestions are currently only implemented for cases
# in which all conflict atoms have USE deps.
return None
if conflict_atoms:
if matched_node is not None:
# If conflict atoms match multiple nodes
# then there's no suggestion.
return None
matched_node = node
matched_atoms = conflict_atoms
else:
if unmatched_node is not None:
# Neither node is matched by conflict atoms, and
# there is no suggestion for this case.
return None
unmatched_node = node
if matched_node is None or unmatched_node is None:
# This shouldn't happen.
return None
if unmatched_node.installed and not matched_node.installed and \
unmatched_node.cpv == matched_node.cpv:
# If the conflicting packages are the same version then
# --newuse should be all that's needed. If they are different
# versions then there's some other problem.
return "New USE are correctly set, but --newuse wasn't" + \
" requested, so an installed package with incorrect USE " + \
"happened to get pulled into the dependency graph. " + \
"In order to solve " + \
"this, either specify the --newuse option or explicitly " + \
" reinstall '%s'." % matched_node.slot_atom
if matched_node.installed and not unmatched_node.installed:
atoms = sorted(set(atom for parent, atom in matched_atoms))
explanation = ("New USE for '%s' are incorrectly set. " + \
"In order to solve this, adjust USE to satisfy '%s'") % \
(matched_node.slot_atom, atoms[0])
if len(atoms) > 1:
for atom in atoms[1:-1]:
explanation += ", '%s'" % (atom,)
if len(atoms) > 2:
explanation += ","
explanation += " and '%s'" % (atoms[-1],)
explanation += "."
return explanation
return None
def _process_slot_conflicts(self):
"""
Process slot conflict data to identify specific atoms which
lead to conflict. These atoms only match a subset of the
packages that have been pulled into a given slot.
"""
for (slot_atom, root), slot_nodes \
in self._dynamic_config._slot_collision_info.items():
all_parent_atoms = set()
for pkg in slot_nodes:
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if not parent_atoms:
continue
all_parent_atoms.update(parent_atoms)
for pkg in slot_nodes:
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if parent_atoms is None:
parent_atoms = set()
self._dynamic_config._parent_atoms[pkg] = parent_atoms
for parent_atom in all_parent_atoms:
if parent_atom in parent_atoms:
continue
# Use package set for matching since it will match via
# PROVIDE when necessary, while match_from_list does not.
parent, atom = parent_atom
atom_set = InternalPackageSet(
initial_atoms=(atom,))
if atom_set.findAtomForPackage(pkg):
parent_atoms.add(parent_atom)
else:
self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
def _reinstall_for_flags(self, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
"""Return a set of flags that trigger reinstallation, or None if there
are no such flags."""
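		# Illustrative example: with orig_iuse/orig_use of {foo, bar}/{foo}
		# and cur_iuse/cur_use of {foo, bar}/{foo, bar} and no forced
		# flags, --newuse reports {bar}, since its effective value
		# changed even though IUSE itself did not.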
if "--newuse" in self._frozen_config.myopts or \
"--binpkg-respect-use" in self._frozen_config.myopts:
flags = set(orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags))
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
if flags:
return flags
elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
flags = orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use))
if flags:
return flags
return None
def _create_graph(self, allow_unsatisfied=False):
dep_stack = self._dynamic_config._dep_stack
dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
while dep_stack or dep_disjunctive_stack:
self._spinner_update()
while dep_stack:
dep = dep_stack.pop()
if isinstance(dep, Package):
if not self._add_pkg_deps(dep,
allow_unsatisfied=allow_unsatisfied):
return 0
continue
if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
return 0
if dep_disjunctive_stack:
if not self._pop_disjunction(allow_unsatisfied):
return 0
return 1
def _add_dep(self, dep, allow_unsatisfied=False):
debug = "--debug" in self._frozen_config.myopts
buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
nodeps = "--nodeps" in self._frozen_config.myopts
empty = "empty" in self._dynamic_config.myparams
deep = self._dynamic_config.myparams.get("deep", 0)
recurse = empty or deep is True or dep.depth <= deep
if dep.blocker:
if not buildpkgonly and \
not nodeps and \
dep.parent not in self._dynamic_config._slot_collision_nodes:
if dep.parent.onlydeps:
# It's safe to ignore blockers if the
# parent is an --onlydeps node.
return 1
# The blocker applies to the root where
# the parent is or will be installed.
blocker = Blocker(atom=dep.atom,
eapi=dep.parent.metadata["EAPI"],
root=dep.parent.root)
self._dynamic_config._blocker_parents.add(blocker, dep.parent)
return 1
if dep.child is None:
dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
onlydeps=dep.onlydeps)
else:
# The caller has selected a specific package
# via self._minimize_packages().
dep_pkg = dep.child
existing_node = self._dynamic_config._slot_pkg_map[
dep.root].get(dep_pkg.slot_atom)
if existing_node is not dep_pkg:
existing_node = None
if not dep_pkg:
if dep.priority.optional:
				# This could be an unnecessary build-time dep
# pulled in by --with-bdeps=y.
return 1
if allow_unsatisfied:
self._dynamic_config._unsatisfied_deps.append(dep)
return 1
self._dynamic_config._unsatisfied_deps_for_display.append(
((dep.root, dep.atom), {"myparent":dep.parent}))
# The parent node should not already be in
# runtime_pkg_mask, since that would trigger an
# infinite backtracking loop.
if self._dynamic_config._allow_backtracking:
if dep.parent in self._dynamic_config._runtime_pkg_mask:
if "--debug" in self._frozen_config.myopts:
writemsg(
"!!! backtracking loop detected: %s %s\n" % \
(dep.parent,
self._dynamic_config._runtime_pkg_mask[
dep.parent]), noiselevel=-1)
else:
# Do not backtrack if only USE have to be changed in
# order to satisfy the dependency.
dep_pkg, existing_node = \
self._select_package(dep.root, dep.atom.without_use,
onlydeps=dep.onlydeps)
if dep_pkg is None:
self._dynamic_config._runtime_pkg_mask.setdefault(
dep.parent, {})["missing dependency"] = \
set([(dep.parent, dep.root, dep.atom)])
self._dynamic_config._need_restart = True
if "--debug" in self._frozen_config.myopts:
msg = []
msg.append("")
msg.append("")
msg.append("backtracking due to unsatisfied dep:")
msg.append(" parent: %s" % dep.parent)
msg.append(" priority: %s" % dep.priority)
msg.append(" root: %s" % dep.root)
msg.append(" atom: %s" % dep.atom)
msg.append("")
writemsg_level("".join("%s\n" % l for l in msg),
noiselevel=-1, level=logging.DEBUG)
return 0
# In some cases, dep_check will return deps that shouldn't
		# be processed any further, so they are identified and
# discarded here. Try to discard as few as possible since
# discarded dependencies reduce the amount of information
# available for optimization of merge order.
if dep.priority.satisfied and \
not dep_pkg.installed and \
not (existing_node or recurse):
myarg = None
if dep.root == self._frozen_config.target_root:
try:
myarg = next(self._iter_atoms_for_pkg(dep_pkg))
except StopIteration:
pass
except portage.exception.InvalidDependString:
if not dep_pkg.installed:
# This shouldn't happen since the package
# should have been masked.
raise
if not myarg:
self._dynamic_config._ignored_deps.append(dep)
return 1
if not self._add_pkg(dep_pkg, dep):
return 0
return 1
def _add_pkg(self, pkg, dep):
myparent = None
priority = None
depth = 0
if dep is None:
dep = Dependency()
else:
myparent = dep.parent
priority = dep.priority
depth = dep.depth
if priority is None:
priority = DepPriority()
"""
Fills the digraph with nodes comprised of packages to merge.
mybigkey is the package spec of the package to merge.
myparent is the package depending on mybigkey ( or None )
addme = Should we add this package to the digraph or are we just looking at it's deps?
Think --onlydeps, we need to ignore packages in that case.
#stuff to add:
#SLOT-aware emerge
#IUSE-aware emerge -> USE DEP aware depgraph
#"no downgrade" emerge
"""
# Ensure that the dependencies of the same package
# are never processed more than once.
previously_added = pkg in self._dynamic_config.digraph
# select the correct /var database that we'll be checking against
vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
arg_atoms = None
if True:
try:
arg_atoms = list(self._iter_atoms_for_pkg(pkg))
except portage.exception.InvalidDependString as e:
if not pkg.installed:
show_invalid_depstring_notice(
pkg, pkg.metadata["PROVIDE"], str(e))
return 0
del e
if not pkg.onlydeps:
if not pkg.installed and \
"empty" not in self._dynamic_config.myparams and \
vardbapi.match(pkg.slot_atom):
# Increase the priority of dependencies on packages that
# are being rebuilt. This optimizes merge order so that
# dependencies are rebuilt/updated as soon as possible,
# which is needed especially when emerge is called by
# revdep-rebuild since dependencies may be affected by ABI
# breakage that has rendered them useless. Don't adjust
# priority here when in "empty" mode since all packages
# are being merged in that case.
priority.rebuild = True
existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
slot_collision = False
if existing_node:
existing_node_matches = pkg.cpv == existing_node.cpv
if existing_node_matches and \
pkg != existing_node and \
dep.atom is not None:
# Use package set for matching since it will match via
# PROVIDE when necessary, while match_from_list does not.
atom_set = InternalPackageSet(initial_atoms=[dep.atom])
if not atom_set.findAtomForPackage(existing_node):
existing_node_matches = False
if existing_node_matches:
# The existing node can be reused.
if arg_atoms:
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._dynamic_config.digraph.add(existing_node, parent,
priority=priority)
self._add_parent_atom(existing_node, parent_atom)
# If a direct circular dependency is not an unsatisfied
# buildtime dependency then drop it here since otherwise
# it can skew the merge order calculation in an unwanted
# way.
if existing_node != myparent or \
(priority.buildtime and not priority.satisfied):
self._dynamic_config.digraph.addnode(existing_node, myparent,
priority=priority)
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(existing_node,
(dep.parent, dep.atom))
return 1
else:
# A slot conflict has occurred.
# The existing node should not already be in
# runtime_pkg_mask, since that would trigger an
# infinite backtracking loop.
if self._dynamic_config._allow_backtracking and \
existing_node in \
self._dynamic_config._runtime_pkg_mask:
if "--debug" in self._frozen_config.myopts:
writemsg(
"!!! backtracking loop detected: %s %s\n" % \
(existing_node,
self._dynamic_config._runtime_pkg_mask[
existing_node]), noiselevel=-1)
elif self._dynamic_config._allow_backtracking and \
not self._accept_blocker_conflicts():
self._add_slot_conflict(pkg)
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
if arg_atoms:
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._add_parent_atom(pkg, parent_atom)
self._process_slot_conflicts()
parent_atoms = \
self._dynamic_config._parent_atoms.get(pkg, set())
if parent_atoms:
conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
if conflict_atoms:
parent_atoms = conflict_atoms
if pkg >= existing_node:
# We only care about the parent atoms
# when they trigger a downgrade.
parent_atoms = set()
self._dynamic_config._runtime_pkg_mask.setdefault(
existing_node, {})["slot conflict"] = parent_atoms
self._dynamic_config._need_restart = True
if "--debug" in self._frozen_config.myopts:
msg = []
msg.append("")
msg.append("")
msg.append("backtracking due to slot conflict:")
msg.append(" package: %s" % existing_node)
msg.append(" slot: %s" % existing_node.slot_atom)
msg.append(" parents: %s" % \
[(str(parent), atom) \
for parent, atom in parent_atoms])
msg.append("")
writemsg_level("".join("%s\n" % l for l in msg),
noiselevel=-1, level=logging.DEBUG)
return 0
# A slot collision has occurred. Sometimes this coincides
# with unresolvable blockers, so the slot collision will be
# shown later if there are no unresolvable blockers.
self._add_slot_conflict(pkg)
slot_collision = True
if slot_collision:
# Now add this node to the graph so that self.display()
			# can show use flags and --tree output. This node is
# only being partially added to the graph. It must not be
# allowed to interfere with the other nodes that have been
# added. Do not overwrite data for existing nodes in
# self._dynamic_config.mydbapi since that data will be used for blocker
# validation.
# Even though the graph is now invalid, continue to process
# dependencies so that things like --fetchonly can still
# function despite collisions.
pass
elif not previously_added:
self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
if not pkg.installed:
# Allow this package to satisfy old-style virtuals in case it
# doesn't already. Any pre-existing providers will be preferred
# over this one.
try:
pkgsettings.setinst(pkg.cpv, pkg.metadata)
# For consistency, also update the global virtuals.
settings = self._frozen_config.roots[pkg.root].settings
settings.unlock()
settings.setinst(pkg.cpv, pkg.metadata)
settings.lock()
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(
pkg, pkg.metadata["PROVIDE"], str(e))
del e
return 0
if arg_atoms:
self._dynamic_config._set_nodes.add(pkg)
		# Do this even in --onlydeps mode so that the
# parent/child relationship is always known in case
# self._show_slot_collision_notice() needs to be called later.
self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
if arg_atoms:
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._dynamic_config.digraph.add(pkg, parent, priority=priority)
self._add_parent_atom(pkg, parent_atom)
""" This section determines whether we go deeper into dependencies or not.
We want to go deeper on a few occasions:
Installing package A, we need to make sure package A's deps are met.
emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
"""
if arg_atoms:
depth = 0
pkg.depth = depth
deep = self._dynamic_config.myparams.get("deep", 0)
empty = "empty" in self._dynamic_config.myparams
recurse = empty or deep is True or depth + 1 <= deep
dep_stack = self._dynamic_config._dep_stack
if "recurse" not in self._dynamic_config.myparams:
return 1
elif pkg.installed and not recurse:
dep_stack = self._dynamic_config._ignored_deps
self._spinner_update()
if not previously_added:
dep_stack.append(pkg)
return 1
def _add_parent_atom(self, pkg, parent_atom):
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if parent_atoms is None:
parent_atoms = set()
self._dynamic_config._parent_atoms[pkg] = parent_atoms
parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
self._dynamic_config._slot_collision_nodes.add(pkg)
slot_key = (pkg.slot_atom, pkg.root)
slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
if slot_nodes is None:
slot_nodes = set()
slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
slot_nodes.add(pkg)
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
mytype = pkg.type_name
myroot = pkg.root
mykey = pkg.cpv
metadata = pkg.metadata
myuse = pkg.use.enabled
jbigkey = pkg
depth = pkg.depth + 1
removal_action = "remove" in self._dynamic_config.myparams
edepend={}
depkeys = ["DEPEND","RDEPEND","PDEPEND"]
for k in depkeys:
edepend[k] = metadata[k]
if not pkg.built and \
"--buildpkgonly" in self._frozen_config.myopts and \
"deep" not in self._dynamic_config.myparams and \
"empty" not in self._dynamic_config.myparams:
edepend["RDEPEND"] = ""
edepend["PDEPEND"] = ""
bdeps_optional = False
if pkg.built and not removal_action:
if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
				# Pull in build time deps as requested, but mark them as
# "optional" since they are not strictly required. This allows
# more freedom in the merge order calculation for solving
# circular dependencies. Don't convert to PDEPEND since that
# could make --with-bdeps=y less effective if it is used to
# adjust merge order to prevent built_with_use() calls from
# failing.
bdeps_optional = True
else:
# built packages do not have build time dependencies.
edepend["DEPEND"] = ""
if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
edepend["DEPEND"] = ""
if removal_action:
bdeps_root = myroot
else:
bdeps_root = "/"
root_deps = self._frozen_config.myopts.get("--root-deps")
if root_deps is not None:
if root_deps is True:
bdeps_root = myroot
elif root_deps == "rdeps":
edepend["DEPEND"] = ""
deps = (
(bdeps_root, edepend["DEPEND"],
self._priority(buildtime=(not bdeps_optional),
optional=bdeps_optional)),
(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
)
debug = "--debug" in self._frozen_config.myopts
strict = mytype != "installed"
try:
if not strict:
portage.dep._dep_check_strict = False
for dep_root, dep_string, dep_priority in deps:
if not dep_string:
continue
if debug:
writemsg_level("\nParent: %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Depstring: %s\n" % (dep_string,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Priority: %s\n" % (dep_priority,),
noiselevel=-1, level=logging.DEBUG)
try:
dep_string = portage.dep.paren_normalize(
portage.dep.use_reduce(
portage.dep.paren_reduce(dep_string),
uselist=pkg.use.enabled))
dep_string = list(self._queue_disjunctive_deps(
pkg, dep_root, dep_priority, dep_string))
except portage.exception.InvalidDependString as e:
if pkg.installed:
del e
continue
show_invalid_depstring_notice(pkg, dep_string, str(e))
return 0
if not dep_string:
continue
dep_string = portage.dep.paren_enclose(dep_string)
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string,
allow_unsatisfied):
return 0
except portage.exception.AmbiguousPackageName as e:
pkgs = e.args[0]
portage.writemsg("\n\n!!! An atom in the dependencies " + \
"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
for cpv in pkgs:
portage.writemsg(" %s\n" % cpv, noiselevel=-1)
portage.writemsg("\n", noiselevel=-1)
if mytype == "binary":
portage.writemsg(
"!!! This binary package cannot be installed: '%s'\n" % \
mykey, noiselevel=-1)
elif mytype == "ebuild":
portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
myebuild, mylocation = portdb.findname2(mykey)
portage.writemsg("!!! This ebuild cannot be installed: " + \
"'%s'\n" % myebuild, noiselevel=-1)
portage.writemsg("!!! Please notify the package maintainer " + \
"that atoms must be fully-qualified.\n", noiselevel=-1)
return 0
finally:
portage.dep._dep_check_strict = True
return 1
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
allow_unsatisfied):
depth = pkg.depth + 1
debug = "--debug" in self._frozen_config.myopts
strict = pkg.type_name != "installed"
if debug:
writemsg_level("\nParent: %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Depstring: %s\n" % (dep_string,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Priority: %s\n" % (dep_priority,),
noiselevel=-1, level=logging.DEBUG)
try:
selected_atoms = self._select_atoms(dep_root,
dep_string, myuse=pkg.use.enabled, parent=pkg,
strict=strict, priority=dep_priority)
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg, dep_string, str(e))
del e
if pkg.installed:
return 1
return 0
if debug:
writemsg_level("Candidates: %s\n" % \
([str(x) for x in selected_atoms[pkg]],),
noiselevel=-1, level=logging.DEBUG)
root_config = self._frozen_config.roots[dep_root]
vardb = root_config.trees["vartree"].dbapi
for atom, child in self._minimize_children(
pkg, dep_priority, root_config, selected_atoms[pkg]):
mypriority = dep_priority.copy()
if not atom.blocker and vardb.match(atom):
mypriority.satisfied = True
if not self._add_dep(Dependency(atom=atom,
blocker=atom.blocker, child=child, depth=depth, parent=pkg,
priority=mypriority, root=dep_root),
allow_unsatisfied=allow_unsatisfied):
return 0
selected_atoms.pop(pkg)
# Add selected indirect virtual deps to the graph. This
# takes advantage of circular dependency avoidance that's done
# by dep_zapdeps. We preserve actual parent/child relationships
# here in order to avoid distorting the dependency graph like
# <=portage-2.1.6.x did.
for virt_pkg, atoms in selected_atoms.items():
if debug:
writemsg_level("Candidates: %s: %s\n" % \
(virt_pkg.cpv, [str(x) for x in atoms]),
noiselevel=-1, level=logging.DEBUG)
# Just assume depth + 1 here for now, though it's not entirely
			# accurate since multiple levels of indirect virtual deps may
# have been traversed. The _add_pkg call will reset the depth to
# 0 if this package happens to match an argument.
if not self._add_pkg(virt_pkg,
Dependency(atom=Atom('=' + virt_pkg.cpv),
depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),
root=dep_root)):
return 0
for atom, child in self._minimize_children(
pkg, self._priority(runtime=True), root_config, atoms):
# This is a GLEP 37 virtual, so its deps are all runtime.
mypriority = self._priority(runtime=True)
if not atom.blocker and vardb.match(atom):
mypriority.satisfied = True
if not self._add_dep(Dependency(atom=atom,
blocker=atom.blocker, child=child, depth=virt_pkg.depth,
parent=virt_pkg, priority=mypriority, root=dep_root),
allow_unsatisfied=allow_unsatisfied):
return 0
if debug:
writemsg_level("Exiting... %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
return 1
def _minimize_children(self, parent, priority, root_config, atoms):
"""
Selects packages to satisfy the given atoms, and minimizes the
number of selected packages. This serves to identify and eliminate
redundant package selections when multiple atoms happen to specify
a version range.
"""
atom_pkg_map = {}
for atom in atoms:
if atom.blocker:
yield (atom, None)
continue
dep_pkg, existing_node = self._select_package(
root_config.root, atom)
if dep_pkg is None:
yield (atom, None)
continue
atom_pkg_map[atom] = dep_pkg
if len(atom_pkg_map) < 2:
for item in atom_pkg_map.items():
yield item
return
cp_pkg_map = {}
pkg_atom_map = {}
for atom, pkg in atom_pkg_map.items():
pkg_atom_map.setdefault(pkg, set()).add(atom)
cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
for cp, pkgs in cp_pkg_map.items():
if len(pkgs) < 2:
for pkg in pkgs:
for atom in pkg_atom_map[pkg]:
yield (atom, pkg)
continue
# Use a digraph to identify and eliminate any
# redundant package selections.
atom_pkg_graph = digraph()
cp_atoms = set()
for pkg1 in pkgs:
for atom in pkg_atom_map[pkg1]:
cp_atoms.add(atom)
atom_pkg_graph.add(pkg1, atom)
atom_set = InternalPackageSet(initial_atoms=(atom,))
for pkg2 in pkgs:
if pkg2 is pkg1:
continue
if atom_set.findAtomForPackage(pkg2):
atom_pkg_graph.add(pkg2, atom)
for pkg in pkgs:
eliminate_pkg = True
for atom in atom_pkg_graph.parent_nodes(pkg):
if len(atom_pkg_graph.child_nodes(atom)) < 2:
eliminate_pkg = False
break
if eliminate_pkg:
atom_pkg_graph.remove(pkg)
# Yield < and <= atoms first, since those are more likely to
# cause slot conflicts, and we want those atoms to be displayed
# in the resulting slot conflict message (see bug #291142).
less_than = []
not_less_than = []
for atom in cp_atoms:
if atom.operator in ('<', '<='):
less_than.append(atom)
else:
not_less_than.append(atom)
for atom in chain(less_than, not_less_than):
child_pkgs = atom_pkg_graph.child_nodes(atom)
yield (atom, child_pkgs[0])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
"""
Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
Yields non-disjunctive deps. Raises InvalidDependString when
necessary.
"""
i = 0
while i < len(dep_struct):
x = dep_struct[i]
if isinstance(x, list):
for y in self._queue_disjunctive_deps(
pkg, dep_root, dep_priority, x):
yield y
elif x == "||":
self._queue_disjunction(pkg, dep_root, dep_priority,
[ x, dep_struct[ i + 1 ] ] )
i += 1
else:
try:
x = portage.dep.Atom(x)
except portage.exception.InvalidAtom:
if not pkg.installed:
raise portage.exception.InvalidDependString(
"invalid atom: '%s'" % x)
else:
# Note: Eventually this will check for PROPERTIES=virtual
# or whatever other metadata gets implemented for this
# purpose.
if x.cp.startswith('virtual/'):
self._queue_disjunction( pkg, dep_root,
dep_priority, [ str(x) ] )
else:
yield str(x)
i += 1
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
self._dynamic_config._dep_disjunctive_stack.append(
(pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
"""
Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
populate self._dynamic_config._dep_stack.
"""
pkg, dep_root, dep_priority, dep_struct = \
self._dynamic_config._dep_disjunctive_stack.pop()
dep_string = portage.dep.paren_enclose(dep_struct)
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
return 0
return 1
def _priority(self, **kwargs):
if "remove" in self._dynamic_config.myparams:
priority_constructor = UnmergeDepPriority
else:
priority_constructor = DepPriority
return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
"""
@param root_config: a root config instance
@type root_config: RootConfig
@param atom_without_category: an atom without a category component
@type atom_without_category: String
@rtype: list
@returns: a list of atoms containing categories (possibly empty)
"""
null_cp = portage.dep_getkey(insert_category_into_atom(
atom_without_category, "null"))
cat, atom_pn = portage.catsplit(null_cp)
dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
categories = set()
for db, pkg_type, built, installed, db_keys in dbs:
for cat in db.categories:
if db.cp_list("%s/%s" % (cat, atom_pn)):
categories.add(cat)
deps = []
for cat in categories:
deps.append(Atom(insert_category_into_atom(
atom_without_category, cat)))
return deps
def _have_new_virt(self, root, atom_cp):
ret = False
for db, pkg_type, built, installed, db_keys in \
self._dynamic_config._filtered_trees[root]["dbs"]:
if db.cp_list(atom_cp):
ret = True
break
return ret
def _iter_atoms_for_pkg(self, pkg):
# TODO: add multiple $ROOT support
if pkg.root != self._frozen_config.target_root:
return
atom_arg_map = self._dynamic_config._atom_arg_map
root_config = self._frozen_config.roots[pkg.root]
for atom in self._dynamic_config._set_atoms.iterAtomsForPackage(pkg):
if atom.cp != pkg.cp and \
self._have_new_virt(pkg.root, atom.cp):
continue
visible_pkgs = \
self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
visible_pkgs.reverse() # descending order
higher_slot = None
for visible_pkg in visible_pkgs:
if visible_pkg.cp != atom.cp:
continue
if pkg >= visible_pkg:
# This is descending order, and we're not
# interested in any versions <= pkg given.
break
if pkg.slot_atom != visible_pkg.slot_atom:
higher_slot = visible_pkg
break
if higher_slot is not None:
continue
for arg in atom_arg_map[(atom, pkg.root)]:
if isinstance(arg, PackageArg) and \
arg.package != pkg:
continue
yield arg, atom
def select_files(self, myfiles):
"""Given a list of .tbz2s, .ebuilds sets, and deps, populate
self._dynamic_config._initial_arg_list and call self._resolve to create the
appropriate depgraph and return a favorite list."""
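		# Each argument is classified below: .tbz2 paths become binary
		# PackageArg instances, .ebuild paths become ebuild PackageArg
		# instances, absolute paths are resolved to their owning
		# packages, set names prefixed with SETPREFIX (or the bare names
		# system and world) become SetArg instances, and anything else
		# is treated as a package atom (AtomArg).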
self._load_vdb()
debug = "--debug" in self._frozen_config.myopts
root_config = self._frozen_config.roots[self._frozen_config.target_root]
sets = root_config.sets
getSetAtoms = root_config.setconfig.getSetAtoms
myfavorites=[]
myroot = self._frozen_config.target_root
dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[myroot]
args = []
onlydeps = "--onlydeps" in self._frozen_config.myopts
lookup_owners = []
for x in myfiles:
ext = os.path.splitext(x)[1]
if ext==".tbz2":
if not os.path.exists(x):
if os.path.exists(
os.path.join(pkgsettings["PKGDIR"], "All", x)):
x = os.path.join(pkgsettings["PKGDIR"], "All", x)
elif os.path.exists(
os.path.join(pkgsettings["PKGDIR"], x)):
x = os.path.join(pkgsettings["PKGDIR"], x)
else:
print("\n\n!!! Binary package '"+str(x)+"' does not exist.")
print("!!! Please ensure the tbz2 exists as specified.\n")
return 0, myfavorites
mytbz2=portage.xpak.tbz2(x)
mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
if os.path.realpath(x) != \
os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
print(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n"))
return 0, myfavorites
pkg = self._pkg(mykey, "binary", root_config,
onlydeps=onlydeps)
args.append(PackageArg(arg=x, package=pkg,
root_config=root_config))
elif ext==".ebuild":
ebuild_path = portage.util.normalize_path(os.path.abspath(x))
pkgdir = os.path.dirname(ebuild_path)
tree_root = os.path.dirname(os.path.dirname(pkgdir))
cp = pkgdir[len(tree_root)+1:]
e = portage.exception.PackageNotFound(
("%s is not in a valid portage tree " + \
"hierarchy or does not exist") % x)
if not portage.isvalidatom(cp):
raise e
cat = portage.catsplit(cp)[0]
mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
if not portage.isvalidatom("="+mykey):
raise e
ebuild_path = portdb.findname(mykey)
if ebuild_path:
if ebuild_path != os.path.join(os.path.realpath(tree_root),
cp, os.path.basename(ebuild_path)):
print(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n"))
return 0, myfavorites
if mykey not in portdb.xmatch(
"match-visible", portage.dep_getkey(mykey)):
print(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use"))
print(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man"))
print(colorize("BAD", "*** page for details."))
countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
"Continuing...")
else:
raise portage.exception.PackageNotFound(
"%s is not in a valid portage tree hierarchy or does not exist" % x)
pkg = self._pkg(mykey, "ebuild", root_config,
onlydeps=onlydeps)
args.append(PackageArg(arg=x, package=pkg,
root_config=root_config))
elif x.startswith(os.path.sep):
if not x.startswith(myroot):
portage.writemsg(("\n\n!!! '%s' does not start with" + \
" $ROOT.\n") % x, noiselevel=-1)
return 0, []
# Queue these up since it's most efficient to handle
# multiple files in a single iter_owners() call.
lookup_owners.append(x)
else:
if x in ("system", "world"):
x = SETPREFIX + x
if x.startswith(SETPREFIX):
s = x[len(SETPREFIX):]
if s not in sets:
raise portage.exception.PackageSetNotFound(s)
if s in self._dynamic_config._sets:
continue
# Recursively expand sets so that containment tests in
# self._get_parent_sets() properly match atoms in nested
# sets (like if world contains system).
expanded_set = InternalPackageSet(
initial_atoms=getSetAtoms(s))
self._dynamic_config._sets[s] = expanded_set
args.append(SetArg(arg=x, set=expanded_set,
root_config=root_config))
continue
if not is_valid_package_atom(x):
portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
noiselevel=-1)
portage.writemsg("!!! Please check ebuild(5) for full details.\n")
portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
return (0,[])
# Don't expand categories or old-style virtuals here unless
# necessary. Expansion of old-style virtuals here causes at
# least the following problems:
# 1) It's more difficult to determine which set(s) an atom
# came from, if any.
# 2) It takes away freedom from the resolver to choose other
# possible expansions when necessary.
if "/" in x:
args.append(AtomArg(arg=x, atom=Atom(x),
root_config=root_config))
continue
expanded_atoms = self._dep_expand(root_config, x)
installed_cp_set = set()
for atom in expanded_atoms:
if vardb.cp_list(atom.cp):
installed_cp_set.add(atom.cp)
if len(installed_cp_set) > 1:
non_virtual_cps = set()
for atom_cp in installed_cp_set:
if not atom_cp.startswith("virtual/"):
non_virtual_cps.add(atom_cp)
if len(non_virtual_cps) == 1:
installed_cp_set = non_virtual_cps
if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
installed_cp = next(iter(installed_cp_set))
expanded_atoms = [atom for atom in expanded_atoms \
if atom.cp == installed_cp]
# If a non-virtual package and one or more virtual packages
# are in expanded_atoms, use the non-virtual package.
if len(expanded_atoms) > 1:
number_of_virtuals = 0
for expanded_atom in expanded_atoms:
if expanded_atom.cp.startswith("virtual/"):
number_of_virtuals += 1
else:
candidate = expanded_atom
if len(expanded_atoms) - number_of_virtuals == 1:
expanded_atoms = [ candidate ]
if len(expanded_atoms) > 1:
print()
print()
ambiguous_package_name(x, expanded_atoms, root_config,
self._frozen_config.spinner, self._frozen_config.myopts)
return False, myfavorites
if expanded_atoms:
atom = expanded_atoms[0]
else:
null_atom = Atom(insert_category_into_atom(x, "null"))
cat, atom_pn = portage.catsplit(null_atom.cp)
virts_p = root_config.settings.get_virts_p().get(atom_pn)
if virts_p:
# Allow the depgraph to choose which virtual.
atom = Atom(null_atom.replace('null/', 'virtual/', 1))
else:
atom = null_atom
args.append(AtomArg(arg=x, atom=atom,
root_config=root_config))
if lookup_owners:
relative_paths = []
search_for_multiple = False
if len(lookup_owners) > 1:
search_for_multiple = True
for x in lookup_owners:
if not search_for_multiple and os.path.isdir(x):
search_for_multiple = True
relative_paths.append(x[len(myroot)-1:])
owners = set()
for pkg, relative_path in \
real_vardb._owners.iter_owners(relative_paths):
owners.add(pkg.mycpv)
if not search_for_multiple:
break
if not owners:
portage.writemsg(("\n\n!!! '%s' is not claimed " + \
"by any package.\n") % lookup_owners[0], noiselevel=-1)
return 0, []
for cpv in owners:
slot = vardb.aux_get(cpv, ["SLOT"])[0]
if not slot:
# portage now masks packages with missing slot, but it's
# possible that one was installed by an older version
atom = Atom(portage.cpv_getkey(cpv))
else:
atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
args.append(AtomArg(arg=atom, atom=atom,
root_config=root_config))
if "--update" in self._frozen_config.myopts:
# In some cases, the greedy slots behavior can pull in a slot that
# the user would want to uninstall due to it being blocked by a
# newer version in a different slot. Therefore, it's necessary to
# detect and discard any that should be uninstalled. Each time
# that arguments are updated, package selections are repeated in
# order to ensure consistency with the current arguments:
#
# 1) Initialize args
# 2) Select packages and generate initial greedy atoms
# 3) Update args with greedy atoms
# 4) Select packages and generate greedy atoms again, while
# accounting for any blockers between selected packages
# 5) Update args with revised greedy atoms
self._set_args(args)
greedy_args = []
for arg in args:
greedy_args.append(arg)
if not isinstance(arg, AtomArg):
continue
for atom in self._greedy_slots(arg.root_config, arg.atom):
greedy_args.append(
AtomArg(arg=arg.arg, atom=atom,
root_config=arg.root_config))
self._set_args(greedy_args)
del greedy_args
# Revise greedy atoms, accounting for any blockers
# between selected packages.
revised_greedy_args = []
for arg in args:
revised_greedy_args.append(arg)
if not isinstance(arg, AtomArg):
continue
for atom in self._greedy_slots(arg.root_config, arg.atom,
blocker_lookahead=True):
revised_greedy_args.append(
AtomArg(arg=arg.arg, atom=atom,
root_config=arg.root_config))
args = revised_greedy_args
del revised_greedy_args
self._set_args(args)
myfavorites = set(myfavorites)
for arg in args:
if isinstance(arg, (AtomArg, PackageArg)):
myfavorites.add(arg.atom)
elif isinstance(arg, SetArg):
myfavorites.add(arg.arg)
myfavorites = list(myfavorites)
if debug:
portage.writemsg("\n", noiselevel=-1)
# Order needs to be preserved since a feature of --nodeps
# is to allow the user to force a specific merge order.
self._dynamic_config._initial_arg_list = args[:]
return self._resolve(myfavorites)
def _resolve(self, myfavorites):
"""Given self._dynamic_config._initial_arg_list, pull in the root nodes,
		call self._create_graph to process their deps and return
a favorite list."""
debug = "--debug" in self._frozen_config.myopts
onlydeps = "--onlydeps" in self._frozen_config.myopts
myroot = self._frozen_config.target_root
pkgsettings = self._frozen_config.pkgsettings[myroot]
pprovideddict = pkgsettings.pprovideddict
virtuals = pkgsettings.getvirtuals()
for arg in self._dynamic_config._initial_arg_list:
for atom in arg.set:
self._spinner_update()
dep = Dependency(atom=atom, onlydeps=onlydeps,
root=myroot, parent=arg)
try:
pprovided = pprovideddict.get(atom.cp)
if pprovided and portage.match_from_list(atom, pprovided):
# A provided package has been specified on the command line.
self._dynamic_config._pprovided_args.append((arg, atom))
continue
if isinstance(arg, PackageArg):
if not self._add_pkg(arg.package, dep) or \
not self._create_graph():
if not self._dynamic_config._need_restart:
sys.stderr.write(("\n\n!!! Problem " + \
"resolving dependencies for %s\n") % \
arg.arg)
return 0, myfavorites
continue
if debug:
portage.writemsg(" Arg: %s\n Atom: %s\n" % \
(arg, atom), noiselevel=-1)
pkg, existing_node = self._select_package(
myroot, atom, onlydeps=onlydeps)
if not pkg:
pprovided_match = False
for virt_choice in virtuals.get(atom.cp, []):
expanded_atom = portage.dep.Atom(
atom.replace(atom.cp,
portage.dep_getkey(virt_choice), 1))
pprovided = pprovideddict.get(expanded_atom.cp)
if pprovided and \
portage.match_from_list(expanded_atom, pprovided):
# A provided package has been
# specified on the command line.
self._dynamic_config._pprovided_args.append((arg, atom))
pprovided_match = True
break
if pprovided_match:
continue
if not (isinstance(arg, SetArg) and \
arg.name in ("selected", "system", "world")):
self._dynamic_config._unsatisfied_deps_for_display.append(
((myroot, atom), {}))
return 0, myfavorites
self._dynamic_config._missing_args.append((arg, atom))
continue
if atom.cp != pkg.cp:
# For old-style virtuals, we need to repeat the
# package.provided check against the selected package.
expanded_atom = atom.replace(atom.cp, pkg.cp)
pprovided = pprovideddict.get(pkg.cp)
if pprovided and \
portage.match_from_list(expanded_atom, pprovided):
# A provided package has been
# specified on the command line.
self._dynamic_config._pprovided_args.append((arg, atom))
continue
if pkg.installed and "selective" not in self._dynamic_config.myparams:
self._dynamic_config._unsatisfied_deps_for_display.append(
((myroot, atom), {}))
# Previous behavior was to bail out in this case, but
# since the dep is satisfied by the installed package,
# it's more friendly to continue building the graph
# and just show a warning message. Therefore, only bail
# out here if the atom is not from either the system or
# world set.
if not (isinstance(arg, SetArg) and \
arg.name in ("selected", "system", "world")):
return 0, myfavorites
# Add the selected package to the graph as soon as possible
# so that later dep_check() calls can use it as feedback
# for making more consistent atom selections.
if not self._add_pkg(pkg, dep):
if self._dynamic_config._need_restart:
pass
elif isinstance(arg, SetArg):
sys.stderr.write(("\n\n!!! Problem resolving " + \
"dependencies for %s from %s\n") % \
(atom, arg.arg))
else:
sys.stderr.write(("\n\n!!! Problem resolving " + \
"dependencies for %s\n") % atom)
return 0, myfavorites
except portage.exception.MissingSignature as e:
portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
return 0, myfavorites
except portage.exception.InvalidSignature as e:
portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
return 0, myfavorites
except SystemExit as e:
raise # Needed else can't exit
except Exception as e:
print("\n\n!!! Problem in '%s' dependencies." % atom, file=sys.stderr)
print("!!!", str(e), getattr(e, "__module__", None), file=sys.stderr)
raise
# Now that the root packages have been added to the graph,
# process the dependencies.
if not self._create_graph():
return 0, myfavorites
missing = 0
if "--usepkgonly" in self._frozen_config.myopts:
for xs in self._dynamic_config.digraph.all_nodes():
if not isinstance(xs, Package):
continue
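# Package objects behave like their hash-key tuples when indexed here
# (roughly (type_name, root, cpv, operation)), so xs[0] is the package
# type, xs[2] the cpv and xs[3] the scheduled operation ("merge"
# meaning the package still has to be built or installed).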
if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
if missing == 0:
print()
missing += 1
print("Missing binary for:",xs[2])
try:
self.altlist()
except self._unknown_internal_error:
return False, myfavorites
# The return value is true unless binaries are missing.
return (not missing, myfavorites)
def _set_args(self, args):
"""
Create the "args" package set from atoms and packages given as
arguments. This method can be called multiple times if necessary.
The package selection cache is automatically invalidated, since
arguments influence package selections.
"""
args_set = self._dynamic_config._sets["args"]
args_set.clear()
for arg in args:
if not isinstance(arg, (AtomArg, PackageArg)):
continue
atom = arg.atom
if atom in args_set:
continue
args_set.add(atom)
self._dynamic_config._set_atoms.clear()
self._dynamic_config._set_atoms.update(chain(*self._dynamic_config._sets.values()))
atom_arg_map = self._dynamic_config._atom_arg_map
atom_arg_map.clear()
for arg in args:
for atom in arg.set:
atom_key = (atom, arg.root_config.root)
refs = atom_arg_map.get(atom_key)
if refs is None:
refs = []
atom_arg_map[atom_key] = refs
if arg not in refs:
refs.append(arg)
# Invalidate the package selection cache, since
# arguments influence package selections.
self._dynamic_config._highest_pkg_cache.clear()
for trees in self._dynamic_config._filtered_trees.values():
trees["porttree"].dbapi._clear_cache()
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
"""
Return a list of slot atoms corresponding to installed slots that
differ from the slot of the highest visible match. When
blocker_lookahead is True, slot atoms that would trigger a blocker
conflict are automatically discarded, potentially allowing automatic
uninstallation of older slots when appropriate.
"""
highest_pkg, in_graph = self._select_package(root_config.root, atom)
if highest_pkg is None:
return []
vardb = root_config.trees["vartree"].dbapi
slots = set()
for cpv in vardb.match(atom):
# don't mix new virtuals with old virtuals
if portage.cpv_getkey(cpv) == highest_pkg.cp:
slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
slots.add(highest_pkg.metadata["SLOT"])
if len(slots) == 1:
return []
greedy_pkgs = []
slots.remove(highest_pkg.metadata["SLOT"])
while slots:
slot = slots.pop()
slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
pkg, in_graph = self._select_package(root_config.root, slot_atom)
if pkg is not None and \
pkg.cp == highest_pkg.cp and pkg < highest_pkg:
greedy_pkgs.append(pkg)
if not greedy_pkgs:
return []
if not blocker_lookahead:
return [pkg.slot_atom for pkg in greedy_pkgs]
blockers = {}
blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
for pkg in greedy_pkgs + [highest_pkg]:
dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
try:
selected_atoms = self._select_atoms(
pkg.root, dep_str, pkg.use.enabled,
parent=pkg, strict=True)
except portage.exception.InvalidDependString:
continue
blocker_atoms = []
for atoms in selected_atoms.values():
blocker_atoms.extend(x for x in atoms if x.blocker)
blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
if highest_pkg not in blockers:
return []
# filter packages with invalid deps
greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
# filter packages that conflict with highest_pkg
greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
(blockers[highest_pkg].findAtomForPackage(pkg) or \
blockers[pkg].findAtomForPackage(highest_pkg))]
if not greedy_pkgs:
return []
# If two packages conflict, discard the lower version.
discard_pkgs = set()
greedy_pkgs.sort(reverse=True)
for i in range(len(greedy_pkgs) - 1):
pkg1 = greedy_pkgs[i]
if pkg1 in discard_pkgs:
continue
for j in range(i + 1, len(greedy_pkgs)):
pkg2 = greedy_pkgs[j]
if pkg2 in discard_pkgs:
continue
if blockers[pkg1].findAtomForPackage(pkg2) or \
blockers[pkg2].findAtomForPackage(pkg1):
# pkg1 > pkg2
discard_pkgs.add(pkg2)
return [pkg.slot_atom for pkg in greedy_pkgs \
if pkg not in discard_pkgs]
def _select_atoms_from_graph(self, *pargs, **kwargs):
"""
Prefer atoms matching packages that have already been
added to the graph or those that are installed and have
not been scheduled for replacement.
"""
kwargs["trees"] = self._dynamic_config._graph_trees
return self._select_atoms_highest_available(*pargs, **kwargs)
def _select_atoms_highest_available(self, root, depstring,
myuse=None, parent=None, strict=True, trees=None, priority=None):
"""This will raise InvalidDependString if necessary. If trees is
None then self._dynamic_config._filtered_trees is used."""
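# Return shape, for reference: when parent is None the plain atom list
# from portage.dep_check() is returned; otherwise a dict mapping the
# parent and each chosen virtual provider to the list of child atoms
# selected beneath it (built from atom_graph below).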
pkgsettings = self._frozen_config.pkgsettings[root]
if trees is None:
trees = self._dynamic_config._filtered_trees
atom_graph = digraph()
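# atom_graph is handed to portage.dep_check() through the temporary
# trees[root]["atom_graph"] entry set up below; dep_check records in it
# which package each virtual atom was expanded through, and that graph
# is walked afterwards to build the per-package selected_atoms mapping
# when a parent was given.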
if True:
try:
if parent is not None:
trees[root]["parent"] = parent
trees[root]["atom_graph"] = atom_graph
if priority is not None:
trees[root]["priority"] = priority
if not strict:
portage.dep._dep_check_strict = False
mycheck = portage.dep_check(depstring, None,
pkgsettings, myuse=myuse,
myroot=root, trees=trees)
finally:
if parent is not None:
trees[root].pop("parent")
trees[root].pop("atom_graph")
if priority is not None:
trees[root].pop("priority")
portage.dep._dep_check_strict = True
if not mycheck[0]:
raise portage.exception.InvalidDependString(mycheck[1])
if parent is None:
selected_atoms = mycheck[1]
else:
chosen_atoms = frozenset(mycheck[1])
selected_atoms = {parent : []}
for node in atom_graph:
if isinstance(node, Atom):
continue
if node is parent:
pkg = parent
else:
pkg, virt_atom = node
if virt_atom not in chosen_atoms:
continue
if not portage.match_from_list(virt_atom, [pkg]):
# Typically this means that the atom
# specifies USE deps that are unsatisfied
# by the selected package. The caller will
# record this as an unsatisfied dependency
# when necessary.
continue
selected_atoms[pkg] = [atom for atom in \
atom_graph.child_nodes(node) if atom in chosen_atoms]
return selected_atoms
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
check_backtrack=False):
"""
When check_backtrack=True, no output is produced and
the method either returns or raises _backtrack_mask if
a matching package has been masked by backtracking.
"""
backtrack_mask = False
atom_set = InternalPackageSet(initial_atoms=(atom,))
xinfo = '"%s"' % atom
if arg:
xinfo = '"%s"' % arg
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
masked_packages = []
missing_use = []
masked_pkg_instances = set()
missing_licenses = []
have_eapi_mask = False
pkgsettings = self._frozen_config.pkgsettings[root]
implicit_iuse = pkgsettings._get_implicit_iuse()
root_config = self._frozen_config.roots[root]
portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
dbs = self._dynamic_config._filtered_trees[root]["dbs"]
for db, pkg_type, built, installed, db_keys in dbs:
if installed:
continue
match = db.match
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all", atom.without_use)
else:
cpv_list = db.match(atom.without_use)
# descending order
cpv_list.reverse()
for cpv in cpv_list:
metadata, mreasons = get_mask_info(root_config, cpv,
pkgsettings, db, pkg_type, built, installed, db_keys)
if metadata is not None:
pkg = self._pkg(cpv, pkg_type, root_config,
installed=installed)
# pkg.metadata contains calculated USE for ebuilds,
# required later for getMissingLicenses.
metadata = pkg.metadata
if pkg.cp != atom.cp:
# A cpv can be returned from dbapi.match() as an
# old-style virtual match even in cases when the
# package does not actually PROVIDE the virtual.
# Filter out any such false matches here.
if not atom_set.findAtomForPackage(pkg):
continue
if pkg in self._dynamic_config._runtime_pkg_mask:
backtrack_reasons = \
self._dynamic_config._runtime_pkg_mask[pkg]
mreasons.append('backtracking: %s' % \
', '.join(sorted(backtrack_reasons)))
backtrack_mask = True
if mreasons:
masked_pkg_instances.add(pkg)
if atom.use:
missing_use.append(pkg)
if not mreasons:
continue
masked_packages.append(
(root_config, pkgsettings, cpv, metadata, mreasons))
if check_backtrack:
if backtrack_mask:
raise self._backtrack_mask()
else:
return
missing_use_reasons = []
missing_iuse_reasons = []
for pkg in missing_use:
use = pkg.use.enabled
iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
iuse_re = re.compile("^(%s)$" % "|".join(iuse))
missing_iuse = []
for x in atom.use.required:
if iuse_re.match(x) is None:
missing_iuse.append(x)
mreasons = []
if missing_iuse:
mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
missing_iuse_reasons.append((pkg, mreasons))
else:
need_enable = sorted(atom.use.enabled.difference(use))
need_disable = sorted(atom.use.disabled.intersection(use))
if need_enable or need_disable:
changes = []
changes.extend(colorize("red", "+" + x) \
for x in need_enable)
changes.extend(colorize("blue", "-" + x) \
for x in need_disable)
mreasons.append("Change USE: %s" % " ".join(changes))
missing_use_reasons.append((pkg, mreasons))
unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
in missing_use_reasons if pkg not in masked_pkg_instances]
unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
in missing_iuse_reasons if pkg not in masked_pkg_instances]
show_missing_use = False
if unmasked_use_reasons:
# Only show the latest version.
show_missing_use = unmasked_use_reasons[:1]
elif unmasked_iuse_reasons:
if missing_use_reasons:
# All packages with required IUSE are masked,
# so display a normal masking message.
pass
else:
show_missing_use = unmasked_iuse_reasons
if show_missing_use:
print("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".")
print("!!! One of the following packages is required to complete your request:")
for pkg, mreasons in show_missing_use:
print("- "+pkg.cpv+" ("+", ".join(mreasons)+")")
elif masked_packages:
print("\n!!! " + \
colorize("BAD", "All ebuilds that could satisfy ") + \
colorize("INFORM", xinfo) + \
colorize("BAD", " have been masked."))
print("!!! One of the following masked packages is required to complete your request:")
have_eapi_mask = show_masked_packages(masked_packages)
if have_eapi_mask:
print()
msg = ("The current version of portage supports " + \
"EAPI '%s'. You must upgrade to a newer version" + \
" of portage before EAPI masked packages can" + \
" be installed.") % portage.const.EAPI
for line in textwrap.wrap(msg, 75):
print(line)
print()
show_mask_docs()
else:
print("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".")
# Show parent nodes and the argument that pulled them in.
traversed_nodes = set()
node = myparent
msg = []
while node is not None:
traversed_nodes.add(node)
msg.append('(dependency required by "%s" [%s])' % \
(colorize('INFORM', str(node.cpv)), node.type_name))
if node not in self._dynamic_config.digraph:
# The parent is not in the graph due to backtracking.
break
# When traversing to parents, prefer arguments over packages
# since arguments are root nodes. Never traverse the same
# package twice, in order to prevent an infinite loop.
selected_parent = None
for parent in self._dynamic_config.digraph.parent_nodes(node):
if isinstance(parent, DependencyArg):
msg.append('(dependency required by "%s" [argument])' % \
(colorize('INFORM', str(parent))))
selected_parent = None
break
if parent not in traversed_nodes:
selected_parent = parent
node = selected_parent
for line in msg:
print(line)
print()
def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
"""
Iterate over Package instances of pkg_type matching the given atom.
This does not check visibility and it also does not match USE for
unbuilt ebuilds since USE is lazily calculated after visibility
checks (to avoid the expense when possible).
"""
db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all", atom)
else:
cpv_list = db.match(atom)
# USE=multislot can make an installed package appear as if
# it doesn't satisfy a slot dependency. Rebuilding the ebuild
# won't do any good as long as USE=multislot is enabled since
# the newly built package still won't have the expected slot.
# Therefore, assume that such SLOT dependencies are already
# satisfied rather than forcing a rebuild.
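# Example of the situation described above (hypothetical values): with
# USE=multislot, sys-devel/gcc can be installed with SLOT set to the
# full version (e.g. 4.3.4) while the matching ebuild advertises
# SLOT=4.3, so a :4.3 slot dep never matches the installed instance
# even though a rebuild would not change that.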
installed = pkg_type == 'installed'
if installed and not cpv_list and atom.slot:
for cpv in db.match(atom.cp):
slot_available = False
for other_db, other_type, other_built, \
other_installed, other_keys in \
self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
try:
if atom.slot == \
other_db.aux_get(cpv, ["SLOT"])[0]:
slot_available = True
break
except KeyError:
pass
if not slot_available:
continue
inst_pkg = self._pkg(cpv, "installed",
root_config, installed=installed)
# Remove the slot from the atom and verify that
# the package matches the resulting atom.
atom_without_slot = portage.dep.remove_slot(atom)
if atom.use:
atom_without_slot += str(atom.use)
atom_without_slot = portage.dep.Atom(atom_without_slot)
if portage.match_from_list(
atom_without_slot, [inst_pkg]):
cpv_list = [inst_pkg.cpv]
break
if cpv_list:
# descending order
cpv_list.reverse()
for cpv in cpv_list:
try:
pkg = self._pkg(cpv, pkg_type, root_config,
installed=installed, onlydeps=onlydeps)
except portage.exception.PackageNotFound:
pass
else:
if pkg.cp != atom.cp:
# A cpv can be returned from dbapi.match() as an
# old-style virtual match even in cases when the
# package does not actually PROVIDE the virtual.
# Filter out any such false matches here.
if not InternalPackageSet(initial_atoms=(atom,)
).findAtomForPackage(pkg):
continue
yield pkg
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
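# Results are memoized per (root, atom, onlydeps) in _highest_pkg_cache;
# a cached entry is upgraded in place once the chosen package shows up
# in _slot_pkg_map (i.e. has been added to the graph), and _set_args()
# clears the cache whenever the argument set changes.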
cache_key = (root, atom, onlydeps)
ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
if ret is not None:
pkg, existing = ret
if pkg and not existing:
existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
if existing and existing == pkg:
# Update the cache to reflect that the
# package has been added to the graph.
ret = pkg, pkg
self._dynamic_config._highest_pkg_cache[cache_key] = ret
return ret
ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
self._dynamic_config._highest_pkg_cache[cache_key] = ret
pkg, existing = ret
if pkg is not None:
settings = pkg.root_config.settings
if visible(settings, pkg) and not (pkg.installed and \
settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
return ret
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
root_config = self._frozen_config.roots[root]
pkgsettings = self._frozen_config.pkgsettings[root]
dbs = self._dynamic_config._filtered_trees[root]["dbs"]
vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
# List of acceptable packages, ordered by type preference.
matched_packages = []
highest_version = None
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom)
atom_cp = atom.cp
atom_set = InternalPackageSet(initial_atoms=(atom,))
existing_node = None
myeb = None
usepkgonly = "--usepkgonly" in self._frozen_config.myopts
empty = "empty" in self._dynamic_config.myparams
selective = "selective" in self._dynamic_config.myparams
reinstall = False
noreplace = "--noreplace" in self._frozen_config.myopts
avoid_update = "--update" not in self._frozen_config.myopts
use_ebuild_visibility = self._frozen_config.myopts.get(
'--use-ebuild-visibility', 'n') != 'n'
# Behavior of the "selective" parameter depends on
# whether or not a package matches an argument atom.
# If an installed package provides an old-style
# virtual that is no longer provided by an available
# package, the installed package may match an argument
# atom even though none of the available packages do.
# Therefore, "selective" logic does not consider
# whether or not an installed package matches an
# argument atom. It only considers whether or not
# available packages match argument atoms, which is
# represented by the found_available_arg flag.
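# Hypothetical illustration: an installed package that still PROVIDEs
# virtual/x11 may match a virtual/x11 argument even though no available
# ebuild does; in that case found_available_arg stays False, so the
# installed copy is not forced to be replaced merely because it matches
# the argument.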
found_available_arg = False
for find_existing_node in True, False:
if existing_node:
break
for db, pkg_type, built, installed, db_keys in dbs:
if existing_node:
break
if installed and not find_existing_node:
want_reinstall = reinstall or empty or \
(found_available_arg and not selective)
if want_reinstall and matched_packages:
continue
for pkg in self._iter_match_pkgs(root_config, pkg_type, atom,
onlydeps=onlydeps):
if pkg in self._dynamic_config._runtime_pkg_mask:
# The package has been masked by the backtracking logic
continue
cpv = pkg.cpv
# Make --noreplace take precedence over --newuse.
if not pkg.installed and noreplace and \
cpv in vardb.match(atom):
# If the installed version is masked, it may
# be necessary to look at lower versions,
# in case there is a visible downgrade.
continue
reinstall_for_flags = None
if not pkg.installed or \
(matched_packages and not avoid_update):
# Only enforce visibility on installed packages
# if there is at least one other visible package
# available. By filtering installed masked packages
# here, packages that have been masked since they
# were installed can be automatically downgraded
# to an unmasked version.
try:
if not visible(pkgsettings, pkg):
continue
except portage.exception.InvalidDependString:
if not installed:
continue
# Enable upgrade or downgrade to a version
# with visible KEYWORDS when the installed
# version is masked by KEYWORDS, but never
# reinstall the same exact version only due
# to a KEYWORDS mask. See bug #252167.
if matched_packages:
different_version = None
for avail_pkg in matched_packages:
if not portage.dep.cpvequal(
pkg.cpv, avail_pkg.cpv):
different_version = avail_pkg
break
if different_version is not None:
# If the ebuild no longer exists or its
# keywords have been dropped, reject built
# instances (installed or binary).
# If --usepkgonly is enabled, assume that
# the ebuild status should be ignored.
if not use_ebuild_visibility and usepkgonly:
if installed and \
pkgsettings._getMissingKeywords(
pkg.cpv, pkg.metadata):
continue
else:
try:
pkg_eb = self._pkg(
pkg.cpv, "ebuild", root_config)
except portage.exception.PackageNotFound:
continue
else:
if not visible(pkgsettings, pkg_eb):
continue
# Calculation of USE for unbuilt ebuilds is relatively
# expensive, so it is only performed lazily, after the
# above visibility checks are complete.
myarg = None
if root == self._frozen_config.target_root:
try:
myarg = next(self._iter_atoms_for_pkg(pkg))
except StopIteration:
pass
except portage.exception.InvalidDependString:
if not installed:
# masked by corruption
continue
if not installed and myarg:
found_available_arg = True
if atom.use and not pkg.built:
use = pkg.use.enabled
if atom.use.enabled.difference(use):
continue
if atom.use.disabled.intersection(use):
continue
if pkg.cp == atom_cp:
if highest_version is None:
highest_version = pkg
elif pkg > highest_version:
highest_version = pkg
# At this point, we've found the highest visible
# match from the current repo. Any lower versions
# from this repo are ignored, so the loop will
# always end with a break statement below this
# point.
if find_existing_node:
e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
if not e_pkg:
break
# Use PackageSet.findAtomForPackage()
# for PROVIDE support.
if atom_set.findAtomForPackage(e_pkg):
if highest_version and \
e_pkg.cp == atom_cp and \
e_pkg < highest_version and \
e_pkg.slot_atom != highest_version.slot_atom:
# There is a higher version available in a
# different slot, so this existing node is
# irrelevant.
pass
else:
matched_packages.append(e_pkg)
existing_node = e_pkg
break
# Compare built package to current config and
# reject the built package if necessary.
if built and not installed and \
("--newuse" in self._frozen_config.myopts or \
"--reinstall" in self._frozen_config.myopts or \
"--binpkg-respect-use" in self._frozen_config.myopts):
iuses = pkg.iuse.all
old_use = pkg.use.enabled
if myeb:
pkgsettings.setcpv(myeb)
else:
pkgsettings.setcpv(pkg)
now_use = pkgsettings["PORTAGE_USE"].split()
forced_flags = set()
forced_flags.update(pkgsettings.useforce)
forced_flags.update(pkgsettings.usemask)
cur_iuse = iuses
if myeb and not usepkgonly:
cur_iuse = myeb.iuse.all
if self._reinstall_for_flags(forced_flags,
old_use, iuses,
now_use, cur_iuse):
break
# Compare current config to installed package
# and do not reinstall if possible.
if not installed and \
("--newuse" in self._frozen_config.myopts or \
"--reinstall" in self._frozen_config.myopts) and \
cpv in vardb.match(atom):
pkgsettings.setcpv(pkg)
forced_flags = set()
forced_flags.update(pkgsettings.useforce)
forced_flags.update(pkgsettings.usemask)
old_use = vardb.aux_get(cpv, ["USE"])[0].split()
old_iuse = set(filter_iuse_defaults(
vardb.aux_get(cpv, ["IUSE"])[0].split()))
cur_use = pkg.use.enabled