| # Copyright 1999-2011 Gentoo Foundation |
| # Distributed under the terms of the GNU General Public License v2 |
| |
| from __future__ import print_function |
| |
| import difflib |
| import gc |
| import logging |
| import re |
| import sys |
| import textwrap |
| from collections import deque |
| from itertools import chain |
| |
| import portage |
| from portage import os, OrderedDict |
| from portage import _unicode_decode |
| from portage.const import PORTAGE_PACKAGE_ATOM |
| from portage.dbapi import dbapi |
| from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use |
| from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use |
| from portage.exception import InvalidAtom, InvalidDependString |
| from portage.output import colorize, create_color_func, \ |
| darkgreen, green |
| bad = create_color_func("BAD") |
| from portage.package.ebuild.getmaskingstatus import \ |
| _getmaskingstatus, _MaskReason |
| from portage._sets import SETPREFIX |
| from portage._sets.base import InternalPackageSet |
| from portage.util import cmp_sort_key, writemsg, writemsg_stdout |
| from portage.util import writemsg_level |
| from portage.util.digraph import digraph |
| from portage.versions import catpkgsplit |
| |
| from _emerge.AtomArg import AtomArg |
| from _emerge.Blocker import Blocker |
| from _emerge.BlockerCache import BlockerCache |
| from _emerge.BlockerDepPriority import BlockerDepPriority |
| from _emerge.countdown import countdown |
| from _emerge.create_world_atom import create_world_atom |
| from _emerge.Dependency import Dependency |
| from _emerge.DependencyArg import DependencyArg |
| from _emerge.DepPriority import DepPriority |
| from _emerge.DepPriorityNormalRange import DepPriorityNormalRange |
| from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange |
| from _emerge.FakeVartree import FakeVartree |
| from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps |
| from _emerge.is_valid_package_atom import is_valid_package_atom |
| from _emerge.Package import Package |
| from _emerge.PackageArg import PackageArg |
| from _emerge.PackageVirtualDbapi import PackageVirtualDbapi |
| from _emerge.RootConfig import RootConfig |
| from _emerge.search import search |
| from _emerge.SetArg import SetArg |
| from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice |
| from _emerge.UnmergeDepPriority import UnmergeDepPriority |
| from _emerge.UseFlagDisplay import pkg_use_display |
| |
| from _emerge.resolver.slot_collision import slot_conflict_handler |
| from _emerge.resolver.circular_dependency import circular_dependency_handler |
| from _emerge.resolver.output import display |
| |
| if sys.hexversion >= 0x3000000: |
| basestring = str |
| long = int |
| |
| class _scheduler_graph_config(object): |
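| """Simple container (purpose inferred from its name and use) for the |
| resolved graph state handed to a Scheduler: trees, package cache, |
| digraph, and merge list.""" |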
| def __init__(self, trees, pkg_cache, graph, mergelist): |
| self.trees = trees |
| self.pkg_cache = pkg_cache |
| self.graph = graph |
| self.mergelist = mergelist |
| |
| def _wildcard_set(atoms): |
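| """Build an InternalPackageSet from a list of atom strings, falling |
| back to a "*/" category wildcard for tokens that are not valid atoms |
| on their own (e.g. "qt" becomes "*/qt").""" |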
| pkgs = InternalPackageSet(allow_wildcard=True) |
| for x in atoms: |
| try: |
| x = Atom(x, allow_wildcard=True) |
| except portage.exception.InvalidAtom: |
| x = Atom("*/" + x, allow_wildcard=True) |
| pkgs.add(x) |
| return pkgs |
| |
| class _frozen_depgraph_config(object): |
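| """Depgraph state that stays constant for the lifetime of a resolver |
| run and is shared by backtracking runs: trees (with FakeVartree |
| substituted for the real vartree), package settings, and atom sets |
| derived from options such as --exclude and --rebuild-exclude.""" |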
| |
| def __init__(self, settings, trees, myopts, spinner): |
| self.settings = settings |
| self.target_root = settings["ROOT"] |
| self.myopts = myopts |
| self.edebug = 0 |
| if settings.get("PORTAGE_DEBUG", "") == "1": |
| self.edebug = 1 |
| self.spinner = spinner |
| self._running_root = trees["/"]["root_config"] |
| self._opts_no_restart = frozenset(["--buildpkgonly", |
| "--fetchonly", "--fetch-all-uri", "--pretend"]) |
| self.pkgsettings = {} |
| self.trees = {} |
| self._trees_orig = trees |
| self.roots = {} |
| # All Package instances |
| self._pkg_cache = {} |
| self._highest_license_masked = {} |
| for myroot in trees: |
| self.trees[myroot] = {} |
| # Create a RootConfig instance that references |
| # the FakeVartree instead of the real one. |
| self.roots[myroot] = RootConfig( |
| trees[myroot]["vartree"].settings, |
| self.trees[myroot], |
| trees[myroot]["root_config"].setconfig) |
| for tree in ("porttree", "bintree"): |
| self.trees[myroot][tree] = trees[myroot][tree] |
| self.trees[myroot]["vartree"] = \ |
| FakeVartree(trees[myroot]["root_config"], |
| pkg_cache=self._pkg_cache, |
| pkg_root_config=self.roots[myroot]) |
| self.pkgsettings[myroot] = portage.config( |
| clone=self.trees[myroot]["vartree"].settings) |
| |
| self._required_set_names = set(["world"]) |
| |
| atoms = ' '.join(myopts.get("--exclude", [])).split() |
| self.excluded_pkgs = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split() |
| self.reinstall_atoms = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split() |
| self.usepkg_exclude = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split() |
| self.useoldpkg_atoms = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split() |
| self.rebuild_exclude = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split() |
| self.rebuild_ignore = _wildcard_set(atoms) |
| |
| self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts |
| self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts |
| self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts |
| |
| class _depgraph_sets(object): |
| def __init__(self): |
| # contains all sets added to the graph |
| self.sets = {} |
| # contains non-set atoms given as arguments |
| self.sets['__non_set_args__'] = InternalPackageSet() |
| # contains all atoms from all sets added to the graph, including |
| # atoms given as arguments |
| self.atoms = InternalPackageSet() |
| self.atom_arg_map = {} |
| |
| class _rebuild_config(object): |
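| """Tracks packages that may need to be rebuilt or reinstalled in |
| order to honor the --rebuild-if-new-rev, --rebuild-if-new-ver and |
| --rebuild-if-unbuilt options.""" |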
| def __init__(self, frozen_config, rebuild_list, reinstall_list): |
| self._graph = digraph() |
| self._frozen_config = frozen_config |
| self.rebuild_list = (rebuild_list or set()).copy() |
| self.orig_rebuild_list = self.rebuild_list.copy() |
| self.reinstall_list = (reinstall_list or set()).copy() |
| self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev |
| self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver |
| self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt |
| self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or |
| self.rebuild_if_unbuilt) |
| |
| def add(self, dep_pkg, dep): |
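| """Record a dependency edge from dep_pkg to its built parent when |
| rebuild triggers are active, unless the parent is matched by |
| --rebuild-exclude or the dependency by --rebuild-ignore.""" |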
| parent = dep.collapsed_parent |
| priority = dep.collapsed_priority |
| rebuild_exclude = self._frozen_config.rebuild_exclude |
| rebuild_ignore = self._frozen_config.rebuild_ignore |
| if (self.rebuild and isinstance(parent, Package) and |
| parent.built and (priority.buildtime or priority.runtime) and |
| isinstance(dep_pkg, Package) and |
| not rebuild_exclude.findAtomForPackage(parent) and |
| not rebuild_ignore.findAtomForPackage(dep_pkg)): |
| self._graph.add(dep_pkg, parent, priority) |
| |
| def _needs_rebuild(self, dep_pkg): |
| """Check whether packages that depend on dep_pkg need to be rebuilt.""" |
| dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom) |
| if dep_pkg.built or dep_root_slot in self.orig_rebuild_list: |
| return False |
| |
| if self.rebuild_if_unbuilt: |
| # dep_pkg is being installed from source, so binary |
| # packages for parents are invalid. Force rebuild. |
| return True |
| |
| trees = self._frozen_config.trees |
| vardb = trees[dep_pkg.root]["vartree"].dbapi |
| if self.rebuild_if_new_rev: |
| # Parent packages are valid if a package with the same |
| # cpv is already installed. |
| return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom) |
| |
| # Otherwise, parent packages are valid if a package with the same |
| # version (excluding revision) is already installed. |
| assert self.rebuild_if_new_ver |
| cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1] |
| for inst_cpv in vardb.match(dep_pkg.slot_atom): |
| inst_cpv_norev = catpkgsplit(inst_cpv)[:-1] |
| if inst_cpv_norev == cpv_norev: |
| return False |
| |
| return True |
| |
| def _trigger_rebuild(self, parent, build_deps, runtime_deps): |
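| """Decide whether parent must be rebuilt from source or reinstalled |
| from a binary package, based on its children that are both |
| build-time and run-time dependencies. Returns True if a rebuild or |
| reinstall was scheduled.""" |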
| root_slot = (parent.root, parent.slot_atom) |
| if root_slot in self.rebuild_list: |
| return False |
| trees = self._frozen_config.trees |
| children = set(build_deps).intersection(runtime_deps) |
| reinstall = False |
| for slot_atom in children: |
| kids = set([build_deps[slot_atom], runtime_deps[slot_atom]]) |
| for dep_pkg in kids: |
| dep_root_slot = (dep_pkg.root, slot_atom) |
| if self._needs_rebuild(dep_pkg): |
| self.rebuild_list.add(root_slot) |
| return True |
| elif ("--usepkg" in self._frozen_config.myopts and |
| (dep_root_slot in self.reinstall_list or |
| dep_root_slot in self.rebuild_list or |
| not dep_pkg.installed)): |
| |
| # A direct rebuild dependency is being installed. We |
| # should also update the parent to the latest binary, |
| # if that binary is valid. |
| # |
| # To validate the binary, we check whether all of the |
| # rebuild dependencies are present on the same binhost. |
| # |
| # 1) If parent is present on the binhost, but one of its |
| # rebuild dependencies is not, then the parent should |
| # be rebuilt from source. |
| # 2) Otherwise, the parent binary is assumed to be valid, |
| # because all of its rebuild dependencies are |
| # consistent. |
| bintree = trees[parent.root]["bintree"] |
| uri = bintree.get_pkgindex_uri(parent.cpv) |
| dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv) |
| bindb = bintree.dbapi |
| if self.rebuild_if_new_ver and uri and uri != dep_uri: |
| cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1] |
| for cpv in bindb.match(dep_pkg.slot_atom): |
| if cpv_norev == catpkgsplit(cpv)[:-1]: |
| dep_uri = bintree.get_pkgindex_uri(cpv) |
| if uri == dep_uri: |
| break |
| if uri and uri != dep_uri: |
| # 1) Remote binary package is invalid because it was |
| # built without dep_pkg. Force rebuild. |
| self.rebuild_list.add(root_slot) |
| return True |
| elif (parent.installed and |
| root_slot not in self.reinstall_list): |
| inst_build_time = parent.metadata.get("BUILD_TIME") |
| try: |
| bin_build_time, = bindb.aux_get(parent.cpv, |
| ["BUILD_TIME"]) |
| except KeyError: |
| continue |
| if bin_build_time != inst_build_time: |
| # 2) Remote binary package is valid, and local package |
| # is not up to date. Force reinstall. |
| reinstall = True |
| if reinstall: |
| self.reinstall_list.add(root_slot) |
| return reinstall |
| |
| def trigger_rebuilds(self): |
| """ |
| Trigger rebuilds where necessary. If pkgA has been updated, and pkgB |
| depends on pkgA at both build-time and run-time, pkgB needs to be |
| rebuilt. |
| """ |
| need_restart = False |
| graph = self._graph |
| build_deps = {} |
| runtime_deps = {} |
| leaf_nodes = deque(graph.leaf_nodes()) |
| |
| def ignore_non_runtime(priority): |
| return not priority.runtime |
| |
| def ignore_non_buildtime(priority): |
| return not priority.buildtime |
| |
| # Trigger rebuilds bottom-up (starting with the leaves) so that parents |
| # will always know which children are being rebuilt. |
| while not graph.empty(): |
| if not leaf_nodes: |
| # We're interested in the intersection of buildtime and runtime, |
| # so ignore edges that do not contain both. |
| leaf_nodes.extend(graph.leaf_nodes( |
| ignore_priority=ignore_non_runtime)) |
| if not leaf_nodes: |
| leaf_nodes.extend(graph.leaf_nodes( |
| ignore_priority=ignore_non_buildtime)) |
| if not leaf_nodes: |
| # We'll have to drop an edge that is both |
| # buildtime and runtime. This should be |
| # quite rare. |
| leaf_nodes.append(graph.order[-1]) |
| |
| node = leaf_nodes.popleft() |
| if node not in graph: |
| # This can be triggered by circular dependencies. |
| continue |
| slot_atom = node.slot_atom |
| |
| # Remove our leaf node from the graph, keeping track of deps. |
| parents = graph.nodes[node][1].items() |
| graph.remove(node) |
| node_build_deps = build_deps.get(node, {}) |
| node_runtime_deps = runtime_deps.get(node, {}) |
| for parent, priorities in parents: |
| if parent == node: |
| # Ignore a direct cycle. |
| continue |
| parent_bdeps = build_deps.setdefault(parent, {}) |
| parent_rdeps = runtime_deps.setdefault(parent, {}) |
| for priority in priorities: |
| if priority.buildtime: |
| parent_bdeps[slot_atom] = node |
| if priority.runtime: |
| parent_rdeps[slot_atom] = node |
| if slot_atom in parent_bdeps and slot_atom in parent_rdeps: |
| parent_rdeps.update(node_runtime_deps) |
| if not graph.child_nodes(parent): |
| leaf_nodes.append(parent) |
| |
| # Trigger rebuilds for our leaf node. Because all of our children |
| # have been processed, build_deps and runtime_deps will be |
| # completely filled in, and self.rebuild_list / self.reinstall_list |
| # will tell us whether any of our children need to be rebuilt or |
| # reinstalled. |
| if self._trigger_rebuild(node, node_build_deps, node_runtime_deps): |
| need_restart = True |
| |
| return need_restart |
| |
| |
| class _dynamic_depgraph_config(object): |
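| """Mutable depgraph state that is recreated for every backtracking |
| run, in contrast to _frozen_depgraph_config which is shared between |
| runs: the digraph itself, slot conflict and blocker bookkeeping, and |
| the masks and changes accumulated for autounmask and backtracking.""" |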
| |
| def __init__(self, depgraph, myparams, allow_backtracking, |
| runtime_pkg_mask, needed_unstable_keywords, needed_use_config_changes, needed_license_changes, |
| rebuild_list=None, reinstall_list=None): |
| self.myparams = myparams.copy() |
| self._vdb_loaded = False |
| self._allow_backtracking = allow_backtracking |
| # Maps slot atom to package for each Package added to the graph. |
| self._slot_pkg_map = {} |
| # Maps nodes to the reasons they were selected for reinstallation. |
| self._reinstall_nodes = {} |
| self.mydbapi = {} |
| # Contains a filtered view of preferred packages that are selected |
| # from available repositories. |
| self._filtered_trees = {} |
| # Contains installed packages and new packages that have been added |
| # to the graph. |
| self._graph_trees = {} |
| # Caches visible packages returned from _select_package, for use in |
| # depgraph._iter_atoms_for_pkg() SLOT logic. |
| self._visible_pkgs = {} |
| # contains the args created by select_files |
| self._initial_arg_list = [] |
| self.digraph = portage.digraph() |
| # manages sets added to the graph |
| self.sets = {} |
| # contains all nodes pulled in by self.sets |
| self._set_nodes = set() |
| # Contains only Blocker -> Uninstall edges |
| self._blocker_uninstalls = digraph() |
| # Contains only Package -> Blocker edges |
| self._blocker_parents = digraph() |
| # Contains only irrelevant Package -> Blocker edges |
| self._irrelevant_blockers = digraph() |
| # Contains only unsolvable Package -> Blocker edges |
| self._unsolvable_blockers = digraph() |
| # Contains all Blocker -> Blocked Package edges |
| self._blocked_pkgs = digraph() |
| # Contains world packages that have been protected from |
| # uninstallation but may not have been added to the graph |
| # if the graph is not complete yet. |
| self._blocked_world_pkgs = {} |
| # Contains packages whose dependencies have been traversed. |
| # This is used to check if we have accounted for blockers |
| # relevant to a package. |
| self._traversed_pkg_deps = set() |
| self._slot_collision_info = {} |
| # Slot collision nodes are not allowed to block other packages since |
| # blocker validation is only able to account for one package per slot. |
| self._slot_collision_nodes = set() |
| self._parent_atoms = {} |
| self._slot_conflict_parent_atoms = set() |
| self._slot_conflict_handler = None |
| self._circular_dependency_handler = None |
| self._serialized_tasks_cache = None |
| self._scheduler_graph = None |
| self._displayed_list = None |
| self._pprovided_args = [] |
| self._missing_args = [] |
| self._masked_installed = set() |
| self._masked_license_updates = set() |
| self._unsatisfied_deps_for_display = [] |
| self._unsatisfied_blockers_for_display = None |
| self._circular_deps_for_display = None |
| self._dep_stack = [] |
| self._dep_disjunctive_stack = [] |
| self._unsatisfied_deps = [] |
| self._initially_unsatisfied_deps = [] |
| self._ignored_deps = [] |
| self._highest_pkg_cache = {} |
| |
| if runtime_pkg_mask is None: |
| runtime_pkg_mask = {} |
| else: |
| runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \ |
| runtime_pkg_mask.items()) |
| |
| if needed_unstable_keywords is None: |
| self._needed_unstable_keywords = set() |
| else: |
| self._needed_unstable_keywords = needed_unstable_keywords.copy() |
| |
| if needed_license_changes is None: |
| self._needed_license_changes = {} |
| else: |
| self._needed_license_changes = needed_license_changes.copy() |
| |
| if needed_use_config_changes is None: |
| self._needed_use_config_changes = {} |
| else: |
| self._needed_use_config_changes = \ |
| dict((k.copy(), (v[0].copy(), v[1].copy())) for (k, v) in \ |
| needed_use_config_changes.items()) |
| |
| self._autounmask = depgraph._frozen_config.myopts.get('--autounmask', 'n') is True |
| |
| self._runtime_pkg_mask = runtime_pkg_mask |
| self._need_restart = False |
| # For conditions that always require user intervention, such as |
| # unsatisfied REQUIRED_USE (currently has no autounmask support). |
| self._skip_restart = False |
| self._traverse_ignored_deps = False |
| |
| for myroot in depgraph._frozen_config.trees: |
| self.sets[myroot] = _depgraph_sets() |
| self._slot_pkg_map[myroot] = {} |
| vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi |
| # This dbapi instance will model the state that the vdb will |
| # have after new packages have been installed. |
| fakedb = PackageVirtualDbapi(vardb.settings) |
| |
| self.mydbapi[myroot] = fakedb |
| def graph_tree(): |
| pass |
| graph_tree.dbapi = fakedb |
| self._graph_trees[myroot] = {} |
| self._filtered_trees[myroot] = {} |
| # Substitute the graph tree for the vartree in dep_check() since we |
| # want atom selections to be consistent with package selections |
| # that have already been made. |
| self._graph_trees[myroot]["porttree"] = graph_tree |
| self._graph_trees[myroot]["vartree"] = graph_tree |
| def filtered_tree(): |
| pass |
| filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot) |
| self._filtered_trees[myroot]["porttree"] = filtered_tree |
| self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings) |
| |
| # Passing in graph_tree as the vartree here could lead to better |
| # atom selections in some cases by causing atoms for packages that |
| # have been added to the graph to be preferred over other choices. |
| # However, it can trigger atom selections that result in |
| # unresolvable direct circular dependencies. For example, this |
| # happens with gwydion-dylan which depends on either itself or |
| # gwydion-dylan-bin. In case gwydion-dylan is not yet installed, |
| # gwydion-dylan-bin needs to be selected in order to avoid |
| # an unresolvable direct circular dependency. |
| # |
| # To solve the problem described above, pass in "graph_db" so that |
| # packages that have been added to the graph are distinguishable |
| # from other available packages and installed packages. Also, pass |
| # the parent package into self._select_atoms() calls so that |
| # unresolvable direct circular dependencies can be detected and |
| # avoided when possible. |
| self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi |
| self._filtered_trees[myroot]["vartree"] = \ |
| depgraph._frozen_config.trees[myroot]["vartree"] |
| |
| dbs = [] |
| # (db, pkg_type, built, installed, db_keys) |
| if "remove" in self.myparams: |
| # For removal operations, use _dep_check_composite_db |
| # for availability and visibility checks. This provides |
| # consistency with install operations, so we don't |
| # get install/uninstall cycles like in bug #332719. |
| self._graph_trees[myroot]["porttree"] = filtered_tree |
| else: |
| if "--usepkgonly" not in depgraph._frozen_config.myopts: |
| portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi |
| db_keys = list(portdb._aux_cache_keys) |
| dbs.append((portdb, "ebuild", False, False, db_keys)) |
| |
| if "--usepkg" in depgraph._frozen_config.myopts: |
| bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi |
| db_keys = list(bindb._aux_cache_keys) |
| dbs.append((bindb, "binary", True, False, db_keys)) |
| |
| vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi |
| db_keys = list(depgraph._frozen_config._trees_orig[myroot |
| ]["vartree"].dbapi._aux_cache_keys) |
| dbs.append((vardb, "installed", True, True, db_keys)) |
| self._filtered_trees[myroot]["dbs"] = dbs |
| |
| class depgraph(object): |
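| """The dependency graph resolver: builds a digraph of packages to |
| merge from the given arguments, selecting packages from the ebuild, |
| binary and installed package databases according to the options in |
| myopts.""" |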
| |
| pkg_tree_map = RootConfig.pkg_tree_map |
| |
| _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"] |
| |
| def __init__(self, settings, trees, myopts, myparams, spinner, |
| frozen_config=None, runtime_pkg_mask=None, needed_unstable_keywords=None, \ |
| needed_use_config_changes=None, needed_license_changes=None, allow_backtracking=False, |
| rebuild_list=None, reinstall_list=None): |
| if frozen_config is None: |
| frozen_config = _frozen_depgraph_config(settings, trees, |
| myopts, spinner) |
| self._frozen_config = frozen_config |
| self._dynamic_config = _dynamic_depgraph_config(self, myparams, |
| allow_backtracking, runtime_pkg_mask, needed_unstable_keywords, \ |
| needed_use_config_changes, needed_license_changes) |
| self._rebuild = _rebuild_config(frozen_config, rebuild_list, |
| reinstall_list) |
| |
| self._select_atoms = self._select_atoms_highest_available |
| self._select_package = self._select_pkg_highest_available |
| |
| def _load_vdb(self): |
| """ |
| Load installed package metadata if appropriate. This used to be called |
| from the constructor, but that wasn't very nice since this procedure |
| is slow and it generates spinner output. So, now it's called on-demand |
| by various methods when necessary. |
| """ |
| |
| if self._dynamic_config._vdb_loaded: |
| return |
| |
| for myroot in self._frozen_config.trees: |
| |
| preload_installed_pkgs = \ |
| "--nodeps" not in self._frozen_config.myopts |
| |
| fake_vartree = self._frozen_config.trees[myroot]["vartree"] |
| if not fake_vartree.dbapi: |
| # This needs to be called for the first depgraph, but not for |
| # backtracking depgraphs that share the same frozen_config. |
| fake_vartree.sync() |
| |
| # FakeVartree.sync() populates virtuals, and we want |
| # self.pkgsettings to have them populated too. |
| self._frozen_config.pkgsettings[myroot] = \ |
| portage.config(clone=fake_vartree.settings) |
| |
| if preload_installed_pkgs: |
| vardb = fake_vartree.dbapi |
| fakedb = self._dynamic_config._graph_trees[ |
| myroot]["vartree"].dbapi |
| |
| for pkg in vardb: |
| self._spinner_update() |
| # This triggers metadata updates via FakeVartree. |
| vardb.aux_get(pkg.cpv, []) |
| fakedb.cpv_inject(pkg) |
| |
| # Now that the vardb state is cached in our FakeVartree, |
| # we won't be needing the real vartree cache for a while. |
| # To make some room on the heap, clear the vardbapi |
| # caches. |
| self._frozen_config._trees_orig[myroot |
| ]["vartree"].dbapi._clear_cache() |
| gc.collect() |
| |
| self._dynamic_config._vdb_loaded = True |
| |
| def _spinner_update(self): |
| if self._frozen_config.spinner: |
| self._frozen_config.spinner.update() |
| |
| def _show_missed_update(self): |
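| """Show the highest missed update for each slot, as recorded in |
| _runtime_pkg_mask, separated into slot conflicts and missing |
| dependencies.""" |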
| |
| # In order to minimize noise, show only the highest |
| # missed update from each SLOT. |
| missed_updates = {} |
| for pkg, mask_reasons in \ |
| self._dynamic_config._runtime_pkg_mask.items(): |
| if pkg.installed: |
| # Exclude installed here since we only |
| # want to show available updates. |
| continue |
| k = (pkg.root, pkg.slot_atom) |
| if k in missed_updates: |
| other_pkg, mask_type, parent_atoms = missed_updates[k] |
| if other_pkg > pkg: |
| continue |
| for mask_type, parent_atoms in mask_reasons.items(): |
| if not parent_atoms: |
| continue |
| missed_updates[k] = (pkg, mask_type, parent_atoms) |
| break |
| |
| if not missed_updates: |
| return |
| |
| missed_update_types = {} |
| for pkg, mask_type, parent_atoms in missed_updates.values(): |
| missed_update_types.setdefault(mask_type, |
| []).append((pkg, parent_atoms)) |
| |
| if '--quiet' in self._frozen_config.myopts and \ |
| '--debug' not in self._frozen_config.myopts: |
| missed_update_types.pop("slot conflict", None) |
| missed_update_types.pop("missing dependency", None) |
| |
| self._show_missed_update_slot_conflicts( |
| missed_update_types.get("slot conflict")) |
| |
| self._show_missed_update_unsatisfied_dep( |
| missed_update_types.get("missing dependency")) |
| |
| def _show_missed_update_unsatisfied_dep(self, missed_updates): |
| |
| if not missed_updates: |
| return |
| |
| backtrack_masked = [] |
| |
| for pkg, parent_atoms in missed_updates: |
| |
| try: |
| for parent, root, atom in parent_atoms: |
| self._show_unsatisfied_dep(root, atom, myparent=parent, |
| check_backtrack=True) |
| except self._backtrack_mask: |
| # This is displayed below in abbreviated form. |
| backtrack_masked.append((pkg, parent_atoms)) |
| continue |
| |
| writemsg("\n!!! The following update has been skipped " + \ |
| "due to unsatisfied dependencies:\n\n", noiselevel=-1) |
| |
| writemsg(str(pkg.slot_atom), noiselevel=-1) |
| if pkg.root != '/': |
| writemsg(" for %s" % (pkg.root,), noiselevel=-1) |
| writemsg("\n", noiselevel=-1) |
| |
| for parent, root, atom in parent_atoms: |
| self._show_unsatisfied_dep(root, atom, myparent=parent) |
| writemsg("\n", noiselevel=-1) |
| |
| if backtrack_masked: |
| # These are shown in abbreviated form, in order to avoid terminal |
| # flooding from mask messages as reported in bug #285832. |
| writemsg("\n!!! The following update(s) have been skipped " + \ |
| "due to unsatisfied dependencies\n" + \ |
| "!!! triggered by backtracking:\n\n", noiselevel=-1) |
| for pkg, parent_atoms in backtrack_masked: |
| writemsg(str(pkg.slot_atom), noiselevel=-1) |
| if pkg.root != '/': |
| writemsg(" for %s" % (pkg.root,), noiselevel=-1) |
| writemsg("\n", noiselevel=-1) |
| |
| def _show_missed_update_slot_conflicts(self, missed_updates): |
| |
| if not missed_updates: |
| return |
| |
| msg = [] |
| msg.append("\n!!! One or more updates have been skipped due to " + \ |
| "a dependency conflict:\n\n") |
| |
| indent = " " |
| for pkg, parent_atoms in missed_updates: |
| msg.append(str(pkg.slot_atom)) |
| if pkg.root != '/': |
| msg.append(" for %s" % (pkg.root,)) |
| msg.append("\n\n") |
| |
| for parent, atom in parent_atoms: |
| msg.append(indent) |
| msg.append(str(pkg)) |
| |
| msg.append(" conflicts with\n") |
| msg.append(2*indent) |
| if isinstance(parent, |
| (PackageArg, AtomArg)): |
| # For PackageArg and AtomArg types, it's |
| # redundant to display the atom attribute. |
| msg.append(str(parent)) |
| else: |
| # Display the specific atom from SetArg or |
| # Package types. |
| msg.append("%s required by %s" % (atom, parent)) |
| msg.append("\n") |
| msg.append("\n") |
| |
| writemsg("".join(msg), noiselevel=-1) |
| |
| def _show_slot_collision_notice(self): |
| """Show an informational message advising the user to mask one of the |
| the packages. In some cases it may be possible to resolve this |
| automatically, but support for backtracking (removal nodes that have |
| already been selected) will be required in order to handle all possible |
| cases. |
| """ |
| |
| if not self._dynamic_config._slot_collision_info: |
| return |
| |
| self._show_merge_list() |
| |
| self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self) |
| handler = self._dynamic_config._slot_conflict_handler |
| |
| conflict = handler.get_conflict() |
| writemsg(conflict, noiselevel=-1) |
| |
| explanation = handler.get_explanation() |
| if explanation: |
| writemsg(explanation, noiselevel=-1) |
| return |
| |
| if "--quiet" in self._frozen_config.myopts: |
| return |
| |
| msg = [] |
| msg.append("It may be possible to solve this problem ") |
| msg.append("by using package.mask to prevent one of ") |
| msg.append("those packages from being selected. ") |
| msg.append("However, it is also possible that conflicting ") |
| msg.append("dependencies exist such that they are impossible to ") |
| msg.append("satisfy simultaneously. If such a conflict exists in ") |
| msg.append("the dependencies of two different packages, then those ") |
| msg.append("packages can not be installed simultaneously.") |
| backtrack_opt = self._frozen_config.myopts.get('--backtrack') |
| if not self._dynamic_config._allow_backtracking and \ |
| (backtrack_opt is None or \ |
| (backtrack_opt > 0 and backtrack_opt < 30)): |
| msg.append(" You may want to try a larger value of the ") |
| msg.append("--backtrack option, such as --backtrack=30, ") |
| msg.append("in order to see if that will solve this conflict ") |
| msg.append("automatically.") |
| |
| for line in textwrap.wrap(''.join(msg), 70): |
| writemsg(line + '\n', noiselevel=-1) |
| writemsg('\n', noiselevel=-1) |
| |
| msg = [] |
| msg.append("For more information, see MASKED PACKAGES ") |
| msg.append("section in the emerge man page or refer ") |
| msg.append("to the Gentoo Handbook.") |
| for line in textwrap.wrap(''.join(msg), 70): |
| writemsg(line + '\n', noiselevel=-1) |
| writemsg('\n', noiselevel=-1) |
| |
| def _process_slot_conflicts(self): |
| """ |
| Process slot conflict data to identify specific atoms which |
| lead to conflict. These atoms only match a subset of the |
| packages that have been pulled into a given slot. |
| """ |
| for (slot_atom, root), slot_nodes \ |
| in self._dynamic_config._slot_collision_info.items(): |
| |
| all_parent_atoms = set() |
| for pkg in slot_nodes: |
| parent_atoms = self._dynamic_config._parent_atoms.get(pkg) |
| if not parent_atoms: |
| continue |
| all_parent_atoms.update(parent_atoms) |
| |
| for pkg in slot_nodes: |
| parent_atoms = self._dynamic_config._parent_atoms.get(pkg) |
| if parent_atoms is None: |
| parent_atoms = set() |
| self._dynamic_config._parent_atoms[pkg] = parent_atoms |
| for parent_atom in all_parent_atoms: |
| if parent_atom in parent_atoms: |
| continue |
| # Use package set for matching since it will match via |
| # PROVIDE when necessary, while match_from_list does not. |
| parent, atom = parent_atom |
| atom_set = InternalPackageSet( |
| initial_atoms=(atom,)) |
| if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)): |
| parent_atoms.add(parent_atom) |
| else: |
| self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom) |
| |
| def _reinstall_for_flags(self, forced_flags, |
| orig_use, orig_iuse, cur_use, cur_iuse): |
| """Return a set of flags that trigger reinstallation, or None if there |
| are no such flags.""" |
| if "--newuse" in self._frozen_config.myopts or \ |
| "--binpkg-respect-use" in self._frozen_config.myopts: |
| flags = set(orig_iuse.symmetric_difference( |
| cur_iuse).difference(forced_flags)) |
| flags.update(orig_iuse.intersection(orig_use).symmetric_difference( |
| cur_iuse.intersection(cur_use))) |
| if flags: |
| return flags |
| elif "changed-use" == self._frozen_config.myopts.get("--reinstall"): |
| flags = orig_iuse.intersection(orig_use).symmetric_difference( |
| cur_iuse.intersection(cur_use)) |
| if flags: |
| return flags |
| return None |
| |
| def _create_graph(self, allow_unsatisfied=False): |
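| """Process the dependency stacks until they are empty, adding |
| packages and their deps to the graph. Returns 1 on success and 0 if |
| an unsatisfied dependency aborts graph creation.""" |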
| dep_stack = self._dynamic_config._dep_stack |
| dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack |
| while dep_stack or dep_disjunctive_stack: |
| self._spinner_update() |
| while dep_stack: |
| dep = dep_stack.pop() |
| if isinstance(dep, Package): |
| if not self._add_pkg_deps(dep, |
| allow_unsatisfied=allow_unsatisfied): |
| return 0 |
| continue |
| if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied): |
| return 0 |
| if dep_disjunctive_stack: |
| if not self._pop_disjunction(allow_unsatisfied): |
| return 0 |
| return 1 |
| |
| def _expand_set_args(self, input_args, add_to_digraph=False): |
| """ |
| Iterate over a list of DependencyArg instances and yield all |
| instances given in the input together with additional SetArg |
| instances that are generated from nested sets. |
| @param input_args: An iterable of DependencyArg instances |
| @type input_args: Iterable |
| @param add_to_digraph: If True then add SetArg instances |
| to the digraph, in order to record parent -> child |
| relationships from nested sets |
| @type add_to_digraph: Boolean |
| @rtype: Iterable |
| @returns: All args given in the input together with additional |
| SetArg instances that are generated from nested sets |
| """ |
| |
| traversed_set_args = set() |
| |
| for arg in input_args: |
| if not isinstance(arg, SetArg): |
| yield arg |
| continue |
| |
| root_config = arg.root_config |
| depgraph_sets = self._dynamic_config.sets[root_config.root] |
| arg_stack = [arg] |
| while arg_stack: |
| arg = arg_stack.pop() |
| if arg in traversed_set_args: |
| continue |
| traversed_set_args.add(arg) |
| yield arg |
| |
| # Traverse nested sets and add them to the stack |
| # if they're not already in the graph. Also, graph |
| # edges between parent and nested sets. |
| for token in arg.pset.getNonAtoms(): |
| if not token.startswith(SETPREFIX): |
| continue |
| s = token[len(SETPREFIX):] |
| nested_set = depgraph_sets.sets.get(s) |
| if nested_set is None: |
| nested_set = root_config.sets.get(s) |
| if nested_set is not None: |
| nested_arg = SetArg(arg=token, pset=nested_set, |
| root_config=root_config) |
| arg_stack.append(nested_arg) |
| if add_to_digraph: |
| self._dynamic_config.digraph.add(nested_arg, arg, |
| priority=BlockerDepPriority.instance) |
| depgraph_sets.sets[nested_arg.name] = nested_arg.pset |
| |
| def _add_dep(self, dep, allow_unsatisfied=False): |
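| """Add a single Dependency to the graph. Blockers become Blocker |
| nodes, while other deps are resolved via self._select_package(). |
| Returns 0 when the dependency is unsatisfied and backtracking or |
| error display is required, and 1 otherwise.""" |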
| debug = "--debug" in self._frozen_config.myopts |
| buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts |
| nodeps = "--nodeps" in self._frozen_config.myopts |
| deep = self._dynamic_config.myparams.get("deep", 0) |
| recurse = deep is True or dep.depth <= deep |
| if dep.blocker: |
| if not buildpkgonly and \ |
| not nodeps and \ |
| not dep.collapsed_priority.ignored and \ |
| not dep.collapsed_priority.optional and \ |
| dep.parent not in self._dynamic_config._slot_collision_nodes: |
| if dep.parent.onlydeps: |
| # It's safe to ignore blockers if the |
| # parent is an --onlydeps node. |
| return 1 |
| # The blocker applies to the root where |
| # the parent is or will be installed. |
| blocker = Blocker(atom=dep.atom, |
| eapi=dep.parent.metadata["EAPI"], |
| priority=dep.priority, root=dep.parent.root) |
| self._dynamic_config._blocker_parents.add(blocker, dep.parent) |
| return 1 |
| |
| if dep.child is None: |
| dep_pkg, existing_node = self._select_package(dep.root, dep.atom, |
| onlydeps=dep.onlydeps) |
| else: |
| # The caller has selected a specific package |
| # via self._minimize_packages(). |
| dep_pkg = dep.child |
| existing_node = self._dynamic_config._slot_pkg_map[ |
| dep.root].get(dep_pkg.slot_atom) |
| |
| if not dep_pkg: |
| if (dep.collapsed_priority.optional or |
| dep.collapsed_priority.ignored): |
| # This is an unnecessary build-time dep. |
| return 1 |
| if allow_unsatisfied: |
| self._dynamic_config._unsatisfied_deps.append(dep) |
| return 1 |
| self._dynamic_config._unsatisfied_deps_for_display.append( |
| ((dep.root, dep.atom), {"myparent":dep.parent})) |
| |
| # The parent node should not already be in |
| # runtime_pkg_mask, since that would trigger an |
| # infinite backtracking loop. |
| if self._dynamic_config._allow_backtracking: |
| if dep.parent in self._dynamic_config._runtime_pkg_mask: |
| if "--debug" in self._frozen_config.myopts: |
| writemsg( |
| "!!! backtracking loop detected: %s %s\n" % \ |
| (dep.parent, |
| self._dynamic_config._runtime_pkg_mask[ |
| dep.parent]), noiselevel=-1) |
| else: |
| # Do not backtrack if only USE flags have to be changed in |
| # order to satisfy the dependency. |
| dep_pkg, existing_node = \ |
| self._select_package(dep.root, dep.atom.without_use, |
| onlydeps=dep.onlydeps) |
| if dep_pkg is None: |
| self._dynamic_config._runtime_pkg_mask.setdefault( |
| dep.parent, {})["missing dependency"] = \ |
| set([(dep.parent, dep.root, dep.atom)]) |
| self._dynamic_config._need_restart = True |
| if "--debug" in self._frozen_config.myopts: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("backtracking due to unsatisfied dep:") |
| msg.append(" parent: %s" % dep.parent) |
| msg.append(" priority: %s" % dep.priority) |
| msg.append(" root: %s" % dep.root) |
| msg.append(" atom: %s" % dep.atom) |
| msg.append("") |
| writemsg_level("".join("%s\n" % l for l in msg), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| return 0 |
| |
| self._rebuild.add(dep_pkg, dep) |
| |
| ignore = dep.collapsed_priority.ignored and \ |
| not self._dynamic_config._traverse_ignored_deps |
| if not ignore and not self._add_pkg(dep_pkg, dep): |
| return 0 |
| return 1 |
| |
| def _check_slot_conflict(self, pkg, atom): |
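| """Return a tuple (existing_node, matches), where existing_node is |
| the package that already occupies pkg's slot in the graph (if any) |
| and matches indicates whether that package also satisfies the given |
| atom.""" |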
| existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom) |
| matches = None |
| if existing_node: |
| matches = pkg.cpv == existing_node.cpv |
| if pkg != existing_node and \ |
| atom is not None: |
| # Use package set for matching since it will match via |
| # PROVIDE when necessary, while match_from_list does not. |
| matches = bool(InternalPackageSet(initial_atoms=(atom,), |
| ).findAtomForPackage(existing_node, |
| modified_use=self._pkg_use_enabled(existing_node))) |
| |
| return (existing_node, matches) |
| |
| def _add_pkg(self, pkg, dep): |
| myparent = None |
| priority = None |
| depth = 0 |
| if dep is None: |
| dep = Dependency() |
| else: |
| myparent = dep.parent |
| priority = dep.priority |
| depth = dep.depth |
| if priority is None: |
| priority = DepPriority() |
| """ |
| Fills the digraph with nodes comprised of packages to merge. |
| mybigkey is the package spec of the package to merge. |
| myparent is the package depending on mybigkey ( or None ) |
| addme = Should we add this package to the digraph or are we just looking at it's deps? |
| Think --onlydeps, we need to ignore packages in that case. |
| #stuff to add: |
| #SLOT-aware emerge |
| #IUSE-aware emerge -> USE DEP aware depgraph |
| #"no downgrade" emerge |
| """ |
| # Ensure that the dependencies of the same package |
| # are never processed more than once. |
| previously_added = pkg in self._dynamic_config.digraph |
| |
| # select the correct /var database that we'll be checking against |
| vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi |
| pkgsettings = self._frozen_config.pkgsettings[pkg.root] |
| |
| arg_atoms = None |
| if True: |
| try: |
| arg_atoms = list(self._iter_atoms_for_pkg(pkg)) |
| except portage.exception.InvalidDependString as e: |
| if not pkg.installed: |
| # should have been masked before it was selected |
| raise |
| del e |
| |
| # NOTE: REQUIRED_USE checks are delayed until after |
| # package selection, since we want to prompt the user |
| # for USE adjustment rather than have REQUIRED_USE |
| # affect package selection and || dep choices. |
| if not pkg.built and pkg.metadata["REQUIRED_USE"] and \ |
| eapi_has_required_use(pkg.metadata["EAPI"]): |
| required_use_is_sat = check_required_use( |
| pkg.metadata["REQUIRED_USE"], |
| self._pkg_use_enabled(pkg), |
| pkg.iuse.is_valid_flag) |
| if not required_use_is_sat: |
| if dep.atom is not None and dep.parent is not None: |
| self._add_parent_atom(pkg, (dep.parent, dep.atom)) |
| |
| if arg_atoms: |
| for parent_atom in arg_atoms: |
| parent, atom = parent_atom |
| self._add_parent_atom(pkg, parent_atom) |
| |
| atom = dep.atom |
| if atom is None: |
| atom = Atom("=" + pkg.cpv) |
| self._dynamic_config._unsatisfied_deps_for_display.append( |
| ((pkg.root, atom), {"myparent":dep.parent})) |
| self._dynamic_config._skip_restart = True |
| return 0 |
| |
| if not pkg.onlydeps: |
| if not pkg.installed and \ |
| "empty" not in self._dynamic_config.myparams and \ |
| vardbapi.match(pkg.slot_atom): |
| # Increase the priority of dependencies on packages that |
| # are being rebuilt. This optimizes merge order so that |
| # dependencies are rebuilt/updated as soon as possible, |
| # which is needed especially when emerge is called by |
| # revdep-rebuild since dependencies may be affected by ABI |
| # breakage that has rendered them useless. Don't adjust |
| # priority here when in "empty" mode since all packages |
| # are being merged in that case. |
| priority.rebuild = True |
| |
| existing_node, existing_node_matches = \ |
| self._check_slot_conflict(pkg, dep.atom) |
| slot_collision = False |
| if existing_node: |
| if existing_node_matches: |
| # The existing node can be reused. |
| if arg_atoms: |
| for parent_atom in arg_atoms: |
| parent, atom = parent_atom |
| self._dynamic_config.digraph.add(existing_node, parent, |
| priority=priority) |
| self._add_parent_atom(existing_node, parent_atom) |
| # If a direct circular dependency is not an unsatisfied |
| # buildtime dependency then drop it here since otherwise |
| # it can skew the merge order calculation in an unwanted |
| # way. |
| if existing_node != myparent or \ |
| (priority.buildtime and not priority.satisfied): |
| self._dynamic_config.digraph.addnode(existing_node, myparent, |
| priority=priority) |
| if dep.atom is not None and dep.parent is not None: |
| self._add_parent_atom(existing_node, |
| (dep.parent, dep.atom)) |
| return 1 |
| else: |
| # A slot conflict has occurred. |
| # The existing node should not already be in |
| # runtime_pkg_mask, since that would trigger an |
| # infinite backtracking loop. |
| if self._dynamic_config._allow_backtracking and \ |
| existing_node in \ |
| self._dynamic_config._runtime_pkg_mask: |
| if "--debug" in self._frozen_config.myopts: |
| writemsg( |
| "!!! backtracking loop detected: %s %s\n" % \ |
| (existing_node, |
| self._dynamic_config._runtime_pkg_mask[ |
| existing_node]), noiselevel=-1) |
| elif self._dynamic_config._allow_backtracking and \ |
| not self._accept_blocker_conflicts(): |
| self._add_slot_conflict(pkg) |
| if dep.atom is not None and dep.parent is not None: |
| self._add_parent_atom(pkg, (dep.parent, dep.atom)) |
| if arg_atoms: |
| for parent_atom in arg_atoms: |
| parent, atom = parent_atom |
| self._add_parent_atom(pkg, parent_atom) |
| self._process_slot_conflicts() |
| |
| backtrack_data = [] |
| fallback_data = [] |
| all_parents = set() |
| # The ordering of backtrack_data can make |
| # a difference here, because both mask actions may lead |
| # to valid, but different, solutions and the one with |
| # 'existing_node' masked is usually the better one. Because |
| # of that, we choose an order such that |
| # the backtracker will first explore the choice with |
| # existing_node masked. The backtracker reverses the |
| # order, so the order it uses is the reverse of the |
| # order shown here. See bug #339606. |
| for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node): |
| # For missed update messages, find out which |
| # atoms matched to_be_selected that did not |
| # match to_be_masked. |
| parent_atoms = \ |
| self._dynamic_config._parent_atoms.get(to_be_selected, set()) |
| if parent_atoms: |
| conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms) |
| if conflict_atoms: |
| parent_atoms = conflict_atoms |
| |
| all_parents.update(parent_atoms) |
| |
| all_match = True |
| for parent, atom in parent_atoms: |
| i = InternalPackageSet(initial_atoms=(atom,)) |
| if not i.findAtomForPackage(to_be_masked): |
| all_match = False |
| break |
| |
| if to_be_selected >= to_be_masked: |
| # We only care about the parent atoms |
| # when they trigger a downgrade. |
| parent_atoms = set() |
| |
| fallback_data.append((to_be_masked, parent_atoms)) |
| |
| if all_match: |
| # 'to_be_masked' does not violate any parent atom, which means |
| # there is no point in masking it. |
| pass |
| else: |
| backtrack_data.append((to_be_masked, parent_atoms)) |
| |
| if not backtrack_data: |
| # This shouldn't happen, but fall back to the old |
| # behavior if this gets triggered somehow. |
| backtrack_data = fallback_data |
| |
| if len(backtrack_data) > 1: |
| # NOTE: Generally, we prefer to mask the higher |
| # version since this solves common cases in which a |
| # lower version is needed so that all dependencies |
| # will be satisfied (bug #337178). However, if |
| # existing_node happens to be installed then we |
| # mask that since this is a common case that is |
| # triggered when --update is not enabled. |
| if existing_node.installed: |
| pass |
| elif pkg > existing_node: |
| backtrack_data.reverse() |
| |
| to_be_masked, parent_atoms = backtrack_data[-1] |
| |
| self._dynamic_config._runtime_pkg_mask.setdefault( |
| to_be_masked, {})["slot conflict"] = parent_atoms |
| self._dynamic_config._need_restart = True |
| if "--debug" in self._frozen_config.myopts: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("backtracking due to slot conflict:") |
| if backtrack_data is fallback_data: |
| msg.append("!!! backtrack_data fallback") |
| msg.append(" first package: %s" % existing_node) |
| msg.append(" second package: %s" % pkg) |
| msg.append(" package to mask: %s" % to_be_masked) |
| msg.append(" slot: %s" % pkg.slot_atom) |
| msg.append(" parents: %s" % ", ".join( \ |
| "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents)) |
| msg.append("") |
| writemsg_level("".join("%s\n" % l for l in msg), |
| noiselevel=-1, level=logging.DEBUG) |
| return 0 |
| |
| # A slot collision has occurred. Sometimes this coincides |
| # with unresolvable blockers, so the slot collision will be |
| # shown later if there are no unresolvable blockers. |
| self._add_slot_conflict(pkg) |
| slot_collision = True |
| |
| if slot_collision: |
| # Now add this node to the graph so that self.display() |
| # can show use flags and --tree output. This node is |
| # only being partially added to the graph. It must not be |
| # allowed to interfere with the other nodes that have been |
| # added. Do not overwrite data for existing nodes in |
| # self._dynamic_config.mydbapi since that data will be used for blocker |
| # validation. |
| # Even though the graph is now invalid, continue to process |
| # dependencies so that things like --fetchonly can still |
| # function despite collisions. |
| pass |
| elif not previously_added: |
| self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg |
| self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg) |
| self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache() |
| self._dynamic_config._highest_pkg_cache.clear() |
| self._check_masks(pkg) |
| |
| if not pkg.installed: |
| # Allow this package to satisfy old-style virtuals in case it |
| # doesn't already. Any pre-existing providers will be preferred |
| # over this one. |
| try: |
| pkgsettings.setinst(pkg.cpv, pkg.metadata) |
| # For consistency, also update the global virtuals. |
| settings = self._frozen_config.roots[pkg.root].settings |
| settings.unlock() |
| settings.setinst(pkg.cpv, pkg.metadata) |
| settings.lock() |
| except portage.exception.InvalidDependString as e: |
| if not pkg.installed: |
| # should have been masked before it was selected |
| raise |
| |
| if arg_atoms: |
| self._dynamic_config._set_nodes.add(pkg) |
| |
| # Do this even when addme is False (--onlydeps) so that the |
| # parent/child relationship is always known in case |
| # self._show_slot_collision_notice() needs to be called later. |
| self._dynamic_config.digraph.add(pkg, myparent, priority=priority) |
| if dep.atom is not None and dep.parent is not None: |
| self._add_parent_atom(pkg, (dep.parent, dep.atom)) |
| |
| if arg_atoms: |
| for parent_atom in arg_atoms: |
| parent, atom = parent_atom |
| self._dynamic_config.digraph.add(pkg, parent, priority=priority) |
| self._add_parent_atom(pkg, parent_atom) |
| |
| """ This section determines whether we go deeper into dependencies or not. |
| We want to go deeper on a few occasions: |
| Installing package A, we need to make sure package A's deps are met. |
| emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec |
| If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies. |
| """ |
| if arg_atoms: |
| depth = 0 |
| pkg.depth = depth |
| deep = self._dynamic_config.myparams.get("deep", 0) |
| recurse = deep is True or depth + 1 <= deep |
| dep_stack = self._dynamic_config._dep_stack |
| if "recurse" not in self._dynamic_config.myparams: |
| return 1 |
| elif pkg.installed and not recurse: |
| dep_stack = self._dynamic_config._ignored_deps |
| |
| self._spinner_update() |
| |
| if not previously_added: |
| dep_stack.append(pkg) |
| return 1 |
| |
| def _check_masks(self, pkg): |
| |
| slot_key = (pkg.root, pkg.slot_atom) |
| |
| # Check for upgrades in the same slot that are |
| # masked due to a LICENSE change in a newer |
| # version that is not masked for any other reason. |
| other_pkg = self._frozen_config._highest_license_masked.get(slot_key) |
| if other_pkg is not None and pkg < other_pkg: |
| self._dynamic_config._masked_license_updates.add(other_pkg) |
| |
| def _add_parent_atom(self, pkg, parent_atom): |
| parent_atoms = self._dynamic_config._parent_atoms.get(pkg) |
| if parent_atoms is None: |
| parent_atoms = set() |
| self._dynamic_config._parent_atoms[pkg] = parent_atoms |
| parent_atoms.add(parent_atom) |
| |
| def _add_slot_conflict(self, pkg): |
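| """Record a slot collision between pkg and the package currently |
| mapped to its slot, for later display by |
| _show_slot_collision_notice().""" |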
| self._dynamic_config._slot_collision_nodes.add(pkg) |
| slot_key = (pkg.slot_atom, pkg.root) |
| slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key) |
| if slot_nodes is None: |
| slot_nodes = set() |
| slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom]) |
| self._dynamic_config._slot_collision_info[slot_key] = slot_nodes |
| slot_nodes.add(pkg) |
| |
| def _add_pkg_deps(self, pkg, allow_unsatisfied=False): |
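| """Queue pkg's DEPEND, RDEPEND and PDEPEND strings for traversal, |
| pruning or downgrading build-time deps according to options such as |
| --buildpkgonly, --with-bdeps and --root-deps. Returns 0 on |
| failure.""" |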
| |
| mytype = pkg.type_name |
| myroot = pkg.root |
| mykey = pkg.cpv |
| metadata = pkg.metadata |
| myuse = self._pkg_use_enabled(pkg) |
| jbigkey = pkg |
| depth = pkg.depth + 1 |
| removal_action = "remove" in self._dynamic_config.myparams |
| |
| edepend={} |
| depkeys = ["DEPEND","RDEPEND","PDEPEND"] |
| for k in depkeys: |
| edepend[k] = metadata[k] |
| |
| if not pkg.built and \ |
| "--buildpkgonly" in self._frozen_config.myopts and \ |
| "deep" not in self._dynamic_config.myparams: |
| edepend["RDEPEND"] = "" |
| edepend["PDEPEND"] = "" |
| |
| ignore_build_time_deps = False |
| if pkg.built and not removal_action: |
| if self._dynamic_config.myparams.get("bdeps", "n") == "y": |
| # Pull in build time deps as requested, but mark them as |
| # "optional" since they are not strictly required. This allows |
| # more freedom in the merge order calculation for solving |
| # circular dependencies. Don't convert to PDEPEND since that |
| # could make --with-bdeps=y less effective if it is used to |
| # adjust merge order to prevent built_with_use() calls from |
| # failing. |
| pass |
| else: |
| ignore_build_time_deps = True |
| |
| if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n": |
| ignore_build_time_deps = True |
| |
| if removal_action: |
| depend_root = myroot |
| else: |
| depend_root = "/" |
| root_deps = self._frozen_config.myopts.get("--root-deps") |
| if root_deps is not None: |
| if root_deps is True: |
| depend_root = myroot |
| elif root_deps == "rdeps": |
| ignore_build_time_deps = True |
| |
| deps = ( |
| (depend_root, edepend["DEPEND"], |
| self._priority(buildtime=True, |
| optional=(pkg.built or ignore_build_time_deps), |
| ignored=ignore_build_time_deps)), |
| (myroot, edepend["RDEPEND"], |
| self._priority(runtime=True)), |
| (myroot, edepend["PDEPEND"], |
| self._priority(runtime_post=True)) |
| ) |
| |
| debug = "--debug" in self._frozen_config.myopts |
| strict = mytype != "installed" |
| |
| for dep_root, dep_string, dep_priority in deps: |
| if not dep_string: |
| continue |
| if debug: |
| writemsg_level("\nParent: %s\n" % (pkg,), |
| noiselevel=-1, level=logging.DEBUG) |
| writemsg_level("Depstring: %s\n" % (dep_string,), |
| noiselevel=-1, level=logging.DEBUG) |
| writemsg_level("Priority: %s\n" % (dep_priority,), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| try: |
| dep_string = portage.dep.use_reduce(dep_string, |
| uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag) |
| except portage.exception.InvalidDependString as e: |
| if not pkg.installed: |
| # should have been masked before it was selected |
| raise |
| del e |
| |
| # Try again, but omit the is_valid_flag argument, since |
| # invalid USE conditionals are a common problem and it's |
| # practical to ignore this issue for installed packages. |
| try: |
| dep_string = portage.dep.use_reduce(dep_string, |
| uselist=self._pkg_use_enabled(pkg)) |
| except portage.exception.InvalidDependString as e: |
| self._dynamic_config._masked_installed.add(pkg) |
| del e |
| continue |
| |
| try: |
| dep_string = list(self._queue_disjunctive_deps( |
| pkg, dep_root, dep_priority, dep_string)) |
| except portage.exception.InvalidDependString as e: |
| if pkg.installed: |
| self._dynamic_config._masked_installed.add(pkg) |
| del e |
| continue |
| |
| # should have been masked before it was selected |
| raise |
| |
| if not dep_string: |
| continue |
| |
| dep_string = portage.dep.paren_enclose(dep_string, |
| unevaluated_atom=True) |
| |
| if not self._add_pkg_dep_string( |
| pkg, dep_root, dep_priority, dep_string, |
| allow_unsatisfied): |
| return 0 |
| |
| self._dynamic_config._traversed_pkg_deps.add(pkg) |
| return 1 |
| |
| def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string, |
| allow_unsatisfied): |
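| """Wrapper for _wrapped_add_pkg_dep_string() that temporarily |
| disables autounmask for optional or ignored dependencies, since |
| they don't necessarily need to be satisfied.""" |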
| _autounmask_backup = self._dynamic_config._autounmask |
| if dep_priority.optional or dep_priority.ignored: |
| # Temporarily disable autounmask for deps that |
| # don't necessarily need to be satisfied. |
| self._dynamic_config._autounmask = False |
| try: |
| return self._wrapped_add_pkg_dep_string( |
| pkg, dep_root, dep_priority, dep_string, |
| allow_unsatisfied) |
| finally: |
| self._dynamic_config._autounmask = _autounmask_backup |
| |
| def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority, |
| dep_string, allow_unsatisfied): |
| depth = pkg.depth + 1 |
| deep = self._dynamic_config.myparams.get("deep", 0) |
| recurse_satisfied = deep is True or depth <= deep |
| debug = "--debug" in self._frozen_config.myopts |
| strict = pkg.type_name != "installed" |
| |
| if debug: |
| writemsg_level("\nParent: %s\n" % (pkg,), |
| noiselevel=-1, level=logging.DEBUG) |
| writemsg_level("Depstring: %s\n" % (dep_string,), |
| noiselevel=-1, level=logging.DEBUG) |
| writemsg_level("Priority: %s\n" % (dep_priority,), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| try: |
| selected_atoms = self._select_atoms(dep_root, |
| dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg, |
| strict=strict, priority=dep_priority) |
| except portage.exception.InvalidDependString as e: |
| if pkg.installed: |
| self._dynamic_config._masked_installed.add(pkg) |
| return 1 |
| |
| # should have been masked before it was selected |
| raise |
| |
| if debug: |
| writemsg_level("Candidates: %s\n" % \ |
| ([str(x) for x in selected_atoms[pkg]],), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| root_config = self._frozen_config.roots[dep_root] |
| vardb = root_config.trees["vartree"].dbapi |
| traversed_virt_pkgs = set() |
| |
| reinstall_atoms = self._frozen_config.reinstall_atoms |
| for atom, child in self._minimize_children( |
| pkg, dep_priority, root_config, selected_atoms[pkg]): |
| |
| # If this was a specially generated virtual atom |
| # from dep_check, map it back to the original, in |
| # order to avoid distortion in places like display |
| # or conflict resolution code. |
| is_virt = hasattr(atom, '_orig_atom') |
| atom = getattr(atom, '_orig_atom', atom) |
| |
| if atom.blocker and \ |
| (dep_priority.optional or dep_priority.ignored): |
| # For --with-bdeps, ignore build-time only blockers |
| # that originate from built packages. |
| continue |
| |
| mypriority = dep_priority.copy() |
| if not atom.blocker: |
| root_slot = (pkg.root, pkg.slot_atom) |
| inst_pkgs = [inst_pkg for inst_pkg in vardb.match_pkgs(atom) |
| if not reinstall_atoms.findAtomForPackage(inst_pkg, |
| modified_use=self._pkg_use_enabled(inst_pkg))] |
| if inst_pkgs: |
| for inst_pkg in inst_pkgs: |
| if self._pkg_visibility_check(inst_pkg): |
| # highest visible |
| mypriority.satisfied = inst_pkg |
| break |
| if not mypriority.satisfied: |
| # none visible, so use highest |
| mypriority.satisfied = inst_pkgs[0] |
| |
| dep = Dependency(atom=atom, |
| blocker=atom.blocker, child=child, depth=depth, parent=pkg, |
| priority=mypriority, root=dep_root) |
| |
| # In some cases, dep_check will return deps that shouldn't |
			# be processed any further, so they are identified and
| # discarded here. Try to discard as few as possible since |
| # discarded dependencies reduce the amount of information |
| # available for optimization of merge order. |
| ignored = False |
| if not atom.blocker and \ |
| not recurse_satisfied and \ |
| mypriority.satisfied and \ |
| mypriority.satisfied.visible and \ |
| dep.child is not None and \ |
| not dep.child.installed and \ |
| self._dynamic_config._slot_pkg_map[dep.child.root].get( |
| dep.child.slot_atom) is None: |
| myarg = None |
| if dep.root == self._frozen_config.target_root: |
| try: |
| myarg = next(self._iter_atoms_for_pkg(dep.child)) |
| except StopIteration: |
| pass |
| except InvalidDependString: |
| if not dep.child.installed: |
| # This shouldn't happen since the package |
| # should have been masked. |
| raise |
| |
| if myarg is None: |
| # Existing child selection may not be valid unless |
| # it's added to the graph immediately, since "complete" |
| # mode may select a different child later. |
| ignored = True |
| dep.child = None |
| self._dynamic_config._ignored_deps.append(dep) |
| |
| if not ignored: |
| if dep_priority.ignored and \ |
| not self._dynamic_config._traverse_ignored_deps: |
| if is_virt and dep.child is not None: |
| traversed_virt_pkgs.add(dep.child) |
| dep.child = None |
| self._dynamic_config._ignored_deps.append(dep) |
| else: |
| if not self._add_dep(dep, |
| allow_unsatisfied=allow_unsatisfied): |
| return 0 |
| if is_virt and dep.child is not None: |
| traversed_virt_pkgs.add(dep.child) |
| |
| selected_atoms.pop(pkg) |
| |
| # Add selected indirect virtual deps to the graph. This |
| # takes advantage of circular dependency avoidance that's done |
| # by dep_zapdeps. We preserve actual parent/child relationships |
| # here in order to avoid distorting the dependency graph like |
| # <=portage-2.1.6.x did. |
| for virt_dep, atoms in selected_atoms.items(): |
| |
| virt_pkg = virt_dep.child |
| if virt_pkg not in traversed_virt_pkgs: |
| continue |
| |
| if debug: |
| writemsg_level("Candidates: %s: %s\n" % \ |
| (virt_pkg.cpv, [str(x) for x in atoms]), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| if not dep_priority.ignored or \ |
| self._dynamic_config._traverse_ignored_deps: |
| if not self._add_pkg(virt_pkg, virt_dep): |
| return 0 |
| |
| for atom, child in self._minimize_children( |
| pkg, self._priority(runtime=True), root_config, atoms): |
| |
| # If this was a specially generated virtual atom |
| # from dep_check, map it back to the original, in |
| # order to avoid distortion in places like display |
| # or conflict resolution code. |
| is_virt = hasattr(atom, '_orig_atom') |
| atom = getattr(atom, '_orig_atom', atom) |
| |
| # This is a GLEP 37 virtual, so its deps are all runtime. |
| mypriority = self._priority(runtime=True) |
| if not atom.blocker: |
| inst_pkgs = [inst_pkg for inst_pkg in vardb.match_pkgs(atom) |
| if not reinstall_atoms.findAtomForPackage(inst_pkg, |
| modified_use=self._pkg_use_enabled(inst_pkg))] |
| if inst_pkgs: |
| for inst_pkg in inst_pkgs: |
| if self._pkg_visibility_check(inst_pkg): |
| # highest visible |
| mypriority.satisfied = inst_pkg |
| break |
| if not mypriority.satisfied: |
| # none visible, so use highest |
| mypriority.satisfied = inst_pkgs[0] |
| |
| # Dependencies of virtuals are considered to have the |
| # same depth as the virtual itself. |
| dep = Dependency(atom=atom, |
| blocker=atom.blocker, child=child, depth=virt_dep.depth, |
| parent=virt_pkg, priority=mypriority, root=dep_root, |
| collapsed_parent=pkg, collapsed_priority=dep_priority) |
| |
| ignored = False |
| if not atom.blocker and \ |
| not recurse_satisfied and \ |
| mypriority.satisfied and \ |
| mypriority.satisfied.visible and \ |
| dep.child is not None and \ |
| not dep.child.installed and \ |
| self._dynamic_config._slot_pkg_map[dep.child.root].get( |
| dep.child.slot_atom) is None: |
| myarg = None |
| if dep.root == self._frozen_config.target_root: |
| try: |
| myarg = next(self._iter_atoms_for_pkg(dep.child)) |
| except StopIteration: |
| pass |
| except InvalidDependString: |
| if not dep.child.installed: |
| raise |
| |
| if myarg is None: |
| ignored = True |
| dep.child = None |
| self._dynamic_config._ignored_deps.append(dep) |
| |
| if not ignored: |
| if dep_priority.ignored and \ |
| not self._dynamic_config._traverse_ignored_deps: |
| if is_virt and dep.child is not None: |
| traversed_virt_pkgs.add(dep.child) |
| dep.child = None |
| self._dynamic_config._ignored_deps.append(dep) |
| else: |
| if not self._add_dep(dep, |
| allow_unsatisfied=allow_unsatisfied): |
| return 0 |
| if is_virt and dep.child is not None: |
| traversed_virt_pkgs.add(dep.child) |
| |
| if debug: |
| writemsg_level("Exiting... %s\n" % (pkg,), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| return 1 |
| |
| def _minimize_children(self, parent, priority, root_config, atoms): |
| """ |
| Selects packages to satisfy the given atoms, and minimizes the |
| number of selected packages. This serves to identify and eliminate |
| redundant package selections when multiple atoms happen to specify |
| a version range. |
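
		Yields (atom, package) tuples, where package is None for
		blocker atoms and atoms for which no package could be
		selected.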
| """ |
| |
| atom_pkg_map = {} |
| |
| for atom in atoms: |
| if atom.blocker: |
| yield (atom, None) |
| continue |
| dep_pkg, existing_node = self._select_package( |
| root_config.root, atom) |
| if dep_pkg is None: |
| yield (atom, None) |
| continue |
| atom_pkg_map[atom] = dep_pkg |
| |
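		# With fewer than two selected packages there is nothing
		# to minimize.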
| if len(atom_pkg_map) < 2: |
| for item in atom_pkg_map.items(): |
| yield item |
| return |
| |
| cp_pkg_map = {} |
| pkg_atom_map = {} |
| for atom, pkg in atom_pkg_map.items(): |
| pkg_atom_map.setdefault(pkg, set()).add(atom) |
| cp_pkg_map.setdefault(pkg.cp, set()).add(pkg) |
| |
| for cp, pkgs in cp_pkg_map.items(): |
| if len(pkgs) < 2: |
| for pkg in pkgs: |
| for atom in pkg_atom_map[pkg]: |
| yield (atom, pkg) |
| continue |
| |
| # Use a digraph to identify and eliminate any |
| # redundant package selections. |
| atom_pkg_graph = digraph() |
| cp_atoms = set() |
| for pkg1 in pkgs: |
| for atom in pkg_atom_map[pkg1]: |
| cp_atoms.add(atom) |
| atom_pkg_graph.add(pkg1, atom) |
| atom_set = InternalPackageSet(initial_atoms=(atom,)) |
| for pkg2 in pkgs: |
| if pkg2 is pkg1: |
| continue |
| if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)): |
| atom_pkg_graph.add(pkg2, atom) |
| |
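			# Eliminate a package when every atom that selects it is
			# also satisfied by another remaining package.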
| for pkg in pkgs: |
| eliminate_pkg = True |
| for atom in atom_pkg_graph.parent_nodes(pkg): |
| if len(atom_pkg_graph.child_nodes(atom)) < 2: |
| eliminate_pkg = False |
| break |
| if eliminate_pkg: |
| atom_pkg_graph.remove(pkg) |
| |
| # Yield ~, =*, < and <= atoms first, since those are more likely to |
| # cause slot conflicts, and we want those atoms to be displayed |
| # in the resulting slot conflict message (see bug #291142). |
| conflict_atoms = [] |
| normal_atoms = [] |
| for atom in cp_atoms: |
| conflict = False |
| for child_pkg in atom_pkg_graph.child_nodes(atom): |
| existing_node, matches = \ |
| self._check_slot_conflict(child_pkg, atom) |
| if existing_node and not matches: |
| conflict = True |
| break |
| if conflict: |
| conflict_atoms.append(atom) |
| else: |
| normal_atoms.append(atom) |
| |
| for atom in chain(conflict_atoms, normal_atoms): |
| child_pkgs = atom_pkg_graph.child_nodes(atom) |
| # if more than one child, yield highest version |
| if len(child_pkgs) > 1: |
| child_pkgs.sort() |
| yield (atom, child_pkgs[-1]) |
| |
| def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct): |
| """ |
| Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack. |
| Yields non-disjunctive deps. Raises InvalidDependString when |
| necessary. |
| """ |
| i = 0 |
| while i < len(dep_struct): |
| x = dep_struct[i] |
| if isinstance(x, list): |
| for y in self._queue_disjunctive_deps( |
| pkg, dep_root, dep_priority, x): |
| yield y |
| elif x == "||": |
| self._queue_disjunction(pkg, dep_root, dep_priority, |
| [ x, dep_struct[ i + 1 ] ] ) |
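				# Skip the choices list that follows "||", since it
				# was queued together with the operator above.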
| i += 1 |
| else: |
| try: |
| x = portage.dep.Atom(x) |
| except portage.exception.InvalidAtom: |
| if not pkg.installed: |
| raise portage.exception.InvalidDependString( |
| "invalid atom: '%s'" % x) |
| else: |
| # Note: Eventually this will check for PROPERTIES=virtual |
| # or whatever other metadata gets implemented for this |
| # purpose. |
| if x.cp.startswith('virtual/'): |
| self._queue_disjunction( pkg, dep_root, |
| dep_priority, [ str(x) ] ) |
| else: |
| yield str(x) |
| i += 1 |
| |
| def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct): |
| self._dynamic_config._dep_disjunctive_stack.append( |
| (pkg, dep_root, dep_priority, dep_struct)) |
| |
| def _pop_disjunction(self, allow_unsatisfied): |
| """ |
| Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to |
| populate self._dynamic_config._dep_stack. |
| """ |
| pkg, dep_root, dep_priority, dep_struct = \ |
| self._dynamic_config._dep_disjunctive_stack.pop() |
| dep_string = portage.dep.paren_enclose(dep_struct, |
| unevaluated_atom=True) |
| if not self._add_pkg_dep_string( |
| pkg, dep_root, dep_priority, dep_string, allow_unsatisfied): |
| return 0 |
| return 1 |
| |
| def _priority(self, **kwargs): |
| if "remove" in self._dynamic_config.myparams: |
| priority_constructor = UnmergeDepPriority |
| else: |
| priority_constructor = DepPriority |
| return priority_constructor(**kwargs) |
| |
| def _dep_expand(self, root_config, atom_without_category): |
| """ |
| @param root_config: a root config instance |
| @type root_config: RootConfig |
| @param atom_without_category: an atom without a category component |
| @type atom_without_category: String |
| @rtype: list |
		@returns: a possibly empty list of atoms with categories inserted
| """ |
| null_cp = portage.dep_getkey(insert_category_into_atom( |
| atom_without_category, "null")) |
| cat, atom_pn = portage.catsplit(null_cp) |
| |
| dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"] |
| categories = set() |
| for db, pkg_type, built, installed, db_keys in dbs: |
| for cat in db.categories: |
| if db.cp_list("%s/%s" % (cat, atom_pn)): |
| categories.add(cat) |
| |
| deps = [] |
| for cat in categories: |
| deps.append(Atom(insert_category_into_atom( |
| atom_without_category, cat))) |
| return deps |
| |
	def _have_new_virt(self, root, atom_cp):
		return any(db.cp_list(atom_cp)
			for db, pkg_type, built, installed, db_keys in
			self._dynamic_config._filtered_trees[root]["dbs"])
| |
| def _iter_atoms_for_pkg(self, pkg): |
| depgraph_sets = self._dynamic_config.sets[pkg.root] |
| atom_arg_map = depgraph_sets.atom_arg_map |
| root_config = self._frozen_config.roots[pkg.root] |
| for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg): |
| if atom.cp != pkg.cp and \ |
| self._have_new_virt(pkg.root, atom.cp): |
| continue |
| visible_pkgs = \ |
| self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom) |
| visible_pkgs.reverse() # descending order |
| higher_slot = None |
| for visible_pkg in visible_pkgs: |
| if visible_pkg.cp != atom.cp: |
| continue |
| if pkg >= visible_pkg: |
					# The list is in descending order, so no
					# remaining version can be greater than pkg.
| break |
| if pkg.slot_atom != visible_pkg.slot_atom: |
| higher_slot = visible_pkg |
| break |
| if higher_slot is not None: |
| continue |
| for arg in atom_arg_map[(atom, pkg.root)]: |
| if isinstance(arg, PackageArg) and \ |
| arg.package != pkg: |
| continue |
| yield arg, atom |
| |
	def select_files(self, myfiles):
		"""Given a list of .tbz2s, .ebuilds, sets, and deps, populate
		self._dynamic_config._initial_arg_list and call self._resolve to create the
		appropriate depgraph and return a favorite list."""
| self._load_vdb() |
| debug = "--debug" in self._frozen_config.myopts |
| root_config = self._frozen_config.roots[self._frozen_config.target_root] |
| sets = root_config.sets |
| depgraph_sets = self._dynamic_config.sets[root_config.root] |
		myfavorites = []
| myroot = self._frozen_config.target_root |
| dbs = self._dynamic_config._filtered_trees[myroot]["dbs"] |
| vardb = self._frozen_config.trees[myroot]["vartree"].dbapi |
| real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi |
| portdb = self._frozen_config.trees[myroot]["porttree"].dbapi |
| bindb = self._frozen_config.trees[myroot]["bintree"].dbapi |
| pkgsettings = self._frozen_config.pkgsettings[myroot] |
| args = [] |
| onlydeps = "--onlydeps" in self._frozen_config.myopts |
| lookup_owners = [] |
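		# Classify each argument: binary package file, ebuild file,
		# filesystem path to look up owners for, package set, or atom.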
| for x in myfiles: |
| ext = os.path.splitext(x)[1] |
			if ext == ".tbz2":
| if not os.path.exists(x): |
| if os.path.exists( |
| os.path.join(pkgsettings["PKGDIR"], "All", x)): |
| x = os.path.join(pkgsettings["PKGDIR"], "All", x) |
| elif os.path.exists( |
| os.path.join(pkgsettings["PKGDIR"], x)): |
| x = os.path.join(pkgsettings["PKGDIR"], x) |
| else: |
| writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1) |
| writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1) |
| return 0, myfavorites |
				mytbz2 = portage.xpak.tbz2(x)
				mykey = mytbz2.getelements("CATEGORY")[0] + "/" + \
					os.path.splitext(os.path.basename(x))[0]
| if os.path.realpath(x) != \ |
| os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)): |
| writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1) |
| self._dynamic_config._skip_restart = True |
| return 0, myfavorites |
| |
| pkg = self._pkg(mykey, "binary", root_config, |
| onlydeps=onlydeps) |
| args.append(PackageArg(arg=x, package=pkg, |
| root_config=root_config)) |
			elif ext == ".ebuild":
| ebuild_path = portage.util.normalize_path(os.path.abspath(x)) |
| pkgdir = os.path.dirname(ebuild_path) |
| tree_root = os.path.dirname(os.path.dirname(pkgdir)) |
| cp = pkgdir[len(tree_root)+1:] |
| e = portage.exception.PackageNotFound( |
| ("%s is not in a valid portage tree " + \ |
| "hierarchy or does not exist") % x) |
| if not portage.isvalidatom(cp): |
| raise e |
| cat = portage.catsplit(cp)[0] |
| mykey = cat + "/" + os.path.basename(ebuild_path[:-7]) |
| if not portage.isvalidatom("="+mykey): |
| raise e |
| ebuild_path = portdb.findname(mykey) |
| if ebuild_path: |
| if ebuild_path != os.path.join(os.path.realpath(tree_root), |
| cp, os.path.basename(ebuild_path)): |
| writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1) |
| self._dynamic_config._skip_restart = True |
| return 0, myfavorites |
| if mykey not in portdb.xmatch( |
| "match-visible", portage.cpv_getkey(mykey)): |
| writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1) |
| writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1) |
| writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1) |
| countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]), |
| "Continuing...") |
| else: |
| raise portage.exception.PackageNotFound( |
| "%s is not in a valid portage tree hierarchy or does not exist" % x) |
| pkg = self._pkg(mykey, "ebuild", root_config, |
| onlydeps=onlydeps) |
| args.append(PackageArg(arg=x, package=pkg, |
| root_config=root_config)) |
| elif x.startswith(os.path.sep): |
| if not x.startswith(myroot): |
| portage.writemsg(("\n\n!!! '%s' does not start with" + \ |
| " $ROOT.\n") % x, noiselevel=-1) |
| self._dynamic_config._skip_restart = True |
| return 0, [] |
| # Queue these up since it's most efficient to handle |
| # multiple files in a single iter_owners() call. |
| lookup_owners.append(x) |
| elif x.startswith("." + os.sep) or \ |
| x.startswith(".." + os.sep): |
| f = os.path.abspath(x) |
| if not f.startswith(myroot): |
| portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \ |
| " $ROOT.\n") % (f, x), noiselevel=-1) |
| self._dynamic_config._skip_restart = True |
| return 0, [] |
| lookup_owners.append(f) |
| else: |
| if x in ("system", "world"): |
| x = SETPREFIX + x |
| if x.startswith(SETPREFIX): |
| s = x[len(SETPREFIX):] |
| if s not in sets: |
| raise portage.exception.PackageSetNotFound(s) |
| if s in depgraph_sets.sets: |
| continue |
| pset = sets[s] |
| depgraph_sets.sets[s] = pset |
| args.append(SetArg(arg=x, pset=pset, |
| root_config=root_config)) |
| continue |
| if not is_valid_package_atom(x): |
| portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x, |
| noiselevel=-1) |
| portage.writemsg("!!! Please check ebuild(5) for full details.\n") |
| portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n") |
| self._dynamic_config._skip_restart = True |
					return 0, []
| # Don't expand categories or old-style virtuals here unless |
| # necessary. Expansion of old-style virtuals here causes at |
| # least the following problems: |
| # 1) It's more difficult to determine which set(s) an atom |
| # came from, if any. |
| # 2) It takes away freedom from the resolver to choose other |
| # possible expansions when necessary. |
| if "/" in x: |
| args.append(AtomArg(arg=x, atom=Atom(x), |
| root_config=root_config)) |
| continue |
| expanded_atoms = self._dep_expand(root_config, x) |
| installed_cp_set = set() |
| for atom in expanded_atoms: |
| if vardb.cp_list(atom.cp): |
| installed_cp_set.add(atom.cp) |
| |
| if len(installed_cp_set) > 1: |
| non_virtual_cps = set() |
| for atom_cp in installed_cp_set: |
| if not atom_cp.startswith("virtual/"): |
| non_virtual_cps.add(atom_cp) |
| if len(non_virtual_cps) == 1: |
| installed_cp_set = non_virtual_cps |
| |
| if len(expanded_atoms) > 1 and len(installed_cp_set) == 1: |
| installed_cp = next(iter(installed_cp_set)) |
| for atom in expanded_atoms: |
| if atom.cp == installed_cp: |
| available = False |
| for pkg in self._iter_match_pkgs_any( |
| root_config, atom.without_use, |
| onlydeps=onlydeps): |
| if not pkg.installed: |
| available = True |
| break |
| if available: |
| expanded_atoms = [atom] |
| break |
| |
| # If a non-virtual package and one or more virtual packages |
| # are in expanded_atoms, use the non-virtual package. |
| if len(expanded_atoms) > 1: |
| number_of_virtuals = 0 |
| for expanded_atom in expanded_atoms: |
| if expanded_atom.cp.startswith("virtual/"): |
| number_of_virtuals += 1 |
| else: |
| candidate = expanded_atom |
| if len(expanded_atoms) - number_of_virtuals == 1: |
| expanded_atoms = [ candidate ] |
| |
| if len(expanded_atoms) > 1: |
| writemsg("\n\n", noiselevel=-1) |
| ambiguous_package_name(x, expanded_atoms, root_config, |
| self._frozen_config.spinner, self._frozen_config.myopts) |
| self._dynamic_config._skip_restart = True |
| return False, myfavorites |
| if expanded_atoms: |
| atom = expanded_atoms[0] |
| else: |
| null_atom = Atom(insert_category_into_atom(x, "null")) |
| cat, atom_pn = portage.catsplit(null_atom.cp) |
| virts_p = root_config.settings.get_virts_p().get(atom_pn) |
| if virts_p: |
| # Allow the depgraph to choose which virtual. |
| atom = Atom(null_atom.replace('null/', 'virtual/', 1)) |
| else: |
| atom = null_atom |
| |
| args.append(AtomArg(arg=x, atom=atom, |
| root_config=root_config)) |
| |
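		# Map any file paths given as arguments to the installed
		# packages that own them.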
| if lookup_owners: |
| relative_paths = [] |
| search_for_multiple = False |
| if len(lookup_owners) > 1: |
| search_for_multiple = True |
| |
| for x in lookup_owners: |
| if not search_for_multiple and os.path.isdir(x): |
| search_for_multiple = True |
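				# Strip $ROOT from the path, keeping the leading slash.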
| relative_paths.append(x[len(myroot)-1:]) |
| |
| owners = set() |
| for pkg, relative_path in \ |
| real_vardb._owners.iter_owners(relative_paths): |
| owners.add(pkg.mycpv) |
| if not search_for_multiple: |
| break |
| |
| if not owners: |
| portage.writemsg(("\n\n!!! '%s' is not claimed " + \ |
| "by any package.\n") % lookup_owners[0], noiselevel=-1) |
| self._dynamic_config._skip_restart = True |
| return 0, [] |
| |
| for cpv in owners: |
| slot = vardb.aux_get(cpv, ["SLOT"])[0] |
| if not slot: |
| # portage now masks packages with missing slot, but it's |
| # possible that one was installed by an older version |
| atom = Atom(portage.cpv_getkey(cpv)) |
| else: |
| atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot)) |
| args.append(AtomArg(arg=atom, atom=atom, |
| root_config=root_config)) |
| |
| if "--update" in self._frozen_config.myopts: |
| # In some cases, the greedy slots behavior can pull in a slot that |
| # the user would want to uninstall due to it being blocked by a |
| # newer version in a different slot. Therefore, it's necessary to |
| # detect and discard any that should be uninstalled. Each time |
| # that arguments are updated, package selections are repeated in |
| # order to ensure consistency with the current arguments: |
| # |
| # 1) Initialize args |
| # 2) Select packages and generate initial greedy atoms |
| # 3) Update args with greedy atoms |
| # 4) Select packages and generate greedy atoms again, while |
| # accounting for any blockers between selected packages |
| # 5) Update args with revised greedy atoms |
| |
| self._set_args(args) |
| greedy_args = [] |
| for arg in args: |
| greedy_args.append(arg) |
| if not isinstance(arg, AtomArg): |
| continue |
| for atom in self._greedy_slots(arg.root_config, arg.atom): |
| greedy_args.append( |
| AtomArg(arg=arg.arg, atom=atom, |
| root_config=arg.root_config)) |
| |
| self._set_args(greedy_args) |
| del greedy_args |
| |
| # Revise greedy atoms, accounting for any blockers |
| # between selected packages. |
| revised_greedy_args = [] |
| for arg in args: |
| revised_greedy_args.append(arg) |
| if not isinstance(arg, AtomArg): |
| continue |
| for atom in self._greedy_slots(arg.root_config, arg.atom, |
| blocker_lookahead=True): |
| revised_greedy_args.append( |
| AtomArg(arg=arg.arg, atom=atom, |
| root_config=arg.root_config)) |
| args = revised_greedy_args |
| del revised_greedy_args |
| |
| self._set_args(args) |
| |
| myfavorites = set(myfavorites) |
| for arg in args: |
| if isinstance(arg, (AtomArg, PackageArg)): |
| myfavorites.add(arg.atom) |
| elif isinstance(arg, SetArg): |
| myfavorites.add(arg.arg) |
| myfavorites = list(myfavorites) |
| |
| if debug: |
| portage.writemsg("\n", noiselevel=-1) |
| # Order needs to be preserved since a feature of --nodeps |
| # is to allow the user to force a specific merge order. |
| self._dynamic_config._initial_arg_list = args[:] |
| |
| return self._resolve(myfavorites) |
| |
	def _resolve(self, myfavorites):
		"""Given self._dynamic_config._initial_arg_list, pull in the root nodes,
		call self._create_graph to process their deps, and return
		a favorite list."""
| debug = "--debug" in self._frozen_config.myopts |
| onlydeps = "--onlydeps" in self._frozen_config.myopts |
| myroot = self._frozen_config.target_root |
| pkgsettings = self._frozen_config.pkgsettings[myroot] |
| pprovideddict = pkgsettings.pprovideddict |
| virtuals = pkgsettings.getvirtuals() |
| args = self._dynamic_config._initial_arg_list[:] |
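		# Treat atoms scheduled for rebuild or reinstall as
		# implicit arguments.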
| for root, atom in chain(self._rebuild.rebuild_list, |
| self._rebuild.reinstall_list): |
| args.append(AtomArg(arg=atom, atom=atom, |
| root_config=self._frozen_config.roots[root])) |
| for arg in self._expand_set_args(args, add_to_digraph=True): |
| for atom in arg.pset.getAtoms(): |
| self._spinner_update() |
| dep = Dependency(atom=atom, onlydeps=onlydeps, |
| root=myroot, parent=arg) |
| try: |
| pprovided = pprovideddict.get(atom.cp) |
| if pprovided and portage.match_from_list(atom, pprovided): |
| # A provided package has been specified on the command line. |
| self._dynamic_config._pprovided_args.append((arg, atom)) |
| continue |
| if isinstance(arg, PackageArg): |
| if not self._add_pkg(arg.package, dep) or \ |
| not self._create_graph(): |
| if not self.need_restart(): |
| sys.stderr.write(("\n\n!!! Problem " + \ |
| "resolving dependencies for %s\n") % \ |
| arg.arg) |
| return 0, myfavorites |
| continue |
| if debug: |
| portage.writemsg(" Arg: %s\n Atom: %s\n" % \ |
| (arg, atom), noiselevel=-1) |
| pkg, existing_node = self._select_package( |
| myroot, atom, onlydeps=onlydeps) |
| if not pkg: |
| pprovided_match = False |
| for virt_choice in virtuals.get(atom.cp, []): |
| expanded_atom = portage.dep.Atom( |
| atom.replace(atom.cp, virt_choice.cp, 1)) |
| pprovided = pprovideddict.get(expanded_atom.cp) |
| if pprovided and \ |
| portage.match_from_list(expanded_atom, pprovided): |
| # A provided package has been |
| # specified on the command line. |
| self._dynamic_config._pprovided_args.append((arg, atom)) |
| pprovided_match = True |
| break |
| if pprovided_match: |
| continue |
| |
| if not (isinstance(arg, SetArg) and \ |
| arg.name in ("selected", "system", "world")): |
| self._dynamic_config._unsatisfied_deps_for_display.append( |
| ((myroot, atom), {"myparent" : arg})) |
| return 0, myfavorites |
| |
| self._dynamic_config._missing_args.append((arg, atom)) |
| continue |
| if atom.cp != pkg.cp: |
| # For old-style virtuals, we need to repeat the |
| # package.provided check against the selected package. |
| expanded_atom = atom.replace(atom.cp, pkg.cp) |
| pprovided = pprovideddict.get(pkg.cp) |
| if pprovided and \ |
| portage.match_from_list(expanded_atom, pprovided): |
| # A provided package has been |
| # specified on the command line. |
| self._dynamic_config._pprovided_args.append((arg, atom)) |
| continue |
| if pkg.installed and "selective" not in self._dynamic_config.myparams: |
| self._dynamic_config._unsatisfied_deps_for_display.append( |
| ((myroot, atom), {"myparent" : arg})) |
| # Previous behavior was to bail out in this case, but |
| # since the dep is satisfied by the installed package, |
| # it's more friendly to continue building the graph |
| # and just show a warning message. Therefore, only bail |
| # out here if the atom is not from either the system or |
| # world set. |
| if not (isinstance(arg, SetArg) and \ |
| arg.name in ("selected", "system", "world")): |
| return 0, myfavorites |
| |
| # Add the selected package to the graph as soon as possible |
| # so that later dep_check() calls can use it as feedback |
| # for making more consistent atom selections. |
| if not self._add_pkg(pkg, dep): |
| if self.need_restart(): |
| pass |
| elif isinstance(arg, SetArg): |
| writemsg(("\n\n!!! Problem resolving " + \ |
| "dependencies for %s from %s\n") % \ |
| (atom, arg.arg), noiselevel=-1) |
| else: |
| writemsg(("\n\n!!! Problem resolving " + \ |
| "dependencies for %s\n") % \ |
| (atom,), noiselevel=-1) |
| return 0, myfavorites |
| |
				except SystemExit:
					raise # Re-raise so that emerge can still exit.
| except Exception as e: |
| writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1) |
| writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None)))) |
| raise |
| |
| # Now that the root packages have been added to the graph, |
| # process the dependencies. |
| if not self._create_graph(): |
| return 0, myfavorites |
| |
| try: |
| self.altlist() |
| except self._unknown_internal_error: |
| return False, myfavorites |
| |
		if set(self._dynamic_config.digraph).intersection(
			self._dynamic_config._needed_unstable_keywords) or \
			set(self._dynamic_config.digraph).intersection(
			self._dynamic_config._needed_use_config_changes) or \
			set(self._dynamic_config.digraph).intersection(
			self._dynamic_config._needed_license_changes):
			# Resolution has failed, since the user must change
			# the configuration (keywords, USE, or licenses)
			# before these packages can be merged.
			return False, myfavorites
| |
| if self._rebuild.trigger_rebuilds(): |
| self._dynamic_config._need_restart = True |
| return False, myfavorites |
| |
		# We return True here unless binaries are missing.
| return (True, myfavorites) |
| |
| def _set_args(self, args): |
| """ |
| Create the "__non_set_args__" package set from atoms and packages given as |
| arguments. This method can be called multiple times if necessary. |
| The package selection cache is automatically invalidated, since |
| arguments influence package selections. |
| """ |
| |
| set_atoms = {} |
| non_set_atoms = {} |
| for root in self._dynamic_config.sets: |
| depgraph_sets = self._dynamic_config.sets[root] |
| depgraph_sets.sets.setdefault('__non_set_args__', |
| InternalPackageSet()).clear() |
| depgraph_sets.atoms.clear() |
| depgraph_sets.atom_arg_map.clear() |
| set_atoms[root] = [] |
| non_set_atoms[root] = [] |
| |
| # We don't add set args to the digraph here since that |
| # happens at a later stage and we don't want to make |
		# any state changes here that aren't reversed by
		# another call to this method.
| for arg in self._expand_set_args(args, add_to_digraph=False): |
| atom_arg_map = self._dynamic_config.sets[ |
| arg.root_config.root].atom_arg_map |
| if isinstance(arg, SetArg): |
| atom_group = set_atoms[arg.root_config.root] |
| else: |
| atom_group = non_set_atoms[arg.root_config.root] |
| |
| for atom in arg.pset.getAtoms(): |
| atom_group.append(atom) |
| atom_key = (atom, arg.root_config.root) |
| refs = atom_arg_map.get(atom_key) |
| if refs is None: |
| refs = [] |
| atom_arg_map[atom_key] = refs |
| if arg not in refs: |
| refs.append(arg) |
| |
| for root in self._dynamic_config.sets: |
| depgraph_sets = self._dynamic_config.sets[root] |
| depgraph_sets.atoms.update(chain(set_atoms.get(root, []), |
| non_set_atoms.get(root, []))) |
| depgraph_sets.sets['__non_set_args__'].update( |
| non_set_atoms.get(root, [])) |
| |
| # Invalidate the package selection cache, since |
| # arguments influence package selections. |
| self._dynamic_config._highest_pkg_cache.clear() |
| for trees in self._dynamic_config._filtered_trees.values(): |
| trees["porttree"].dbapi._clear_cache() |
| |
| def _greedy_slots(self, root_config, atom, blocker_lookahead=False): |
| """ |
| Return a list of slot atoms corresponding to installed slots that |
| differ from the slot of the highest visible match. When |
| blocker_lookahead is True, slot atoms that would trigger a blocker |
| conflict are automatically discarded, potentially allowing automatic |
| uninstallation of older slots when appropriate. |
| """ |
| highest_pkg, in_graph = self._select_package(root_config.root, atom) |
| if highest_pkg is None: |
| return [] |
| vardb = root_config.trees["vartree"].dbapi |
| slots = set() |
| for cpv in vardb.match(atom): |
| # don't mix new virtuals with old virtuals |
| if portage.cpv_getkey(cpv) == highest_pkg.cp: |
| slots.add(vardb.aux_get(cpv, ["SLOT"])[0]) |
| |
| slots.add(highest_pkg.metadata["SLOT"]) |
| if len(slots) == 1: |
| return [] |
| greedy_pkgs = [] |
| slots.remove(highest_pkg.metadata["SLOT"]) |
| while slots: |
| slot = slots.pop() |
| slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot)) |
| pkg, in_graph = self._select_package(root_config.root, slot_atom) |
| if pkg is not None and \ |
| pkg.cp == highest_pkg.cp and pkg < highest_pkg: |
| greedy_pkgs.append(pkg) |
| if not greedy_pkgs: |
| return [] |
| if not blocker_lookahead: |
| return [pkg.slot_atom for pkg in greedy_pkgs] |
| |
| blockers = {} |
| blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"] |
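		# Collect the blocker atoms declared by each candidate, so
		# that mutually blocking slots can be filtered out below.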
| for pkg in greedy_pkgs + [highest_pkg]: |
| dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys) |
| try: |
| selected_atoms = self._select_atoms( |
| pkg.root, dep_str, self._pkg_use_enabled(pkg), |
| parent=pkg, strict=True) |
| except portage.exception.InvalidDependString: |
| continue |
| blocker_atoms = [] |
| for atoms in selected_atoms.values(): |
| blocker_atoms.extend(x for x in atoms if x.blocker) |
| blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms) |
| |
| if highest_pkg not in blockers: |
| return [] |
| |
| # filter packages with invalid deps |
| greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers] |
| |
| # filter packages that conflict with highest_pkg |
| greedy_pkgs = [pkg for pkg in greedy_pkgs if not \ |
| (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \ |
| blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))] |
| |
| if not greedy_pkgs: |
| return [] |
| |
| # If two packages conflict, discard the lower version. |
| discard_pkgs = set() |
| greedy_pkgs.sort(reverse=True) |
| for i in range(len(greedy_pkgs) - 1): |
| pkg1 = greedy_pkgs[i] |
| if pkg1 in discard_pkgs: |
| continue |
| for j in range(i + 1, len(greedy_pkgs)): |
| pkg2 = greedy_pkgs[j] |
| if pkg2 in discard_pkgs: |
| continue |
| if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \ |
| blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)): |
					# pkg1 is the higher version since the list is
					# sorted in reverse, so discard pkg2.
| discard_pkgs.add(pkg2) |
| |
| return [pkg.slot_atom for pkg in greedy_pkgs \ |
| if pkg not in discard_pkgs] |
| |
| def _select_atoms_from_graph(self, *pargs, **kwargs): |
| """ |
| Prefer atoms matching packages that have already been |
| added to the graph or those that are installed and have |
| not been scheduled for replacement. |
| """ |
| kwargs["trees"] = self._dynamic_config._graph_trees |
| return self._select_atoms_highest_available(*pargs, **kwargs) |
| |
| def _select_atoms_highest_available(self, root, depstring, |
| myuse=None, parent=None, strict=True, trees=None, priority=None): |
| """This will raise InvalidDependString if necessary. If trees is |
| None then self._dynamic_config._filtered_trees is used.""" |
| |
| pkgsettings = self._frozen_config.pkgsettings[root] |
| if trees is None: |
| trees = self._dynamic_config._filtered_trees |
| mytrees = trees[root] |
| atom_graph = digraph() |
		# Temporarily disable autounmask so that || preferences
		# account for masking and USE settings.
		_autounmask_backup = self._dynamic_config._autounmask
		self._dynamic_config._autounmask = False
		mytrees["pkg_use_enabled"] = self._pkg_use_enabled
		try:
			if parent is not None:
				trees[root]["parent"] = parent
				trees[root]["atom_graph"] = atom_graph
			if priority is not None:
				trees[root]["priority"] = priority
			mycheck = portage.dep_check(depstring, None,
				pkgsettings, myuse=myuse,
				myroot=root, trees=trees)
		finally:
			self._dynamic_config._autounmask = _autounmask_backup
			del mytrees["pkg_use_enabled"]
			if parent is not None:
				trees[root].pop("parent")
				trees[root].pop("atom_graph")
			if priority is not None:
				trees[root].pop("priority")
| if not mycheck[0]: |
| raise portage.exception.InvalidDependString(mycheck[1]) |
| if parent is None: |
| selected_atoms = mycheck[1] |
| elif parent not in atom_graph: |
| selected_atoms = {parent : mycheck[1]} |
| else: |
| # Recursively traversed virtual dependencies, and their |
| # direct dependencies, are considered to have the same |
| # depth as direct dependencies. |
| if parent.depth is None: |
| virt_depth = None |
| else: |
| virt_depth = parent.depth + 1 |
| chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1]) |
| selected_atoms = OrderedDict() |
| node_stack = [(parent, None, None)] |
| traversed_nodes = set() |
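			# Traverse the atom graph from the parent, wrapping each
			# visited virtual node in a Dependency that preserves the
			# actual parent/child relationship.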
| while node_stack: |
| node, node_parent, parent_atom = node_stack.pop() |
| traversed_nodes.add(node) |
| if node is parent: |
| k = parent |
| else: |
| if node_parent is parent: |
| if priority is None: |
| node_priority = None |
| else: |
| node_priority = priority.copy() |
| else: |
| # virtuals only have runtime deps |
| node_priority = self._priority(runtime=True) |
| |
| k = Dependency(atom=parent_atom, |
| blocker=parent_atom.blocker, child=node, |
| depth=virt_depth, parent=node_parent, |
| priority=node_priority, root=node.root) |
| |
| child_atoms = [] |
| selected_atoms[k] = child_atoms |
| for atom_node in atom_graph.child_nodes(node): |
| child_atom = atom_node[0] |
| if id(child_atom) not in chosen_atom_ids: |
| continue |
| child_atoms.append(child_atom) |
|