| # Copyright 1999-2014 Gentoo Foundation |
| # Distributed under the terms of the GNU General Public License v2 |
| |
| from __future__ import division, print_function, unicode_literals |
| |
| import collections |
| import errno |
| import io |
| import logging |
| import stat |
| import sys |
| import textwrap |
| import warnings |
| from collections import deque |
| from itertools import chain |
| |
| import portage |
| from portage import os, OrderedDict |
| from portage import _unicode_decode, _unicode_encode, _encodings |
| from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS |
| from portage.dbapi import dbapi |
| from portage.dbapi.dep_expand import dep_expand |
| from portage.dbapi._similar_name_search import similar_name_search |
| from portage.dep import Atom, best_match_to_list, extract_affecting_use, \ |
| check_required_use, human_readable_required_use, match_from_list, \ |
| _repo_separator |
| from portage.dep._slot_operator import ignore_built_slot_operator_deps |
| from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \ |
| _get_eapi_attrs |
| from portage.exception import (InvalidAtom, InvalidData, InvalidDependString, |
| PackageNotFound, PortageException) |
| from portage.output import colorize, create_color_func, \ |
| darkgreen, green |
| bad = create_color_func("BAD") |
| from portage.package.ebuild.config import _get_feature_flags |
| from portage.package.ebuild.getmaskingstatus import \ |
| _getmaskingstatus, _MaskReason |
| from portage._sets import SETPREFIX |
| from portage._sets.base import InternalPackageSet |
| from portage.util import ConfigProtect, shlex_split, new_protect_filename |
| from portage.util import cmp_sort_key, writemsg, writemsg_stdout |
| from portage.util import ensure_dirs |
| from portage.util import writemsg_level, write_atomic |
| from portage.util.digraph import digraph |
| from portage.util._async.TaskScheduler import TaskScheduler |
| from portage.util._eventloop.EventLoop import EventLoop |
| from portage.util._eventloop.global_event_loop import global_event_loop |
| from portage.versions import catpkgsplit |
| |
| from _emerge.AtomArg import AtomArg |
| from _emerge.Blocker import Blocker |
| from _emerge.BlockerCache import BlockerCache |
| from _emerge.BlockerDepPriority import BlockerDepPriority |
| from _emerge.chk_updated_cfg_files import chk_updated_cfg_files |
| from _emerge.countdown import countdown |
| from _emerge.create_world_atom import create_world_atom |
| from _emerge.Dependency import Dependency |
| from _emerge.DependencyArg import DependencyArg |
| from _emerge.DepPriority import DepPriority |
| from _emerge.DepPriorityNormalRange import DepPriorityNormalRange |
| from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange |
| from _emerge.EbuildMetadataPhase import EbuildMetadataPhase |
| from _emerge.FakeVartree import FakeVartree |
| from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps |
| from _emerge.is_valid_package_atom import insert_category_into_atom, \ |
| is_valid_package_atom |
| from _emerge.Package import Package |
| from _emerge.PackageArg import PackageArg |
| from _emerge.PackageVirtualDbapi import PackageVirtualDbapi |
| from _emerge.RootConfig import RootConfig |
| from _emerge.search import search |
| from _emerge.SetArg import SetArg |
| from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice |
| from _emerge.UnmergeDepPriority import UnmergeDepPriority |
| from _emerge.UseFlagDisplay import pkg_use_display |
| from _emerge.UserQuery import UserQuery |
| |
| from _emerge.resolver.backtracking import Backtracker, BacktrackParameter |
| from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper |
| from _emerge.resolver.slot_collision import slot_conflict_handler |
| from _emerge.resolver.circular_dependency import circular_dependency_handler |
| from _emerge.resolver.output import Display, format_unmatched_atom |
| |
| if sys.hexversion >= 0x3000000: |
| basestring = str |
| long = int |
| _unicode = str |
| else: |
| _unicode = unicode |
| |
| class _scheduler_graph_config(object): |
| def __init__(self, trees, pkg_cache, graph, mergelist): |
| self.trees = trees |
| self.pkg_cache = pkg_cache |
| self.graph = graph |
| self.mergelist = mergelist |
| |
| def _wildcard_set(atoms): |
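| """ |
| Build an InternalPackageSet from a list of atom strings, allowing |
| wildcards. Arguments that fail to parse as atoms (e.g. a bare |
| package name such as "gcc") are retried with a "*/" category |
| wildcard prepended. |
| """ |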
| pkgs = InternalPackageSet(allow_wildcard=True) |
| for x in atoms: |
| try: |
| x = Atom(x, allow_wildcard=True, allow_repo=False) |
| except portage.exception.InvalidAtom: |
| x = Atom("*/" + x, allow_wildcard=True, allow_repo=False) |
| pkgs.add(x) |
| return pkgs |
| |
| class _frozen_depgraph_config(object): |
| |
| def __init__(self, settings, trees, myopts, spinner): |
| self.settings = settings |
| self.target_root = settings["EROOT"] |
| self.myopts = myopts |
| self.edebug = 0 |
| if settings.get("PORTAGE_DEBUG", "") == "1": |
| self.edebug = 1 |
| self.spinner = spinner |
| self._running_root = trees[trees._running_eroot]["root_config"] |
| self.pkgsettings = {} |
| self.trees = {} |
| self._trees_orig = trees |
| self.roots = {} |
| # All Package instances |
| self._pkg_cache = {} |
| self._highest_license_masked = {} |
| dynamic_deps = myopts.get("--dynamic-deps", "y") != "n" |
| ignore_built_slot_operator_deps = myopts.get( |
| "--ignore-built-slot-operator-deps", "n") == "y" |
| for myroot in trees: |
| self.trees[myroot] = {} |
| # Create a RootConfig instance that references |
| # the FakeVartree instead of the real one. |
| self.roots[myroot] = RootConfig( |
| trees[myroot]["vartree"].settings, |
| self.trees[myroot], |
| trees[myroot]["root_config"].setconfig) |
| for tree in ("porttree", "bintree"): |
| self.trees[myroot][tree] = trees[myroot][tree] |
| self.trees[myroot]["vartree"] = \ |
| FakeVartree(trees[myroot]["root_config"], |
| pkg_cache=self._pkg_cache, |
| pkg_root_config=self.roots[myroot], |
| dynamic_deps=dynamic_deps, |
| ignore_built_slot_operator_deps=ignore_built_slot_operator_deps) |
| self.pkgsettings[myroot] = portage.config( |
| clone=self.trees[myroot]["vartree"].settings) |
| |
| self._required_set_names = set(["world"]) |
| |
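| # Each of these options may be given multiple times on the command |
| # line, so myopts holds a list of values; join and re-split them to |
| # get a flat list of atom strings before building the wildcard sets. |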
| atoms = ' '.join(myopts.get("--exclude", [])).split() |
| self.excluded_pkgs = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split() |
| self.reinstall_atoms = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split() |
| self.usepkg_exclude = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split() |
| self.useoldpkg_atoms = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split() |
| self.rebuild_exclude = _wildcard_set(atoms) |
| atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split() |
| self.rebuild_ignore = _wildcard_set(atoms) |
| |
| self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts |
| self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts |
| self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts |
| |
| class _depgraph_sets(object): |
| def __init__(self): |
| # contains all sets added to the graph |
| self.sets = {} |
| # contains non-set atoms given as arguments |
| self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True) |
| # contains all atoms from all sets added to the graph, including |
| # atoms given as arguments |
| self.atoms = InternalPackageSet(allow_repo=True) |
| self.atom_arg_map = {} |
| |
| class _rebuild_config(object): |
| def __init__(self, frozen_config, backtrack_parameters): |
| self._graph = digraph() |
| self._frozen_config = frozen_config |
| self.rebuild_list = backtrack_parameters.rebuild_list.copy() |
| self.orig_rebuild_list = self.rebuild_list.copy() |
| self.reinstall_list = backtrack_parameters.reinstall_list.copy() |
| self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev |
| self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver |
| self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt |
| self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or |
| self.rebuild_if_unbuilt) |
| |
| def add(self, dep_pkg, dep): |
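| """ |
| Record a build-time dependency edge (dep_pkg -> parent) for later |
| use by trigger_rebuilds(). Edges are only recorded for built |
| parents, and never for atoms matched by --rebuild-exclude or |
| --rebuild-ignore. |
| """ |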
| parent = dep.collapsed_parent |
| priority = dep.collapsed_priority |
| rebuild_exclude = self._frozen_config.rebuild_exclude |
| rebuild_ignore = self._frozen_config.rebuild_ignore |
| if (self.rebuild and isinstance(parent, Package) and |
| parent.built and priority.buildtime and |
| isinstance(dep_pkg, Package) and |
| not rebuild_exclude.findAtomForPackage(parent) and |
| not rebuild_ignore.findAtomForPackage(dep_pkg)): |
| self._graph.add(dep_pkg, parent, priority) |
| |
| def _needs_rebuild(self, dep_pkg): |
| """Check whether packages that depend on dep_pkg need to be rebuilt.""" |
| dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom) |
| if dep_pkg.built or dep_root_slot in self.orig_rebuild_list: |
| return False |
| |
| if self.rebuild_if_unbuilt: |
| # dep_pkg is being installed from source, so binary |
| # packages for parents are invalid. Force rebuild |
| return True |
| |
| trees = self._frozen_config.trees |
| vardb = trees[dep_pkg.root]["vartree"].dbapi |
| if self.rebuild_if_new_rev: |
| # Parent packages are valid if a package with the same |
| # cpv is already installed. |
| return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom) |
| |
| # Otherwise, parent packages are valid if a package with the same |
| # version (excluding revision) is already installed. |
| assert self.rebuild_if_new_ver |
| cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1] |
| for inst_cpv in vardb.match(dep_pkg.slot_atom): |
| inst_cpv_norev = catpkgsplit(inst_cpv)[:-1] |
| if inst_cpv_norev == cpv_norev: |
| return False |
| |
| return True |
| |
| def _trigger_rebuild(self, parent, build_deps): |
| root_slot = (parent.root, parent.slot_atom) |
| if root_slot in self.rebuild_list: |
| return False |
| trees = self._frozen_config.trees |
| reinstall = False |
| for slot_atom, dep_pkg in build_deps.items(): |
| dep_root_slot = (dep_pkg.root, slot_atom) |
| if self._needs_rebuild(dep_pkg): |
| self.rebuild_list.add(root_slot) |
| return True |
| elif ("--usepkg" in self._frozen_config.myopts and |
| (dep_root_slot in self.reinstall_list or |
| dep_root_slot in self.rebuild_list or |
| not dep_pkg.installed)): |
| |
| # A direct rebuild dependency is being installed. We |
| # should update the parent as well to the latest binary, |
| # if that binary is valid. |
| # |
| # To validate the binary, we check whether all of the |
| # rebuild dependencies are present on the same binhost. |
| # |
| # 1) If parent is present on the binhost, but one of its |
| # rebuild dependencies is not, then the parent should |
| # be rebuilt from source. |
| # 2) Otherwise, the parent binary is assumed to be valid, |
| # because all of its rebuild dependencies are |
| # consistent. |
| bintree = trees[parent.root]["bintree"] |
| uri = bintree.get_pkgindex_uri(parent.cpv) |
| dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv) |
| bindb = bintree.dbapi |
| if self.rebuild_if_new_ver and uri and uri != dep_uri: |
| cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1] |
| for cpv in bindb.match(dep_pkg.slot_atom): |
| if cpv_norev == catpkgsplit(cpv)[:-1]: |
| dep_uri = bintree.get_pkgindex_uri(cpv) |
| if uri == dep_uri: |
| break |
| if uri and uri != dep_uri: |
| # 1) Remote binary package is invalid because it was |
| # built without dep_pkg. Force rebuild. |
| self.rebuild_list.add(root_slot) |
| return True |
| elif (parent.installed and |
| root_slot not in self.reinstall_list): |
| try: |
| bin_build_time, = bindb.aux_get(parent.cpv, |
| ["BUILD_TIME"]) |
| except KeyError: |
| continue |
| if bin_build_time != _unicode(parent.build_time): |
| # 2) Remote binary package is valid, and local package |
| # is not up to date. Force reinstall. |
| reinstall = True |
| if reinstall: |
| self.reinstall_list.add(root_slot) |
| return reinstall |
| |
| def trigger_rebuilds(self): |
| """ |
| Trigger rebuilds where necessary. If pkgA has been updated, and pkgB |
| depends on pkgA at both build-time and run-time, pkgB needs to be |
| rebuilt. |
| """ |
| need_restart = False |
| graph = self._graph |
| build_deps = {} |
| |
| leaf_nodes = deque(graph.leaf_nodes()) |
| |
| # Trigger rebuilds bottom-up (starting with the leaves) so that parents |
| # will always know which children are being rebuilt. |
| while graph: |
| if not leaf_nodes: |
| # We'll have to drop an edge. This should be quite rare. |
| leaf_nodes.append(graph.order[-1]) |
| |
| node = leaf_nodes.popleft() |
| if node not in graph: |
| # This can be triggered by circular dependencies. |
| continue |
| slot_atom = node.slot_atom |
| |
| # Remove our leaf node from the graph, keeping track of deps. |
| parents = graph.parent_nodes(node) |
| graph.remove(node) |
| node_build_deps = build_deps.get(node, {}) |
| for parent in parents: |
| if parent == node: |
| # Ignore a direct cycle. |
| continue |
| parent_bdeps = build_deps.setdefault(parent, {}) |
| parent_bdeps[slot_atom] = node |
| if not graph.child_nodes(parent): |
| leaf_nodes.append(parent) |
| |
| # Trigger rebuilds for our leaf node. Because all of our children |
| # have been processed, the build_deps will be completely filled in, |
| # and self.rebuild_list / self.reinstall_list will tell us whether |
| # any of our children need to be rebuilt or reinstalled. |
| if self._trigger_rebuild(node, node_build_deps): |
| need_restart = True |
| |
| return need_restart |
| |
| |
| class _dynamic_depgraph_config(object): |
| |
| def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters): |
| self.myparams = myparams.copy() |
| self._vdb_loaded = False |
| self._allow_backtracking = allow_backtracking |
| # Maps nodes to the reasons they were selected for reinstallation. |
| self._reinstall_nodes = {} |
| # Contains a filtered view of preferred packages that are selected |
| # from available repositories. |
| self._filtered_trees = {} |
| # Contains installed packages and new packages that have been added |
| # to the graph. |
| self._graph_trees = {} |
| # Caches visible packages returned from _select_package, for use in |
| # depgraph._iter_atoms_for_pkg() SLOT logic. |
| self._visible_pkgs = {} |
| # contains the args created by select_files |
| self._initial_arg_list = [] |
| self.digraph = portage.digraph() |
| # manages sets added to the graph |
| self.sets = {} |
| # contains all nodes pulled in by self.sets |
| self._set_nodes = set() |
| # Contains only Blocker -> Uninstall edges |
| self._blocker_uninstalls = digraph() |
| # Contains only Package -> Blocker edges |
| self._blocker_parents = digraph() |
| # Contains only irrelevant Package -> Blocker edges |
| self._irrelevant_blockers = digraph() |
| # Contains only unsolvable Package -> Blocker edges |
| self._unsolvable_blockers = digraph() |
| # Contains all Blocker -> Blocked Package edges |
| self._blocked_pkgs = digraph() |
| # Contains world packages that have been protected from |
| # uninstallation but may not have been added to the graph |
| # if the graph is not complete yet. |
| self._blocked_world_pkgs = {} |
| # Contains packages whose dependencies have been traversed. |
| # This is used to check whether we have accounted for blockers |
| # relevant to a package. |
| self._traversed_pkg_deps = set() |
| self._parent_atoms = {} |
| self._slot_conflict_handler = None |
| self._circular_dependency_handler = None |
| self._serialized_tasks_cache = None |
| self._scheduler_graph = None |
| self._displayed_list = None |
| self._pprovided_args = [] |
| self._missing_args = [] |
| self._masked_installed = set() |
| self._masked_license_updates = set() |
| self._unsatisfied_deps_for_display = [] |
| self._unsatisfied_blockers_for_display = None |
| self._circular_deps_for_display = None |
| self._dep_stack = [] |
| self._dep_disjunctive_stack = [] |
| self._unsatisfied_deps = [] |
| self._initially_unsatisfied_deps = [] |
| self._ignored_deps = [] |
| self._highest_pkg_cache = {} |
| |
| # Binary packages that have been rejected because their USE |
| # didn't match the user's config. It maps packages to a set |
| # of flags causing the rejection. |
| self.ignored_binaries = {} |
| |
| self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords |
| self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes |
| self._needed_license_changes = backtrack_parameters.needed_license_changes |
| self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes |
| self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask |
| self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed |
| self._prune_rebuilds = backtrack_parameters.prune_rebuilds |
| self._need_restart = False |
| # For conditions that always require user intervention, such as |
| # unsatisfied REQUIRED_USE (currently has no autounmask support). |
| self._skip_restart = False |
| self._backtrack_infos = {} |
| |
| self._buildpkgonly_deps_unsatisfied = False |
| self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n' |
| self._success_without_autounmask = False |
| self._traverse_ignored_deps = False |
| self._complete_mode = False |
| self._slot_operator_deps = {} |
| self._package_tracker = PackageTracker() |
| # Track missed updates caused by solved conflicts. |
| self._conflict_missed_update = collections.defaultdict(dict) |
| |
| for myroot in depgraph._frozen_config.trees: |
| self.sets[myroot] = _depgraph_sets() |
| vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi |
| # This dbapi instance will model the state that the vdb will |
| # have after new packages have been installed. |
| fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker) |
| |
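| # A bare function object serves as a lightweight tree stand-in |
| # here: dep_check() only needs the .dbapi attribute of the trees |
| # it is handed, so there is no need for a full tree class. |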
| def graph_tree(): |
| pass |
| graph_tree.dbapi = fakedb |
| self._graph_trees[myroot] = {} |
| self._filtered_trees[myroot] = {} |
| # Substitute the graph tree for the vartree in dep_check() since we |
| # want atom selections to be consistent with package selections |
| # that have already been made. |
| self._graph_trees[myroot]["porttree"] = graph_tree |
| self._graph_trees[myroot]["vartree"] = graph_tree |
| self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi |
| self._graph_trees[myroot]["graph"] = self.digraph |
| self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg |
| def filtered_tree(): |
| pass |
| filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot) |
| self._filtered_trees[myroot]["porttree"] = filtered_tree |
| self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings) |
| |
| # Passing in graph_tree as the vartree here could lead to better |
| # atom selections in some cases by causing atoms for packages that |
| # have been added to the graph to be preferred over other choices. |
| # However, it can trigger atom selections that result in |
| # unresolvable direct circular dependencies. For example, this |
| # happens with gwydion-dylan which depends on either itself or |
| # gwydion-dylan-bin. In case gwydion-dylan is not yet installed, |
| # gwydion-dylan-bin needs to be selected in order to avoid an |
| # unresolvable direct circular dependency. |
| # |
| # To solve the problem described above, pass in "graph_db" so that |
| # packages that have been added to the graph are distinguishable |
| # from other available packages and installed packages. Also, pass |
| # the parent package into self._select_atoms() calls so that |
| # unresolvable direct circular dependencies can be detected and |
| # avoided when possible. |
| self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi |
| self._filtered_trees[myroot]["graph"] = self.digraph |
| self._filtered_trees[myroot]["vartree"] = \ |
| depgraph._frozen_config.trees[myroot]["vartree"] |
| self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg |
| |
| dbs = [] |
| # (db, pkg_type, built, installed, db_keys) |
| if "remove" in self.myparams: |
| # For removal operations, use _dep_check_composite_db |
| # for availability and visibility checks. This provides |
| # consistency with install operations, so we don't |
| # get install/uninstall cycles like in bug #332719. |
| self._graph_trees[myroot]["porttree"] = filtered_tree |
| else: |
| if "--usepkgonly" not in depgraph._frozen_config.myopts: |
| portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi |
| db_keys = list(portdb._aux_cache_keys) |
| dbs.append((portdb, "ebuild", False, False, db_keys)) |
| |
| if "--usepkg" in depgraph._frozen_config.myopts: |
| bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi |
| db_keys = list(bindb._aux_cache_keys) |
| dbs.append((bindb, "binary", True, False, db_keys)) |
| |
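| # The installed-package db is always consulted, regardless of |
| # the options handled above. |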
| vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi |
| db_keys = list(depgraph._frozen_config._trees_orig[myroot |
| ]["vartree"].dbapi._aux_cache_keys) |
| dbs.append((vardb, "installed", True, True, db_keys)) |
| self._filtered_trees[myroot]["dbs"] = dbs |
| |
| class depgraph(object): |
| |
| pkg_tree_map = RootConfig.pkg_tree_map |
| |
| def __init__(self, settings, trees, myopts, myparams, spinner, |
| frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False): |
| if frozen_config is None: |
| frozen_config = _frozen_depgraph_config(settings, trees, |
| myopts, spinner) |
| self._frozen_config = frozen_config |
| self._dynamic_config = _dynamic_depgraph_config(self, myparams, |
| allow_backtracking, backtrack_parameters) |
| self._rebuild = _rebuild_config(frozen_config, backtrack_parameters) |
| |
| self._select_atoms = self._select_atoms_highest_available |
| self._select_package = self._select_pkg_highest_available |
| |
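| # Reuse the global event loop when portage itself is the caller; |
| # when used through the API, create a private loop so that a |
| # consumer's own event loop is left undisturbed. |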
| self._event_loop = (portage._internal_caller and |
| global_event_loop() or EventLoop(main=False)) |
| |
| self.query = UserQuery(myopts).query |
| |
| def _load_vdb(self): |
| """ |
| Load installed package metadata if appropriate. This used to be called |
| from the constructor, but that wasn't very nice since this procedure |
| is slow and it generates spinner output. So, now it's called on-demand |
| by various methods when necessary. |
| """ |
| |
| if self._dynamic_config._vdb_loaded: |
| return |
| |
| for myroot in self._frozen_config.trees: |
| |
| dynamic_deps = self._dynamic_config.myparams.get( |
| "dynamic_deps", "y") != "n" |
| preload_installed_pkgs = \ |
| "--nodeps" not in self._frozen_config.myopts |
| |
| fake_vartree = self._frozen_config.trees[myroot]["vartree"] |
| if not fake_vartree.dbapi: |
| # This needs to be called for the first depgraph, but not for |
| # backtracking depgraphs that share the same frozen_config. |
| fake_vartree.sync() |
| |
| # FakeVartree.sync() populates virtuals, and we want |
| # self.pkgsettings to have them populated too. |
| self._frozen_config.pkgsettings[myroot] = \ |
| portage.config(clone=fake_vartree.settings) |
| |
| if preload_installed_pkgs: |
| vardb = fake_vartree.dbapi |
| |
| if not dynamic_deps: |
| for pkg in vardb: |
| self._dynamic_config._package_tracker.add_installed_pkg(pkg) |
| else: |
| max_jobs = self._frozen_config.myopts.get("--jobs") |
| max_load = self._frozen_config.myopts.get("--load-average") |
| scheduler = TaskScheduler( |
| self._dynamic_deps_preload(fake_vartree), |
| max_jobs=max_jobs, |
| max_load=max_load, |
| event_loop=fake_vartree._portdb._event_loop) |
| scheduler.start() |
| scheduler.wait() |
| |
| self._dynamic_config._vdb_loaded = True |
| |
| def _dynamic_deps_preload(self, fake_vartree): |
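| """ |
| Generator yielding EbuildMetadataPhase tasks for installed |
| packages whose metadata is not available from the ebuild cache, |
| allowing the TaskScheduler to regenerate dynamic deps in |
| parallel. Valid cached metadata is applied synchronously. |
| """ |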
| portdb = fake_vartree._portdb |
| for pkg in fake_vartree.dbapi: |
| self._spinner_update() |
| self._dynamic_config._package_tracker.add_installed_pkg(pkg) |
| ebuild_path, repo_path = \ |
| portdb.findname2(pkg.cpv, myrepo=pkg.repo) |
| if ebuild_path is None: |
| fake_vartree.dynamic_deps_preload(pkg, None) |
| continue |
| metadata, ebuild_hash = portdb._pull_valid_cache( |
| pkg.cpv, ebuild_path, repo_path) |
| if metadata is not None: |
| fake_vartree.dynamic_deps_preload(pkg, metadata) |
| else: |
| proc = EbuildMetadataPhase(cpv=pkg.cpv, |
| ebuild_hash=ebuild_hash, |
| portdb=portdb, repo_path=repo_path, |
| settings=portdb.doebuild_settings) |
| proc.addExitListener( |
| self._dynamic_deps_proc_exit(pkg, fake_vartree)) |
| yield proc |
| |
| class _dynamic_deps_proc_exit(object): |
| |
| __slots__ = ('_pkg', '_fake_vartree') |
| |
| def __init__(self, pkg, fake_vartree): |
| self._pkg = pkg |
| self._fake_vartree = fake_vartree |
| |
| def __call__(self, proc): |
| metadata = None |
| if proc.returncode == os.EX_OK: |
| metadata = proc.metadata |
| self._fake_vartree.dynamic_deps_preload(self._pkg, metadata) |
| |
| def _spinner_update(self): |
| if self._frozen_config.spinner: |
| self._frozen_config.spinner.update() |
| |
| def _compute_abi_rebuild_info(self): |
| """ |
| Fill self._forced_rebuilds with packages that cause rebuilds. |
| """ |
| |
| debug = "--debug" in self._frozen_config.myopts |
| |
| # Get all atoms that might have caused a forced rebuild. |
| atoms = {} |
| for s in self._dynamic_config._initial_arg_list: |
| if s.force_reinstall: |
| root = s.root_config.root |
| atoms.setdefault(root, set()).update(s.pset) |
| |
| if debug: |
| writemsg_level("forced reinstall atoms:\n", |
| level=logging.DEBUG, noiselevel=-1) |
| |
| for root in atoms: |
| writemsg_level(" root: %s\n" % root, |
| level=logging.DEBUG, noiselevel=-1) |
| for atom in atoms[root]: |
| writemsg_level(" atom: %s\n" % atom, |
| level=logging.DEBUG, noiselevel=-1) |
| writemsg_level("\n\n", |
| level=logging.DEBUG, noiselevel=-1) |
| |
| # Go through all slot operator deps and check if one of these deps |
| # has a parent that is matched by one of the atoms from above. |
| forced_rebuilds = {} |
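| # forced_rebuilds maps root -> child package -> set of parent |
| # packages whose rebuilds were forced by that child. |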
| for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items(): |
| rebuild_atoms = atoms.get(root, set()) |
| |
| for dep in deps: |
| if not isinstance(dep.parent, Package): |
| continue |
| |
| if dep.parent.installed or dep.child.installed or \ |
| dep.parent.slot_atom not in rebuild_atoms: |
| continue |
| |
| # Make sure the child's slot/subslot has changed. If it hasn't, |
| # then another child has forced this rebuild. |
| installed_pkg = self._select_pkg_from_installed(root, dep.child.slot_atom)[0] |
| if installed_pkg and installed_pkg.slot == dep.child.slot and \ |
| installed_pkg.sub_slot == dep.child.sub_slot: |
| continue |
| |
| # The child has forced a rebuild of the parent |
| forced_rebuilds.setdefault(root, {}).setdefault(dep.child, set()).add(dep.parent) |
| |
| if debug: |
| writemsg_level("slot operator dependencies:\n", |
| level=logging.DEBUG, noiselevel=-1) |
| |
| for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items(): |
| writemsg_level(" (%s, %s)\n" % \ |
| (root, slot_atom), level=logging.DEBUG, noiselevel=-1) |
| for dep in deps: |
| writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1) |
| writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1) |
| |
| writemsg_level("\n\n", |
| level=logging.DEBUG, noiselevel=-1) |
| |
| |
| writemsg_level("forced rebuilds:\n", |
| level=logging.DEBUG, noiselevel=-1) |
| |
| for root in forced_rebuilds: |
| writemsg_level(" root: %s\n" % root, |
| level=logging.DEBUG, noiselevel=-1) |
| for child in forced_rebuilds[root]: |
| writemsg_level(" child: %s\n" % child, |
| level=logging.DEBUG, noiselevel=-1) |
| for parent in forced_rebuilds[root][child]: |
| writemsg_level(" parent: %s\n" % parent, |
| level=logging.DEBUG, noiselevel=-1) |
| writemsg_level("\n\n", |
| level=logging.DEBUG, noiselevel=-1) |
| |
| self._forced_rebuilds = forced_rebuilds |
| |
| def _show_abi_rebuild_info(self): |
| |
| if not self._forced_rebuilds: |
| return |
| |
| writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1) |
| |
| for root in self._forced_rebuilds: |
| for child in self._forced_rebuilds[root]: |
| writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1) |
| for parent in self._forced_rebuilds[root][child]: |
| writemsg_stdout(" %s\n" % (parent,), noiselevel=-1) |
| |
| def _show_ignored_binaries(self): |
| """ |
| Show binaries that have been ignored because their USE didn't |
| match the user's config. |
| """ |
| if not self._dynamic_config.ignored_binaries \ |
| or '--quiet' in self._frozen_config.myopts \ |
| or self._dynamic_config.myparams.get( |
| "binpkg_respect_use") in ("y", "n"): |
| return |
| |
| for pkg in list(self._dynamic_config.ignored_binaries): |
| |
| for selected_pkg in self._dynamic_config._package_tracker.match( |
| pkg.root, pkg.slot_atom): |
| |
| if selected_pkg > pkg: |
| self._dynamic_config.ignored_binaries.pop(pkg) |
| break |
| |
| if selected_pkg.installed and \ |
| selected_pkg.cpv == pkg.cpv and \ |
| selected_pkg.build_time == pkg.build_time: |
| # We don't care about ignored binaries when an |
| # identical installed instance is selected to |
| # fill the slot. |
| self._dynamic_config.ignored_binaries.pop(pkg) |
| break |
| |
| if not self._dynamic_config.ignored_binaries: |
| return |
| |
| self._show_merge_list() |
| |
| writemsg("\n!!! The following binary packages have been ignored " + \ |
| "due to non matching USE:\n\n", noiselevel=-1) |
| |
| for pkg, flags in self._dynamic_config.ignored_binaries.items(): |
| flag_display = [] |
| for flag in sorted(flags): |
| if flag not in pkg.use.enabled: |
| flag = "-" + flag |
| flag_display.append(flag) |
| flag_display = " ".join(flag_display) |
| # The user can paste this line into package.use |
| writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1) |
| if pkg.root_config.settings["ROOT"] != "/": |
| writemsg(" # for %s" % (pkg.root,), noiselevel=-1) |
| writemsg("\n", noiselevel=-1) |
| |
| msg = [ |
| "", |
| "NOTE: The --binpkg-respect-use=n option will prevent emerge", |
| " from ignoring these binary packages if possible.", |
| " Using --binpkg-respect-use=y will silence this warning." |
| ] |
| |
| for line in msg: |
| if line: |
| line = colorize("INFORM", line) |
| writemsg(line + "\n", noiselevel=-1) |
| |
| def _get_missed_updates(self): |
| |
| # In order to minimize noise, show only the highest |
| # missed update from each SLOT. |
| missed_updates = {} |
| for pkg, mask_reasons in \ |
| chain(self._dynamic_config._runtime_pkg_mask.items(), |
| self._dynamic_config._conflict_missed_update.items()): |
| if pkg.installed: |
| # Exclude installed here since we only |
| # want to show available updates. |
| continue |
| missed_update = True |
| any_selected = False |
| for chosen_pkg in self._dynamic_config._package_tracker.match( |
| pkg.root, pkg.slot_atom): |
| any_selected = True |
| if chosen_pkg > pkg or (not chosen_pkg.installed and \ |
| chosen_pkg.version == pkg.version): |
| missed_update = False |
| break |
| if any_selected and missed_update: |
| k = (pkg.root, pkg.slot_atom) |
| if k in missed_updates: |
| other_pkg, mask_type, parent_atoms = missed_updates[k] |
| if other_pkg > pkg: |
| continue |
| for mask_type, parent_atoms in mask_reasons.items(): |
| if not parent_atoms: |
| continue |
| missed_updates[k] = (pkg, mask_type, parent_atoms) |
| break |
| |
| return missed_updates |
| |
| def _show_missed_update(self): |
| |
| missed_updates = self._get_missed_updates() |
| |
| if not missed_updates: |
| return |
| |
| missed_update_types = {} |
| for pkg, mask_type, parent_atoms in missed_updates.values(): |
| missed_update_types.setdefault(mask_type, |
| []).append((pkg, parent_atoms)) |
| |
| if '--quiet' in self._frozen_config.myopts and \ |
| '--debug' not in self._frozen_config.myopts: |
| missed_update_types.pop("slot conflict", None) |
| missed_update_types.pop("missing dependency", None) |
| |
| self._show_missed_update_slot_conflicts( |
| missed_update_types.get("slot conflict")) |
| |
| self._show_missed_update_unsatisfied_dep( |
| missed_update_types.get("missing dependency")) |
| |
| def _show_missed_update_unsatisfied_dep(self, missed_updates): |
| |
| if not missed_updates: |
| return |
| |
| self._show_merge_list() |
| backtrack_masked = [] |
| |
| for pkg, parent_atoms in missed_updates: |
| |
| try: |
| for parent, root, atom in parent_atoms: |
| self._show_unsatisfied_dep(root, atom, myparent=parent, |
| check_backtrack=True) |
| except self._backtrack_mask: |
| # This is displayed below in abbreviated form. |
| backtrack_masked.append((pkg, parent_atoms)) |
| continue |
| |
| writemsg("\n!!! The following update has been skipped " + \ |
| "due to unsatisfied dependencies:\n\n", noiselevel=-1) |
| |
| writemsg(str(pkg.slot_atom), noiselevel=-1) |
| if pkg.root_config.settings["ROOT"] != "/": |
| writemsg(" for %s" % (pkg.root,), noiselevel=-1) |
| writemsg("\n", noiselevel=-1) |
| |
| for parent, root, atom in parent_atoms: |
| self._show_unsatisfied_dep(root, atom, myparent=parent) |
| writemsg("\n", noiselevel=-1) |
| |
| if backtrack_masked: |
| # These are shown in abbreviated form, in order to avoid terminal |
| # flooding from mask messages as reported in bug #285832. |
| writemsg("\n!!! The following update(s) have been skipped " + \ |
| "due to unsatisfied dependencies\n" + \ |
| "!!! triggered by backtracking:\n\n", noiselevel=-1) |
| for pkg, parent_atoms in backtrack_masked: |
| writemsg(str(pkg.slot_atom), noiselevel=-1) |
| if pkg.root_config.settings["ROOT"] != "/": |
| writemsg(" for %s" % (pkg.root,), noiselevel=-1) |
| writemsg("\n", noiselevel=-1) |
| |
| def _show_missed_update_slot_conflicts(self, missed_updates): |
| |
| if not missed_updates: |
| return |
| |
| self._show_merge_list() |
| msg = [] |
| msg.append("\nWARNING: One or more updates/rebuilds have been " + \ |
| "skipped due to a dependency conflict:\n\n") |
| |
| indent = " " |
| for pkg, parent_atoms in missed_updates: |
| msg.append(str(pkg.slot_atom)) |
| if pkg.root_config.settings["ROOT"] != "/": |
| msg.append(" for %s" % (pkg.root,)) |
| msg.append("\n\n") |
| |
| msg.append(indent) |
| msg.append(str(pkg)) |
| msg.append(" conflicts with\n") |
| |
| for parent, atom in parent_atoms: |
| if isinstance(parent, |
| (PackageArg, AtomArg)): |
| # For PackageArg and AtomArg types, it's |
| # redundant to display the atom attribute. |
| msg.append(2*indent) |
| msg.append(str(parent)) |
| msg.append("\n") |
| else: |
| # Display the specific atom from SetArg or |
| # Package types. |
| atom, marker = format_unmatched_atom( |
| pkg, atom, self._pkg_use_enabled) |
| |
| msg.append(2*indent) |
| msg.append("%s required by %s\n" % (atom, parent)) |
| msg.append(2*indent) |
| msg.append(marker) |
| msg.append("\n") |
| msg.append("\n") |
| |
| writemsg("".join(msg), noiselevel=-1) |
| |
| def _show_slot_collision_notice(self): |
| """Show an informational message advising the user to mask one of the |
| the packages. In some cases it may be possible to resolve this |
| automatically, but support for backtracking (removal nodes that have |
| already been selected) will be required in order to handle all possible |
| cases. |
| """ |
| |
| if not any(self._dynamic_config._package_tracker.slot_conflicts()): |
| return |
| |
| self._show_merge_list() |
| |
| self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self) |
| handler = self._dynamic_config._slot_conflict_handler |
| |
| conflict = handler.get_conflict() |
| writemsg(conflict, noiselevel=-1) |
| |
| explanation = handler.get_explanation() |
| if explanation: |
| writemsg(explanation, noiselevel=-1) |
| return |
| |
| if "--quiet" in self._frozen_config.myopts: |
| return |
| |
| msg = [] |
| msg.append("It may be possible to solve this problem ") |
| msg.append("by using package.mask to prevent one of ") |
| msg.append("those packages from being selected. ") |
| msg.append("However, it is also possible that conflicting ") |
| msg.append("dependencies exist such that they are impossible to ") |
| msg.append("satisfy simultaneously. If such a conflict exists in ") |
| msg.append("the dependencies of two different packages, then those ") |
| msg.append("packages can not be installed simultaneously.") |
| backtrack_opt = self._frozen_config.myopts.get('--backtrack') |
| if not self._dynamic_config._allow_backtracking and \ |
| (backtrack_opt is None or \ |
| (backtrack_opt > 0 and backtrack_opt < 30)): |
| msg.append(" You may want to try a larger value of the ") |
| msg.append("--backtrack option, such as --backtrack=30, ") |
| msg.append("in order to see if that will solve this conflict ") |
| msg.append("automatically.") |
| |
| for line in textwrap.wrap(''.join(msg), 70): |
| writemsg(line + '\n', noiselevel=-1) |
| writemsg('\n', noiselevel=-1) |
| |
| msg = [] |
| msg.append("For more information, see MASKED PACKAGES ") |
| msg.append("section in the emerge man page or refer ") |
| msg.append("to the Gentoo Handbook.") |
| for line in textwrap.wrap(''.join(msg), 70): |
| writemsg(line + '\n', noiselevel=-1) |
| writemsg('\n', noiselevel=-1) |
| |
| def _solve_non_slot_operator_slot_conflicts(self): |
| """ |
| This function solves slot conflicts which can |
| be solved by simply choosing one of the conflicting |
| packages and removing all the other ones. |
| It is also able to solve somewhat more complex cases where |
| several conflicts can only be solved simultaneously. |
| """ |
| debug = "--debug" in self._frozen_config.myopts |
| |
| # List all conflicts. Ignore those that involve slot operator rebuilds |
| # as the logic there needs special slot conflict behavior which isn't |
| # provided by this function. |
| conflicts = [] |
| for conflict in self._dynamic_config._package_tracker.slot_conflicts(): |
| slot_key = conflict.root, conflict.atom |
| if slot_key not in self._dynamic_config._slot_operator_replace_installed: |
| conflicts.append(conflict) |
| |
| if not conflicts: |
| return |
| |
| if debug: |
| writemsg_level( |
| "\n!!! Slot conflict handler started.\n", |
| level=logging.DEBUG, noiselevel=-1) |
| |
| # Get a set of all conflicting packages. |
| conflict_pkgs = set() |
| for conflict in conflicts: |
| conflict_pkgs.update(conflict) |
| |
| # Get the list of other packages which are only |
| # required by conflict packages. |
| indirect_conflict_candidates = set() |
| for pkg in conflict_pkgs: |
| indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg)) |
| indirect_conflict_candidates.difference_update(conflict_pkgs) |
| |
| indirect_conflict_pkgs = set() |
| while indirect_conflict_candidates: |
| pkg = indirect_conflict_candidates.pop() |
| |
| only_conflict_parents = True |
| for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): |
| if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs: |
| only_conflict_parents = False |
| break |
| if not only_conflict_parents: |
| continue |
| |
| indirect_conflict_pkgs.add(pkg) |
| for child in self._dynamic_config.digraph.child_nodes(pkg): |
| if child in conflict_pkgs or child in indirect_conflict_pkgs: |
| continue |
| indirect_conflict_candidates.add(child) |
| |
| # Create a graph containing the conflict packages |
| # and a special 'non_conflict_node' that represents |
| # all non-conflict packages. |
| conflict_graph = digraph() |
| |
| non_conflict_node = "(non-conflict package)" |
| conflict_graph.add(non_conflict_node, None) |
| |
| for pkg in chain(conflict_pkgs, indirect_conflict_pkgs): |
| conflict_graph.add(pkg, None) |
| |
| # Add parent->child edges for each conflict package. |
| # Parents that aren't conflict packages are represented |
| # by 'non_conflict_node'. |
| # If several conflicting packages are matched, but not all, |
| # add a tuple with the matched packages to the graph. |
| class or_tuple(tuple): |
| """ |
| Helper class for debug printing. |
| """ |
| def __str__(self): |
| return "(%s)" % ",".join(str(pkg) for pkg in self) |
| |
| for conflict in conflicts: |
| if debug: |
| writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1) |
| writemsg_level(" root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1) |
| writemsg_level(" atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1) |
| for pkg in conflict: |
| writemsg_level(" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1) |
| |
| all_parent_atoms = set() |
| for pkg in conflict: |
| all_parent_atoms.update( |
| self._dynamic_config._parent_atoms.get(pkg, [])) |
| |
| for parent, atom in all_parent_atoms: |
| is_arg_parent = isinstance(parent, AtomArg) |
| is_non_conflict_parent = parent not in conflict_pkgs and \ |
| parent not in indirect_conflict_pkgs |
| |
| if debug: |
| writemsg_level(" parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1) |
| writemsg_level(" arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent), |
| level=logging.DEBUG, noiselevel=-1) |
| writemsg_level(" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1) |
| |
| if is_non_conflict_parent: |
| parent = non_conflict_node |
| |
| atom_set = InternalPackageSet( |
| initial_atoms=(atom,), allow_repo=True) |
| |
| matched = [] |
| for pkg in conflict: |
| if atom_set.findAtomForPackage(pkg, \ |
| modified_use=self._pkg_use_enabled(pkg)) and \ |
| not (is_arg_parent and pkg.installed): |
| matched.append(pkg) |
| |
| if debug: |
| for match in matched: |
| writemsg_level(" match: %s\n" % match, level=logging.DEBUG, noiselevel=-1) |
| |
| if len(matched) == len(conflict): |
| # All packages match. |
| continue |
| elif len(matched) == 1: |
| conflict_graph.add(matched[0], parent) |
| else: |
| # More than one package matched, but not all. |
| conflict_graph.add(or_tuple(matched), parent) |
| |
| for pkg in indirect_conflict_pkgs: |
| for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): |
| if parent not in conflict_pkgs and \ |
| parent not in indirect_conflict_pkgs: |
| parent = non_conflict_node |
| conflict_graph.add(pkg, parent) |
| |
| if debug: |
| writemsg_level( |
| "\n!!! Slot conflict graph:\n", |
| level=logging.DEBUG, noiselevel=-1) |
| conflict_graph.debug_print() |
| |
| # Now select required packages. Collect them in the |
| # 'forced' set. |
| forced = set([non_conflict_node]) |
| unexplored = set([non_conflict_node]) |
| # or_tuples get special handling. We first explore |
| # all packages in the hope of having forced one of |
| # the packages in the tuple. This way we don't have |
| # to choose one. |
| unexplored_tuples = set() |
| |
| while unexplored: |
| # Handle all unexplored packages. |
| while unexplored: |
| node = unexplored.pop() |
| for child in conflict_graph.child_nodes(node): |
| if child in forced: |
| continue |
| forced.add(child) |
| if isinstance(child, Package): |
| unexplored.add(child) |
| else: |
| unexplored_tuples.add(child) |
| |
| # Now handle unexplored or_tuples. Return to exploring |
| # packages as soon as we have had to choose one. |
| while unexplored_tuples: |
| nodes = unexplored_tuples.pop() |
| if any(node in forced for node in nodes): |
| # At least one of the packages in the |
| # tuple is already forced, which means the |
| # dependency represented by this tuple |
| # is satisfied. |
| continue |
| |
| # We now have to choose one of the packages in the tuple. |
| # In theory we could solve more conflicts if we were able |
| # to try different choices here, but that has lots |
| # of other problems. For now choose the package that was |
| # pulled first, as this should be the most desirable choice |
| # (otherwise it wouldn't have been the first one). |
| forced.add(nodes[0]) |
| unexplored.add(nodes[0]) |
| break |
| |
| # Remove 'non_conflict_node' and or_tuples from 'forced'. |
| forced = set(pkg for pkg in forced if isinstance(pkg, Package)) |
| non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced) |
| |
| if debug: |
| writemsg_level( |
| "\n!!! Slot conflict solution:\n", |
| level=logging.DEBUG, noiselevel=-1) |
| for conflict in conflicts: |
| writemsg_level( |
| " Conflict: (%s, %s)\n" % (conflict.root, conflict.atom), |
| level=logging.DEBUG, noiselevel=-1) |
| for pkg in conflict: |
| if pkg in forced: |
| writemsg_level( |
| " keep: %s\n" % pkg, |
| level=logging.DEBUG, noiselevel=-1) |
| else: |
| writemsg_level( |
| " remove: %s\n" % pkg, |
| level=logging.DEBUG, noiselevel=-1) |
| |
| broken_packages = set() |
| for pkg in non_forced: |
| for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []): |
| if isinstance(parent, Package) and parent not in non_forced: |
| # Non-forcing set args are expected to be a parent of all |
| # packages in the conflict. |
| broken_packages.add(parent) |
| self._remove_pkg(pkg) |
| |
| # Process the dependencies of chosen conflict packages |
| # again to properly account for blockers. |
| broken_packages.update(forced) |
| |
| # Filter out broken packages which have been removed during |
| # recursive removal in self._remove_pkg. |
| broken_packages = list(pkg for pkg in broken_packages \ |
| if self._dynamic_config._package_tracker.contains(pkg, installed=False)) |
| |
| self._dynamic_config._dep_stack.extend(broken_packages) |
| |
| if broken_packages: |
| # Process dependencies. This cannot fail because we just ensured that |
| # the remaining packages satisfy all dependencies. |
| self._create_graph() |
| |
| # Record missed updates. |
| for conflict in conflicts: |
| if not any(pkg in non_forced for pkg in conflict): |
| continue |
| for pkg in conflict: |
| if pkg not in non_forced: |
| continue |
| |
| for other in conflict: |
| if other is pkg: |
| continue |
| |
| for parent, atom in self._dynamic_config._parent_atoms.get(other, []): |
| atom_set = InternalPackageSet( |
| initial_atoms=(atom,), allow_repo=True) |
| if not atom_set.findAtomForPackage(pkg, |
| modified_use=self._pkg_use_enabled(pkg)): |
| self._dynamic_config._conflict_missed_update[pkg].setdefault( |
| "slot conflict", set()) |
| self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add( |
| (parent, atom)) |
| |
| |
| def _process_slot_conflicts(self): |
| """ |
| If there are any slot conflicts and backtracking is enabled, |
| _complete_graph should complete the graph before this method |
| is called, so that all relevant reverse dependencies are |
| available for use in backtracking decisions. |
| """ |
| |
| self._solve_non_slot_operator_slot_conflicts() |
| |
| for conflict in self._dynamic_config._package_tracker.slot_conflicts(): |
| self._process_slot_conflict(conflict) |
| |
| def _process_slot_conflict(self, conflict): |
| """ |
| Process slot conflict data to identify specific atoms which |
| lead to conflict. These atoms only match a subset of the |
| packages that have been pulled into a given slot. |
| """ |
| root = conflict.root |
| slot_atom = conflict.atom |
| slot_nodes = conflict.pkgs |
| |
| debug = "--debug" in self._frozen_config.myopts |
| |
| slot_parent_atoms = set() |
| for pkg in slot_nodes: |
| parent_atoms = self._dynamic_config._parent_atoms.get(pkg) |
| if not parent_atoms: |
| continue |
| slot_parent_atoms.update(parent_atoms) |
| |
| conflict_pkgs = [] |
| conflict_atoms = {} |
| for pkg in slot_nodes: |
| |
| if self._dynamic_config._allow_backtracking and \ |
| pkg in self._dynamic_config._runtime_pkg_mask: |
| if debug: |
| writemsg_level( |
| "!!! backtracking loop detected: %s %s\n" % \ |
| (pkg, |
| self._dynamic_config._runtime_pkg_mask[pkg]), |
| level=logging.DEBUG, noiselevel=-1) |
| |
| parent_atoms = self._dynamic_config._parent_atoms.get(pkg) |
| if parent_atoms is None: |
| parent_atoms = set() |
| self._dynamic_config._parent_atoms[pkg] = parent_atoms |
| |
| all_match = True |
| for parent_atom in slot_parent_atoms: |
| if parent_atom in parent_atoms: |
| continue |
| # Use package set for matching since it will match via |
| # PROVIDE when necessary, while match_from_list does not. |
| parent, atom = parent_atom |
| atom_set = InternalPackageSet( |
| initial_atoms=(atom,), allow_repo=True) |
| if atom_set.findAtomForPackage(pkg, |
| modified_use=self._pkg_use_enabled(pkg)): |
| parent_atoms.add(parent_atom) |
| else: |
| all_match = False |
| conflict_atoms.setdefault(parent_atom, set()).add(pkg) |
| |
| if not all_match: |
| conflict_pkgs.append(pkg) |
| |
| if conflict_pkgs and \ |
| self._dynamic_config._allow_backtracking and \ |
| not self._accept_blocker_conflicts(): |
| remaining = [] |
| for pkg in conflict_pkgs: |
| if self._slot_conflict_backtrack_abi(pkg, |
| slot_nodes, conflict_atoms): |
| backtrack_infos = self._dynamic_config._backtrack_infos |
| config = backtrack_infos.setdefault("config", {}) |
| config.setdefault("slot_conflict_abi", set()).add(pkg) |
| else: |
| remaining.append(pkg) |
| if remaining: |
| self._slot_confict_backtrack(root, slot_atom, |
| slot_parent_atoms, remaining) |
| |
| def _slot_confict_backtrack(self, root, slot_atom, |
| all_parents, conflict_pkgs): |
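| """ |
| Schedule backtracking that masks one of the conflicting packages, |
| recording for each candidate the parent atoms that failed to |
| match it, for use in missed-update messages. |
| """ |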
| |
| debug = "--debug" in self._frozen_config.myopts |
| existing_node = next(self._dynamic_config._package_tracker.match( |
| root, slot_atom, installed=False)) |
| # In order to avoid a missed update, first mask lower versions |
| # that conflict with higher versions (the backtracker visits |
| # these in reverse order). |
| conflict_pkgs.sort(reverse=True) |
| backtrack_data = [] |
| for to_be_masked in conflict_pkgs: |
| # For missed update messages, find out which |
| # parent atoms did not match to_be_masked. |
| parent_atoms = \ |
| self._dynamic_config._parent_atoms.get(to_be_masked, set()) |
| conflict_atoms = set(parent_atom for parent_atom in all_parents \ |
| if parent_atom not in parent_atoms) |
| backtrack_data.append((to_be_masked, conflict_atoms)) |
| |
| to_be_masked = backtrack_data[-1][0] |
| |
| self._dynamic_config._backtrack_infos.setdefault( |
| "slot conflict", []).append(backtrack_data) |
| self._dynamic_config._need_restart = True |
| if debug: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("backtracking due to slot conflict:") |
| msg.append(" first package: %s" % existing_node) |
| msg.append(" package to mask: %s" % to_be_masked) |
| msg.append(" slot: %s" % slot_atom) |
| msg.append(" parents: %s" % ", ".join( \ |
| "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents)) |
| msg.append("") |
| writemsg_level("".join("%s\n" % l for l in msg), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms): |
| """ |
| If one or more conflict atoms have a slot/sub-slot dep that can be resolved |
| by rebuilding the parent package, then schedule the rebuild via |
| backtracking, and return True. Otherwise, return False. |
| """ |
| |
| found_update = False |
| for parent_atom, conflict_pkgs in conflict_atoms.items(): |
| parent, atom = parent_atom |
| |
| if not isinstance(parent, Package): |
| continue |
| |
| if atom.slot_operator != "=" or not parent.built: |
| continue |
| |
| if pkg not in conflict_pkgs: |
| continue |
| |
| for other_pkg in slot_nodes: |
| if other_pkg in conflict_pkgs: |
| continue |
| |
| dep = Dependency(atom=atom, child=other_pkg, |
| parent=parent, root=pkg.root) |
| |
| new_dep = \ |
| self._slot_operator_update_probe_slot_conflict(dep) |
| if new_dep is not None: |
| self._slot_operator_update_backtrack(dep, |
| new_dep=new_dep) |
| found_update = True |
| |
| return found_update |
| |
| def _slot_change_probe(self, dep): |
| """ |
| @rtype: Package or None |
| @return: the unbuilt ebuild that dep.child should be rebuilt |
| against, if dep.child's sub-slot has changed (without |
| revbump, as in bug #456208), otherwise None. |
| """ |
| if not (isinstance(dep.parent, Package) and \ |
| not dep.parent.built and dep.child.built): |
| return None |
| |
| root_config = self._frozen_config.roots[dep.root] |
| matches = [] |
| try: |
| matches.append(self._pkg(dep.child.cpv, "ebuild", |
| root_config, myrepo=dep.child.repo)) |
| except PackageNotFound: |
| pass |
| |
| for unbuilt_child in chain(matches, |
| self._iter_match_pkgs(root_config, "ebuild", |
| Atom("=%s" % (dep.child.cpv,)))): |
| if unbuilt_child in self._dynamic_config._runtime_pkg_mask: |
| continue |
| if self._frozen_config.excluded_pkgs.findAtomForPackage( |
| unbuilt_child, |
| modified_use=self._pkg_use_enabled(unbuilt_child)): |
| continue |
| if not self._pkg_visibility_check(unbuilt_child): |
| continue |
| break |
| else: |
| return None |
| |
| if unbuilt_child.slot == dep.child.slot and \ |
| unbuilt_child.sub_slot == dep.child.sub_slot: |
| return None |
| |
| return unbuilt_child |
| |
| def _slot_change_backtrack(self, dep, new_child_slot): |
| child = dep.child |
| if "--debug" in self._frozen_config.myopts: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("backtracking due to slot/sub-slot change:") |
| msg.append(" child package: %s" % child) |
| msg.append(" child slot: %s/%s" % |
| (child.slot, child.sub_slot)) |
| msg.append(" new child: %s" % new_child_slot) |
| msg.append(" new child slot: %s/%s" % |
| (new_child_slot.slot, new_child_slot.sub_slot)) |
| msg.append(" parent package: %s" % dep.parent) |
| msg.append(" atom: %s" % dep.atom) |
| msg.append("") |
| writemsg_level("\n".join(msg), |
| noiselevel=-1, level=logging.DEBUG) |
| backtrack_infos = self._dynamic_config._backtrack_infos |
| config = backtrack_infos.setdefault("config", {}) |
| |
| # mask unwanted binary packages if necessary |
| masks = {} |
| if not child.installed: |
| masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None |
| if masks: |
| config.setdefault("slot_operator_mask_built", {}).update(masks) |
| |
| # trigger replacement of installed packages if necessary |
| reinstalls = set() |
| if child.installed: |
| replacement_atom = self._replace_installed_atom(child) |
| if replacement_atom is not None: |
| reinstalls.add((child.root, replacement_atom)) |
| if reinstalls: |
| config.setdefault("slot_operator_replace_installed", |
| set()).update(reinstalls) |
| |
| self._dynamic_config._need_restart = True |
| |
| def _slot_operator_update_backtrack(self, dep, new_child_slot=None, |
| new_dep=None): |
| if new_child_slot is None: |
| child = dep.child |
| else: |
| child = new_child_slot |
| if "--debug" in self._frozen_config.myopts: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("backtracking due to missed slot abi update:") |
| msg.append(" child package: %s" % child) |
| if new_child_slot is not None: |
| msg.append(" new child slot package: %s" % new_child_slot) |
| msg.append(" parent package: %s" % dep.parent) |
| if new_dep is not None: |
| msg.append(" new parent pkg: %s" % new_dep.parent) |
| msg.append(" atom: %s" % dep.atom) |
| msg.append("") |
| writemsg_level("\n".join(msg), |
| noiselevel=-1, level=logging.DEBUG) |
| backtrack_infos = self._dynamic_config._backtrack_infos |
| config = backtrack_infos.setdefault("config", {}) |
| |
| # mask unwanted binary packages if necessary |
| abi_masks = {} |
| if new_child_slot is None: |
| if not child.installed: |
| abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None |
| if not dep.parent.installed: |
| abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None |
| if abi_masks: |
| config.setdefault("slot_operator_mask_built", {}).update(abi_masks) |
| |
| # trigger replacement of installed packages if necessary |
| abi_reinstalls = set() |
| if dep.parent.installed: |
| if new_dep is not None: |
| replacement_atom = new_dep.parent.slot_atom |
| else: |
| replacement_atom = self._replace_installed_atom(dep.parent) |
| if replacement_atom is not None: |
| abi_reinstalls.add((dep.parent.root, replacement_atom)) |
| if new_child_slot is None and child.installed: |
| replacement_atom = self._replace_installed_atom(child) |
| if replacement_atom is not None: |
| abi_reinstalls.add((child.root, replacement_atom)) |
| if abi_reinstalls: |
| config.setdefault("slot_operator_replace_installed", |
| set()).update(abi_reinstalls) |
| |
| self._dynamic_config._need_restart = True |
| |
| def _slot_operator_update_probe_slot_conflict(self, dep): |
| new_dep = self._slot_operator_update_probe(dep, slot_conflict=True) |
| |
| if new_dep is not None: |
| return new_dep |
| |
| if self._dynamic_config._autounmask is True: |
| |
| for autounmask_level in self._autounmask_levels(): |
| |
| new_dep = self._slot_operator_update_probe(dep, |
| slot_conflict=True, autounmask_level=autounmask_level) |
| |
| if new_dep is not None: |
| return new_dep |
| |
| return None |
| |
| def _slot_operator_update_probe(self, dep, new_child_slot=False, |
| slot_conflict=False, autounmask_level=None): |
| """ |
| slot/sub-slot := operators tend to prevent updates from getting pulled in, |
| since installed packages pull in packages with the slot/sub-slot that they |
| were built against. Detect this case so that we can schedule rebuilds |
| and reinstalls when appropriate. |
| NOTE: This function only searches for updates that involve upgrades |
| to higher versions, since the logic required to detect when a |
| downgrade would be desirable is not implemented. |
| """ |
| |
| if dep.child.installed and \ |
| self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child, |
| modified_use=self._pkg_use_enabled(dep.child)): |
| return None |
| |
| if dep.parent.installed and \ |
| self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent, |
| modified_use=self._pkg_use_enabled(dep.parent)): |
| return None |
| |
| debug = "--debug" in self._frozen_config.myopts |
| selective = "selective" in self._dynamic_config.myparams |
| want_downgrade = None |
| |
| def check_reverse_dependencies(existing_pkg, candidate_pkg): |
| """ |
| Check if candidate_pkg satisfies all of existing_pkg's non- |
| slot operator parents. |
| """ |
| for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []): |
| if atom.slot_operator == "=" and getattr(parent, "built", False): |
| continue |
| |
| atom_set = InternalPackageSet(initial_atoms=(atom,), |
| allow_repo=True) |
| if not atom_set.findAtomForPackage(candidate_pkg, |
| modified_use=self._pkg_use_enabled(candidate_pkg)): |
| return False |
| return True |
| |
| |
| for replacement_parent in self._iter_similar_available(dep.parent, |
| dep.parent.slot_atom, autounmask_level=autounmask_level): |
| |
| if not check_reverse_dependencies(dep.parent, replacement_parent): |
| continue |
| |
| selected_atoms = None |
| |
| atoms = set() |
| invalid_metadata = False |
| for dep_key in ("DEPEND", "HDEPEND", "RDEPEND", "PDEPEND"): |
| dep_string = replacement_parent._metadata[dep_key] |
| if not dep_string: |
| continue |
| |
| try: |
| dep_string = portage.dep.use_reduce(dep_string, |
| uselist=self._pkg_use_enabled(replacement_parent), |
| is_valid_flag=replacement_parent.iuse.is_valid_flag, |
| flat=True, token_class=Atom, |
| eapi=replacement_parent.eapi) |
| except portage.exception.InvalidDependString: |
| invalid_metadata = True |
| break |
| |
| atoms.update(token for token in dep_string if isinstance(token, Atom)) |
| |
| if invalid_metadata: |
| continue |
| |
			# Lists of (candidate child, atom) pairs, one list per atom.
| replacement_candidates = [] |
			# Set of candidate packages that all atoms agree on.
| all_candidate_pkgs = None |
| |
| for atom in atoms: |
| if atom.blocker or \ |
| atom.cp != dep.atom.cp: |
| continue |
| |
				# Discard USE deps; we're only searching for an approximate
				# pattern, and dealing with USE states is too complex for
				# this purpose.
| unevaluated_atom = atom.unevaluated_atom |
| atom = atom.without_use |
| |
| if replacement_parent.built and \ |
| portage.dep._match_slot(atom, dep.child): |
| # Our selected replacement_parent appears to be built |
| # for the existing child selection. So, discard this |
| # parent and search for another. |
| break |
| |
| candidate_pkg_atoms = [] |
| candidate_pkgs = [] |
| for pkg in self._iter_similar_available( |
| dep.child, atom): |
| if pkg.slot == dep.child.slot and \ |
| pkg.sub_slot == dep.child.sub_slot: |
| # If slot/sub-slot is identical, then there's |
| # no point in updating. |
| continue |
| if new_child_slot: |
| if pkg.slot == dep.child.slot: |
| continue |
| if pkg < dep.child: |
| # the new slot only matters if the |
| # package version is higher |
| continue |
| else: |
| if pkg.slot != dep.child.slot: |
| continue |
| if pkg < dep.child: |
| if want_downgrade is None: |
| want_downgrade = self._downgrade_probe(dep.child) |
						# Be careful not to trigger a rebuild when the
						# only version available with a different
						# slot/sub-slot is an older version.
| if not want_downgrade: |
| continue |
| if pkg.version == dep.child.version and not dep.child.built: |
| continue |
| |
| insignificant = False |
| if not slot_conflict and \ |
| selective and \ |
| dep.parent.installed and \ |
| dep.child.installed and \ |
| dep.parent >= replacement_parent and \ |
| dep.child.cpv == pkg.cpv: |
					# This can happen if the child's sub-slot changed
| # without a revision bump. The sub-slot change is |
| # considered insignificant until one of its parent |
| # packages needs to be rebuilt (which may trigger a |
| # slot conflict). |
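					# (Illustrative: an installed foo-1.0 with
					# SLOT="0/1" versus the same foo-1.0 ebuild whose
					# SLOT became "0/2" in place; hypothetical package.)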
| insignificant = True |
| |
| if not insignificant: |
| # Evaluate USE conditionals and || deps, in order |
| # to see if this atom is really desirable, since |
| # otherwise we may trigger an undesirable rebuild |
| # as in bug #460304. |
| if selected_atoms is None: |
| selected_atoms = self._select_atoms_probe( |
| dep.child.root, replacement_parent) |
| if unevaluated_atom not in selected_atoms: |
| continue |
| |
| if not insignificant and \ |
| check_reverse_dependencies(dep.child, pkg): |
| |
| candidate_pkg_atoms.append((pkg, unevaluated_atom)) |
| candidate_pkgs.append(pkg) |
| replacement_candidates.append(candidate_pkg_atoms) |
| if all_candidate_pkgs is None: |
| all_candidate_pkgs = set(candidate_pkgs) |
| else: |
| all_candidate_pkgs.intersection_update(candidate_pkgs) |
| |
| if not all_candidate_pkgs: |
| # If the atoms that connect parent and child can't agree on |
| # any replacement child, we can't do anything. |
| continue |
| |
			# Now select one of the pkgs as replacement. This is as easy
			# as selecting the highest version.
			# The more complicated part is choosing an atom for the new
			# Dependency object: choose the one whose candidate list
			# ranked the selected package highest.
| selected = None |
| for candidate_pkg_atoms in replacement_candidates: |
| for i, (pkg, atom) in enumerate(candidate_pkg_atoms): |
| if pkg not in all_candidate_pkgs: |
| continue |
| if selected is None or \ |
| selected[0] < pkg or \ |
| (selected[0] is pkg and i < selected[2]): |
| selected = (pkg, atom, i) |
| |
| if debug: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("slot_operator_update_probe:") |
| msg.append(" existing child package: %s" % dep.child) |
| msg.append(" existing parent package: %s" % dep.parent) |
| msg.append(" new child package: %s" % selected[0]) |
| msg.append(" new parent package: %s" % replacement_parent) |
| msg.append("") |
| writemsg_level("\n".join(msg), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| return Dependency(parent=replacement_parent, |
| child=selected[0], atom=selected[1]) |
| |
| if debug: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("slot_operator_update_probe:") |
| msg.append(" existing child package: %s" % dep.child) |
| msg.append(" existing parent package: %s" % dep.parent) |
| msg.append(" new child package: %s" % None) |
| msg.append(" new parent package: %s" % None) |
| msg.append("") |
| writemsg_level("\n".join(msg), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| return None |
| |
| def _slot_operator_unsatisfied_probe(self, dep): |
| |
| if dep.parent.installed and \ |
| self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent, |
| modified_use=self._pkg_use_enabled(dep.parent)): |
| return False |
| |
| debug = "--debug" in self._frozen_config.myopts |
| |
| for replacement_parent in self._iter_similar_available(dep.parent, |
| dep.parent.slot_atom): |
| |
| for atom in replacement_parent.validated_atoms: |
				if atom.slot_operator != "=" or \
| atom.blocker or \ |
| atom.cp != dep.atom.cp: |
| continue |
| |
				# Discard USE deps; we're only searching for an approximate
				# pattern, and dealing with USE states is too complex for
				# this purpose.
| atom = atom.without_use |
| |
| pkg, existing_node = self._select_package(dep.root, atom, |
| onlydeps=dep.onlydeps) |
| |
| if pkg is not None: |
| |
| if debug: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("slot_operator_unsatisfied_probe:") |
| msg.append(" existing parent package: %s" % dep.parent) |
| msg.append(" existing parent atom: %s" % dep.atom) |
| msg.append(" new parent package: %s" % replacement_parent) |
| msg.append(" new child package: %s" % pkg) |
| msg.append("") |
| writemsg_level("\n".join(msg), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| return True |
| |
| if debug: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("slot_operator_unsatisfied_probe:") |
| msg.append(" existing parent package: %s" % dep.parent) |
| msg.append(" existing parent atom: %s" % dep.atom) |
| msg.append(" new parent package: %s" % None) |
| msg.append(" new child package: %s" % None) |
| msg.append("") |
| writemsg_level("\n".join(msg), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| return False |
| |
| def _slot_operator_unsatisfied_backtrack(self, dep): |
| |
| parent = dep.parent |
| |
| if "--debug" in self._frozen_config.myopts: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("backtracking due to unsatisfied " |
| "built slot-operator dep:") |
| msg.append(" parent package: %s" % parent) |
| msg.append(" atom: %s" % dep.atom) |
| msg.append("") |
| writemsg_level("\n".join(msg), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| backtrack_infos = self._dynamic_config._backtrack_infos |
| config = backtrack_infos.setdefault("config", {}) |
| |
| # mask unwanted binary packages if necessary |
| masks = {} |
| if not parent.installed: |
| masks.setdefault(parent, {})["slot_operator_mask_built"] = None |
| if masks: |
| config.setdefault("slot_operator_mask_built", {}).update(masks) |
| |
| # trigger replacement of installed packages if necessary |
| reinstalls = set() |
| if parent.installed: |
| replacement_atom = self._replace_installed_atom(parent) |
| if replacement_atom is not None: |
| reinstalls.add((parent.root, replacement_atom)) |
| if reinstalls: |
| config.setdefault("slot_operator_replace_installed", |
| set()).update(reinstalls) |
| |
| self._dynamic_config._need_restart = True |
| |
| def _downgrade_probe(self, pkg): |
| """ |
| Detect cases where a downgrade of the given package is considered |
| desirable due to the current version being masked or unavailable. |
| """ |
| available_pkg = None |
| for available_pkg in self._iter_similar_available(pkg, |
| pkg.slot_atom): |
| if available_pkg >= pkg: |
| # There's an available package of the same or higher |
| # version, so downgrade seems undesirable. |
| return False |
| |
| return available_pkg is not None |
| |
| def _select_atoms_probe(self, root, pkg): |
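		"""
		Return the frozenset of unevaluated atoms that _select_atoms
		chooses for pkg's dep strings. This is used to check whether a
		given atom remains selected once USE conditionals and || dep
		preferences have been evaluated.
		"""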
| selected_atoms = [] |
| use = self._pkg_use_enabled(pkg) |
| for k in pkg._dep_keys: |
| v = pkg._metadata.get(k) |
| if not v: |
| continue |
| selected_atoms.extend(self._select_atoms( |
| root, v, myuse=use, parent=pkg)[pkg]) |
| return frozenset(x.unevaluated_atom for |
| x in selected_atoms) |
| |
| def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None): |
| """ |
| Given a package that's in the graph, do a rough check to |
| see if a similar package is available to install. The given |
| graph_pkg itself may be yielded only if it's not installed. |
| """ |
| |
| usepkgonly = "--usepkgonly" in self._frozen_config.myopts |
| useoldpkg_atoms = self._frozen_config.useoldpkg_atoms |
| use_ebuild_visibility = self._frozen_config.myopts.get( |
| '--use-ebuild-visibility', 'n') != 'n' |
| |
| for pkg in self._iter_match_pkgs_any( |
| graph_pkg.root_config, atom): |
| if pkg.cp != graph_pkg.cp: |
| # discard old-style virtual match |
| continue |
| if pkg.installed: |
| continue |
| if pkg in self._dynamic_config._runtime_pkg_mask: |
| continue |
| if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, |
| modified_use=self._pkg_use_enabled(pkg)): |
| continue |
| if pkg.built: |
| if self._equiv_binary_installed(pkg): |
| continue |
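				# Require a visible equivalent ebuild, unless ebuild
				# visibility checks are waived because
				# --use-ebuild-visibility is disabled and either
				# --usepkgonly is in effect or the package matches
				# --useoldpkg-atoms.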
| if not (not use_ebuild_visibility and |
| (usepkgonly or useoldpkg_atoms.findAtomForPackage( |
| pkg, modified_use=self._pkg_use_enabled(pkg)))) and \ |
| not self._equiv_ebuild_visible(pkg, |
| autounmask_level=autounmask_level): |
| continue |
| if not self._pkg_visibility_check(pkg, |
| autounmask_level=autounmask_level): |
| continue |
| yield pkg |
| |
| def _replace_installed_atom(self, inst_pkg): |
| """ |
| Given an installed package, generate an atom suitable for |
| slot_operator_replace_installed backtracking info. The replacement |
| SLOT may differ from the installed SLOT, so first search by cpv. |
| """ |
| built_pkgs = [] |
| for pkg in self._iter_similar_available(inst_pkg, |
| Atom("=%s" % inst_pkg.cpv)): |
| if not pkg.built: |
| return pkg.slot_atom |
| elif not pkg.installed: |
| # avoid using SLOT from a built instance |
| built_pkgs.append(pkg) |
| |
| for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom): |
| if not pkg.built: |
| return pkg.slot_atom |
| elif not pkg.installed: |
| # avoid using SLOT from a built instance |
| built_pkgs.append(pkg) |
| |
| if built_pkgs: |
| best_version = None |
| for pkg in built_pkgs: |
| if best_version is None or pkg > best_version: |
| best_version = pkg |
| return best_version.slot_atom |
| |
| return None |
| |
| def _slot_operator_trigger_reinstalls(self): |
| """ |
| Search for packages with slot-operator deps on older slots, and schedule |
| rebuilds if they can link to a newer slot that's in the graph. |
| """ |
| |
| rebuild_if_new_slot = self._dynamic_config.myparams.get( |
| "rebuild_if_new_slot", "y") == "y" |
| |
| for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items(): |
| |
| for dep in slot_info: |
| |
| atom = dep.atom |
| if atom.slot_operator is None: |
| continue |
| |
| if not atom.slot_operator_built: |
| new_child_slot = self._slot_change_probe(dep) |
| if new_child_slot is not None: |
| self._slot_change_backtrack(dep, new_child_slot) |
| continue |
| |
| if not (dep.parent and |
| isinstance(dep.parent, Package) and dep.parent.built): |
| continue |
| |
| # Check for slot update first, since we don't want to |
| # trigger reinstall of the child package when a newer |
| # slot will be used instead. |
| if rebuild_if_new_slot: |
| new_dep = self._slot_operator_update_probe(dep, |
| new_child_slot=True) |
| if new_dep is not None: |
| self._slot_operator_update_backtrack(dep, |
| new_child_slot=new_dep.child) |
| |
| if dep.want_update: |
| if self._slot_operator_update_probe(dep): |
| self._slot_operator_update_backtrack(dep) |
| |
| def _reinstall_for_flags(self, pkg, forced_flags, |
| orig_use, orig_iuse, cur_use, cur_iuse): |
| """Return a set of flags that trigger reinstallation, or None if there |
| are no such flags.""" |
| |
| # binpkg_respect_use: Behave like newuse by default. If newuse is |
| # False and changed_use is True, then behave like changed_use. |
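		# Resulting behavior (illustrative summary):
		#   --newuse                 -> IUSE and enabled-flag changes
		#   binpkg_respect_use alone -> same comparison as --newuse
		#   --reinstall=changed-use  -> enabled-flag changes only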
| binpkg_respect_use = (pkg.built and |
| self._dynamic_config.myparams.get("binpkg_respect_use") |
| in ("y", "auto")) |
| newuse = "--newuse" in self._frozen_config.myopts |
| changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall") |
| feature_flags = _get_feature_flags( |
| _get_eapi_attrs(pkg.eapi)) |
| |
| if newuse or (binpkg_respect_use and not changed_use): |
| flags = set(orig_iuse.symmetric_difference( |
| cur_iuse).difference(forced_flags)) |
| flags.update(orig_iuse.intersection(orig_use).symmetric_difference( |
| cur_iuse.intersection(cur_use))) |
| flags.difference_update(feature_flags) |
| if flags: |
| return flags |
| |
| elif changed_use or binpkg_respect_use: |
| flags = set(orig_iuse.intersection(orig_use).symmetric_difference( |
| cur_iuse.intersection(cur_use))) |
| flags.difference_update(feature_flags) |
| if flags: |
| return flags |
| return None |
| |
| def _create_graph(self, allow_unsatisfied=False): |
| dep_stack = self._dynamic_config._dep_stack |
| dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack |
| while dep_stack or dep_disjunctive_stack: |
| self._spinner_update() |
| while dep_stack: |
| dep = dep_stack.pop() |
| if isinstance(dep, Package): |
| if not self._add_pkg_deps(dep, |
| allow_unsatisfied=allow_unsatisfied): |
| return 0 |
| continue |
| if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied): |
| return 0 |
| if dep_disjunctive_stack: |
| if not self._pop_disjunction(allow_unsatisfied): |
| return 0 |
| return 1 |
| |
| def _expand_set_args(self, input_args, add_to_digraph=False): |
| """ |
| Iterate over a list of DependencyArg instances and yield all |
| instances given in the input together with additional SetArg |
| instances that are generated from nested sets. |
| @param input_args: An iterable of DependencyArg instances |
| @type input_args: Iterable |
| @param add_to_digraph: If True then add SetArg instances |
| to the digraph, in order to record parent -> child |
| relationships from nested sets |
| @type add_to_digraph: Boolean |
| @rtype: Iterable |
| @return: All args given in the input together with additional |
| SetArg instances that are generated from nested sets |
| """ |
| |
| traversed_set_args = set() |
| |
| for arg in input_args: |
| if not isinstance(arg, SetArg): |
| yield arg |
| continue |
| |
| root_config = arg.root_config |
| depgraph_sets = self._dynamic_config.sets[root_config.root] |
| arg_stack = [arg] |
| while arg_stack: |
| arg = arg_stack.pop() |
| if arg in traversed_set_args: |
| continue |
| traversed_set_args.add(arg) |
| |
| if add_to_digraph: |
| self._dynamic_config.digraph.add(arg, None, |
| priority=BlockerDepPriority.instance) |
| |
| yield arg |
| |
| # Traverse nested sets and add them to the stack |
| # if they're not already in the graph. Also, graph |
| # edges between parent and nested sets. |
| for token in arg.pset.getNonAtoms(): |
| if not token.startswith(SETPREFIX): |
| continue |
| s = token[len(SETPREFIX):] |
| nested_set = depgraph_sets.sets.get(s) |
| if nested_set is None: |
| nested_set = root_config.sets.get(s) |
| if nested_set is not None: |
| nested_arg = SetArg(arg=token, pset=nested_set, |
| root_config=root_config) |
| arg_stack.append(nested_arg) |
| if add_to_digraph: |
| self._dynamic_config.digraph.add(nested_arg, arg, |
| priority=BlockerDepPriority.instance) |
| depgraph_sets.sets[nested_arg.name] = nested_arg.pset |
| |
| def _add_dep(self, dep, allow_unsatisfied=False): |
| debug = "--debug" in self._frozen_config.myopts |
| buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts |
| nodeps = "--nodeps" in self._frozen_config.myopts |
| if dep.blocker: |
| |
| # Slot collision nodes are not allowed to block other packages since |
| # blocker validation is only able to account for one package per slot. |
| is_slot_conflict_parent = any(dep.parent in conflict.pkgs[1:] for conflict in \ |
| self._dynamic_config._package_tracker.slot_conflicts()) |
| if not buildpkgonly and \ |
| not nodeps and \ |
| not dep.collapsed_priority.ignored and \ |
| not dep.collapsed_priority.optional and \ |
| not is_slot_conflict_parent: |
| if dep.parent.onlydeps: |
| # It's safe to ignore blockers if the |
| # parent is an --onlydeps node. |
| return 1 |
| # The blocker applies to the root where |
| # the parent is or will be installed. |
| blocker = Blocker(atom=dep.atom, |
| eapi=dep.parent.eapi, |
| priority=dep.priority, root=dep.parent.root) |
| self._dynamic_config._blocker_parents.add(blocker, dep.parent) |
| return 1 |
| |
| if dep.child is None: |
| dep_pkg, existing_node = self._select_package(dep.root, dep.atom, |
| onlydeps=dep.onlydeps) |
| else: |
| # The caller has selected a specific package |
| # via self._minimize_packages(). |
| dep_pkg = dep.child |
| existing_node = next(self._dynamic_config._package_tracker.match( |
| dep.root, dep_pkg.slot_atom, installed=False), None) |
| |
| if not dep_pkg: |
| if (dep.collapsed_priority.optional or |
| dep.collapsed_priority.ignored): |
| # This is an unnecessary build-time dep. |
| return 1 |
| if allow_unsatisfied: |
| self._dynamic_config._unsatisfied_deps.append(dep) |
| return 1 |
| self._dynamic_config._unsatisfied_deps_for_display.append( |
| ((dep.root, dep.atom), {"myparent":dep.parent})) |
| |
| # The parent node should not already be in |
| # runtime_pkg_mask, since that would trigger an |
| # infinite backtracking loop. |
| if self._dynamic_config._allow_backtracking: |
| if dep.parent in self._dynamic_config._runtime_pkg_mask: |
| if debug: |
| writemsg( |
| "!!! backtracking loop detected: %s %s\n" % \ |
| (dep.parent, |
| self._dynamic_config._runtime_pkg_mask[ |
| dep.parent]), noiselevel=-1) |
| elif dep.atom.slot_operator_built and \ |
| self._slot_operator_unsatisfied_probe(dep): |
| self._slot_operator_unsatisfied_backtrack(dep) |
| return 1 |
| else: |
				# Do not backtrack if only USE flags have to be changed in
				# order to satisfy the dependency. Note that when
				# want_restart_for_use_change sets the need_restart
				# flag, it causes _select_pkg_highest_available to
				# return None, and eventually we come through here
				# and skip the "missing dependency" backtracking path.
| dep_pkg, existing_node = \ |
| self._select_package(dep.root, dep.atom.without_use, |
| onlydeps=dep.onlydeps) |
| if dep_pkg is None: |
| self._dynamic_config._backtrack_infos["missing dependency"] = dep |
| self._dynamic_config._need_restart = True |
| if debug: |
| msg = [] |
| msg.append("") |
| msg.append("") |
| msg.append("backtracking due to unsatisfied dep:") |
| msg.append(" parent: %s" % dep.parent) |
| msg.append(" priority: %s" % dep.priority) |
| msg.append(" root: %s" % dep.root) |
| msg.append(" atom: %s" % dep.atom) |
| msg.append("") |
| writemsg_level("".join("%s\n" % l for l in msg), |
| noiselevel=-1, level=logging.DEBUG) |
| |
| return 0 |
| |
| self._rebuild.add(dep_pkg, dep) |
| |
| ignore = dep.collapsed_priority.ignored and \ |
| not self._dynamic_config._traverse_ignored_deps |
| if not ignore and not self._add_pkg(dep_pkg, dep): |
| return 0 |
| return 1 |
| |
| def _check_slot_conflict(self, pkg, atom): |
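		"""
		Check for an existing node occupying the same slot and report
		whether it is compatible: returns (existing_node, matches),
		where matches is True when the existing node can be reused to
		satisfy the given atom.
		"""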
| existing_node = next(self._dynamic_config._package_tracker.match( |
| pkg.root, pkg.slot_atom, installed=False), None) |
| |
| matches = None |
| if existing_node: |
| matches = pkg.cpv == existing_node.cpv |
| if pkg != existing_node and \ |
| atom is not None: |
| # Use package set for matching since it will match via |
| # PROVIDE when necessary, while match_from_list does not. |
| matches = bool(InternalPackageSet(initial_atoms=(atom,), |
| allow_repo=True).findAtomForPackage(existing_node, |
| modified_use=self._pkg_use_enabled(existing_node))) |
| |
| return (existing_node, matches) |
| |
| def _add_pkg(self, pkg, dep): |
| """ |
| Adds a package to the depgraph, queues dependencies, and handles |
| slot conflicts. |
| """ |
| debug = "--debug" in self._frozen_config.myopts |
| myparent = None |
| priority = None |
| depth = 0 |
| if dep is None: |
| dep = Dependency() |
| else: |
| myparent = dep.parent |
| priority = dep.priority |
| depth = dep.depth |
| if priority is None: |
| priority = DepPriority() |
| |
| if debug: |
| writemsg_level( |
| "\n%s%s %s\n" % ("Child:".ljust(15), pkg, |
| pkg_use_display(pkg, self._frozen_config.myopts, |
| modified_use=self._pkg_use_enabled(pkg))), |
| level=logging.DEBUG, noiselevel=-1) |
| if isinstance(myparent, |
| (PackageArg, AtomArg)): |
| # For PackageArg and AtomArg types, it's |
| # redundant to display the atom attribute. |
| writemsg_level( |
| "%s%s\n" % ("Parent Dep:".ljust(15), myparent), |
| level=logging.DEBUG, noiselevel=-1) |
| else: |
| # Display the specific atom from SetArg or |
| # Package types. |
| uneval = "" |
| if dep.atom and dep.atom.unevaluated_atom and \ |
| dep.atom is not dep.atom.unevaluated_atom: |
| uneval = " (%s)" % (dep.atom.unevaluated_atom,) |
| writemsg_level( |
| "%s%s%s required by %s\n" % |
| ("Parent Dep:".ljust(15), dep.atom, uneval, myparent), |
| level=logging.DEBUG, noiselevel=-1) |
| |
| # Ensure that the dependencies of the same package |
| # are never processed more than once. |
| previously_added = pkg in self._dynamic_config.digraph |
| |
| pkgsettings = self._frozen_config.pkgsettings[pkg.root] |
| |
		arg_atoms = None
		try:
			arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString as e:
			if not pkg.installed:
				# should have been masked before it was selected
				raise
			del e
| |
| # NOTE: REQUIRED_USE checks are delayed until after |
| # package selection, since we want to prompt the user |
| # for USE adjustment rather than have REQUIRED_USE |
| # affect package selection and || dep choices. |
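		# For example (illustrative), REQUIRED_USE="^^ ( gtk qt )" with
		# both flags enabled is reported here, where the user can be
		# prompted to adjust USE, rather than silently influencing
		# selection.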
| if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \ |
| eapi_has_required_use(pkg.eapi): |
| required_use_is_sat = check_required_use( |
| pkg._metadata["REQUIRED_USE"], |
| self._pkg_use_enabled(pkg), |
| pkg.iuse.is_valid_flag, |
| eapi=pkg.eapi) |
| if not required_use_is_sat: |
| if dep.atom is not None and dep.parent is not None: |
| self._add_parent_atom(pkg, (dep.parent, dep.atom)) |
| |
| if arg_atoms: |
| for parent_atom in arg_atoms: |
| parent, atom = parent_atom |
| self._add_parent_atom(pkg, parent_atom) |
| |
| atom = dep.atom |
| if atom is None: |
| atom = Atom("=" + pkg.cpv) |
| self._dynamic_config._unsatisfied_deps_for_display.append( |
| ((pkg.root, atom), |
| {"myparent" : dep.parent, "show_req_use" : pkg})) |
| self._dynamic_config._skip_restart = True |
| return 0 |
| |
| if not pkg.onlydeps: |
| |
| existing_node, existing_node_matches = \ |
| self._check_slot_conflict(pkg, dep.atom) |
| if existing_node: |
| if existing_node_matches: |
| # The existing node can be reused. |
| if pkg != existing_node: |
| pkg = existing_node |
| previously_added = True |
| try: |
| arg_atoms = list(self._iter_atoms_for_pkg(pkg)) |
| except InvalidDependString as e: |
| if not pkg.installed: |
| # should have been masked before |
| # it was selected |
| raise |
| |
| if debug: |
| writemsg_level( |
| "%s%s %s\n" % ("Re-used Child:".ljust(15), |
| pkg, pkg_use_display(pkg, |
| self._frozen_config.myopts, |
| modified_use=self._pkg_use_enabled(pkg))), |
| level=logging.DEBUG, noiselevel=-1) |
| |
| else: |
| if debug: |
| writemsg_level( |
| "%s%s %s\n" % ("Slot Conflict:".ljust(15), |
| existing_node, pkg_use_display(existing_node, |
| self._frozen_config.myopts, |
| modified_use=self._pkg_use_enabled(existing_node))), |
| level=logging.DEBUG, noiselevel=-1) |
| |
| if not previously_added: |
| self._dynamic_config._package_tracker.add_pkg(pkg) |
| self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache() |
| self._dynamic_config._highest_pkg_cache.clear() |
| self._check_masks(pkg) |
| |
| if not pkg.installed: |
| # Allow this package to satisfy old-style virtuals in case it |
| # doesn't already. Any pre-existing providers will be preferred |
| # over this one. |
| try: |
| pkgsettings.setinst(pkg.cpv, pkg._metadata) |
| # For consistency, also update the global virtuals. |
| settings = self._frozen_config.roots[pkg.root].settings |
| settings.unlock() |
| settings.setinst(pkg.cpv, pkg._metadata) |
| settings.lock() |
			except portage.exception.InvalidDependString:
				# pkg is not installed here, so it should have been
				# masked before it was selected
				raise
| |
| if arg_atoms: |
| self._dynamic_config._set_nodes.add(pkg) |
| |
| # Do this even for onlydeps, so that the |
| # parent/child relationship is always known in case |
| # self._show_slot_collision_notice() needs to be called later. |
| # If a direct circular dependency is not an unsatisfied |
| # buildtime dependency then drop it here since otherwise |
| # it can skew the merge order calculation in an unwanted |
| # way. |
| if pkg != dep.parent or \ |
| (priority.buildtime and not priority.satisfied): |
| self._dynamic_config.digraph.add(pkg, |
| dep.parent, priority=priority) |
| if dep.atom is not None and dep.parent is not None: |
| self._add_parent_atom(pkg, (dep.parent, dep.atom)) |
| |
| if arg_atoms: |
| for parent_atom in arg_atoms: |
| parent, atom = parent_atom |
| self._dynamic_config.digraph.add(pkg, parent, priority=priority) |
| self._add_parent_atom(pkg, parent_atom) |
| |
		# This section determines whether we go deeper into dependencies or not.
		# We want to go deeper on a few occasions:
		# When installing package A, we need to make sure package A's deps are met.
		# With emerge --deep <pkgspec>, we need to recursively check dependencies of pkgspec.
		# With --nodeps (no recursion), dependencies are not examined at all.
| if arg_atoms and depth > 0: |
| for parent, atom in arg_atoms: |
| if parent.reset_depth: |
| depth = 0 |
| break |
| |
| if previously_added and pkg.depth is not None: |
| depth = min(pkg.depth, depth) |
| pkg.depth = depth |
| deep = self._dynamic_config.myparams.get("deep", 0) |
| update = "--update" in self._frozen_config.myopts |
| |
| dep.want_update = (not self._dynamic_config._complete_mode and |
| (arg_atoms or update) and |
| not (deep is not True and depth > deep)) |
| |
| dep.child = pkg |
| if (not pkg.onlydeps and |
| dep.atom and dep.atom.slot_operator is not None): |
| self._add_slot_operator_dep(dep) |
| |
| recurse = deep is True or depth + 1 <= deep |
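		# Note that "deep" is either True (unlimited) or an integer depth
		# limit. Illustrative example with --deep=2: argument packages
		# have depth 0, recursion covers their deps (depth 1) and those
		# deps' deps (depth 2), and installed packages at the limit have
		# their own deps ignored.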
| dep_stack = self._dynamic_config._dep_stack |
| if "recurse" not in self._dynamic_config.myparams: |
| return 1 |
| elif pkg.installed and not recurse: |
| dep_stack = self._dynamic_config._ignored_deps |
| |
| self._spinner_update() |
| |
| if not previously_added: |
| dep_stack.append(pkg) |
| return 1 |
| |
| |
| def _remove_pkg(self, pkg): |
| """ |
| Remove a package and all its then parentless digraph |
| children from all depgraph datastructures. |
| """ |
| debug = "--debug" in self._frozen_config.myopts |
| if debug: |
| writemsg_level( |
| "Removing package: %s\n" % pkg, |
| level=logging.DEBUG, noiselevel=-1) |
| |
| try: |
| children = [child for child in self._dynamic_config.digraph.child_nodes(pkg) \ |
| if child is not pkg] |
| self._dynamic_config.digraph.remove(pkg) |
| except KeyError: |
| children = [] |
| |
| self._dynamic_config._package_tracker.discard_pkg(pkg) |
| |
| self._dynamic_config._parent_atoms.pop(pkg, None) |
| self._dynamic_config._set_nodes.discard(pkg) |
| |
| for child in children: |
| try: |
| self._dynamic_config._parent_atoms[child] = set((parent, atom) \ |
| for (parent, atom) in self._dynamic_config._parent_atoms[child] \ |
| if parent is not pkg) |
| except KeyError: |
| pass |
| |
| # Remove slot operator dependencies. |
| slot_key = (pkg.root, pkg.slot_atom) |
| if slot_key in self._dynamic_config._slot_operator_deps: |
| self._dynamic_config._slot_operator_deps[slot_key] = \ |
| [dep for dep in self._dynamic_config._slot_operator_deps[slot_key] \ |
| if dep.child is not pkg] |
| if not self._dynamic_config._slot_operator_deps[slot_key]: |
| del self._dynamic_config._slot_operator_deps[slot_key] |
| |
| # Remove blockers. |
| self._dynamic_config._blocker_parents.discard(pkg) |
| self._dynamic_config._irrelevant_blockers.discard(pkg) |
| self._dynamic_config._unsolvable_blockers.discard(pkg) |
| self._dynamic_config._blocked_pkgs.discard(pkg) |
| self._dynamic_config._blocked_world_pkgs.pop(pkg, None) |
| |
| for child in children: |
| if child in self._dynamic_config.digraph and \ |
| not self._dynamic_config.digraph.parent_nodes(child): |
| self._remove_pkg(child) |
| |
| # Clear caches. |
| self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache() |
| self._dynamic_config._highest_pkg_cache.clear() |
| |
| |
| def _check_masks(self, pkg): |
| |
| slot_key = (pkg.root, pkg.slot_atom) |
| |
| # Check for upgrades in the same slot that are |
| # masked due to a LICENSE change in a newer |
| # version that is not masked for any other reason. |
| other_pkg = self._frozen_config._highest_license_masked.get(slot_key) |
| if other_pkg is not None and pkg < other_pkg: |
| self._dynamic_config._masked_license_updates.add(other_pkg) |
| |
| def _add_parent_atom(self, pkg, parent_atom): |
| parent_atoms = self._dynamic_config._parent_atoms.get(pkg) |
| if parent_atoms is None: |
| parent_atoms = set() |
| self._dynamic_config._parent_atoms[pkg] = parent_atoms |
| parent_atoms.add(parent_atom) |
| |
| def _add_slot_operator_dep(self, dep): |
| slot_key = (dep.root, dep.child.slot_atom) |
| slot_info = self._dynamic_config._slot_operator_deps.get(slot_key) |
| if slot_info is None: |
| slot_info = [] |
| self._dynamic_config._slot_operator_deps[slot_key] = slot_info |
| slot_info.append(dep) |
| |
| def _add_pkg_deps(self, pkg, allow_unsatisfied=False): |
| |
| myroot = pkg.root |
| metadata = pkg._metadata |
| removal_action = "remove" in self._dynamic_config.myparams |
| eapi_attrs = _get_eapi_attrs(pkg.eapi) |
| |
		edepend = {}
| for k in Package._dep_keys: |
| edepend[k] = metadata[k] |
| |
| if not pkg.built and \ |
| "--buildpkgonly" in self._frozen_config.myopts and \ |
| "deep" not in self._dynamic_config.myparams: |
| edepend["RDEPEND"] = "" |
| edepend["PDEPEND"] = "" |
| |
| ignore_build_time_deps = False |
| if pkg.built and not removal_action: |
| if self._dynamic_config.myparams.get("bdeps", "n") == "y": |
				# Pull in build time deps as requested, but mark them as
| # "optional" since they are not strictly required. This allows |
| # more freedom in the merge order calculation for solving |
| # circular dependencies. Don't convert to PDEPEND since that |
| # could make --with-bdeps=y less effective if it is used to |
| # adjust merge order to prevent built_with_use() calls from |
| # failing. |
| pass |
| else: |
| ignore_build_time_deps = True |
| |
| if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n": |
| # Removal actions never traverse ignored buildtime |
| # dependencies, so it's safe to discard them early. |
| edepend["DEPEND"] = "" |
| edepend["HDEPEND"] = "" |
| ignore_build_time_deps = True |
| |
| ignore_depend_deps = ignore_build_time_deps |
| ignore_hdepend_deps = ignore_build_time_deps |
| |
| if removal_action: |
| depend_root = myroot |
| else: |
| if eapi_attrs.hdepend: |
| depend_root = myroot |
| else: |
| depend_root = self._frozen_config._running_root.root |
| root_deps = self._frozen_config.myopts.get("--root-deps") |
| if root_deps is not None: |
| if root_deps is True: |
| depend_root = myroot |
| elif root_deps == "rdeps": |
| ignore_depend_deps = True |
| |
| # If rebuild mode is not enabled, it's safe to discard ignored |
| # build-time dependencies. If you want these deps to be traversed |
| # in "complete" mode then you need to specify --with-bdeps=y. |
| if not self._rebuild.rebuild: |
| if ignore_depend_deps: |
| edepend["DEPEND"] = "" |
| if ignore_hdepend_deps: |
| edepend["HDEPEND"] = "" |
| |
| deps = ( |
| (depend_root, edepend["DEPEND"], |
| self._priority(buildtime=True, |
| optional=(pkg.built or ignore_depend_deps), |
| ignored=ignore_depend_deps)), |
| (self._frozen_config._running_root.root, edepend["HDEPEND"], |
| self._priority(buildtime=True, |
| optional=(pkg.built or ignore_hdepend_deps), |
| ignored=ignore_hdepend_deps)), |
| (myroot, edepend["RDEPEND"], |
| self._priority(runtime=True)), |
| (myroot, edepend["PDEPEND"], |
| self._priority(runtime_post=True)) |
| ) |
| |
| debug = "--debug" in self._frozen_config.myopts |
| |
| for dep_root, dep_string, dep_priority in deps: |
| if not dep_string: |
| continue |
| if debug: |
| writemsg_level("\nParent: %s\n" % (pkg,), |
| noiselevel=-1, level=logging.DEBUG) |
| writemsg_level("Depstring: %s\n" % (dep_string,), |
| noiselevel=-1, level=logging.DEBUG) |
|