blob: 6bdb8d97030a20a20a803cbbca2fecff67d29583 [file] [log] [blame]
#!/usr/bin/python -O
# Copyright 1999-2006 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id$
import sys

# This block ensures that ^C interrupts are handled quietly.
try:
    import signal

    # On SIGINT/SIGTERM: ignore any further signals of the same kind while
    # shutting down, then exit with status 1 instead of dumping a traceback.
    def exithandler(signum, frame):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        sys.exit(1)

    signal.signal(signal.SIGINT, exithandler)
    signal.signal(signal.SIGTERM, exithandler)
    # Restore default SIGPIPE behavior so that piping output through e.g.
    # `head` terminates the process instead of raising IOError everywhere.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

except KeyboardInterrupt:
    # A ^C delivered before the handlers were installed still exits quietly.
    sys.exit(1)
import os, stat
os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
try:
import portage
except ImportError:
from os import path as osp
sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
import portage
del os.environ["PORTAGE_LEGACY_GLOBALS"]
from portage import digraph
import emergehelp, xpak, commands, errno, re, socket, time, types
import output
from output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
havecolor, nc_len, nocolor, red, teal, turquoise, white, xtermTitle, \
xtermTitleReset, yellow
from output import create_color_func
good = create_color_func("GOOD")
bad = create_color_func("BAD")
import portage_dep
portage_dep._dep_check_strict = True
import portage_util
import portage_locks
import portage_exception
from portage_data import secpass
from portage_util import normalize_path as normpath
if not hasattr(__builtins__, "set"):
from sets import Set as set
from itertools import chain, izip
from UserDict import DictMixin
try:
import cPickle
except ImportError:
import pickle as cPickle
class stdout_spinner(object):
    """Progress indicator written to stdout while long-running operations
    (cache generation, dependency calculation) are in progress.  Callers
    invoke self.update(), which is bound to one of the update_* styles."""

    scroll_msgs = [
        "Gentoo Rocks ("+os.uname()[0]+")",
        "Thank you for using Gentoo. :)",
        "Are you actually trying to read this?",
        "How many times have you stared at this?",
        "We are generating the cache right now",
        "You are paying too much attention.",
        "A theory is better than its explanation.",
        "Phasers locked on target, Captain.",
        "Thrashing is just virtual crashing.",
        "To be is to program.",
        "Real Users hate Real Programmers.",
        "When all else fails, read the instructions.",
        "Functionality breeds Contempt.",
        "The future lies ahead.",
        "3.1415926535897932384626433832795028841971694",
        "Sometimes insanity is the only alternative.",
        "Inaccuracy saves a world of explanation.",
    ]

    twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

    def __init__(self):
        self.spinpos = 0
        # Twirl is the default style; callers may rebind self.update to any
        # of the other update_* methods.
        self.update = self.update_twirl
        # Pick a scroll message pseudo-randomly from the current time.
        self.scroll_sequence = self.scroll_msgs[
            int(time.time() * 100) % len(self.scroll_msgs)]

    def update_basic(self):
        """Emit one dot every 100th call; an extra space every 500th."""
        self.spinpos = (self.spinpos + 1) % 500
        if (self.spinpos % 100) == 0:
            if self.spinpos:
                sys.stdout.write(".")
            else:
                sys.stdout.write(". ")
            sys.stdout.flush()

    def update_scroll(self):
        """Scroll the chosen message across the line, forward then back."""
        seq = self.scroll_sequence
        pos = self.spinpos
        if pos < len(seq):
            sys.stdout.write(green("\b " + seq[pos]))
        else:
            # Second half of the cycle: run the message backwards.
            sys.stdout.write(darkgreen(" \b\b\b" + seq[
                len(seq) - 1 - (pos % len(seq))]))
        sys.stdout.flush()
        self.spinpos = (pos + 1) % (2 * len(seq))

    def update_twirl(self):
        """Rotate a single spinner character in place."""
        seq = self.twirl_sequence
        self.spinpos = (self.spinpos + 1) % len(seq)
        sys.stdout.write("\b\b " + seq[self.spinpos])
        sys.stdout.flush()

    def update_quiet(self):
        """No-op spinner for --quiet / --nospinner operation."""
        return
def userquery(prompt, responses=None, colours=None):
"""Displays a prompt and a set of responses, then waits for a response
which is checked against the responses and the first to match is
returned. An empty response will match the first value in responses. The
input buffer is *not* cleared prior to the prompt!
prompt: a String.
responses: a List of Strings.
colours: a List of Functions taking and returning a String, used to
process the responses for display. Typically these will be functions
like red() but could be e.g. lambda x: "DisplayString".
If responses is omitted, defaults to ["Yes", "No"], [green, red].
If only colours is omitted, defaults to [bold, ...].
Returns a member of the List responses. (If called without optional
arguments, returns "Yes" or "No".)
KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
printed."""
if responses is None:
responses, colours = ["Yes", "No"], [green, red]
elif colours is None:
colours=[bold]
colours=(colours*len(responses))[:len(responses)]
print bold(prompt),
try:
while True:
response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
for key in responses:
# An empty response will match the first value in responses.
if response.upper()==key[:len(response)].upper():
return key
print "Sorry, response '%s' not understood." % response,
except (EOFError, KeyboardInterrupt):
print "Interrupted."
sys.exit(1)
# All recognized non-package actions (invoked as `emerge <action>`).
actions = [
    "clean", "config", "depclean",
    "info", "metadata",
    "prune", "regen", "search",
    "sync", "system", "unmerge", "world",
]

# All recognized long command line options.
options = [
    "--ask", "--alphabetical",
    "--buildpkg", "--buildpkgonly",
    "--changelog", "--columns",
    "--debug", "--deep",
    "--digest",
    "--emptytree",
    "--fetchonly", "--fetch-all-uri",
    "--getbinpkg", "--getbinpkgonly",
    "--help", "--ignore-default-opts",
    "--noconfmem",
    "--newuse", "--nocolor",
    "--nodeps", "--noreplace",
    "--nospinner", "--oneshot",
    "--onlydeps", "--pretend",
    "--quiet", "--resume",
    "--searchdesc", "--selective",
    "--skipfirst",
    "--tree",
    "--update",
    "--usepkg", "--usepkgonly",
    "--verbose", "--version"
]

# Maps single-letter short options to their long form.  Note that some
# entries ("--clean", "--unmerge", "--prune", "--search") correspond to
# actions rather than to members of the options list above.
shortmapping = {
    "1":"--oneshot",
    "a":"--ask",
    "b":"--buildpkg", "B":"--buildpkgonly",
    "c":"--clean", "C":"--unmerge",
    "d":"--debug", "D":"--deep",
    "e":"--emptytree",
    "f":"--fetchonly", "F":"--fetch-all-uri",
    "g":"--getbinpkg", "G":"--getbinpkgonly",
    "h":"--help",
    "k":"--usepkg", "K":"--usepkgonly",
    "l":"--changelog",
    "n":"--noreplace", "N":"--newuse",
    "o":"--onlydeps", "O":"--nodeps",
    "p":"--pretend", "P":"--prune",
    "q":"--quiet",
    "s":"--search", "S":"--searchdesc",
    't':"--tree",
    "u":"--update",
    "v":"--verbose", "V":"--version"
}
def emergelog(xterm_titles, mystr, short_msg=None):
if xterm_titles:
if short_msg:
xtermTitle(short_msg)
else:
xtermTitle(mystr)
try:
file_path = "/var/log/emerge.log"
mylogfile = open(file_path, "a")
portage_util.apply_secpass_permissions(file_path,
uid=portage.portage_uid, gid=portage.portage_gid,
mode=0660)
mylock = None
try:
mylock = portage_locks.lockfile(mylogfile)
# seek because we may have gotten held up by the lock.
# if so, we may not be positioned at the end of the file.
mylogfile.seek(0, 2)
mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
mylogfile.flush()
finally:
if mylock:
portage_locks.unlockfile(mylock)
mylogfile.close()
except (IOError,OSError,portage_exception.PortageException), e:
if secpass >= 1:
print >> sys.stderr, "emergelog():",e
def countdown(secs=5, doing="Starting"):
if secs:
print ">>> Waiting",secs,"seconds before starting..."
print ">>> (Control-C to abort)...\n"+doing+" in: ",
ticks=range(secs)
ticks.reverse()
for sec in ticks:
sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
sys.stdout.flush()
time.sleep(1)
print
# formats a size given in bytes nicely
def format_size(mysize):
    """Return `mysize` (a byte count) as a comma-grouped "N kB" string.
    Anything that is not an int/long is returned unchanged via str()."""
    if type(mysize) not in (types.IntType, types.LongType):
        return str(mysize)
    remainder = mysize % 1024
    if remainder:
        # Always round up to the next kB so that it doesn't show 0 kB when
        # some small file still needs to be fetched.
        mysize += 1024 - remainder
    kb_digits = str(mysize / 1024)
    # Insert thousands separators, working right to left.
    idx = len(kb_digits)
    while idx > 3:
        idx -= 3
        kb_digits = kb_digits[:idx] + "," + kb_digits[idx:]
    return kb_digits + " kB"
def getgccversion(chost):
    """
    rtype: C{str}
    return: the current in-use gcc version formatted as "gcc-<version>",
        or "[unavailable]" (after printing a warning) when no gcc can
        be located.
    """
    gcc_ver_command = 'gcc -dumpversion'
    gcc_ver_prefix = 'gcc-'
    gcc_not_found_error = red(
        "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
        "!!! to update the environment of this terminal and possibly\n" +
        "!!! other terminals also.\n"
    )
    # Most authoritative source first: eselect-compiler reports the active
    # profile as "<chost>-<version>/<specs>".
    status, out = commands.getstatusoutput("eselect compiler show")
    if status == os.EX_OK and out.count("/") == 1:
        profile = out.split("/")[0]
        if profile.startswith(chost + "-"):
            return out.replace(chost + "-", gcc_ver_prefix, 1)
    # Next, gcc-config's notion of the current profile.
    status, out = commands.getstatusoutput("gcc-config -c")
    if status == os.EX_OK and out.startswith(chost + "-"):
        return out.replace(chost + "-", gcc_ver_prefix, 1)
    # Finally, ask the compiler itself (CHOST-prefixed, then plain).
    status, out = commands.getstatusoutput(
        chost + "-" + gcc_ver_command)
    if status == os.EX_OK:
        return gcc_ver_prefix + out
    status, out = commands.getstatusoutput(gcc_ver_command)
    if status == os.EX_OK:
        return gcc_ver_prefix + out
    portage.writemsg(gcc_not_found_error, noiselevel=-1)
    return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
    """Build the "Portage <ver> (<profile>, <gcc>, <libc>, <kernel> <arch>)"
    banner string used by --version and --info."""
    profilever = "unavailable"
    if profile:
        realpath = os.path.realpath(profile)
        basepath = os.path.realpath(os.path.join(portdir, "profiles"))
        if realpath.startswith(basepath):
            # Profile lives under $PORTDIR/profiles: show the relative path.
            profilever = realpath[1 + len(basepath):]
        else:
            # Out-of-tree profile: show the raw symlink target, marked "!".
            try:
                profilever = "!" + os.readlink(profile)
            except OSError:
                pass
        del realpath, basepath
    # Collect installed libc versions from both virtuals.
    libclist = portage_util.unique_array(
        vardb.match("virtual/libc") + vardb.match("virtual/glibc"))
    libc_versions = []
    for libc_cpv in libclist:
        parts = portage.catpkgsplit(libc_cpv)
        libc_versions.append("-".join(parts[1:]))
    if libc_versions:
        libcver = ",".join(libc_versions)
    else:
        libcver = "unavailable"
    gccver = getgccversion(chost)
    unameout = os.uname()[2] + " " + os.uname()[4]
    return "Portage %s (%s, %s, %s, %s)" % (
        portage.VERSION, profilever, gccver, libcver, unameout)
def create_depgraph_params(myopts, myaction):
    """Translate command line options into emerge engine parameters.

    Parameter meanings:
      self:      include _this_ package regardless of if it is merged.
      selective: exclude the package if it is merged
      recurse:   go into the dependencies
      deep:      go into the dependencies of already merged packages
      empty:     pretend nothing is merged
    """
    wanted = []
    unwanted = []
    if myaction in ("system", "world") or \
            "--update" in myopts or \
            "--newuse" in myopts or \
            "--noreplace" in myopts:
        wanted.append("selective")
    if "--emptytree" in myopts:
        # --emptytree forces everything to be (re)merged, which is the
        # opposite of selective behavior.
        wanted.append("empty")
        unwanted.append("selective")
    if "--nodeps" in myopts:
        unwanted.append("recurse")
    if "--deep" in myopts:
        wanted.append("deep")
    myparams = ["recurse"]
    for flag in wanted:
        if flag not in myparams and flag not in unwanted:
            myparams.append(flag)
    for flag in unwanted:
        if flag in myparams:
            myparams.remove(flag)
    return myparams
# search functionality
class search:
    """Searches installed and available packages for a search key.
    execute() populates self.matches; output() pretty-prints them."""
    #
    # class constants
    #
    # Detail levels for getVersion(): RELEASE includes the -rN revision
    # suffix (when not r0), SHORT omits it.
    VERSION_SHORT=1
    VERSION_RELEASE=2

    #
    # public interface
    #
    def __init__(self, settings, portdb, vartree, spinner, searchdesc,
        verbose):
        """Searches the available and installed packages for the supplied search key.
        The list of available and installed packages is created at object instantiation.
        This makes successive searches faster."""
        self.settings = settings
        self.portdb = portdb
        self.vartree = vartree
        self.spinner = spinner
        self.verbose = verbose
        self.searchdesc = searchdesc

    def execute(self,searchkey):
        """Performs the search for the supplied search key"""
        match_category = 0
        self.searchkey=searchkey
        self.packagematches = []
        if self.searchdesc:
            self.searchdesc=1
            self.matches = {"pkg":[], "desc":[]}
        else:
            self.searchdesc=0
            self.matches = {"pkg":[]}
        print "Searching... ",
        regexsearch = False
        # A leading "%" makes the key a regular expression; a leading "@"
        # (checked after "%" is stripped) matches against the full
        # "category/package" string instead of just the package name.
        if self.searchkey.startswith('%'):
            regexsearch = True
            self.searchkey = self.searchkey[1:]
        if self.searchkey.startswith('@'):
            match_category = 1
            self.searchkey = self.searchkey[1:]
        if regexsearch:
            self.searchre=re.compile(self.searchkey,re.I)
        else:
            # Plain keys are case-insensitive literal substring matches.
            self.searchre=re.compile(re.escape(self.searchkey), re.I)
        for package in self.portdb.cp_all():
            self.spinner.update()
            if match_category:
                match_string = package[:]
            else:
                match_string = package.split("/")[-1]
            masked=0
            if self.searchre.search(match_string):
                # Name match: record it, flagged masked when no visible
                # ebuild satisfies the package.
                if not self.portdb.xmatch("match-visible", package):
                    masked=1
                self.matches["pkg"].append([package,masked])
            elif self.searchdesc: # DESCRIPTION searching
                full_package = self.portdb.xmatch("bestmatch-visible", package)
                if not full_package:
                    #no match found; we don't want to query description
                    full_package = portage.best(
                        self.portdb.xmatch("match-all", package))
                    if not full_package:
                        continue
                    else:
                        masked=1
                try:
                    full_desc = self.portdb.aux_get(
                        full_package, ["DESCRIPTION"])[0]
                except KeyError:
                    print "emerge: search: aux_get() failed, skipping"
                    continue
                if self.searchre.search(full_desc):
                    self.matches["desc"].append([full_package,masked])
        self.mlen=0
        # Sort each result list and total up the number of matches.
        for mtype in self.matches.keys():
            self.matches[mtype].sort()
            self.mlen += len(self.matches[mtype])

    def output(self):
        """Outputs the results of the search."""
        print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
        print "[ Applications found : "+white(str(self.mlen))+" ]"
        print " "
        for mtype in self.matches.keys():
            for match,masked in self.matches[mtype]:
                if mtype=="pkg":
                    catpack=match
                    full_package = self.portdb.xmatch(
                        "bestmatch-visible", match)
                    if not full_package:
                        #no match found; we don't want to query description
                        masked=1
                        full_package = portage.best(
                            self.portdb.xmatch("match-all",match))
                else:
                    # Description matches already carry a full cpv; reduce
                    # it to category/package for display.
                    full_package = match
                    match = portage.pkgsplit(match)[0]
                if full_package:
                    try:
                        desc, homepage, license = self.portdb.aux_get(
                            full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
                    except KeyError:
                        print "emerge: search: aux_get() failed, skipping"
                        continue
                    if masked:
                        print green("*")+" "+white(match)+" "+red("[ Masked ]")
                    else:
                        print green("*")+" "+white(match)
                    myversion = self.getVersion(full_package, search.VERSION_RELEASE)
                    mysum = [0,0]
                    mycat = match.split("/")[0]
                    mypkg = match.split("/")[1]
                    mycpv = match + "-" + myversion
                    myebuild = self.portdb.findname(mycpv)
                    pkgdir = os.path.dirname(myebuild)
                    import portage_manifest
                    mf = portage_manifest.Manifest(
                        pkgdir, self.settings["DISTDIR"])
                    fetchlist = self.portdb.getfetchlist(mycpv,
                        mysettings=self.settings, all=True)[1]
                    # Total distfiles size, formatted with thousands
                    # separators (same grouping logic as format_size()).
                    try:
                        mysum[0] = mf.getDistfilesSize(fetchlist)
                        mystr = str(mysum[0]/1024)
                        mycount=len(mystr)
                        while (mycount > 3):
                            mycount-=3
                            mystr=mystr[:mycount]+","+mystr[mycount:]
                        mysum[0]=mystr+" kB"
                    except KeyError, e:
                        mysum[0] = "Unknown (missing digest for %s)" % str(e)
                    if self.verbose:
                        print " ", darkgreen("Latest version available:"),myversion
                        print " ", self.getInstallationStatus(mycat+'/'+mypkg)
                        print " ", darkgreen("Size of files:"),mysum[0]
                        print " ", darkgreen("Homepage:")+" ",homepage
                        print " ", darkgreen("Description:")+" ",desc
                        print " ", darkgreen("License:")+" ",license
                        print
        print

    #
    # private interface
    #
    def getInstallationStatus(self,package):
        # Returns a display line naming the best installed version of
        # `package`, or "[ Not Installed ]" when nothing matches.
        installed_package = self.vartree.dep_bestmatch(package)
        result = ""
        version = self.getVersion(installed_package,search.VERSION_RELEASE)
        if len(version) > 0:
            result = darkgreen("Latest version installed:")+" "+version
        else:
            result = darkgreen("Latest version installed:")+" [ Not Installed ]"
        return result

    def getVersion(self,full_package,detail):
        # Extracts the version string from a cpv; VERSION_RELEASE appends
        # the -rN revision unless it is the implicit 'r0'.  Returns "" for
        # an effectively empty cpv.
        if len(full_package) > 1:
            package_parts = portage.catpkgsplit(full_package)
            if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
                result = package_parts[2]+ "-" + package_parts[3]
            else:
                result = package_parts[2]
        else:
            result = ""
        return result
#build our package digraph
def getlist(settings, mode):
if mode=="system":
mylines = settings.packages
elif mode=="world":
try:
file_path = os.path.join(settings["ROOT"], portage.WORLD_FILE)
myfile = open(file_path, "r")
mylines = myfile.readlines()
myfile.close()
except (OSError, IOError), e:
if e.errno == errno.ENOENT:
portage.writemsg("\n!!! World file does not exist: '%s'\n" % file_path)
mylines=[]
else:
raise
mynewlines=[]
for x in mylines:
myline=" ".join(x.split())
if not len(myline):
continue
elif myline[0]=="#":
continue
elif mode=="system":
if myline[0]!="*":
continue
myline=myline[1:]
mynewlines.append(myline.strip())
return mynewlines
def clean_world(vardb, cpv):
"""Remove a package from the world file when unmerged."""
world_filename = os.path.join(vardb.root, portage.WORLD_FILE)
worldlist = portage_util.grabfile(world_filename)
mykey = portage.cpv_getkey(cpv)
newworldlist = []
for x in worldlist:
if portage.dep_getkey(x) == mykey:
matches = vardb.match(x, use_cache=0)
if not matches:
#zap our world entry
pass
elif len(matches) == 1 and matches[0] == cpv:
#zap our world entry
pass
else:
#others are around; keep it.
newworldlist.append(x)
else:
#this doesn't match the package we're unmerging; keep it.
newworldlist.append(x)
portage_util.ensure_dirs(os.path.join(vardb.root, portage.PRIVATE_PATH),
gid=portage.portage_gid, mode=02770)
portage_util.write_atomic(world_filename, "\n".join(newworldlist))
def genericdict(mylist):
    """Map each atom in `mylist` to itself, keyed by its category/package
    key (later entries with the same key overwrite earlier ones)."""
    return dict((portage.dep_getkey(atom), atom) for atom in mylist)
def filter_iuse_defaults(iuse):
    """Yield the IUSE flag names from `iuse` with any leading "+"/"-"
    default marker stripped."""
    for flag in iuse:
        if flag[:1] in ("+", "-"):
            yield flag[1:]
        else:
            yield flag
class DepPriority(object):
    """
    This class generates an integer priority level based of various
    attributes of the dependency relationship. Attributes can be assigned
    at any time and the new integer value will be generated on calls to the
    __int__() method. Rich comparison operators are supported.

    The boolean attributes that affect the integer value are "satisfied",
    "buildtime", "runtime", "runtime_post", and "rebuild". Various
    combinations of attributes lead to the following priority levels
    (the first matching row in __int__() wins):

    Combination of properties           Priority level

    not satisfied and buildtime            0
    not satisfied and runtime             -1
    not satisfied and runtime_post        -2
    not satisfied and rebuild             -3
    satisfied and buildtime               -4
    satisfied and runtime                 -5
    satisfied and runtime_post            -6
    (none of the above)                   -6

    Several integer constants are defined for categorization of priority
    levels:

    MEDIUM      The upper boundary for medium dependencies.
    MEDIUM_SOFT The upper boundary for medium-soft dependencies.
    SOFT        The upper boundary for soft dependencies.
    MIN         The lower boundary for soft dependencies.
    """
    __slots__ = ("__weakref__", "satisfied", "buildtime", "runtime", "runtime_post", "rebuild")
    MEDIUM = -1
    MEDIUM_SOFT = -2
    SOFT = -3
    MIN = -6

    def __init__(self, **kwargs):
        # Every slot attribute defaults to False unless passed in kwargs.
        for myattr in self.__slots__:
            if myattr == "__weakref__":
                continue
            myvalue = kwargs.get(myattr, False)
            setattr(self, myattr, myvalue)

    def __int__(self):
        if not self.satisfied:
            if self.buildtime:
                return 0
            if self.runtime:
                return -1
            if self.runtime_post:
                return -2
            if self.rebuild:
                return -3
        # Note: an unsatisfied dependency with none of the above flags
        # falls through and ends up at -6, same as the final default.
        if self.buildtime:
            return -4
        if self.runtime:
            return -5
        if self.runtime_post:
            return -6
        return -6

    def __lt__(self, other):
        return self.__int__() < other

    def __le__(self, other):
        return self.__int__() <= other

    def __eq__(self, other):
        return self.__int__() == other

    def __ne__(self, other):
        return self.__int__() != other

    def __gt__(self, other):
        return self.__int__() > other

    def __ge__(self, other):
        return self.__int__() >= other

    def copy(self):
        import copy
        return copy.copy(self)

    def __str__(self):
        # Bucket the numeric priority into the named categories used for
        # display ("hard" > MEDIUM > "medium" > MEDIUM_SOFT > ...).
        myvalue = self.__int__()
        if myvalue > self.MEDIUM:
            return "hard"
        if myvalue > self.MEDIUM_SOFT:
            return "medium"
        if myvalue > self.SOFT:
            return "medium-soft"
        return "soft"
class FakeVartree(portage.vartree):
    """This is implements an in-memory copy of a vartree instance that provides
    all the interfaces required for use by the depgraph.  The vardb is locked
    during the constructor call just long enough to read a copy of the
    installed package information.  This allows the depgraph to do it's
    dependency calculations without holding a lock on the vardb.  It also
    allows things like vardb global updates to be done in memory so that the
    user doesn't necessarily need write access to the vardb in cases where
    global updates are necessary (updates are performed when necessary if there
    is not a matching ebuild in the tree)."""
    def __init__(self, real_vartree, portdb):
        self.root = real_vartree.root
        self.settings = real_vartree.settings
        # The in-memory copy of the installed-package database.
        self.dbapi = portage.fakedbapi(settings=real_vartree.settings)
        vdb_path = os.path.join(self.root, portage.VDB_PATH)
        try:
            # At least the parent needs to exist for the lock file.
            portage_util.ensure_dirs(vdb_path)
        except portage_exception.PortageException:
            pass
        vdb_lock = None
        try:
            # Only lock when we can write; read-only users proceed unlocked.
            if os.access(vdb_path, os.W_OK):
                vdb_lock = portage_locks.lockdir(vdb_path)
            mykeys = ["SLOT", "COUNTER", "PROVIDE", "USE", "IUSE",
                "DEPEND", "RDEPEND", "PDEPEND"]
            real_dbapi = real_vartree.dbapi
            slot_counters = {}
            for cpv in real_dbapi.cpv_all():
                metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
                myslot = metadata["SLOT"]
                mycp = portage.dep_getkey(cpv)
                myslot_atom = "%s:%s" % (mycp, myslot)
                try:
                    mycounter = long(metadata["COUNTER"])
                except ValueError:
                    # Unparsable COUNTER: normalize it to 0.
                    mycounter = 0
                    metadata["COUNTER"] = str(mycounter)
                # When multiple packages occupy the same slot atom, keep
                # only the one with the highest COUNTER (most recent merge).
                other_counter = slot_counters.get(myslot_atom, None)
                if other_counter is not None:
                    if other_counter > mycounter:
                        continue
                slot_counters[myslot_atom] = mycounter
                self.dbapi.cpv_inject(cpv, metadata=metadata)
            real_dbapi.flush_cache()
        finally:
            if vdb_lock:
                portage_locks.unlockdir(vdb_lock)
        # Populate the old-style virtuals using the cached values.
        if not self.settings.treeVirtuals:
            self.settings.treeVirtuals = portage_util.map_dictlist_vals(
                portage.getCPFromCPV, self.get_all_provides())
        # Intialize variables needed for lazy cache pulls of the live ebuild
        # metadata.  This ensures that the vardb lock is released ASAP, without
        # being delayed in case cache generation is triggered.
        self._aux_get = self.dbapi.aux_get
        self.dbapi.aux_get = self._aux_get_wrapper
        self._aux_get_history = set()
        self._portdb_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
        self._portdb = portdb
        self._global_updates = None

    def _aux_get_wrapper(self, pkg, wants):
        # On the first aux_get for a given cpv, refresh its *DEPEND values
        # from the live ebuild (or apply global updates when no ebuild
        # matches); afterwards, serve directly from the in-memory dbapi.
        if pkg in self._aux_get_history:
            return self._aux_get(pkg, wants)
        self._aux_get_history.add(pkg)
        try:
            # Use the live ebuild metadata if possible.
            live_metadata = dict(izip(self._portdb_keys,
                self._portdb.aux_get(pkg, self._portdb_keys)))
            self.dbapi.aux_update(pkg, live_metadata)
        except (KeyError, portage_exception.PortageException):
            # No matching ebuild in the tree; fall back to applying
            # profiles/updates entries to the cached vdb metadata.
            if self._global_updates is None:
                self._global_updates = \
                    grab_global_updates(self._portdb.porttree_root)
            perform_global_updates(
                pkg, self.dbapi, self._global_updates)
        return self._aux_get(pkg, wants)
def grab_global_updates(portdir):
    """Read all package update commands from `portdir`/profiles/updates.

    Returns a flat list of parsed update commands (in file order); an
    empty list when the updates directory does not exist.  Parse errors
    reported by parse_updates() are deliberately ignored here.
    """
    from portage_update import grab_updates, parse_updates
    updpath = os.path.join(portdir, "profiles", "updates")
    try:
        rawupdates = grab_updates(updpath)
    except portage_exception.DirectoryNotFound:
        rawupdates = []
    upd_commands = []
    for mykey, mystat, mycontent in rawupdates:
        # NOTE: the local must not be named "commands" -- that would shadow
        # the module-level `import commands` used elsewhere in this file.
        parsed_commands, errors = parse_updates(mycontent)
        upd_commands.extend(parsed_commands)
    return upd_commands
def perform_global_updates(mycpv, mydb, mycommands):
    """Apply the given update commands to the *DEPEND metadata of `mycpv`
    in `mydb`, writing back only when something actually changed."""
    from portage_update import update_dbentries
    dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    dep_values = mydb.aux_get(mycpv, dep_keys)
    updates = update_dbentries(mycommands, dict(izip(dep_keys, dep_values)))
    if updates:
        mydb.aux_update(mycpv, updates)
class BlockerCache(DictMixin):
    """This caches blockers of installed packages so that dep_check does not
    have to be done for every single installed package on every invocation of
    emerge.  The cache is invalidated whenever it is detected that something
    has changed that might alter the results of dep_check() calls:
    1) the set of installed packages (including COUNTER) has changed
    2) the old-style virtuals have changed
    """
    class BlockerData(object):
        # Per-cpv cache value: the vdb COUNTER at the time the blocker
        # atoms were computed, plus the computed atoms themselves.
        def __init__(self, counter, atoms):
            self.counter = counter
            self.atoms = atoms

    def __init__(self, myroot, vardb):
        self._vardb = vardb
        self._installed_pkgs = set(vardb.cpv_all())
        self._virtuals = vardb.settings.getvirtuals()
        self._cache_filename = os.path.join(myroot,
            portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
        self._cache_version = "1"
        self._cache_data = None
        self._modified = False
        self._load()

    def _load(self):
        # Best-effort read of the pickled cache; any failure simply leaves
        # self._cache_data unset so a fresh cache is built below.
        try:
            f = open(self._cache_filename)
            mypickle = cPickle.Unpickler(f)
            # Disallow resolution of arbitrary globals while unpickling.
            mypickle.find_global = None
            self._cache_data = mypickle.load()
            f.close()
            del f
        except (IOError, OSError, EOFError, cPickle.UnpicklingError):
            pass
        # Valid only if version and virtuals match and the cached blocker
        # keys cover exactly the currently installed package set...
        cache_valid = self._cache_data and \
            isinstance(self._cache_data, dict) and \
            self._cache_data.get("version") == self._cache_version and \
            self._cache_data.get("virtuals") == self._virtuals and \
            set(self._cache_data.get("blockers", [])) == self._installed_pkgs
        if cache_valid:
            # ...and if no installed package has been re-merged (COUNTER
            # changed) since the cache was written.
            for pkg in self._installed_pkgs:
                if long(self._vardb.aux_get(pkg, ["COUNTER"])[0]) != \
                        self[pkg].counter:
                    cache_valid = False
                    break
        if not cache_valid:
            # Start over with an empty cache.
            self._cache_data = {"version":self._cache_version}
            self._cache_data["blockers"] = {}
            self._cache_data["virtuals"] = self._virtuals
        self._modified = False

    def flush(self):
        """If the current user has permission and the internal blocker cache
        been updated, save it to disk and mark it unmodified.  This is called
        by emerge after it has proccessed blockers for all installed packages.
        Currently, the cache is only written if the user has superuser
        privileges (since that's required to obtain a lock), but all users
        have read access and benefit from faster blocker lookups (as long as
        the entire cache is still valid).  The cache is stored as a pickled
        dict object with the following format:

        {
            version : "1",
            "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
            "virtuals" : vardb.settings.getvirtuals()
        }
        """
        if self._modified and \
            secpass >= 2:
            try:
                f = portage_util.atomic_ofstream(self._cache_filename)
                cPickle.dump(self._cache_data, f, -1)
                f.close()
                portage_util.apply_secpass_permissions(
                    self._cache_filename, gid=portage.portage_gid, mode=0644)
            except (IOError, OSError), e:
                # Best effort: a failed cache write is not fatal.
                pass
        self._modified = False

    def __setitem__(self, cpv, blocker_data):
        """
        Update the cache and mark it as modified for a future call to
        self.flush().

        @param cpv: Package for which to cache blockers.
        @type cpv: String
        @param blocker_data: An object with counter and atoms attributes.
        @type blocker_data: BlockerData
        """
        self._cache_data["blockers"][cpv] = \
            (blocker_data.counter, blocker_data.atoms)
        self._modified = True

    def __getitem__(self, cpv):
        """
        @rtype: BlockerData
        @returns: An object with counter and atoms attributes.
        """
        return self.BlockerData(*self._cache_data["blockers"][cpv])

    def keys(self):
        """This needs to be implemented so that self.__repr__() doesn't raise
        an AttributeError."""
        if self._cache_data and "blockers" in self._cache_data:
            return self._cache_data["blockers"].keys()
        return []
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
from formatter import AbstractFormatter, DumbWriter
f = AbstractFormatter(DumbWriter(maxcol=72))
print "\n\n!!! Invalid or corrupt dependency specification: "
print
print error_msg
print
print parent_node
print
print depstring
print
p_type, p_root, p_key, p_status = parent_node
msg = []
if p_status == "nomerge":
category, pf = portage.catsplit(p_key)
pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
msg.append("Portage is unable to process the dependencies of the ")
msg.append("'%s' package. " % p_key)
msg.append("In order to correct this problem, the package ")
msg.append("should be uninstalled, reinstalled, or upgraded. ")
msg.append("As a temporary workaround, the --nodeps option can ")
msg.append("be used to ignore all dependencies. For reference, ")
msg.append("the problematic dependencies can be found in the ")
msg.append("*DEPEND files located in '%s/'." % pkg_location)
else:
msg.append("This package can not be installed. ")
msg.append("Please notify the '%s' package maintainer " % p_key)
msg.append("about this problem.")
for x in msg:
f.add_flowing_data(x)
f.end_paragraph(1)
class depgraph:
    # Maps a package node's "type" element to the tree (in self.trees)
    # that packages of that type come from.
    pkg_tree_map = {
        "ebuild":"porttree",
        "binary":"bintree",
        "installed":"vartree"}
    def __init__(self, settings, trees, myopts, myparams, spinner):
        self.settings = settings
        self.target_root = settings["ROOT"]
        self.myopts = myopts
        self.myparams = myparams
        self.edebug = 0
        if settings.get("PORTAGE_DEBUG", "") == "1":
            self.edebug = 1
        self.spinner = spinner
        self.pkgsettings = {}
        # Maps cpv to digraph node for all nodes added to the graph.
        self.pkg_node_map = {}
        # Maps slot atom to digraph node for all nodes added to the graph.
        self._slot_node_map = {}
        self.mydbapi = {}
        self._mydbapi_keys = ["SLOT", "DEPEND", "RDEPEND", "PDEPEND"]
        self.useFlags = {}
        self.trees = {}
        for myroot in trees:
            self.trees[myroot] = {}
            for tree in ("porttree", "bintree"):
                self.trees[myroot][tree] = trees[myroot][tree]
            # Wrap the real vartree so dependency calculation does not hold
            # a lock on the vdb (see FakeVartree).
            self.trees[myroot]["vartree"] = \
                FakeVartree(trees[myroot]["vartree"],
                trees[myroot]["porttree"].dbapi)
            self.pkgsettings[myroot] = portage.config(
                clone=self.trees[myroot]["vartree"].settings)
            self.pkg_node_map[myroot] = {}
            self._slot_node_map[myroot] = {}
            vardb = self.trees[myroot]["vartree"].dbapi
            # This fakedbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = portage.fakedbapi(settings=self.pkgsettings[myroot])
            self.mydbapi[myroot] = fakedb
            if "--nodeps" not in self.myopts and \
                    "--buildpkgonly" not in self.myopts:
                # --nodeps bypasses this, since it isn't needed in this case
                # and the cache pulls might trigger (slow) cache generation.
                for pkg in vardb.cpv_all():
                    self.spinner.update()
                    fakedb.cpv_inject(pkg,
                        metadata=dict(izip(self._mydbapi_keys,
                        vardb.aux_get(pkg, self._mydbapi_keys))))
            del vardb, fakedb
            self.useFlags[myroot] = {}
            if "--usepkg" in self.myopts:
                self.trees[myroot]["bintree"].populate(
                    "--getbinpkg" in self.myopts,
                    "--getbinpkgonly" in self.myopts)
        del trees
        self.missingbins = []
        self.digraph = portage.digraph()
        # Tracks simple parent/child relationships (PDEPEND relationships are
        # not reversed).
        self._parent_child_digraph = digraph()
        self.orderedkeys = []
        self.outdatedpackages = []
        self.args_keys = []
        self.blocker_digraph = digraph()
        self.blocker_parents = {}
        self._unresolved_blocker_parents = {}
        self._slot_collision_info = []
        # Slot collision nodes are not allowed to block other packages since
        # blocker validation is only able to account for one package per slot.
        self._slot_collision_nodes = set()
        self._altlist_cache = {}
        self._pprovided_args = []
    def _show_slot_collision_notice(self, packages):
        """Show an informational message advising the user to mask one of the
        the packages.  In some cases it may be possible to resolve this
        automatically, but support for backtracking (removal nodes that have
        already been selected) will be required in order to handle all possible
        cases.

        `packages` is an iterable of (node, parents) pairs; the colliding
        node and the parent nodes that pulled it into the graph."""
        # First, an unconditional summary of the colliding nodes on stderr.
        msg = []
        msg.append("\n!!! Multiple versions within a single " + \
            "package slot have been \n")
        msg.append("!!! pulled into the dependency graph:\n\n")
        for node, parents in packages:
            msg.append(str(node))
            if parents:
                msg.append(" pulled in by\n")
                for parent in parents:
                    msg.append("  ")
                    msg.append(str(parent))
                    msg.append("\n")
            else:
                msg.append(" (no parents)\n")
            msg.append("\n")
        sys.stderr.write("".join(msg))
        sys.stderr.flush()
        if "--quiet" in self.myopts:
            return
        # Then, unless --quiet, word-wrapped advice about how to resolve it.
        msg = []
        msg.append("It may be possible to solve this problem ")
        msg.append("by using package.mask to prevent one of ")
        msg.append("those packages from being selected. ")
        msg.append("However, it is also possible that conflicting ")
        msg.append("dependencies exist such that they are impossible to ")
        msg.append("satisfy simultaneously.  If such a conflict exists in ")
        msg.append("the dependencies of two different packages, then those ")
        msg.append("packages can not be installed simultaneously.")
        from formatter import AbstractFormatter, DumbWriter
        f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
        for x in msg:
            f.add_flowing_data(x)
        f.end_paragraph(1)
        msg = []
        msg.append("For more information, see MASKED PACKAGES ")
        msg.append("section in the emerge man page or refer ")
        msg.append("to the Gentoo Handbook.")
        for x in msg:
            f.add_flowing_data(x)
        f.end_paragraph(1)
        f.writer.flush()
def create(self, mybigkey, myparent=None, addme=1, myuse=None,
	priority=DepPriority(), rev_dep=False, arg=None):
	"""
	Fills the digraph with nodes comprised of packages to merge.
	mybigkey is the package spec of the package to merge.
	myparent is the package depending on mybigkey ( or None )
	addme = Should we add this package to the digraph or are we just looking at it's deps?
	Think --onlydeps, we need to ignore packages in that case.
	#stuff to add:
	#SLOT-aware emerge
	#IUSE-aware emerge -> USE DEP aware depgraph
	#"no downgrade" emerge

	Returns 1 on success, 0 on failure.  NOTE(review): mybigkey is a
	mutable list and is modified in place here (root replacement for
	blockers, merge/nomerge status appended) -- callers appear to rely
	on this.
	"""
	# unused parameters
	rev_dep = False
	mytype, myroot, mykey = mybigkey
	# Blockers never become real graph nodes here; they are only
	# recorded in self.blocker_parents and enforced later by
	# validate_blockers().
	if mytype == "blocks":
		if myparent and \
			"--buildpkgonly" not in self.myopts and \
			"--nodeps" not in self.myopts and \
			myparent not in self._slot_collision_nodes:
			# Record the blocker under the parent's root.
			mybigkey[1] = myparent[1]
			self.blocker_parents.setdefault(
				tuple(mybigkey), set()).add(myparent)
		return 1
	# select the correct /var database that we'll be checking against
	vardbapi = self.trees[myroot]["vartree"].dbapi
	portdb = self.trees[myroot]["porttree"].dbapi
	bindb = self.trees[myroot]["bintree"].dbapi
	pkgsettings = self.pkgsettings[myroot]
	# if the package is already on the system, we add a "nomerge"
	# directive, otherwise we add a "merge" directive.
	mydbapi = self.trees[myroot][self.pkg_tree_map[mytype]].dbapi
	if not arg and myroot == self.target_root:
		# Detect whether this package matches one of the command-line
		# arguments (by cpv:SLOT), so selective handling below can see it.
		cpv_slot = "%s:%s" % (mykey, mydbapi.aux_get(mykey, ["SLOT"])[0])
		arg = portage.best_match_to_list(cpv_slot, self.args_keys)
	if myuse is None:
		# No USE flags supplied by the caller; compute them from config.
		self.pkgsettings[myroot].setcpv(mykey, mydb=portdb)
		myuse = self.pkgsettings[myroot]["USE"].split()
	if "--nodeps" not in self.myopts:
		self.spinner.update()
	merging=1
	if mytype == "installed":
		merging = 0
	if addme and mytype != "installed":
		# this is where we add the node to the list of packages to merge
		if "selective" in self.myparams or not arg:
			if "empty" not in self.myparams and vardbapi.cpv_exists(mykey):
				merging=0
		""" If we aren't merging, perform the --newuse check.
		If the package has new iuse flags or different use flags then if
		--newuse is specified, we need to merge the package. """
		if merging==0 and "--newuse" in self.myopts and \
			vardbapi.cpv_exists(mykey):
			pkgsettings.setcpv(mykey, mydb=mydbapi)
			# Flags forced/masked by the profile can't be toggled by the
			# user, so differences in them alone don't justify a remerge.
			forced_flags = set()
			forced_flags.update(pkgsettings.useforce)
			forced_flags.update(pkgsettings.usemask)
			old_use = vardbapi.aux_get(mykey, ["USE"])[0].split()
			iuses = set(filter_iuse_defaults(
				mydbapi.aux_get(mykey, ["IUSE"])[0].split()))
			old_iuse = set(filter_iuse_defaults(
				vardbapi.aux_get(mykey, ["IUSE"])[0].split()))
			# Remerge if IUSE changed (ignoring forced flags) or if the
			# effective enabled-flag set differs from the installed copy.
			if iuses.symmetric_difference(
				old_iuse).difference(forced_flags):
				merging = 1
			elif old_iuse.intersection(old_use) != \
				iuses.intersection(myuse):
				merging=1
	if addme and merging == 1:
		mybigkey.append("merge")
	else:
		mybigkey.append("nomerge")
	# jbigkey is the immutable (hashable) form used as a graph node.
	jbigkey = tuple(mybigkey)
	if addme:
		metadata = dict(izip(self._mydbapi_keys,
			mydbapi.aux_get(mykey, self._mydbapi_keys)))
		if merging == 0 and vardbapi.cpv_exists(mykey) and \
			mytype != "installed":
			# Not merging and already installed: represent the node with
			# the installed package's metadata instead of the tree's.
			mybigkey[0] = "installed"
			mydbapi = vardbapi
			jbigkey = tuple(mybigkey)
			metadata = dict(izip(self._mydbapi_keys,
				mydbapi.aux_get(mykey, self._mydbapi_keys)))
			myuse = mydbapi.aux_get(mykey, ["USE"])[0].split()
		slot_atom = "%s:%s" % (portage.dep_getkey(mykey), metadata["SLOT"])
		if merging and \
			"empty" not in self.myparams and \
			vardbapi.match(slot_atom):
			# Increase the priority of dependencies on packages that
			# are being rebuilt. This optimizes merge order so that
			# dependencies are rebuilt/updated as soon as possible,
			# which is needed especially when emerge is called by
			# revdep-rebuild since dependencies may be affected by ABI
			# breakage that has rendered them useless. Don't adjust
			# priority here when in "empty" mode since all packages
			# are being merged in that case.
			priority.rebuild = True
		existing_node = self._slot_node_map[myroot].get(
			slot_atom, None)
		slot_collision = False
		if existing_node:
			e_type, myroot, e_cpv, e_status = existing_node
			if mykey == e_cpv:
				# The existing node can be reused.
				if existing_node != myparent:
					# Refuse to make a node depend on itself so that
					# we don't create a bogus circular dependency
					# in self.altlist().
					self._parent_child_digraph.add(existing_node, myparent)
					self.digraph.addnode(existing_node, myparent,
						priority=priority)
				return 1
			else:
				if jbigkey in self._slot_collision_nodes:
					return 1
				# A slot collision has occurred.  Sometimes this coincides
				# with unresolvable blockers, so the slot collision will be
				# shown later if there are no unresolvable blockers.
				e_parents = self._parent_child_digraph.parent_nodes(
					existing_node)
				myparents = []
				if myparent:
					myparents.append(myparent)
				self._slot_collision_info.append(
					((jbigkey, myparents), (existing_node, e_parents)))
				self._slot_collision_nodes.add(jbigkey)
				slot_collision = True
		if slot_collision:
			# Now add this node to the graph so that self.display()
			# can show use flags and --tree output.  This node is
			# only being partially added to the graph.  It must not be
			# allowed to interfere with the other nodes that have been
			# added.  Do not overwrite data for existing nodes in
			# self.pkg_node_map and self.mydbapi since that data will
			# be used for blocker validation.
			self.pkg_node_map[myroot].setdefault(mykey, jbigkey)
			self.useFlags[myroot].setdefault(mykey, myuse)
			# Even though the graph is now invalid, continue to process
			# dependencies so that things like --fetchonly can still
			# function despite collisions.
		else:
			self.mydbapi[myroot].cpv_inject(mykey, metadata=metadata)
			self._slot_node_map[myroot][slot_atom] = jbigkey
			self.pkg_node_map[myroot][mykey] = jbigkey
			self.useFlags[myroot][mykey] = myuse
		# rev_dep is forced False above, so the first branch is
		# currently dead code; kept for the (unused) reverse-dep mode.
		if rev_dep and myparent:
			self.digraph.addnode(myparent, jbigkey,
				priority=priority)
		else:
			self.digraph.addnode(jbigkey, myparent,
				priority=priority)
	# Do this even when addme is False (--onlydeps) so that the
	# parent/child relationship is always known in case
	# self._show_slot_collision_notice() needs to be called later.
	self._parent_child_digraph.add(jbigkey, myparent)
	""" This section determines whether we go deeper into dependencies or not.
	We want to go deeper on a few occasions:
	Installing package A, we need to make sure package A's deps are met.
	emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
	If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
	"""
	if "deep" not in self.myparams and not merging and \
		not ("--update" in self.myopts and arg and merging):
		return 1
	elif "recurse" not in self.myparams:
		return 1
	""" Check DEPEND/RDEPEND/PDEPEND/SLOT
	Pull from bintree if it's binary package, porttree if it's ebuild.
	Binpkg's can be either remote or local. """
	edepend={}
	depkeys = ["DEPEND","RDEPEND","PDEPEND"]
	depvalues = mydbapi.aux_get(mykey, depkeys)
	for i in xrange(len(depkeys)):
		edepend[depkeys[i]] = depvalues[i]
	if mytype == "ebuild":
		if "--buildpkgonly" in self.myopts:
			# Only building binary packages: run-time deps irrelevant.
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""
	# Drop build-time deps for binary packages / nomerge nodes unless
	# --with-bdeps=y was given (or this is an --onlydeps ebuild argument).
	if not (arg and "--onlydeps" in self.myopts and \
		mytype == "ebuild") and \
		self.myopts.get("--with-bdeps", "n") == "n" and \
		(mytype == "binary" or mybigkey[3] == "nomerge"):
		edepend["DEPEND"] = ""
	""" We have retrieve the dependency information, now we need to recursively
	process them.  DEPEND gets processed for root = "/", {R,P}DEPEND in myroot. """
	mp = tuple(mybigkey)
	try:
		if not self.select_dep("/", edepend["DEPEND"], myparent=mp,
			myuse=myuse, priority=DepPriority(buildtime=True),
			parent_arg=arg):
			return 0
		"""RDEPEND is soft by definition.  However, in order to ensure
		correct merge order, we make it a hard dependency.  Otherwise, a
		build time dependency might not be usable due to it's run time
		dependencies not being installed yet.
		"""
		if not self.select_dep(myroot,edepend["RDEPEND"], myparent=mp,
			myuse=myuse, priority=DepPriority(runtime=True),
			parent_arg=arg):
			return 0
		if edepend.has_key("PDEPEND") and edepend["PDEPEND"]:
			# Post Depend -- Add to the list without a parent, as it depends
			# on a package being present AND must be built after that package.
			if not self.select_dep(myroot, edepend["PDEPEND"], myparent=mp,
				myuse=myuse, priority=DepPriority(runtime_post=True),
				parent_arg=arg):
				return 0
	except ValueError, e:
		# Raised (via raise_on_missing / cpv_expand) when an atom in the
		# dependency string is ambiguous; e.args[0] holds the matches.
		pkgs = e.args[0]
		portage.writemsg("\n\n!!! An atom in the dependencies " + \
			"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
		for cpv in pkgs:
			portage.writemsg("    %s\n" % cpv, noiselevel=-1)
		portage.writemsg("\n", noiselevel=-1)
		if mytype == "binary":
			portage.writemsg(
				"!!! This binary package cannot be installed: '%s'\n" % \
				mykey, noiselevel=-1)
		elif mytype == "ebuild":
			myebuild, mylocation = portdb.findname2(mykey)
			portage.writemsg("!!! This ebuild cannot be installed: " + \
				"'%s'\n" % myebuild, noiselevel=-1)
		portage.writemsg("!!! Please notify the package maintainer " + \
			"that atoms must be fully-qualified.\n", noiselevel=-1)
		return 0
	return 1
def select_files(self,myfiles):
	"""given a list of .tbz2s, .ebuilds and deps, create the appropriate
	depgraph and return a favorite list

	Returns a 2-tuple (success, favorites) where success is 1/0 (or the
	boolean result of the missing-binary / blocker checks) and favorites
	is the list of package keys to record in the world file (unless
	--oneshot is in effect).  May call sys.exit() directly on fatal
	argument errors."""
	myfavorites=[]
	myroot = self.target_root
	portdb = self.trees[myroot]["porttree"].dbapi
	bindb = self.trees[myroot]["bintree"].dbapi
	pkgsettings = self.pkgsettings[myroot]
	arg_atoms = []
	def visible(mylist):
		# Filter a cpv list down to entries that are visible in the
		# portage tree (or that the tree doesn't know about at all).
		matches = portdb.gvisible(portdb.visible(mylist))
		return [x for x in mylist \
			if x in matches or not portdb.cpv_exists(x)]
	for x in myfiles:
		ext = os.path.splitext(x)[1]
		if ext==".tbz2":
			# Binary package argument: locate the file under PKGDIR if
			# a bare filename was given.
			if not os.path.exists(x):
				if os.path.exists(
					os.path.join(pkgsettings["PKGDIR"], "All", x)):
					x = os.path.join(pkgsettings["PKGDIR"], "All", x)
				elif os.path.exists(
					os.path.join(pkgsettings["PKGDIR"], x)):
					x = os.path.join(pkgsettings["PKGDIR"], x)
				else:
					print "\n\n!!! Binary package '"+str(x)+"' does not exist."
					print "!!! Please ensure the tbz2 exists as specified.\n"
					sys.exit(1)
			mytbz2=xpak.tbz2(x)
			mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
			# The tbz2 must live where the bintree expects it, otherwise
			# the merge would pick up a different file.
			if os.path.realpath(x) != \
				os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
				print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
				sys.exit(1)
			if not self.create(["binary", myroot, mykey],
				None, "--onlydeps" not in self.myopts):
				return (0,myfavorites)
			elif not "--oneshot" in self.myopts:
				myfavorites.append(mykey)
		elif ext==".ebuild":
			# Direct ebuild path argument: derive category/package from
			# the directory layout (.../category/package/pkg-ver.ebuild).
			x = os.path.realpath(x)
			mykey=os.path.basename(os.path.normpath(x+"/../.."))+"/"+os.path.splitext(os.path.basename(x))[0]
			ebuild_path = portdb.findname(mykey)
			if ebuild_path:
				if os.path.realpath(ebuild_path) != x:
					print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
					sys.exit(1)
				if mykey not in portdb.xmatch(
					"match-visible", portage.dep_getkey(mykey)):
					print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
					print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
					print colorize("BAD", "*** page for details.")
					countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
						"Continuing...")
			else:
				raise portage_exception.PackageNotFound(
					"%s is not in a valid portage tree hierarchy or does not exist" % x)
			if not self.create(["ebuild", myroot, mykey],
				None, "--onlydeps" not in self.myopts):
				return (0,myfavorites)
			elif not "--oneshot" in self.myopts:
				myfavorites.append(mykey)
		else:
			# Plain dependency atom argument.
			if not is_valid_package_atom(x):
				portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
					noiselevel=-1)
				portage.writemsg("!!! Please check ebuild(5) for full details.\n")
				portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
				return (0,[])
			try:
				mykey = None
				if "--usepkg" in self.myopts:
					# Try expanding against the binary tree first.
					mykey = portage.dep_expand(x, mydb=bindb,
						settings=pkgsettings)
				if (mykey and not mykey.startswith("null/")) or \
					"--usepkgonly" in self.myopts:
					arg_atoms.append((x, mykey))
					continue
				mykey = portage.dep_expand(x,
					mydb=portdb, settings=pkgsettings)
				arg_atoms.append((x, mykey))
			except ValueError, errpkgs:
				print "\n\n!!! The short ebuild name \"" + x + "\" is ambiguous.  Please specify"
				print "!!! one of the following fully-qualified ebuild names instead:\n"
				for i in errpkgs[0]:
					print "    " + green(i)
				print
				sys.exit(1)
	if "--update" in self.myopts:
		"""Make sure all installed slots are updated when possible. Do this
		with --emptytree also, to ensure that all slots are remerged."""
		vardb = self.trees[self.target_root]["vartree"].dbapi
		greedy_atoms = []
		for myarg, myatom in arg_atoms:
			greedy_atoms.append((myarg, myatom))
			# Collect all SLOTs currently installed for this atom, plus
			# the slot of the best available candidate.
			myslots = set()
			for cpv in vardb.match(myatom):
				myslots.add(vardb.aux_get(cpv, ["SLOT"])[0])
			if myslots:
				best_pkgs = []
				if "--usepkg" in self.myopts:
					mymatches = bindb.match(myatom)
					if "--usepkgonly" not in self.myopts:
						mymatches = visible(mymatches)
					best_pkg = portage.best(mymatches)
					if best_pkg:
						best_slot = bindb.aux_get(best_pkg, ["SLOT"])[0]
						best_pkgs.append(("binary", best_pkg, best_slot))
				if "--usepkgonly" not in self.myopts:
					best_pkg = portage.best(portdb.match(myatom))
					if best_pkg:
						best_slot = portdb.aux_get(best_pkg, ["SLOT"])[0]
						best_pkgs.append(("ebuild", best_pkg, best_slot))
				if best_pkgs:
					best_pkg = portage.best([x[1] for x in best_pkgs])
					best_pkgs = [x for x in best_pkgs if x[1] == best_pkg]
					best_slot = best_pkgs[0][2]
					myslots.add(best_slot)
				if len(myslots) > 1:
					# Multiple slots involved: add one slot-specific
					# atom per slot that actually has something to offer.
					for myslot in myslots:
						myslot_atom = "%s:%s" % \
							(portage.dep_getkey(myatom), myslot)
						available = False
						if "--usepkgonly" not in self.myopts and \
							self.trees[self.target_root][
							"porttree"].dbapi.match(myslot_atom):
							available = True
						elif "--usepkg" in self.myopts:
							mymatches = bindb.match(myslot_atom)
							if "--usepkgonly" not in self.myopts:
								mymatches = visible(mymatches)
							if mymatches:
								available = True
						if available:
							greedy_atoms.append((myarg, myslot_atom))
		arg_atoms = greedy_atoms
	""" These are used inside self.create() in order to ensure packages
	that happen to match arguments are not incorrectly marked as nomerge."""
	self.args_keys = [x[1] for x in arg_atoms]
	for myarg, myatom in arg_atoms:
		try:
			self.mysd = self.select_dep(myroot, myatom, arg=myarg)
		except portage_exception.MissingSignature, e:
			portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
			portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
			portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
			portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
			portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
			sys.exit(1)
		except portage_exception.InvalidSignature, e:
			portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
			portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
			portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
			portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
			portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
			sys.exit(1)
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % mykey
			print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
			raise
		if not self.mysd:
			return (0,myfavorites)
		elif not "--oneshot" in self.myopts:
			mykey = portage.dep_getkey(myatom)
			if mykey not in myfavorites:
				myfavorites.append(mykey)
	missing=0
	if "--usepkgonly" in self.myopts:
		# With --usepkgonly every "merge" node must come from the bintree.
		for xs in self.digraph.all_nodes():
			if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
				if missing == 0:
					print
				missing += 1
				print "Missing binary for:",xs[2]
	if not self.validate_blockers():
		return False, myfavorites
	# We're true here unless we are missing binaries.
	return (not missing,myfavorites)
def select_dep(self, myroot, depstring, myparent=None, arg=None,
	myuse=None, raise_on_missing=False, priority=DepPriority(),
	rev_deps=False, parent_arg=None):
	""" Given a depstring, create the depgraph such that all dependencies are satisfied.
	myroot = $ROOT from environment, where {R,P}DEPENDs are merged to.
	myparent = the node whose depstring is being passed in
	arg = package was specified on the command line, merge even if it's already installed
	myuse = USE flags at present
	raise_on_missing = Given that the depgraph is not proper, raise an exception if true
	else continue trying.
	return 1 on success, 0 for failure
	"""
	portdb = self.trees[myroot]["porttree"].dbapi
	bindb = self.trees[myroot]["bintree"].dbapi
	vardb = self.trees[myroot]["vartree"].dbapi
	pkgsettings = self.pkgsettings[myroot]
	if myparent:
		p_type, p_root, p_key, p_status = myparent
	if "--debug" in self.myopts:
		print
		print "Parent:   ",myparent
		print "Depstring:",depstring
		if rev_deps:
			print "Reverse:", rev_deps
		print "Priority:", priority
	#processing dependencies
	""" Call portage.dep_check to evaluate the use? conditionals and make sure all
	dependencies are satisfiable. """
	if arg:
		# Command-line argument: the "depstring" is a single atom.
		mymerge = [depstring]
		pprovided = pkgsettings.pprovideddict.get(
			portage.dep_getkey(depstring))
		if pprovided and portage.match_from_list(depstring, pprovided):
			# Atom is satisfied by package.provided; nothing to merge.
			mymerge = []
	else:
		try:
			# For installed (nomerge) parents, relax strictness since
			# their metadata may predate current syntax rules.  The
			# module-level flag is restored in the finally clause.
			if myparent and p_status == "nomerge":
				portage_dep._dep_check_strict = False
			mycheck = portage.dep_check(depstring, None,
				pkgsettings, myuse=myuse,
				use_binaries=("--usepkgonly" in self.myopts),
				myroot=myroot, trees=self.trees)
		finally:
			portage_dep._dep_check_strict = True
		if not mycheck[0]:
			if myparent:
				show_invalid_depstring_notice(
					myparent, depstring, mycheck[1])
			else:
				sys.stderr.write("\n%s\n%s\n" % (depstring, mycheck[1]))
			return 0
		mymerge = mycheck[1]
	if not mymerge and arg and \
		portage.best_match_to_list(depstring, self.args_keys):
		# A provided package has been specified on the command line.  The
		# package will not be merged and a warning will be displayed.
		self._pprovided_args.append(arg)
	if myparent:
		# The parent is added after it's own dep_check call so that it
		# isn't allowed to satisfy a direct bootstrap dependency on itself
		# via an old-style virtual.  This isn't a problem with new-style
		# virtuals, which are preferenced in dep_zapdeps by looking only at
		# the depstring, vdb, and available packages.
		p_type, p_root, p_key, p_status = myparent
		if p_status == "merge":
			# Update old-style virtuals if this package provides any.
			# These are needed for dep_virtual calls inside dep_check.
			p_db = self.trees[p_root][self.pkg_tree_map[p_type]].dbapi
			try:
				self.pkgsettings[p_root].setinst(p_key, p_db)
			except portage_exception.InvalidDependString, e:
				provide = p_db.aux_get(p_key, ["PROVIDE"])[0]
				show_invalid_depstring_notice(myparent, provide, str(e))
				del e
				return 0
	if "--debug" in self.myopts:
		print "Candidates:",mymerge
	for x in mymerge:
		selected_pkg = None
		if x[0]=="!":
			# "!" prefix marks a blocker atom.
			selected_pkg = ["blocks", myroot, x[1:], None]
		else:
			#We are not processing a blocker but a normal dependency
			if myparent:
				"""In some cases, dep_check will return deps that shouldn't
				be proccessed any further, so they are identified and
				discarded here."""
				if "empty" not in self.myparams and \
					"deep" not in self.myparams and \
					not ("--update" in self.myopts and parent_arg) and \
					vardb.match(x):
					continue
			# List of acceptable packages, ordered by type preference.
			matched_packages = []
			myeb_matches = portdb.xmatch("match-visible", x)
			myeb = None
			if "--usepkgonly" not in self.myopts:
				myeb = portage.best(myeb_matches)
			myeb_pkg=None
			if "--usepkg" in self.myopts:
				# The next line assumes the binarytree has been populated.
				# XXX: Need to work out how we use the binary tree with roots.
				myeb_pkg_matches = bindb.match(x)
				if "--usepkgonly" not in self.myopts:
					# Remove any binary package entries that are masked in the portage tree (#55871)
					myeb_pkg_matches = [pkg for pkg in myeb_pkg_matches \
						if pkg in myeb_matches or \
						not portdb.cpv_exists(pkg)]
				if myeb_pkg_matches:
					myeb_pkg = portage.best(myeb_pkg_matches)
			if myeb_pkg and "--newuse" in self.myopts:
				# Reject the binary package if its USE/IUSE no longer
				# match the current configuration (same logic as the
				# --newuse check in create()).
				iuses = set(filter_iuse_defaults(
					bindb.aux_get(myeb_pkg, ["IUSE"])[0].split()))
				old_use = bindb.aux_get(myeb_pkg, ["USE"])[0].split()
				mydb = None
				if "--usepkgonly" not in self.myopts and myeb:
					mydb = portdb
				if myeb:
					pkgsettings.setcpv(myeb, mydb=mydb)
				else:
					pkgsettings.setcpv(myeb_pkg, mydb=mydb)
				now_use = pkgsettings["USE"].split()
				forced_flags = set()
				forced_flags.update(pkgsettings.useforce)
				forced_flags.update(pkgsettings.usemask)
				cur_iuse = iuses
				if "--usepkgonly" not in self.myopts and myeb:
					cur_iuse = set(filter_iuse_defaults(
						portdb.aux_get(myeb, ["IUSE"])[0].split()))
				if iuses.symmetric_difference(
					cur_iuse).difference(forced_flags):
					myeb_pkg = None
				elif iuses.intersection(old_use) != \
					cur_iuse.intersection(now_use):
					myeb_pkg = None
			if myeb_pkg:
				binpkguseflags = \
					self.trees[myroot]["bintree"].dbapi.aux_get(
						myeb_pkg, ["USE"])[0].split()
				matched_packages.append(
					["binary", myroot, myeb_pkg, binpkguseflags])
			if "--usepkgonly" not in self.myopts and myeb_matches:
				matched_packages.append(
					["ebuild", myroot, myeb, None])
			if not matched_packages and \
				not (arg and "selective" not in self.myparams):
				"""Fall back to the installed package database.  This is a
				last resort because the metadata tends to diverge from that
				of the ebuild in the tree."""
				myeb_inst_matches = vardb.match(x)
				if "--usepkgonly" not in self.myopts:
					""" TODO: Improve masking check for installed and
					binary packages. bug #149816"""
					myeb_inst_matches = [pkg for pkg in myeb_inst_matches \
						if not portdb.cpv_exists(pkg)]
				myeb_inst = None
				if myeb_inst_matches:
					myeb_inst = portage.best(myeb_inst_matches)
				if myeb_inst:
					binpkguseflags = vardb.aux_get(
						myeb_inst, ["USE"])[0].split()
					matched_packages.append(
						["installed", myroot, myeb_inst, binpkguseflags])
			if not matched_packages:
				# Nothing can satisfy this atom: report and fail.
				if raise_on_missing:
					raise ValueError
				if not arg:
					xinfo='"'+x+'"'
				else:
					xinfo='"'+arg+'"'
				if myparent:
					xfrom = '(dependency required by '+ \
						green('"%s"' % myparent[2]) + \
						red(' [%s]' % myparent[0]) + ')'
				alleb = portdb.xmatch("match-all", x)
				if alleb:
					if "--usepkgonly" not in self.myopts:
						print "\n!!! "+red("All ebuilds that could satisfy ")+green(xinfo)+red(" have been masked.")
						print "!!! One of the following masked packages is required to complete your request:"
						oldcomment = ""
						for p in alleb:
							mreasons = portage.getmaskingstatus(p,
								settings=pkgsettings, portdb=portdb)
							print "- "+p+" (masked by: "+", ".join(mreasons)+")"
							comment = portage.getmaskingreason(p,
								settings=pkgsettings, portdb=portdb)
							if comment and comment != oldcomment:
								print comment
								oldcomment = comment
						print
						print "For more information, see MASKED PACKAGES section in the emerge man page or "
						print "refer to the Gentoo Handbook."
					else:
						print "\n!!! "+red("There are no packages available to satisfy: ")+green(xinfo)
						print "!!! Either add a suitable binary package or compile from an ebuild."
				else:
					print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
				if myparent:
					print xfrom
				print
				return 0
			if "--debug" in self.myopts:
				for pkg in matched_packages:
					print (pkg[0] + ":").rjust(10), pkg[2]
			if len(matched_packages) > 1:
				bestmatch = portage.best(
					[pkg[2] for pkg in matched_packages])
				matched_packages = [pkg for pkg in matched_packages \
					if pkg[2] == bestmatch]
			# ordered by type preference ("ebuild" type is the last resort)
			selected_pkg = matched_packages[0]
			pkgtype, myroot, mycpv, myuse = selected_pkg
			mydbapi = self.trees[myroot][self.pkg_tree_map[pkgtype]].dbapi
			slot_atom = "%s:%s" % (portage.dep_getkey(mycpv),
				mydbapi.aux_get(mycpv, ["SLOT"])[0])
			existing_node = self._slot_node_map[myroot].get(
				slot_atom, None)
			if existing_node:
				e_type, myroot, e_cpv, e_status = existing_node
				if portage.match_from_list(x, [e_cpv]):
					# The existing node can be reused.
					selected_pkg = [e_type, myroot, e_cpv,
						self.useFlags[myroot][e_cpv]]
		if myparent:
			#we are a dependency, so we want to be unconditionally added
			mypriority = priority.copy()
			if vardb.match(x):
				# Already installed: the dependency is "satisfied", which
				# softens its priority for merge ordering.
				mypriority.satisfied = True
			if not self.create(selected_pkg[0:3], myparent,
				myuse=selected_pkg[-1], priority=mypriority,
				rev_dep=rev_deps, arg=arg):
				return 0
		else:
			#if mysource is not set, then we are a command-line dependency and should not be added
			#if --onlydeps is specified.
			if not self.create(selected_pkg[0:3], myparent,
				addme=("--onlydeps" not in self.myopts),
				myuse=selected_pkg[-1], rev_dep=rev_deps, arg=arg):
				return 0
	if "--debug" in self.myopts:
		print "Exiting...",myparent
	return 1
def validate_blockers(self):
	"""Remove any blockers from the digraph that do not match any of the
	packages within the graph.  If necessary, create hard deps to ensure
	correct merge order such that mutually blocking packages are never
	installed simultaneously.

	Returns True when all blockers are resolved (or tolerable), False
	when an unresolvable conflict or invalid depstring was found."""
	if "--buildpkgonly" in self.myopts or \
		"--nodeps" in self.myopts:
		return True
	# modified_slots[root] maps slot_atom -> cpv for every node that is
	# scheduled to be merged (i.e. slots that will change).
	modified_slots = {}
	for myroot in self.trees:
		myslots = {}
		modified_slots[myroot] = myslots
		final_db = self.mydbapi[myroot]
		slot_node_map = self._slot_node_map[myroot]
		for slot_atom, mynode in slot_node_map.iteritems():
			mytype, myroot, mycpv, mystatus = mynode
			if mystatus == "merge":
				myslots[slot_atom] = mycpv
	#if "deep" in self.myparams:
	if True:
		# Pull in blockers from all installed packages that haven't already
		# been pulled into the depgraph.  This is not enabled by default
		# due to the performance penalty that is incurred by all the
		# additional dep_check calls that are required.

		# Optimization hack for dep_check calls that minimizes the
		# available matches by replacing the portdb with a fakedbapi
		# instance.
		class FakePortageTree(object):
			def __init__(self, mydb):
				self.dbapi = mydb
		dep_check_trees = {}
		for myroot in self.trees:
			dep_check_trees[myroot] = self.trees[myroot].copy()
			dep_check_trees[myroot]["porttree"] = \
				FakePortageTree(self.mydbapi[myroot])
		dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
		for myroot in self.trees:
			pkg_node_map = self.pkg_node_map[myroot]
			vardb = self.trees[myroot]["vartree"].dbapi
			portdb = self.trees[myroot]["porttree"].dbapi
			pkgsettings = self.pkgsettings[myroot]
			final_db = self.mydbapi[myroot]
			cpv_all_installed = self.trees[myroot]["vartree"].dbapi.cpv_all()
			# Cache avoids re-running dep_check for installed packages
			# whose COUNTER has not changed since the last run.
			blocker_cache = BlockerCache(myroot, vardb)
			for pkg in cpv_all_installed:
				blocker_atoms = None
				matching_node = pkg_node_map.get(pkg, None)
				if matching_node and \
					matching_node[3] == "nomerge":
					continue
				# If this node has any blockers, create a "nomerge"
				# node for it so that they can be enforced.
				self.spinner.update()
				blocker_data = blocker_cache.get(pkg)
				if blocker_data:
					blocker_atoms = blocker_data.atoms
				else:
					dep_vals = vardb.aux_get(pkg, dep_keys)
					myuse = vardb.aux_get(pkg, ["USE"])[0].split()
					depstr = " ".join(dep_vals)
					# It is crucial to pass in final_db here in order to
					# optimize dep_check calls by eliminating atoms via
					# dep_wordreduce and dep_eval calls.
					try:
						portage_dep._dep_check_strict = False
						try:
							success, atoms = portage.dep_check(depstr,
								final_db, pkgsettings, myuse=myuse,
								trees=dep_check_trees, myroot=myroot)
						except Exception, e:
							if isinstance(e, SystemExit):
								raise
							# This is helpful, for example, if a ValueError
							# is thrown from cpv_expand due to multiple
							# matches (this can happen if an atom lacks a
							# category).
							show_invalid_depstring_notice(
								("installed", myroot, pkg, "nomerge"),
								depstr, str(e))
							del e
							raise
					finally:
						# Always restore the global strictness flag.
						portage_dep._dep_check_strict = True
					if not success:
						slot_atom = "%s:%s" % (portage.dep_getkey(pkg),
							vardb.aux_get(pkg, ["SLOT"])[0])
						if slot_atom in modified_slots[myroot]:
							# This package is being replaced anyway, so
							# ignore invalid dependencies so as not to
							# annoy the user too much (otherwise they'd be
							# forced to manually unmerge it first).
							continue
						show_invalid_depstring_notice(
							("installed", myroot, pkg, "nomerge"),
							depstr, atoms)
						return False
					blocker_atoms = [myatom for myatom in atoms \
						if myatom.startswith("!")]
					counter = long(vardb.aux_get(pkg, ["COUNTER"])[0])
					blocker_cache[pkg] = \
						blocker_cache.BlockerData(counter, blocker_atoms)
				if blocker_atoms:
					# Don't store this parent in pkg_node_map, because it's
					# not needed there and it might overwrite a "merge"
					# node with the same cpv.
					myparent = ("installed", myroot, pkg, "nomerge")
					for myatom in blocker_atoms:
						blocker = ("blocks", myroot, myatom[1:])
						myparents = \
							self.blocker_parents.get(blocker, None)
						if not myparents:
							myparents = set()
							self.blocker_parents[blocker] = myparents
						myparents.add(myparent)
			blocker_cache.flush()
			del blocker_cache
	# Now resolve each recorded blocker against the initial (installed)
	# and final (post-merge) package sets.
	for blocker in self.blocker_parents.keys():
		mytype, myroot, mydep = blocker
		initial_db = self.trees[myroot]["vartree"].dbapi
		final_db = self.mydbapi[myroot]
		blocked_initial = initial_db.match(mydep)
		blocked_final = final_db.match(mydep)
		if not blocked_initial and not blocked_final:
			# Blocker matches nothing at all; discard it.
			del self.blocker_parents[blocker]
			continue
		blocked_slots_initial = {}
		blocked_slots_final = {}
		for cpv in blocked_initial:
			blocked_slots_initial[cpv] = \
				"%s:%s" % (portage.dep_getkey(cpv),
				initial_db.aux_get(cpv, ["SLOT"])[0])
		for cpv in blocked_final:
			blocked_slots_final[cpv] = \
				"%s:%s" % (portage.dep_getkey(cpv),
				final_db.aux_get(cpv, ["SLOT"])[0])
		for parent in list(self.blocker_parents[blocker]):
			ptype, proot, pcpv, pstatus = parent
			pdbapi = self.trees[proot][self.pkg_tree_map[ptype]].dbapi
			pslot = pdbapi.aux_get(pcpv, ["SLOT"])[0]
			pslot_atom = "%s:%s" % (portage.dep_getkey(pcpv), pslot)
			# parent_static: the parent's own slot isn't being changed
			# by this merge operation.
			parent_static = pslot_atom not in modified_slots[proot]
			unresolved_blocks = False
			depends_on_order = set()
			for cpv in blocked_initial:
				slot_atom = blocked_slots_initial[cpv]
				if slot_atom == pslot_atom:
					# The parent blocks an initial package in the same
					# slot as itself.  The merge/nomerge status of neither
					# node matters.  In any case, this particular block is
					# automatically resolved.
					continue
				if parent_static and \
					slot_atom not in modified_slots[myroot]:
					# This blocker will be handled the next time that a
					# merge of either package is triggered.
					continue
				if pstatus == "merge" and \
					slot_atom in modified_slots[myroot]:
					replacement = final_db.match(slot_atom)
					if replacement:
						if not portage.match_from_list(mydep, replacement):
							# Apparently a replacement may be able to
							# invalidate this block.
							replacement_node = \
								self.pkg_node_map[proot][replacement[0]]
							depends_on_order.add((replacement_node, parent))
							continue
				# None of the above blocker resolutions techniques apply,
				# so apparently this one is unresolvable.
				unresolved_blocks = True
			for cpv in blocked_final:
				slot_atom = blocked_slots_final[cpv]
				if slot_atom == pslot_atom:
					# The parent blocks itself, so the merge order does not
					# need to be enforced.
					continue
				if parent_static and \
					slot_atom not in modified_slots[myroot]:
					# This blocker will be handled the next time that a
					# merge of either package is triggered.
					continue
				if not parent_static and pstatus == "nomerge" and \
					slot_atom in modified_slots[myroot]:
					replacement = final_db.match(pslot_atom)
					if replacement:
						replacement_node = \
							self.pkg_node_map[proot][replacement[0]]
						if replacement_node not in \
							self.blocker_parents[blocker]:
							# Apparently a replacement may be able to
							# invalidate this block.
							blocked_node = self.pkg_node_map[proot][cpv]
							depends_on_order.add(
								(replacement_node, blocked_node))
							continue
				# None of the above blocker resolutions techniques apply,
				# so apparently this one is unresolvable.
				unresolved_blocks = True
			if not unresolved_blocks and depends_on_order:
				for node, pnode in depends_on_order:
					# Enforce correct merge order with a hard dep.
					self.digraph.addnode(node, pnode,
						priority=DepPriority(buildtime=True))
					# Count references to this blocker so that it can be
					# invalidated after nodes referencing it have been
					# merged.
					self.blocker_digraph.addnode(node, blocker)
			if not unresolved_blocks and not depends_on_order:
				self.blocker_parents[blocker].remove(parent)
			if unresolved_blocks:
				self._unresolved_blocker_parents.setdefault(
					blocker, set()).add(parent)
		if not self.blocker_parents[blocker]:
			del self.blocker_parents[blocker]
	# Validate blockers that depend on merge order.
	if not self.blocker_digraph.empty():
		self.altlist()
	if self._slot_collision_info:
		# The user is only notified of a slot collision if there are no
		# unresolvable blocks.
		for x in self.altlist():
			if x[0] == "blocks":
				return True
		self._show_slot_collision_notice(self._slot_collision_info[0])
		if not self._accept_collisions():
			return False
	return True
def _accept_collisions(self):
acceptable = False
for x in ("--nodeps", "--pretend", "--fetchonly", "--fetch-all-uri"):
if x in self.myopts:
acceptable = True
break
return acceptable
def _merge_order_bias(self, mygraph):
	"""Order nodes from highest to lowest overall reference count for
	optimal leaf node selection."""
	# node -> number of parent edges (reference count) in the graph
	node_info = {}
	for node in mygraph.order:
		node_info[node] = len(mygraph.parent_nodes(node))
	def cmp_merge_preference(node1, node2):
		# Python 2 cmp-style comparator: a negative result sorts node1
		# first, so this yields descending reference-count order.
		return node_info[node2] - node_info[node1]
	mygraph.order.sort(cmp_merge_preference)
def altlist(self, reversed=False):
	"""Return the ordered merge list: a list of [type, root, cpv, status]
	nodes arranged so that dependencies come before the packages that
	need them, with any unresolved blockers appended at the end for
	display.  Results are memoized in self._altlist_cache (keyed by the
	reversed flag) and a copy is always returned so callers cannot
	corrupt the cache.

	@param reversed: when True, the forward list is computed first (so
		that blocker validation still works) and returned reversed.
	"""
	if reversed in self._altlist_cache:
		# Cache hit; hand out a copy so the cached list stays intact.
		return self._altlist_cache[reversed][:]
	if reversed:
		# Blocker validation only works in forward mode, so build the
		# forward list and simply flip it.
		retlist = self.altlist()
		retlist.reverse()
		self._altlist_cache[reversed] = retlist[:]
		return retlist
	# Work on copies: the selection loop below destructively removes
	# nodes from the graphs.
	mygraph=self.digraph.copy()
	self._merge_order_bias(mygraph)
	myblockers = self.blocker_digraph.copy()
	retlist=[]
	circular_blocks = False
	blocker_deps = None
	asap_nodes = []
	portage_node = None
	if reversed:
		# NOTE(review): this branch looks unreachable, since the
		# reversed case already returned above — confirm before
		# removing.
		get_nodes = mygraph.root_nodes
	else:
		get_nodes = mygraph.leaf_nodes
		# Schedule portage itself as soon as possible so the rest of
		# the merge can use the updated package manager.
		for cpv, node in self.pkg_node_map["/"].iteritems():
			if "portage" == portage.catsplit(portage.dep_getkey(cpv))[-1]:
				portage_node = node
				asap_nodes.append(node)
				break
	# Priorities to ignore while hunting for leaf nodes, from None
	# (ignore nothing) up through SOFT.
	ignore_priority_soft_range = [None]
	ignore_priority_soft_range.extend(
		xrange(DepPriority.MIN, DepPriority.SOFT + 1))
	tree_mode = "--tree" in self.myopts
	# Tracks whether or not the current iteration should prefer asap_nodes
	# if available. This is set to False when the previous iteration
	# failed to select any nodes. It is reset whenever nodes are
	# successfully selected.
	prefer_asap = True
	while not mygraph.empty():
		selected_nodes = None
		if prefer_asap and asap_nodes:
			"""ASAP nodes are merged before their soft deps."""
			# Drop asap candidates that have already been merged.
			asap_nodes = [node for node in asap_nodes \
				if mygraph.contains(node)]
			for node in asap_nodes:
				# Only select an asap node once everything but its
				# soft deps is satisfied.
				if not mygraph.child_nodes(node,
					ignore_priority=DepPriority.SOFT):
					selected_nodes = [node]
					asap_nodes.remove(node)
					break
		if not selected_nodes and \
			not (prefer_asap and asap_nodes):
			# Normal selection: find leaf nodes while ignoring as few
			# dependency priorities as possible.
			for ignore_priority in ignore_priority_soft_range:
				nodes = get_nodes(ignore_priority=ignore_priority)
				if nodes:
					break
			if nodes:
				if ignore_priority is None and not tree_mode:
					# Greedily pop all of these nodes since no relationship
					# has been ignored. This optimization destroys --tree
					# output, so it's disabled in reversed mode.
					selected_nodes = nodes
				else:
					# For optimal merge order:
					# * Only pop one node.
					# * Removing a root node (node without a parent)
					# will not produce a leaf node, so avoid it.
					for node in nodes:
						if mygraph.parent_nodes(node):
							# found a non-root node
							selected_nodes = [node]
							break
					if not selected_nodes:
						# settle for a root node
						selected_nodes = [nodes[0]]
		if not selected_nodes:
			nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
			if nodes:
				"""Recursively gather a group of nodes that RDEPEND on
				eachother. This ensures that they are merged as a group
				and get their RDEPENDs satisfied as soon as possible."""
				def gather_deps(ignore_priority,
					mergeable_nodes, selected_nodes, node):
					# Returns True if the whole dependency group rooted
					# at node fits inside mergeable_nodes; accumulates
					# the group into selected_nodes as it recurses.
					if node in selected_nodes:
						return True
					if node not in mergeable_nodes:
						return False
					if node == portage_node and mygraph.child_nodes(node,
						ignore_priority=DepPriority.MEDIUM_SOFT):
						# Make sure that portage always has all of it's
						# RDEPENDs installed first.
						return False
					selected_nodes.add(node)
					for child in mygraph.child_nodes(node,
						ignore_priority=ignore_priority):
						if not gather_deps(ignore_priority,
							mergeable_nodes, selected_nodes, child):
							return False
					return True
				mergeable_nodes = set(nodes)
				if prefer_asap and asap_nodes:
					nodes = asap_nodes
				# Retry with progressively weaker priorities until some
				# group of nodes can be merged together.
				for ignore_priority in xrange(DepPriority.SOFT,
					DepPriority.MEDIUM_SOFT + 1):
					for node in nodes:
						selected_nodes = set()
						if gather_deps(ignore_priority,
							mergeable_nodes, selected_nodes, node):
							break
						else:
							selected_nodes = None
					if selected_nodes:
						break
				if prefer_asap and asap_nodes and not selected_nodes:
					# We failed to find any asap nodes to merge, so ignore
					# them for the next iteration.
					prefer_asap = False
					continue
				if selected_nodes and ignore_priority > DepPriority.SOFT:
					# Try to merge ignored medium deps as soon as possible.
					for node in selected_nodes:
						children = set(mygraph.child_nodes(node))
						soft = children.difference(
							mygraph.child_nodes(node,
							ignore_priority=DepPriority.SOFT))
						medium_soft = children.difference(
							mygraph.child_nodes(node,
							ignore_priority=DepPriority.MEDIUM_SOFT))
						medium_soft.difference_update(soft)
						for child in medium_soft:
							if child in selected_nodes:
								continue
							if child in asap_nodes:
								continue
							# TODO: Try harder to make these nodes get
							# merged absolutely as soon as possible.
							asap_nodes.append(child)
		if not selected_nodes:
			if not myblockers.is_empty():
				"""A blocker couldn't be circumnavigated while keeping all
				dependencies satisfied. The user will have to resolve this
				manually. This is a panic condition and thus the order
				doesn't really matter, so just pop a random node in order
				to avoid a circular dependency panic if possible."""
				if not circular_blocks:
					circular_blocks = True
					blocker_deps = myblockers.leaf_nodes()
				while blocker_deps:
					# Some of these nodes might have already been selected
					# by the normal node selection process after the
					# circular_blocks flag has been set. Therefore, we
					# have to verify that they're still in the graph so
					# that they're not selected more than once.
					node = blocker_deps.pop()
					if mygraph.contains(node):
						selected_nodes = [node]
						break
		if not selected_nodes:
			# No leaf nodes are available, so we have a circular
			# dependency panic situation. Reduce the noise level to a
			# minimum via repeated elimination of root nodes since they
			# have no parents and thus can not be part of a cycle.
			while True:
				root_nodes = mygraph.root_nodes(
					ignore_priority=DepPriority.SOFT)
				if not root_nodes:
					break
				for node in root_nodes:
					mygraph.remove(node)
			# Display the USE flags that are enabled on nodes that are part
			# of dependency cycles in case that helps the user decide to
			# disable some of them.
			display_order = []
			tempgraph = mygraph.copy()
			while not tempgraph.empty():
				nodes = tempgraph.leaf_nodes()
				if not nodes:
					node = tempgraph.order[0]
				else:
					node = nodes[0]
				display_order.append(list(node))
				tempgraph.remove(node)
			display_order.reverse()
			# Force verbose --tree output so the cycle is visible.
			self.myopts.pop("--quiet", None)
			self.myopts.pop("--verbose", None)
			self.myopts["--tree"] = True
			self.display(display_order)
			print "!!! Error: circular dependencies:"
			print
			mygraph.debug_print()
			print
			print "!!! Note that circular dependencies can often be avoided by temporarily"
			print "!!! disabling USE flags that trigger optional dependencies."
			sys.exit(1)
		# At this point, we've succeeded in selecting one or more nodes, so
		# it's now safe to reset the prefer_asap to it's default state.
		prefer_asap = True
		for node in selected_nodes:
			retlist.append(list(node))
			mygraph.remove(node)
			if not reversed and not circular_blocks and myblockers.contains(node):
				"""This node may have invalidated one or more blockers."""
				myblockers.remove(node)
				for blocker in myblockers.root_nodes():
					if not myblockers.child_nodes(blocker):
						# The blocker no longer matters for ordering;
						# restore any parents whose blocks remain
						# unresolved, otherwise forget it entirely.
						myblockers.remove(blocker)
						unresolved = \
							self._unresolved_blocker_parents.get(blocker)
						if unresolved:
							self.blocker_parents[blocker] = unresolved
						else:
							del self.blocker_parents[blocker]
	if not reversed:
		"""Blocker validation does not work with reverse mode,
		so self.altlist() should first be called with reverse disabled
		so that blockers are properly validated."""
		self.blocker_digraph = myblockers
	""" Add any unresolved blocks so that they can be displayed."""
	for blocker in self.blocker_parents:
		retlist.append(list(blocker))
	self._altlist_cache[reversed] = retlist[:]
	return retlist
def xcreate(self,mode="system"):
	"""Populate the dependency graph from the "system" or world package
	set and resolve its dependencies.

	@param mode: "system" selects just the system set; any other value
		selects world mode, which is the system set plus the world file.
	@return: 1 on success, 0 when a dependency fails to resolve, and
		False when blocker validation fails (callers treat 0 and False
		alike as failure).
	"""
	vardb = self.trees[self.target_root]["vartree"].dbapi
	portdb = self.trees[self.target_root]["porttree"].dbapi
	bindb = self.trees[self.target_root]["bintree"].dbapi
	def visible(mylist):
		# Reduce binary package matches to those whose corresponding
		# ebuild is visible (or that have no ebuild at all).
		matches = portdb.gvisible(portdb.visible(mylist))
		return [x for x in mylist \
			if x in matches or not portdb.cpv_exists(x)]
	world_problems = False
	if mode=="system":
		mylist = getlist(self.settings, "system")
	else:
		#world mode
		worldlist = getlist(self.settings, "world")
		mylist = getlist(self.settings, "system")
		worlddict=genericdict(worldlist)
		for x in worlddict.keys():
			if not portage.isvalidatom(x):
				# Malformed atom in the world file.
				world_problems = True
				continue
			elif not vardb.match(x):
				# Listed in world but not installed; keep it only if an
				# ebuild or (with --usepkg) a binary package can still
				# satisfy it.
				world_problems = True
				available = False
				if "--usepkgonly" not in self.myopts and \
					portdb.match(x):
					available = True
				elif "--usepkg" in self.myopts:
					mymatches = bindb.match(x)
					if "--usepkgonly" not in self.myopts:
						mymatches = visible(mymatches)
					if mymatches:
						available = True
				if not available:
					continue
			mylist.append(x)
	newlist = []
	for atom in mylist:
		mykey = portage.dep_getkey(atom)
		# NOTE(review): "if True" looks like a leftover from a removed
		# conditional — confirm against history before cleaning up.
		if True:
			newlist.append(atom)
			"""Make sure all installed slots are updated when possible.
			Do this with --emptytree also, to ensure that all slots are
			remerged."""
			myslots = set()
			for cpv in vardb.match(mykey):
				myslots.add(vardb.aux_get(cpv, ["SLOT"])[0])
			if myslots:
				best_pkgs = []
				if "--usepkg" in self.myopts:
					mymatches = bindb.match(atom)
					if "--usepkgonly" not in self.myopts:
						mymatches = visible(mymatches)
					best_pkg = portage.best(mymatches)
					if best_pkg:
						best_slot = bindb.aux_get(best_pkg, ["SLOT"])[0]
						best_pkgs.append(("binary", best_pkg, best_slot))
				if "--usepkgonly" not in self.myopts:
					best_pkg = portage.best(portdb.match(atom))
					if best_pkg:
						best_slot = portdb.aux_get(best_pkg, ["SLOT"])[0]
						best_pkgs.append(("ebuild", best_pkg, best_slot))
				if best_pkgs:
					# Also count the slot of the overall best available
					# package, which may introduce a slot that is not
					# installed yet.
					best_pkg = portage.best([x[1] for x in best_pkgs])
					best_pkgs = [x for x in best_pkgs if x[1] == best_pkg]
					best_slot = best_pkgs[0][2]
					myslots.add(best_slot)
				if len(myslots) > 1:
					# Multiple slots are in play; add an explicit slot
					# atom for each slot that can actually be satisfied.
					for myslot in myslots:
						myslot_atom = "%s:%s" % (mykey, myslot)
						available = False
						if "--usepkgonly" not in self.myopts and \
							self.trees[self.target_root][
							"porttree"].dbapi.match(myslot_atom):
							available = True
						elif "--usepkg" in self.myopts:
							mymatches = bindb.match(myslot_atom)
							if "--usepkgonly" not in self.myopts:
								mymatches = visible(mymatches)
							if mymatches:
								available = True
						if available:
							newlist.append(myslot_atom)
	mylist = newlist
	missing_atoms = []
	for mydep in mylist:
		try:
			if not self.select_dep(
				self.target_root, mydep, raise_on_missing=True, arg=mydep):
				print >> sys.stderr, "\n\n!!! Problem resolving dependencies for", mydep
				return 0
		except ValueError:
			# raise_on_missing signals that no ebuild or binary package
			# exists for this atom; report them all at the end.
			missing_atoms.append(mydep)
	if not self.validate_blockers():
		return False
	if world_problems:
		print >> sys.stderr, "\n!!! Problems have been detected with your world file"
		print >> sys.stderr, "!!! Please run "+green("emaint --check world")+"\n"
	if missing_atoms:
		print >> sys.stderr, "\n" + colorize("BAD", "!!!") + \
			" Ebuilds for the following packages are either all"
		print >> sys.stderr, colorize("BAD", "!!!") + " masked or don't exist:"
		print >> sys.stderr, " ".join(missing_atoms) + "\n"
	return 1
def display(self,mylist,verbosity=None):
if verbosity is None:
verbosity = ("--quiet" in self.myopts and 1 or \
"--verbose" in self.myopts and 3 or 2)
changelogs=[]
p=[]
blockers = []
counters = PackageCounters()
if verbosity == 1 and "--verbose" not in self.myopts:
def create_use_string(*args):
return ""
else:
def create_use_string(name, cur_iuse, iuse_forced, cur_use,
old_iuse, old_use,
is_new, all_flags=(verbosity == 3 or "--quiet" in self.myopts),
alphabetical=("--alphabetical" in self.myopts)):
enabled = []
if alphabetical:
disabled = enabled
removed = enabled
else:
disabled = []
removed = []
cur_iuse = set(cur_iuse)
enabled_flags = cur_iuse.intersection(cur_use)
removed_iuse = set(old_iuse).difference(cur_iuse)
any_iuse = cur_iuse.union(old_iuse)
any_iuse = list(any_iuse)
any_iuse.sort()
for flag in any_iuse:
flag_str = None
isEnabled = False
if flag in enabled_flags:
isEnabled = True
if is_new or flag in old_use and all_flags:
flag_str = red(flag)
elif flag not in old_iuse:
flag_str = yellow(flag) + "%*"
elif flag not in old_use:
flag_str = green(flag) + "*"
elif flag in removed_iuse:
if all_flags:
flag_str = yellow("-" + flag) + "%"
if flag in old_use:
flag_str += "*"
flag_str = "(" + flag_str + ")"
removed.append(flag_str)
continue
else:
if is_new or flag in old_iuse and flag not in old_use and all_flags:
flag_str = blue("-" + flag)
elif flag not in old_iuse:
flag_str = yellow("-" + flag)
if flag not in iuse_forced:
flag_str += "%"
elif flag in old_use:
flag_str = green("-" + flag) + "*"
if flag_str:
if flag in iuse_forced:
flag_str = "(" + flag_str + ")"
if isEnabled:
enabled.append(flag_str)
else:
disabled.append(flag_str)
if alphabetical:
ret = " ".join(enabled)
else:
ret = " ".join(enabled + disabled + removed)
if ret:
ret = '%s="%s" ' % (name, ret)
return ret
if verbosity == 3:
# FIXME: account for the possibility of different overlays in
# /etc/make.conf vs. ${PORTAGE_CONFIGROOT}/etc/make.conf
overlays = self.settings["PORTDIR_OVERLAY"].split()
overlays_real = [os.path.realpath(t) \
for t in self.settings["PORTDIR_OVERLAY"].split()]
tree_nodes = []
display_list = []
mygraph = self._parent_child_digraph
i = 0
depth = 0
shown_edges = set()
for x in mylist:
if "blocks" == x[0]:
display_list.append((x, 0, True))
continue
if "nomerge" == x[-1]:
continue
graph_key = tuple(x)
if "--tree" in self.myopts:
depth = len(tree_nodes)
while depth and graph_key not in \
mygraph.child_nodes(tree_nodes[depth-1]):
depth -= 1
if depth:
tree_nodes = tree_nodes[:depth]
tree_nodes.append(graph_key)
display_list.append((x, depth, True))
shown_edges.add((graph_key, tree_nodes[depth-1]))
else:
traversed_nodes = set() # prevent endless circles
traversed_nodes.add(graph_key)
def add_parents(current_node, ordered):
parent_nodes = mygraph.parent_nodes(current_node)
if parent_nodes:
child_nodes = set(mygraph.child_nodes(current_node))
selected_parent = None
# First, try to avoid a direct cycle.
for node in parent_nodes:
if node not in traversed_nodes and \
node not in child_nodes:
edge = (current_node, node)
if edge in shown_edges:
continue
selected_parent = node
break
if not selected_parent:
# A direct cycle is unavoidable.
for node in parent_nodes:
if node not in traversed_nodes:
edge = (current_node, node)
if edge in shown_edges:
continue
selected_parent = node
break
if selected_parent:
shown_edges.add((current_node, selected_parent))
traversed_nodes.add(selected_parent)
add_parents(selected_parent, False)
display_list.append((list(current_node),
len(tree_nodes), ordered))
tree_nodes.append(current_node)
tree_nodes = []
add_parents(graph_key, True)
else:
display_list.append((x, depth, True))
mylist = display_list
last_merge_depth = 0
for i in xrange(len(mylist)-1,-1,-1):
graph_key, depth, ordered = mylist[i]
if not ordered and depth == 0 and i > 0 \
and graph_key == mylist[i-1][0] and \
mylist[i-1][1] == 0:
# An ordered node got a consecutive duplicate when the tree was
# being filled in.
del mylist[i]
continue
if "blocks" == graph_key[0]:
continue
if ordered and graph_key[-1] != "nomerge":
last_merge_depth = depth
continue
if depth >= last_merge_depth or \
i < len(mylist) - 1 and \
depth >= mylist[i+1][1]:
del mylist[i]
from portage import flatten
from portage_dep import use_reduce, paren_reduce
display_overlays=False
# files to fetch list - avoids counting a same file twice
# in size display (verbose mode)
myfetchlist=[]
for mylist_index in xrange(len(mylist)):
x, depth, ordered = mylist[mylist_index]
pkg_type = x[0]
myroot = x[1]