blob: bef6f40610caf5005447139f69aca4f0e6bf8305 [file] [log] [blame]
</
#!/usr/bin/python -O
# Copyright 1999-2006 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id$
import sys
# This block ensures that ^C interrupts are handled quietly.
try:
 import signal
 def exithandler(signum,frame):
  # Ignore any further signals while shutting down, then exit non-zero.
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  signal.signal(signal.SIGTERM, signal.SIG_IGN)
  sys.exit(1)
 signal.signal(signal.SIGINT, exithandler)
 signal.signal(signal.SIGTERM, exithandler)
 # Restore default SIGPIPE behavior so piping output (e.g. to head)
 # terminates quietly instead of raising IOError tracebacks.
 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except KeyboardInterrupt:
 sys.exit(1)
import os, stat
# Select the non-legacy global-config initialization path in portage.
os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
try:
 import portage
except ImportError:
 # Portage not installed system-wide; fall back to a source checkout
 # layout by adding ../pym (relative to this script) to sys.path.
 from os import path as osp
 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
 import portage
del os.environ["PORTAGE_LEGACY_GLOBALS"]
from portage import digraph
import emergehelp, xpak, commands, errno, re, socket, time, types
import output
from output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
 havecolor, nc_len, nocolor, red, teal, turquoise, white, xtermTitle, \
 xtermTitleReset, yellow
from output import create_color_func
good = create_color_func("GOOD")
bad = create_color_func("BAD")
# white looks bad on terminals with white background
from output import bold as white
import portage_const
import portage_dep
portage_dep._dep_check_strict = True
import portage_util
import portage_locks
import portage_exception
from portage_data import secpass
from portage_util import normalize_path as normpath
from portage_util import writemsg
# Compatibility shim: Python releases without a builtin set type fall
# back to the (deprecated) sets module.
if not hasattr(__builtins__, "set"):
 from sets import Set as set
from itertools import chain, izip
from UserDict import DictMixin
try:
 import cPickle
except ImportError:
 import pickle as cPickle
class stdout_spinner(object):
 """Progress indicator animated directly on stdout.

 Callers repeatedly invoke self.update(), which is bound to one of the
 update_* styles (twirl by default).  All styles throttle their output
 through _return_early() so the tty is not flushed more often than
 min_display_latency seconds.
 """
 scroll_msgs = [
  "Gentoo Rocks ("+os.uname()[0]+")",
  "Thank you for using Gentoo. :)",
  "Are you actually trying to read this?",
  "How many times have you stared at this?",
  "We are generating the cache right now",
  "You are paying too much attention.",
  "A theory is better than its explanation.",
  "Phasers locked on target, Captain.",
  "Thrashing is just virtual crashing.",
  "To be is to program.",
  "Real Users hate Real Programmers.",
  "When all else fails, read the instructions.",
  "Functionality breeds Contempt.",
  "The future lies ahead.",
  "3.1415926535897932384626433832795028841971694",
  "Sometimes insanity is the only alternative.",
  "Inaccuracy saves a world of explanation.",
 ]
 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
 def __init__(self):
  self.spinpos = 0
  # Default animation; callers may rebind update to another update_*
  # method (or update_quiet to disable output).
  self.update = self.update_twirl
  # Pick a scroll message pseudo-randomly from the current time.
  self.scroll_sequence = self.scroll_msgs[
   int(time.time() * 100) % len(self.scroll_msgs)]
  self.last_update = 0
  self.min_display_latency = 0.05
 def _return_early(self):
  """
  Flushing output to the tty too frequently wastes cpu time. Therefore,
  each update* method should return without doing any output when this
  method returns True.
  """
  cur_time = time.time()
  if cur_time - self.last_update < self.min_display_latency:
   return True
  self.last_update = cur_time
  return False
 def update_basic(self):
  # Emit one dot for every 100 calls (position wraps at 500).
  self.spinpos = (self.spinpos + 1) % 500
  if self._return_early():
   return
  if (self.spinpos % 100) == 0:
   if self.spinpos == 0:
    sys.stdout.write(". ")
   else:
    sys.stdout.write(".")
  sys.stdout.flush()
 def update_scroll(self):
  # Scroll the chosen message forward, then backward, using
  # backspace characters to animate in place.
  if self._return_early():
   return
  if(self.spinpos >= len(self.scroll_sequence)):
   sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
    len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
  else:
   sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
  sys.stdout.flush()
  self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
 def update_twirl(self):
  # Classic rotating-bar animation.
  self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
  if self._return_early():
   return
  sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
  sys.stdout.flush()
 def update_quiet(self):
  # No-op spinner for quiet/--nospinner operation.
  return
def userquery(prompt, responses=None, colours=None):
 """Displays a prompt and a set of responses, then waits for a response
 which is checked against the responses and the first to match is
 returned. An empty response will match the first value in responses. The
 input buffer is *not* cleared prior to the prompt!
 prompt: a String.
 responses: a List of Strings.
 colours: a List of Functions taking and returning a String, used to
 process the responses for display. Typically these will be functions
 like red() but could be e.g. lambda x: "DisplayString".
 If responses is omitted, defaults to ["Yes", "No"], [green, red].
 If only colours is omitted, defaults to [bold, ...].
 Returns a member of the List responses. (If called without optional
 arguments, returns "Yes" or "No".)
 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
 printed."""
 if responses is None:
  responses = ["Yes", "No"]
  colours = [
   create_color_func("PROMPT_CHOICE_DEFAULT"),
   create_color_func("PROMPT_CHOICE_OTHER")
  ]
 elif colours is None:
  colours=[bold]
 # Repeat the colour list cyclically so it is exactly as long as responses.
 colours=(colours*len(responses))[:len(responses)]
 print bold(prompt),
 try:
  while True:
   response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
   for key in responses:
    # An empty response will match the first value in responses.
    if response.upper()==key[:len(response)].upper():
     return key
   print "Sorry, response '%s' not understood." % response,
 except (EOFError, KeyboardInterrupt):
  print "Interrupted."
  sys.exit(1)
# Valid non-package command-line actions (emerge <action>).
actions=[
"clean", "config", "depclean",
"info", "metadata",
"prune", "regen", "search",
"sync", "system", "unmerge", "world",
]
# All recognized long options.
options=[
"--ask", "--alphabetical",
"--buildpkg", "--buildpkgonly",
"--changelog", "--columns",
"--debug", "--deep",
"--digest",
"--emptytree",
"--fetchonly", "--fetch-all-uri",
"--getbinpkg", "--getbinpkgonly",
"--help", "--ignore-default-opts",
"--noconfmem",
"--newuse", "--nocolor",
"--nodeps", "--noreplace",
"--nospinner", "--oneshot",
"--onlydeps", "--pretend",
"--quiet", "--resume",
"--searchdesc", "--selective",
"--skipfirst",
"--tree",
"--update",
"--usepkg", "--usepkgonly",
"--verbose", "--version"
]
# Mapping of single-letter short options to their long equivalents.
# NOTE: some map to actions ("--clean", "--search", ...) rather than
# to entries of the options list above.
shortmapping={
"1":"--oneshot",
"a":"--ask",
"b":"--buildpkg", "B":"--buildpkgonly",
"c":"--clean", "C":"--unmerge",
"d":"--debug", "D":"--deep",
"e":"--emptytree",
"f":"--fetchonly", "F":"--fetch-all-uri",
"g":"--getbinpkg", "G":"--getbinpkgonly",
"h":"--help",
"k":"--usepkg", "K":"--usepkgonly",
"l":"--changelog",
"n":"--noreplace", "N":"--newuse",
"o":"--onlydeps", "O":"--nodeps",
"p":"--pretend", "P":"--prune",
"q":"--quiet",
"s":"--search", "S":"--searchdesc",
"t":"--tree",
"u":"--update",
"v":"--verbose", "V":"--version"
}
def emergelog(xterm_titles, mystr, short_msg=None):
if xterm_titles:
if short_msg == None:
short_msg = mystr
if "HOSTNAME" in os.environ:
short_msg = os.environ["HOSTNAME"]+": "+short_msg
xtermTitle(short_msg)
try:
file_path = "/var/log/emerge.log"
mylogfile = open(file_path, "a")
portage_util.apply_secpass_permissions(file_path,
uid=portage.portage_uid, gid=portage.portage_gid,
mode=0660)
mylock = None
try:
mylock = portage_locks.lockfile(mylogfile)
# seek because we may have gotten held up by the lock.
# if so, we may not be positioned at the end of the file.
mylogfile.seek(0, 2)
mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
mylogfile.flush()
finally:
if mylock:
portage_locks.unlockfile(mylock)
mylogfile.close()
except (IOError,OSError,portage_exception.PortageException), e:
if secpass >= 1:
print >> sys.stderr, "emergelog():",e
def countdown(secs=5, doing="Starting"):
 # Display a colored countdown (secs..1, one tick per second) so the
 # user has a chance to Ctrl-C before the action described by `doing`
 # begins.  A secs value of 0 skips the countdown entirely.
 if secs:
  print ">>> Waiting",secs,"seconds before starting..."
  print ">>> (Control-C to abort)...\n"+doing+" in: ",
  ticks=range(secs)
  ticks.reverse()
  for sec in ticks:
   sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
   sys.stdout.flush()
   time.sleep(1)
  print
# formats a size given in bytes nicely
def format_size(mysize):
 # Non-integer input (e.g. the string "Unknown (...)" produced by the
 # search code) is passed through unchanged.
 if type(mysize) not in [types.IntType,types.LongType]:
  return str(mysize)
 if 0 != mysize % 1024:
  # Always round up to the next kB so that it doesn't show 0 kB when
  # some small file still needs to be fetched.
  mysize += 1024 - mysize % 1024
 mystr=str(mysize/1024)
 # Insert thousands separators from the right: "1234567" -> "1,234,567".
 mycount=len(mystr)
 while (mycount > 3):
  mycount-=3
  mystr=mystr[:mycount]+","+mystr[mycount:]
 return mystr+" kB"
def getgccversion(chost):
 """
 rtype: C{str}
 return: the current in-use gcc version

 Tries progressively less specific sources: eselect-compiler,
 gcc-config, the CHOST-prefixed gcc binary, then plain gcc.  Returns
 "[unavailable]" (after warning on stderr) when none succeed.
 """
 gcc_ver_command = 'gcc -dumpversion'
 gcc_ver_prefix = 'gcc-'
 gcc_not_found_error = red(
 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
 "!!! to update the environment of this terminal and possibly\n" +
 "!!! other terminals also.\n"
 )
 # Strategy 1: active eselect-compiler profile ("<CHOST>/<version>").
 mystatus, myoutput = commands.getstatusoutput("eselect compiler show")
 if mystatus == os.EX_OK and len(myoutput.split("/")) == 2:
  part1, part2 = myoutput.split("/")
  if part1.startswith(chost + "-"):
   return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
 # Strategy 2: current gcc-config profile ("<CHOST>-<version>").
 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
  return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
 # Strategy 3: ask the CHOST-prefixed gcc binary directly.
 mystatus, myoutput = commands.getstatusoutput(
  chost + "-" + gcc_ver_command)
 if mystatus == os.EX_OK:
  return gcc_ver_prefix + myoutput
 # Strategy 4: plain "gcc" from PATH.
 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
 if mystatus == os.EX_OK:
  return gcc_ver_prefix + myoutput
 portage.writemsg(gcc_not_found_error, noiselevel=-1)
 return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
 """Return the human-readable version banner shown by emerge --info,
 e.g. "Portage <ver> (<profile>, <gcc>, <libc>, <kernel> <arch>)".

 @param portdir: PORTDIR path, used to relativize the profile path.
 @param target_root: unused here (kept for interface compatibility).
 @param profile: path (usually a symlink) to the active profile, or
  a false value when no profile is configured.
 @param chost: CHOST string, forwarded to getgccversion().
 @param vardb: installed-package dbapi, queried for virtual/libc.
 """
 profilever = "unavailable"
 if profile:
  realpath = os.path.realpath(profile)
  basepath = os.path.realpath(os.path.join(portdir, "profiles"))
  if realpath.startswith(basepath):
   # Profile inside PORTDIR: show it relative to profiles/.
   profilever = realpath[1 + len(basepath):]
  else:
   # Foreign profile: show the raw symlink target, marked with "!".
   try:
    profilever = "!" + os.readlink(profile)
   except (OSError):
    pass
  del realpath, basepath
 libclist = portage_util.unique_array(
  vardb.match("virtual/libc") + vardb.match("virtual/glibc"))
 # Collect "pkg-version[-revision]" strings for each installed libc.
 # (The original accumulated into a variable that was a list or a str
 # depending on state; use a list plus join for the same output.)
 libcver_parts = []
 for x in libclist:
  xs = portage.catpkgsplit(x)
  libcver_parts.append("-".join(xs[1:]))
 if libcver_parts:
  libcver = ",".join(libcver_parts)
 else:
  libcver = "unavailable"
 gccver = getgccversion(chost)
 unameout = os.uname()[2] + " " + os.uname()[4]
 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
 """Translate emerge options and the requested action into the set of
 depgraph engine parameters.

 Parameter meanings:
  selective: exclude a package if it is already merged
  recurse:   follow dependencies of the requested packages
  deep:      also follow dependencies of already-merged packages
  empty:     pretend that nothing is merged
 """
 myparams = set(["recurse"])
 # Any update-like option, or a system/world target, enables selective
 # mode so already-merged packages are skipped.
 for selective_opt in ("--update", "--newuse", "--reinstall",
  "--noreplace"):
  if selective_opt in myopts:
   myparams.add("selective")
   break
 if myaction in ("system", "world"):
  myparams.add("selective")
 if "--emptytree" in myopts:
  # Treating everything as unmerged is incompatible with selective mode.
  myparams.add("empty")
  myparams.discard("selective")
 if "--nodeps" in myopts:
  myparams.discard("recurse")
 if "--deep" in myopts:
  myparams.add("deep")
 return myparams
# search functionality
class search:
 """Implements the emerge --search action, querying the ebuild tree,
 binary packages and installed packages through a portdb-like facade
 so the rest of the code can treat them as a single database."""
 #
 # class constants
 #
 VERSION_SHORT=1
 VERSION_RELEASE=2
 #
 # public interface
 #
 def __init__(self, settings, trees, spinner, searchdesc,
  verbose, usepkg, usepkgonly):
  """Searches the available and installed packages for the supplied search key.
  The list of available and installed packages is created at object instantiation.
  This makes successive searches faster."""
  self.settings = settings
  self.vartree = trees["vartree"]
  self.spinner = spinner
  self.verbose = verbose
  self.searchdesc = searchdesc
  # Facade object mimicking a portdb; its methods dispatch to the
  # self._* implementations, which consult every configured database.
  def fake_portdb():
   pass
  self.portdb = fake_portdb
  for attrib in ("aux_get", "cp_all",
   "xmatch", "findname", "getfetchlist"):
   setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
  # Databases to search, in priority order; usepkg/usepkgonly control
  # whether the ebuild tree and/or binary packages participate.
  self._dbs = []
  portdb = trees["porttree"].dbapi
  bindb = trees["bintree"].dbapi
  vardb = trees["vartree"].dbapi
  if not usepkgonly and portdb._have_root_eclass_dir:
   self._dbs.append(portdb)
  if (usepkg or usepkgonly) and bindb.cp_all():
   self._dbs.append(bindb)
  self._dbs.append(vardb)
  self._portdb = portdb
 def _cp_all(self):
  # Sorted union of category/package names across all databases.
  cp_all = set()
  for db in self._dbs:
   cp_all.update(db.cp_all())
  return list(sorted(cp_all))
 def _aux_get(self, *args, **kwargs):
  # Return metadata from the first database that knows the package;
  # re-raise the last KeyError if none do.
  for db in self._dbs:
   try:
    return db.aux_get(*args, **kwargs)
   except KeyError:
    pass
  raise
 def _findname(self, *args, **kwargs):
  for db in self._dbs:
   if db is not self._portdb:
    # We don't want findname to return anything
    # unless it's an ebuild in a portage tree.
    # Otherwise, it's already built and we don't
    # care about it.
    continue
   func = getattr(db, "findname", None)
   if func:
    value = func(*args, **kwargs)
    if value:
     return value
  return None
 def _getfetchlist(self, *args, **kwargs):
  # First database that yields a non-empty fetch list wins.
  for db in self._dbs:
   func = getattr(db, "getfetchlist", None)
   if func:
    value = func(*args, **kwargs)
    if value:
     return value
  return [], []
 def _visible(self, db, cpv, metadata):
  # Installed packages come from the vartree; anything not from the
  # ebuild tree counts as already built.
  installed = db is self.vartree.dbapi
  built = installed or db is not self._portdb
  return visible(self.settings, cpv, metadata,
   built=built, installed=installed)
 def _xmatch(self, level, atom):
  """
  This method does not expand old-style virtuals because it
  is restricted to returning matches for a single ${CATEGORY}/${PN}
  and old-style virual matches unreliable for that when querying
  multiple package databases. If necessary, old-style virtuals
  can be performed on atoms prior to calling this method.
  """
  cp = portage.dep_getkey(atom)
  if level == "match-all":
   matches = set()
   for db in self._dbs:
    if hasattr(db, "xmatch"):
     matches.update(db.xmatch(level, atom))
    else:
     matches.update(db.match(atom))
   result = list(x for x in matches if portage.cpv_getkey(x) == cp)
   db._cpv_sort_ascending(result)
  elif level == "match-visible":
   matches = set()
   for db in self._dbs:
    if hasattr(db, "xmatch"):
     matches.update(db.xmatch(level, atom))
    else:
     # No xmatch support: filter plain matches through the
     # visibility checks by hand.
     db_keys = list(db._aux_cache_keys)
     for cpv in db.match(atom):
      metadata = dict(izip(db_keys,
       db.aux_get(cpv, db_keys)))
      if not self._visible(db, cpv, metadata):
       continue
      matches.add(cpv)
   result = list(x for x in matches if portage.cpv_getkey(x) == cp)
   db._cpv_sort_ascending(result)
  elif level == "bestmatch-visible":
   result = None
   for db in self._dbs:
    if hasattr(db, "xmatch"):
     cpv = db.xmatch("bestmatch-visible", atom)
     if not cpv or portage.cpv_getkey(cpv) != cp:
      continue
     if not result or cpv == portage.best([cpv, result]):
      result = cpv
    else:
     db_keys = list(db._aux_cache_keys)
     # break out of this loop with highest visible
     # match, checked in descending order
     for cpv in reversed(db.match(atom)):
      if portage.cpv_getkey(cpv) != cp:
       continue
      metadata = dict(izip(db_keys,
       db.aux_get(cpv, db_keys)))
      if not self._visible(db, cpv, metadata):
       continue
      if not result or cpv == portage.best([cpv, result]):
       result = cpv
      break
  else:
   raise NotImplementedError(level)
  return result
 def execute(self,searchkey):
  """Performs the search for the supplied search key"""
  match_category = 0
  self.searchkey=searchkey
  self.packagematches = []
  if self.searchdesc:
   self.searchdesc=1
   self.matches = {"pkg":[], "desc":[]}
  else:
   self.searchdesc=0
   self.matches = {"pkg":[]}
  print "Searching... ",
  # A leading "%" selects regex search; a leading "@" matches against
  # the full category/package name instead of just the package name.
  regexsearch = False
  if self.searchkey.startswith('%'):
   regexsearch = True
   self.searchkey = self.searchkey[1:]
  if self.searchkey.startswith('@'):
   match_category = 1
   self.searchkey = self.searchkey[1:]
  if regexsearch:
   self.searchre=re.compile(self.searchkey,re.I)
  else:
   self.searchre=re.compile(re.escape(self.searchkey), re.I)
  for package in self.portdb.cp_all():
   self.spinner.update()
   if match_category:
    match_string = package[:]
   else:
    match_string = package.split("/")[-1]
   masked=0
   if self.searchre.search(match_string):
    if not self.portdb.xmatch("match-visible", package):
     masked=1
    self.matches["pkg"].append([package,masked])
   elif self.searchdesc: # DESCRIPTION searching
    full_package = self.portdb.xmatch("bestmatch-visible", package)
    if not full_package:
     #no match found; we don't want to query description
     full_package = portage.best(
      self.portdb.xmatch("match-all", package))
     if not full_package:
      continue
     else:
      masked=1
    try:
     full_desc = self.portdb.aux_get(
      full_package, ["DESCRIPTION"])[0]
    except KeyError:
     print "emerge: search: aux_get() failed, skipping"
     continue
    if self.searchre.search(full_desc):
     self.matches["desc"].append([full_package,masked])
  self.mlen=0
  for mtype in self.matches:
   self.matches[mtype].sort()
   self.mlen += len(self.matches[mtype])
 def output(self):
  """Outputs the results of the search."""
  print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
  print "[ Applications found : "+white(str(self.mlen))+" ]"
  print " "
  vardb = self.vartree.dbapi
  for mtype in self.matches:
   for match,masked in self.matches[mtype]:
    if mtype=="pkg":
     catpack=match
     full_package = self.portdb.xmatch(
      "bestmatch-visible", match)
     if not full_package:
      #no match found; we don't want to query description
      masked=1
      full_package = portage.best(
       self.portdb.xmatch("match-all",match))
    else:
     full_package = match
     match = portage.pkgsplit(match)[0]
    if full_package:
     try:
      desc, homepage, license = self.portdb.aux_get(
       full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
     except KeyError:
      print "emerge: search: aux_get() failed, skipping"
      continue
     if masked:
      print green("*")+" "+white(match)+" "+red("[ Masked ]")
     else:
      print green("*")+" "+white(match)
     myversion = self.getVersion(full_package, search.VERSION_RELEASE)
     mysum = [0,0]
     file_size_str = None
     mycat = match.split("/")[0]
     mypkg = match.split("/")[1]
     mycpv = match + "-" + myversion
     # Size of the distfiles, from the Manifest when an ebuild exists.
     myebuild = self.portdb.findname(mycpv)
     if myebuild:
      pkgdir = os.path.dirname(myebuild)
      import portage_manifest as manifest
      mf = manifest.Manifest(
       pkgdir, self.settings["DISTDIR"])
      fetchlist = self.portdb.getfetchlist(mycpv,
       mysettings=self.settings, all=True)[1]
      try:
       mysum[0] = mf.getDistfilesSize(fetchlist)
      except KeyError, e:
       file_size_str = "Unknown (missing digest for %s)" % \
        str(e)
     # Otherwise fall back to the size of a binary package, if any.
     available = False
     for db in self._dbs:
      if db is not vardb and \
       db.cpv_exists(mycpv):
       available = True
       if not myebuild and hasattr(db, "bintree"):
        myebuild = db.bintree.getname(mycpv)
        try:
         mysum[0] = os.stat(myebuild).st_size
        except OSError:
         myebuild = None
       break
     if myebuild and file_size_str is None:
      # Format with thousands separators, e.g. "1,234 kB".
      mystr = str(mysum[0] / 1024)
      mycount = len(mystr)
      while (mycount > 3):
       mycount -= 3
       mystr = mystr[:mycount] + "," + mystr[mycount:]
      file_size_str = mystr + " kB"
     if self.verbose:
      if available:
       print " ", darkgreen("Latest version available:"),myversion
      print " ", self.getInstallationStatus(mycat+'/'+mypkg)
      if myebuild:
       print " %s %s" % \
        (darkgreen("Size of files:"), file_size_str)
      print " ", darkgreen("Homepage:")+" ",homepage
      print " ", darkgreen("Description:")+" ",desc
      print " ", darkgreen("License:")+" ",license
      print
  print
 #
 # private interface
 #
 def getInstallationStatus(self,package):
  # Human-readable line describing which version (if any) of
  # `package` is currently installed.
  installed_package = self.vartree.dep_bestmatch(package)
  result = ""
  version = self.getVersion(installed_package,search.VERSION_RELEASE)
  if len(version) > 0:
   result = darkgreen("Latest version installed:")+" "+version
  else:
   result = darkgreen("Latest version installed:")+" [ Not Installed ]"
  return result
 def getVersion(self,full_package,detail):
  # Extract the version from a cpv string; VERSION_RELEASE appends
  # the revision unless it is r0.  Returns "" for an empty cpv.
  if len(full_package) > 1:
   package_parts = portage.catpkgsplit(full_package)
   if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
    result = package_parts[2]+ "-" + package_parts[3]
   else:
    result = package_parts[2]
  else:
   result = ""
  return result
#build our package digraph
def getlist(settings, mode):
 """Return the atoms belonging to the "system" or "world" package set.

 mode "system": entries from the profile's packages list; only lines
 marked with a leading "*" belong to the system set (the marker is
 stripped).  mode "world": lines read from the world file under
 settings["ROOT"]; a missing world file yields an empty list.
 Comment lines (leading "#") and blank lines are skipped.
 """
 if mode=="system":
  mylines = settings.packages
 elif mode=="world":
  try:
   file_path = os.path.join(settings["ROOT"], portage.WORLD_FILE)
   myfile = open(file_path, "r")
   mylines = myfile.readlines()
   myfile.close()
  except (OSError, IOError), e:
   if e.errno == errno.ENOENT:
    portage.writemsg("\n!!! World file does not exist: '%s'\n" % file_path)
    mylines=[]
   else:
    raise
 mynewlines=[]
 for x in mylines:
  # Collapse internal whitespace before inspecting the line.
  myline=" ".join(x.split())
  if not len(myline):
   continue
  elif myline[0]=="#":
   continue
  elif mode=="system":
   if myline[0]!="*":
    continue
   myline=myline[1:]
  mynewlines.append(myline.strip())
 return mynewlines
def clean_world(vardb, cpv):
 """Remove a package from the world file when unmerged.

 The world entry matching cpv's package key is dropped unless other
 installed versions still satisfy it.  The world file is locked while
 it is rewritten; the lock is released even when loading or saving
 raises (the original leaked the lock on such errors).
 """
 world_set = WorldSet(vardb.settings)
 world_set.lock()
 try:
  world_set.load()
  worldlist = list(world_set)
  mykey = portage.cpv_getkey(cpv)
  newworldlist = []
  for x in worldlist:
   if portage.dep_getkey(x) == mykey:
    matches = vardb.match(x, use_cache=0)
    if not matches:
     #zap our world entry
     pass
    elif len(matches) == 1 and matches[0] == cpv:
     #zap our world entry
     pass
    else:
     #others are around; keep it.
     newworldlist.append(x)
   else:
    #this doesn't match the package we're unmerging; keep it.
    newworldlist.append(x)
  world_set.clear()
  world_set.update(newworldlist)
  world_set.save()
 finally:
  world_set.unlock()
class AtomSet(object):
 """A collection of unique dependency atoms, bucketed internally by
 package key (category/package) for fast lookup."""
 def __init__(self, atoms=None):
  # Maps cp -> list of atoms, in insertion order.
  self._atoms = {}
  if atoms:
   self.update(atoms)
 def clear(self):
  self._atoms.clear()
 def add(self, atom):
  # Bucket by package key; keep each atom at most once.
  cp = portage.dep_getkey(atom)
  bucket = self._atoms.setdefault(cp, [])
  if atom not in bucket:
   bucket.append(atom)
 def update(self, atoms):
  for myatom in atoms:
   self.add(myatom)
 def __contains__(self, atom):
  bucket = self._atoms.get(portage.dep_getkey(atom))
  return bool(bucket) and atom in bucket
 def findAtomForPackage(self, cpv, metadata):
  """Return the best match for a given package from the arguments, or
  None if there are no matches. This matches virtual arguments against
  the PROVIDE metadata. This can raise an InvalidDependString exception
  if an error occurs while parsing PROVIDE."""
  cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
  cp = portage.dep_getkey(cpv)
  # First try atoms stored under the package's own key.
  direct_atoms = self._atoms.get(cp)
  if direct_atoms:
   winner = portage.best_match_to_list(cpv_slot, direct_atoms)
   if winner:
    return winner
  if not metadata["PROVIDE"]:
   return None
  provides = portage.flatten(portage_dep.use_reduce(
   portage_dep.paren_reduce(metadata["PROVIDE"]),
   uselist=metadata["USE"].split()))
  for provide in provides:
   provided_cp = portage.dep_getkey(provide)
   virtual_atoms = self._atoms.get(provided_cp)
   if not virtual_atoms:
    continue
   # Rewrite the virtual atoms against the real package key before
   # matching, then map the winner back to its original atom.
   rewritten = [atom.replace(provided_cp, cp) for atom in virtual_atoms]
   winner = portage.best_match_to_list(cpv_slot, rewritten)
   if winner:
    return virtual_atoms[rewritten.index(winner)]
  return None
 def __iter__(self):
  for bucket in self._atoms.itervalues():
   for atom in bucket:
    yield atom
class SystemSet(AtomSet):
 """AtomSet pre-populated with the profile's system packages."""
 def __init__(self, settings, **kwargs):
  AtomSet.__init__(self, **kwargs)
  self.update(getlist(settings, "system"))
class WorldSet(AtomSet):
 """AtomSet backed by the world file (portage.WORLD_FILE under
 settings["ROOT"]), with explicit load/save/lock/unlock operations."""
 def __init__(self, settings, **kwargs):
  AtomSet.__init__(self, **kwargs)
  self.world_file = os.path.join(settings["ROOT"], portage.WORLD_FILE)
  # Handle returned by portage_locks.lockfile() while locked, else None.
  self._lock = None
 def _ensure_dirs(self):
  # Create the parent directory group-writable and setgid for the
  # portage group so group members can update the world file.
  portage_util.ensure_dirs(os.path.dirname(self.world_file),
   gid=portage.portage_gid, mode=02750, mask=02)
 def load(self):
  # Replace current contents with the atoms read from disk.
  self.clear()
  self.update(portage_util.grabfile_package(self.world_file))
 def save(self):
  # Atomic rewrite of the whole file, atoms sorted one per line.
  self._ensure_dirs()
  portage.write_atomic(self.world_file,
   "\n".join(sorted(self)) + "\n")
 def lock(self):
  self._ensure_dirs()
  self._lock = portage_locks.lockfile(self.world_file, wantnewlockfile=1)
 def unlock(self):
  portage_locks.unlockfile(self._lock)
  self._lock = None
class RootConfig(object):
 """This is used internally by depgraph to track information about a
 particular $ROOT."""
 def __init__(self, trees):
  self.trees = trees
  self.settings = trees["vartree"].settings
  self.root = self.settings["ROOT"]
  # Named package sets for this root: "world" (loaded from disk) and
  # "system" (from the profile).
  self.sets = {}
  world_set = WorldSet(self.settings)
  world_set.load()
  self.sets["world"] = world_set
  system_set = SystemSet(self.settings)
  self.sets["system"] = system_set
def create_world_atom(pkg_key, metadata, args_set, root_config):
 """Create a new atom for the world file if one does not exist. If the
 argument atom is precise enough to identify a specific slot then a slot
 atom will be returned. Atoms that are in the system set may also be stored
 in world since system atoms can only match one slot while world atoms can
 be greedy with respect to slots. Unslotted system packages will not be
 stored in world.

 Returns the atom to add, or None when nothing should be added (the
 atom is already in world, or the package is covered by the system
 set)."""
 arg_atom = args_set.findAtomForPackage(pkg_key, metadata)
 cp = portage.dep_getkey(arg_atom)
 new_world_atom = cp
 sets = root_config.sets
 portdb = root_config.trees["porttree"].dbapi
 vardb = root_config.trees["vartree"].dbapi
 # A package counts as "slotted" when more than one slot exists, or the
 # single existing slot is not the default "0".
 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
  for cpv in portdb.match(cp))
 slotted = len(available_slots) > 1 or \
  (len(available_slots) == 1 and "0" not in available_slots)
 if not slotted:
  # check the vdb in case this is multislot
  available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
   for cpv in vardb.match(cp))
  slotted = len(available_slots) > 1 or \
   (len(available_slots) == 1 and "0" not in available_slots)
 if slotted and arg_atom != cp:
  # If the user gave a specific atom, store it as a
  # slot atom in the world file.
  slot_atom = "%s:%s" % (cp, metadata["SLOT"])
  # For USE=multislot, there are a couple of cases to
  # handle here:
  #
  # 1) SLOT="0", but the real SLOT spontaneously changed to some
  # unknown value, so just record an unslotted atom.
  #
  # 2) SLOT comes from an installed package and there is no
  # matching SLOT in the portage tree.
  #
  # Make sure that the slot atom is available in either the
  # portdb or the vardb, since otherwise the user certainly
  # doesn't want the SLOT atom recorded in the world file
  # (case 1 above). If it's only available in the vardb,
  # the user may be trying to prevent a USE=multislot
  # package from being removed by --depclean (case 2 above).
  mydb = portdb
  if not portdb.match(slot_atom):
   # SLOT seems to come from an installed multislot package
   mydb = vardb
  # If there is no installed package matching the SLOT atom,
  # it probably changed SLOT spontaneously due to USE=multislot,
  # so just record an unslotted atom.
  if vardb.match(slot_atom):
   # Now verify that the argument is precise
   # enough to identify a specific slot.
   matches = mydb.match(arg_atom)
   matched_slots = set()
   for cpv in matches:
    matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
   if len(matched_slots) == 1:
    new_world_atom = slot_atom
 if new_world_atom == sets["world"].findAtomForPackage(pkg_key, metadata):
  # Both atoms would be identical, so there's nothing to add.
  return None
 if not slotted:
  # Unlike world atoms, system atoms are not greedy for slots, so they
  # can't be safely excluded from world if they are slotted.
  system_atom = sets["system"].findAtomForPackage(pkg_key, metadata)
  if system_atom:
   if not portage.dep_getkey(system_atom).startswith("virtual/"):
    return None
   # System virtuals aren't safe to exclude from world since they can
   # match multiple old-style virtuals but only one of them will be
   # pulled in by update or depclean.
   providers = portdb.mysettings.getvirtuals().get(
    portage.dep_getkey(system_atom))
   if providers and len(providers) == 1 and providers[0] == cp:
    return None
 return new_world_atom
def filter_iuse_defaults(iuse):
 """Yield each IUSE flag with any leading "+"/"-" default marker removed."""
 for myflag in iuse:
  # Slicing with [:1] is safe for the empty string.
  if myflag[:1] in ("+", "-"):
   yield myflag[1:]
  else:
   yield myflag
class DepPriority(object):
 """
 Integer-valued priority for a dependency edge, derived from boolean
 attributes assigned by the resolver.  Attributes may be set at any
 time; __int__() maps the current combination to a level, and the
 rich comparison operators compare that level against plain integers.

 Unsatisfied dependencies rank highest:

  unsatisfied buildtime               0
  unsatisfied runtime                -1
  unsatisfied runtime_post           -2
  satisfied buildtime and rebuild    -3
  satisfied buildtime                -4
  satisfied runtime                  -5
  anything else                      -6

 MEDIUM, MEDIUM_SOFT and SOFT are the upper boundaries for the
 medium, medium-soft and soft categories; MIN is the lowest level.
 """
 __slots__ = ("__weakref__", "satisfied", "buildtime", "runtime", "runtime_post", "rebuild")
 MEDIUM = -1
 MEDIUM_SOFT = -2
 SOFT = -3
 MIN = -6
 def __init__(self, **kwargs):
  # Every slot except __weakref__ is a boolean flag, defaulting to False.
  for flag_name in self.__slots__:
   if flag_name == "__weakref__":
    continue
   setattr(self, flag_name, kwargs.get(flag_name, False))
 def __int__(self):
  if not self.satisfied:
   if self.buildtime:
    return 0
   if self.runtime:
    return -1
   if self.runtime_post:
    return -2
  # Either satisfied, or unsatisfied with no dep-type flag set.
  if self.buildtime:
   if self.rebuild:
    return -3
   return -4
  if self.runtime:
   return -5
  return -6
 def __lt__(self, other):
  return int(self) < other
 def __le__(self, other):
  return int(self) <= other
 def __eq__(self, other):
  return int(self) == other
 def __ne__(self, other):
  return int(self) != other
 def __gt__(self, other):
  return int(self) > other
 def __ge__(self, other):
  return int(self) >= other
 def copy(self):
  # Shallow copy preserves every flag attribute.
  from copy import copy as _shallow_copy
  return _shallow_copy(self)
 def __str__(self):
  level = int(self)
  for boundary, label in ((self.MEDIUM, "hard"),
   (self.MEDIUM_SOFT, "medium"), (self.SOFT, "medium-soft")):
   if level > boundary:
    return label
  return "soft"
class FakeVartree(portage.vartree):
 """This implements an in-memory copy of a vartree instance that provides
 all the interfaces required for use by the depgraph. The vardb is locked
 during the constructor call just long enough to read a copy of the
 installed package information. This allows the depgraph to do its
 dependency calculations without holding a lock on the vardb. It also
 allows things like vardb global updates to be done in memory so that the
 user doesn't necessarily need write access to the vardb in cases where
 global updates are necessary (updates are performed when necessary if there
 is not a matching ebuild in the tree)."""
 def __init__(self, real_vartree, portdb, db_keys):
  self.root = real_vartree.root
  self.settings = real_vartree.settings
  mykeys = db_keys[:]
  # COUNTER and SLOT are always needed for the slot-collision
  # resolution performed below.
  for required_key in ("COUNTER", "SLOT"):
   if required_key not in mykeys:
    mykeys.append(required_key)
  self.dbapi = portage.fakedbapi(settings=real_vartree.settings)
  vdb_path = os.path.join(self.root, portage.VDB_PATH)
  try:
   # At least the parent needs to exist for the lock file.
   portage_util.ensure_dirs(vdb_path)
  except portage_exception.PortageException:
   pass
  vdb_lock = None
  try:
   # Lock only when we can write; read-only users proceed unlocked.
   if os.access(vdb_path, os.W_OK):
    vdb_lock = portage_locks.lockdir(vdb_path)
   real_dbapi = real_vartree.dbapi
   slot_counters = {}
   for cpv in real_dbapi.cpv_all():
    metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
    myslot = metadata["SLOT"]
    mycp = portage.dep_getkey(cpv)
    myslot_atom = "%s:%s" % (mycp, myslot)
    try:
     mycounter = long(metadata["COUNTER"])
    except ValueError:
     mycounter = 0
     metadata["COUNTER"] = str(mycounter)
    # When two installed packages occupy the same slot, keep only
    # the one with the highest merge COUNTER.
    other_counter = slot_counters.get(myslot_atom, None)
    if other_counter is not None:
     if other_counter > mycounter:
      continue
    slot_counters[myslot_atom] = mycounter
    self.dbapi.cpv_inject(cpv, metadata=metadata)
   real_dbapi.flush_cache()
  finally:
   if vdb_lock:
    portage_locks.unlockdir(vdb_lock)
  # Populate the old-style virtuals using the cached values.
  if not self.settings.treeVirtuals:
   self.settings.treeVirtuals = portage_util.map_dictlist_vals(
    portage.getCPFromCPV, self.get_all_provides())
  # Initialize variables needed for lazy cache pulls of the live ebuild
  # metadata. This ensures that the vardb lock is released ASAP, without
  # being delayed in case cache generation is triggered.
  self._aux_get = self.dbapi.aux_get
  self.dbapi.aux_get = self._aux_get_wrapper
  self._aux_get_history = set()
  self._portdb_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
  self._portdb = portdb
  self._global_updates = None
 def _aux_get_wrapper(self, pkg, wants):
  # First request for a package refreshes its dependency metadata;
  # later requests go straight to the in-memory db.
  if pkg in self._aux_get_history:
   return self._aux_get(pkg, wants)
  self._aux_get_history.add(pkg)
  try:
   # Use the live ebuild metadata if possible.
   live_metadata = dict(izip(self._portdb_keys,
    self._portdb.aux_get(pkg, self._portdb_keys)))
   self.dbapi.aux_update(pkg, live_metadata)
  except (KeyError, portage_exception.PortageException):
   # No matching ebuild in the tree: fall back to applying the
   # profile update entries to the cached metadata in memory.
   if self._global_updates is None:
    self._global_updates = \
     grab_global_updates(self._portdb.porttree_root)
   perform_global_updates(
    pkg, self.dbapi, self._global_updates)
  return self._aux_get(pkg, wants)
def grab_global_updates(portdir):
 """Read all profile update commands from $portdir/profiles/updates.

 Returns a flat list of parsed update commands; a missing updates
 directory yields an empty list.
 """
 from portage_update import grab_updates, parse_updates
 updpath = os.path.join(portdir, "profiles", "updates")
 try:
  rawupdates = grab_updates(updpath)
 except portage_exception.DirectoryNotFound:
  rawupdates = []
 upd_commands = []
 for mykey, mystat, mycontent in rawupdates:
  # Renamed from "commands" to stop shadowing the module-level
  # "commands" module import within this function.
  parsed_commands, errors = parse_updates(mycontent)
  # Parse errors are deliberately ignored here (best-effort updates).
  upd_commands.extend(parsed_commands)
 return upd_commands
def perform_global_updates(mycpv, mydb, mycommands):
	"""Apply profile update commands to the cached *DEPEND metadata of a
	single package, writing back only the entries that actually changed."""
	from portage_update import update_dbentries
	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	current_entries = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
	changed_entries = update_dbentries(mycommands, current_entries)
	if changed_entries:
		mydb.aux_update(mycpv, changed_entries)
def visible(pkgsettings, cpv, metadata, built=False, installed=False):
	"""
	Check if a package is visible. This can raise an InvalidDependString
	exception if LICENSE is invalid.
	TODO: optionally generate a list of masking reasons
	@rtype: Boolean
	@returns: True if the package is visible, False otherwise.
	"""
	# Broken metadata (empty SLOT) is never visible.
	if not metadata["SLOT"]:
		return False
	# A built (but not installed) package for a foreign CHOST is hidden.
	if built and not installed:
		if metadata["CHOST"] != pkgsettings["CHOST"]:
			return False
	if not portage.eapi_is_supported(metadata["EAPI"]):
		return False
	# Keyword masking only applies to packages that are not installed.
	if not installed:
		if pkgsettings._getMissingKeywords(cpv, metadata):
			return False
	# package.mask and profile mask checks, in that order.
	for mask_check in (pkgsettings._getMaskAtom,
		pkgsettings._getProfileMaskAtom):
		if mask_check(cpv, metadata):
			return False
	return True
def get_masking_status(pkg, pkgsettings, root_config):
	"""Return the list of human-readable masking reasons for pkg,
	extending portage's own mask status with CHOST and SLOT checks."""
	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)
	# Built-but-not-installed packages for a different CHOST are masked.
	chost_mismatch = pkg.built and not pkg.installed and \
		pkg.metadata["CHOST"] != root_config.settings["CHOST"]
	if chost_mismatch:
		mreasons.append("CHOST: %s" % \
			pkg.metadata["CHOST"])
	# An empty SLOT indicates corrupt metadata.
	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")
	return mreasons
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""Fetch metadata for cpv from db and compute its masking reasons.

	@returns: (metadata, mreasons) where metadata is None (and mreasons
		is ["corruption"]) when the db cannot supply the requested keys.
	"""
	# NOTE: removed unused local "eapi_masked" (assigned, never read).
	try:
		metadata = dict(izip(db_keys,
			db.aux_get(cpv, db_keys)))
	except KeyError:
		metadata = None
	if metadata and not built:
		# For ebuilds, compute USE in the context of this package.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings.get("USE", "")
	if metadata is None:
		return None, ["corruption"]
	pkg = Package(type_name=pkg_type, root=root_config.root,
		cpv=cpv, built=built, installed=installed, metadata=metadata)
	return metadata, get_masking_status(pkg, pkgsettings, root_config)
def show_masked_packages(masked_packages):
shown_licenses = set()
shown_comments = set()
# Maybe there is both an ebuild and a binary. Only
# show one of them to avoid redundant appearance.
shown_cpvs = set()
have_eapi_mask = False
for (root_config, pkgsettings, cpv,
metadata, mreasons) in masked_packages:
if cpv in shown_cpvs:
continue
shown_cpvs.add(cpv)
comment, filename = None, None
if "package.mask" in mreasons:
comment, filename = \
portage.getmaskingreason(
cpv, metadata=metadata,
settings=pkgsettings,
portdb=root_config.trees["porttree"].dbapi,
return_location=True)
missing_licenses = []
if metadata:
if not portage.eapi_is_supported(metadata["EAPI"]):
have_eapi_mask = True
print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
if comment and comment not in shown_comments:
print filename+":"
print comment
shown_comments.add(comment)
return have_eapi_mask
class Package(object):
	"""Lightweight package representation that also behaves like its
	digraph node, the tuple (type_name, root, cpv, status), so instances
	compare, hash, iterate and index exactly like that tuple."""
	__slots__ = ("__weakref__", "built", "cpv", "depth",
		"installed", "metadata", "root", "onlydeps", "type_name",
		"cpv_slot", "slot_atom", "_digraph_node")
	def __init__(self, **kwargs):
		# Every slot (except __weakref__) is seeded from kwargs,
		# defaulting to None when not supplied.
		for attr_name in self.__slots__:
			if attr_name != "__weakref__":
				setattr(self, attr_name, kwargs.get(attr_name, None))
		pkg_slot = self.metadata["SLOT"]
		self.slot_atom = "%s:%s" % \
			(portage.cpv_getkey(self.cpv), pkg_slot)
		self.cpv_slot = "%s:%s" % (self.cpv, pkg_slot)
		# Installed and --onlydeps packages are not scheduled for merge.
		if self.onlydeps or self.installed:
			status = "nomerge"
		else:
			status = "merge"
		self._digraph_node = (self.type_name, self.root, self.cpv, status)
	# Tuple-compatible protocol, all delegated to the digraph node.
	def __eq__(self, other):
		return self._digraph_node == other
	def __ne__(self, other):
		return self._digraph_node != other
	def __hash__(self):
		return hash(self._digraph_node)
	def __len__(self):
		return len(self._digraph_node)
	def __getitem__(self, key):
		return self._digraph_node[key]
	def __iter__(self):
		return iter(self._digraph_node)
	def __contains__(self, key):
		return key in self._digraph_node
	def __str__(self):
		return str(self._digraph_node)
class BlockerCache(DictMixin):
	"""This caches blockers of installed packages so that dep_check does not
	have to be done for every single installed package on every invocation of
	emerge. The cache is invalidated whenever it is detected that something
	has changed that might alter the results of dep_check() calls:
	1) the set of installed packages (including COUNTER) has changed
	2) the old-style virtuals have changed
	"""
	class BlockerData(object):
		# Simple record: the package's COUNTER at the time its blockers
		# were computed, plus the blocker atoms themselves.
		def __init__(self, counter, atoms):
			self.counter = counter
			self.atoms = atoms
	def __init__(self, myroot, vardb):
		# Snapshot the current vdb state; used to decide cache validity.
		self._vardb = vardb
		self._installed_pkgs = set(vardb.cpv_all())
		self._virtuals = vardb.settings.getvirtuals()
		self._cache_filename = os.path.join(myroot,
			portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
		self._cache_version = "1"
		self._cache_data = None
		self._modified = False
		self._load()
	def _load(self):
		"""Load the pickled cache from disk and validate it; on any
		validation failure, reinitialize to an empty cache."""
		try:
			f = open(self._cache_filename)
			mypickle = cPickle.Unpickler(f)
			# Disallow unpickling of arbitrary globals (safer load).
			mypickle.find_global = None
			self._cache_data = mypickle.load()
			f.close()
			del f
		except (IOError, OSError, EOFError, cPickle.UnpicklingError):
			# Missing or corrupt cache file; treated as invalid below.
			pass
		# Valid only if version and virtuals match and the cached set of
		# packages equals the currently installed set.
		cache_valid = self._cache_data and \
			isinstance(self._cache_data, dict) and \
			self._cache_data.get("version") == self._cache_version and \
			self._cache_data.get("virtuals") == self._virtuals and \
			set(self._cache_data.get("blockers", [])) == self._installed_pkgs
		if cache_valid:
			# A changed COUNTER means the package was re-merged, so its
			# cached blockers may be stale.
			for pkg in self._installed_pkgs:
				if long(self._vardb.aux_get(pkg, ["COUNTER"])[0]) != \
					self[pkg].counter:
					cache_valid = False
					break
		if not cache_valid:
			self._cache_data = {"version":self._cache_version}
			self._cache_data["blockers"] = {}
			self._cache_data["virtuals"] = self._virtuals
		self._modified = False
	def flush(self):
		"""If the current user has permission and the internal blocker cache
		been updated, save it to disk and mark it unmodified. This is called
		by emerge after it has proccessed blockers for all installed packages.
		Currently, the cache is only written if the user has superuser
		privileges (since that's required to obtain a lock), but all users
		have read access and benefit from faster blocker lookups (as long as
		the entire cache is still valid). The cache is stored as a pickled
		dict object with the following format:
		{
		version : "1",
		"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
		"virtuals" : vardb.settings.getvirtuals()
		}
		"""
		if self._modified and \
			secpass >= 2:
			try:
				f = portage_util.atomic_ofstream(self._cache_filename)
				cPickle.dump(self._cache_data, f, -1)
				f.close()
				portage_util.apply_secpass_permissions(
					self._cache_filename, gid=portage.portage_gid, mode=0644)
			except (IOError, OSError), e:
				# Best-effort write; a failure just means no cache reuse.
				pass
		self._modified = False
	def __setitem__(self, cpv, blocker_data):
		"""
		Update the cache and mark it as modified for a future call to
		self.flush().
		@param cpv: Package for which to cache blockers.
		@type cpv: String
		@param blocker_data: An object with counter and atoms attributes.
		@type blocker_data: BlockerData
		"""
		self._cache_data["blockers"][cpv] = \
			(blocker_data.counter, blocker_data.atoms)
		self._modified = True
	def __getitem__(self, cpv):
		"""
		@rtype: BlockerData
		@returns: An object with counter and atoms attributes.
		"""
		return self.BlockerData(*self._cache_data["blockers"][cpv])
	def keys(self):
		"""This needs to be implemented so that self.__repr__() doesn't raise
		an AttributeError."""
		if self._cache_data and "blockers" in self._cache_data:
			return self._cache_data["blockers"].keys()
		return []
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
from formatter import AbstractFormatter, DumbWriter
f = AbstractFormatter(DumbWriter(maxcol=72))
print "\n\n!!! Invalid or corrupt dependency specification: "
print
print error_msg
print
print parent_node
print
print depstring
print
p_type, p_root, p_key, p_status = parent_node
msg = []
if p_status == "nomerge":
category, pf = portage.catsplit(p_key)
pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
msg.append("Portage is unable to process the dependencies of the ")
msg.append("'%s' package. " % p_key)
msg.append("In order to correct this problem, the package ")
msg.append("should be uninstalled, reinstalled, or upgraded. ")
msg.append("As a temporary workaround, the --nodeps option can ")
msg.append("be used to ignore all dependencies. For reference, ")
msg.append("the problematic dependencies can be found in the ")
msg.append("*DEPEND files located in '%s/'." % pkg_location)
else:
msg.append("This package can not be installed. ")
msg.append("Please notify the '%s' package maintainer " % p_key)
msg.append("about this problem.")
for x in msg:
f.add_flowing_data(x)
f.end_paragraph(1)
class depgraph:
	# Maps a package type_name to the tree (within self.trees/self.roots)
	# that stores packages of that type.
	pkg_tree_map = {
		"ebuild":"porttree",
		"binary":"bintree",
		"installed":"vartree"}
	# Metadata keys the resolver needs from every dbapi it consults
	# (aux_get pulls and fakedbapi injection both use this list).
	_mydbapi_keys = [
		"CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
		"repository", "RESTRICT", "SLOT", "USE"]
	def __init__(self, settings, trees, myopts, myparams, spinner):
		"""Set up per-root trees, settings clones and bookkeeping
		structures for dependency graph calculation.

		@param settings: global portage config
		@param trees: mapping of root -> {"porttree"/"bintree"/"vartree"}
		@param myopts: parsed command line options (dict-like)
		@param myparams: resolver parameters (e.g. "deep", "recurse")
		@param spinner: progress indicator updated during slow loops
		"""
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self.myparams = myparams
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self.pkgsettings = {}
		# Maps cpv to digraph node for all nodes added to the graph.
		self.pkg_node_map = {}
		# Maps slot atom to digraph node for all nodes added to the graph.
		self._slot_node_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		self.mydbapi = {}
		self.trees = {}
		self.roots = {}
		for myroot in trees:
			self.trees[myroot] = {}
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			# The vartree is wrapped so that dep_check sees updated
			# metadata without repeatedly hitting the real vdb.
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["vartree"],
				trees[myroot]["porttree"].dbapi,
				self._mydbapi_keys)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			self.pkg_node_map[myroot] = {}
			self._slot_node_map[myroot] = {}
			vardb = self.trees[myroot]["vartree"].dbapi
			self.roots[myroot] = RootConfig(self.trees[myroot])
			# This fakedbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = portage.fakedbapi(settings=self.pkgsettings[myroot])
			self.mydbapi[myroot] = fakedb
			if "--nodeps" not in self.myopts and \
				"--buildpkgonly" not in self.myopts:
				# --nodeps bypasses this, since it isn't needed in this case
				# and the cache pulls might trigger (slow) cache generation.
				for pkg in vardb.cpv_all():
					self.spinner.update()
					fakedb.cpv_inject(pkg,
						metadata=dict(izip(self._mydbapi_keys,
						vardb.aux_get(pkg, self._mydbapi_keys))))
			del vardb, fakedb
			if "--usepkg" in self.myopts:
				self.trees[myroot]["bintree"].populate(
					"--getbinpkg" in self.myopts,
					"--getbinpkgonly" in self.myopts)
		del trees
		self.digraph=portage.digraph()
		# Tracks simple parent/child relationships (PDEPEND relationships are
		# not reversed).
		self._parent_child_digraph = digraph()
		# contains all sets added to the graph
		self._sets = {}
		# contains atoms given as arguments
		self._sets["args"] = AtomSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = AtomSet()
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		self.blocker_digraph = digraph()
		self.blocker_parents = {}
		self._unresolved_blocker_parents = {}
		self._slot_collision_info = set()
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._altlist_cache = {}
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = []
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		cases."""
		msg = []
		msg.append("\n!!! Multiple versions within a single " + \
			"package slot have been \n")
		msg.append("!!! pulled into the dependency graph:\n\n")
		indent = " "
		# Max number of parents shown, to avoid flooding the display.
		max_parents = 3
		for slot_atom, root in self._slot_collision_info:
			msg.append(slot_atom)
			msg.append("\n\n")
			# Collect the colliding nodes that belong to this slot atom.
			slot_nodes = []
			for node in self._slot_collision_nodes:
				type_name, pkg_root, cpv, pkg_status = node
				if pkg_root != root:
					continue
				mydb = self.roots[root].trees[
					self.pkg_tree_map[type_name]].dbapi
				slot = mydb.aux_get(cpv, ["SLOT"])[0]
				if slot_atom == "%s:%s" % (portage.cpv_getkey(cpv), slot):
					slot_nodes.append(node)
			# The node that currently occupies the slot is shown too.
			slot_nodes.append(self._slot_node_map[root][slot_atom])
			for node in slot_nodes:
				msg.append(indent)
				msg.append(str(node))
				parents = self._parent_child_digraph.parent_nodes(node)
				if parents:
					omitted_parents = 0
					if len(parents) > max_parents:
						# Truncate the parent list to max_parents entries.
						omitted_parents = len(parents) - max_parents
						pruned_list = []
						for parent in parents:
							pruned_list.append(parent)
							if len(pruned_list) == max_parents:
								break
						parents = pruned_list
					msg.append(" pulled in by\n")
					for parent in parents:
						msg.append(2*indent)
						msg.append(str(parent))
						msg.append("\n")
					if omitted_parents:
						msg.append(2*indent)
						msg.append("(and %d more)\n" % omitted_parents)
				else:
					msg.append(" (no parents)\n")
				msg.append("\n")
			msg.append("\n")
		sys.stderr.write("".join(msg))
		sys.stderr.flush()
		if "--quiet" in self.myopts:
			return
		# Verbose advice, word-wrapped to 72 columns via formatter.
		msg = []
		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")
		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
		for x in msg:
			f.add_flowing_data(x)
		f.end_paragraph(1)
		msg = []
		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
		for x in msg:
			f.add_flowing_data(x)
		f.end_paragraph(1)
		f.writer.flush()
def _reinstall_for_flags(self, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
"""Return a set of flags that trigger reinstallation, or None if there
are no such flags."""
if "--newuse" in self.myopts:
flags = orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags)
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
if flags:
return flags
elif "changed-use" == self.myopts.get("--reinstall"):
flags = orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use))
if flags:
return flags
return None
	def create(self, mybigkey, myparent=None, addme=1, metadata=None,
		priority=DepPriority(), rev_dep=False, arg=None):
		"""
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
		Think --onlydeps, we need to ignore packages in that case.
		#stuff to add:
		#SLOT-aware emerge
		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge
		"""
		# unused parameters
		rev_dep = False
		mytype, myroot, mykey = mybigkey
		# select the correct /var database that we'll be checking against
		vardbapi = self.trees[myroot]["vartree"].dbapi
		portdb = self.trees[myroot]["porttree"].dbapi
		bindb = self.trees[myroot]["bintree"].dbapi
		pkgsettings = self.pkgsettings[myroot]
		# if the package is already on the system, we add a "nomerge"
		# directive, otherwise we add a "merge" directive.
		mydbapi = self.trees[myroot][self.pkg_tree_map[mytype]].dbapi
		if metadata is None:
			metadata = dict(izip(self._mydbapi_keys,
				mydbapi.aux_get(mykey, self._mydbapi_keys)))
			if mytype == "ebuild":
				# Ebuild USE depends on the current config, not the cache.
				pkgsettings.setcpv(mykey, mydb=portdb)
				metadata["USE"] = pkgsettings["USE"]
		myuse = metadata["USE"].split()
		if not arg and myroot == self.target_root:
			# Detect whether this package happens to satisfy one of the
			# atoms given on the command line / in a set.
			try:
				arg = self._set_atoms.findAtomForPackage(mykey, metadata)
			except portage_exception.InvalidDependString, e:
				if mytype != "installed":
					show_invalid_depstring_notice(tuple(mybigkey+["merge"]),
						metadata["PROVIDE"], str(e))
					return 0
				del e
		reinstall_for_flags = None
		merging=1
		if mytype == "installed":
			merging = 0
		if addme and mytype != "installed":
			# this is where we add the node to the list of packages to merge
			if "selective" in self.myparams or not arg:
				if "empty" not in self.myparams and vardbapi.cpv_exists(mykey):
					merging=0
		""" If we aren't merging, perform the --newuse check.
		If the package has new iuse flags or different use flags then if
		--newuse is specified, we need to merge the package. """
		if merging == 0 and \
			myroot == self.target_root and \
			("--newuse" in self.myopts or
			"--reinstall" in self.myopts) and \
			vardbapi.cpv_exists(mykey):
			pkgsettings.setcpv(mykey, mydb=mydbapi)
			# Forced flags never trigger a reinstall on their own.
			forced_flags = set()
			forced_flags.update(pkgsettings.useforce)
			forced_flags.update(pkgsettings.usemask)
			old_use = vardbapi.aux_get(mykey, ["USE"])[0].split()
			iuses = set(filter_iuse_defaults(metadata["IUSE"].split()))
			old_iuse = set(filter_iuse_defaults(
				vardbapi.aux_get(mykey, ["IUSE"])[0].split()))
			reinstall_for_flags = self._reinstall_for_flags(
				forced_flags, old_use, old_iuse, myuse, iuses)
			if reinstall_for_flags:
				merging = 1
		if addme and merging == 1:
			mybigkey.append("merge")
		else:
			mybigkey.append("nomerge")
		jbigkey = tuple(mybigkey)
		if addme:
			if merging == 0 and vardbapi.cpv_exists(mykey) and \
				mytype != "installed":
				# Not merging and already installed: treat the node as
				# the installed instance, including its vdb metadata.
				mytype = "installed"
				mybigkey[0] = "installed"
				mydbapi = vardbapi
				jbigkey = tuple(mybigkey)
				metadata = dict(izip(self._mydbapi_keys,
					mydbapi.aux_get(mykey, self._mydbapi_keys)))
				myuse = metadata["USE"].split()
			slot_atom = "%s:%s" % (portage.dep_getkey(mykey), metadata["SLOT"])
			if merging and \
				"empty" not in self.myparams and \
				vardbapi.match(slot_atom):
				# Increase the priority of dependencies on packages that
				# are being rebuilt. This optimizes merge order so that
				# dependencies are rebuilt/updated as soon as possible,
				# which is needed especially when emerge is called by
				# revdep-rebuild since dependencies may be affected by ABI
				# breakage that has rendered them useless. Don't adjust
				# priority here when in "empty" mode since all packages
				# are being merged in that case.
				priority.rebuild = True
			existing_node = self._slot_node_map[myroot].get(
				slot_atom, None)
			slot_collision = False
			if existing_node:
				e_type, myroot, e_cpv, e_status = existing_node
				if mykey == e_cpv:
					# The existing node can be reused.
					self._parent_child_digraph.add(existing_node, myparent)
					# If a direct circular dependency is not an unsatisfied
					# buildtime dependency then drop it here since otherwise
					# it can skew the merge order calculation in an unwanted
					# way.
					if existing_node != myparent or \
						(priority.buildtime and not priority.satisfied):
						self.digraph.addnode(existing_node, myparent,
							priority=priority)
					return 1
				else:
					if jbigkey in self._slot_collision_nodes:
						return 1
					# A slot collision has occurred.  Sometimes this coincides
					# with unresolvable blockers, so the slot collision will be
					# shown later if there are no unresolvable blockers.
					self._slot_collision_info.add((slot_atom, myroot))
					self._slot_collision_nodes.add(jbigkey)
					slot_collision = True
			if slot_collision:
				# Now add this node to the graph so that self.display()
				# can show use flags and --tree output.  This node is
				# only being partially added to the graph.  It must not be
				# allowed to interfere with the other nodes that have been
				# added.  Do not overwrite data for existing nodes in
				# self.pkg_node_map and self.mydbapi since that data will
				# be used for blocker validation.
				self.pkg_node_map[myroot].setdefault(mykey, jbigkey)
				# Even though the graph is now invalid, continue to process
				# dependencies so that things like --fetchonly can still
				# function despite collisions.
			else:
				self.mydbapi[myroot].cpv_inject(mykey, metadata=metadata)
				self._slot_node_map[myroot][slot_atom] = jbigkey
				self.pkg_node_map[myroot][mykey] = jbigkey
			if reinstall_for_flags:
				self._reinstall_nodes[jbigkey] = reinstall_for_flags
			if rev_dep and myparent:
				self.digraph.addnode(myparent, jbigkey,
					priority=priority)
			else:
				self.digraph.addnode(jbigkey, myparent,
					priority=priority)
		if mytype != "installed":
			# Allow this package to satisfy old-style virtuals in case it
			# doesn't already. Any pre-existing providers will be preferred
			# over this one.
			try:
				pkgsettings.setinst(mykey, metadata)
				# For consistency, also update the global virtuals.
				settings = self.roots[myroot].settings
				settings.unlock()
				settings.setinst(mykey, metadata)
				settings.lock()
			except portage_exception.InvalidDependString, e:
				show_invalid_depstring_notice(jbigkey, metadata["PROVIDE"], str(e))
				del e
				return 0
		built = mytype != "ebuild"
		installed = mytype == "installed"
		if installed:
			# Warn if all matching ebuilds are masked or
			# the installed package itself is masked. Do
			# not warn if there are simply no matching
			# ebuilds since that would be annoying in some
			# cases:
			#
			# - binary packages installed from an overlay
			# that is not listed in PORTDIR_OVERLAY
			#
			# - multi-slot atoms listed in the world file
			# to prevent depclean from removing them
			if arg:
				all_ebuilds_masked = bool(
					portdb.xmatch("match-all", arg) and
					not portdb.xmatch("bestmatch-visible", arg))
				if all_ebuilds_masked:
					self._missing_args.append(arg)
					if "selective" not in self.myparams:
						self._show_unsatisfied_dep(
							myroot, arg, myparent=myparent)
						return 0
			pkg = Package(type_name=mytype, root=myroot,
				cpv=mykey, built=built, installed=installed,
				metadata=metadata)
			if not visible(pkgsettings, pkg.cpv, pkg.metadata,
				built=pkg.built, installed=pkg.installed):
				self._masked_installed.append((pkg, pkgsettings))
		if arg:
			self._set_nodes.add(jbigkey)
		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self._parent_child_digraph.add(jbigkey, myparent)
		""" This section determines whether we go deeper into dependencies or not.
		We want to go deeper on a few occasions:
		Installing package A, we need to make sure package A's deps are met.
		emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		"""
		if "deep" not in self.myparams and not merging and \
			not ("--update" in self.myopts and arg and merging):
			return 1
		elif "recurse" not in self.myparams:
			return 1
		self.spinner.update()
		""" Check DEPEND/RDEPEND/PDEPEND/SLOT
		Pull from bintree if it's binary package, porttree if it's ebuild.
		Binpkg's can be either remote or local. """
		edepend={}
		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
		for k in depkeys:
			edepend[k] = metadata[k]
		if mytype == "ebuild":
			if "--buildpkgonly" in self.myopts:
				# Only building a binary package; run-time deps are moot.
				edepend["RDEPEND"] = ""
				edepend["PDEPEND"] = ""
		bdeps_satisfied = False
		if mytype in ("installed", "binary"):
			if self.myopts.get("--with-bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "satisfied" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				# failing.
				bdeps_satisfied = True
			else:
				# built packages do not have build time dependencies.
				edepend["DEPEND"] = ""
		""" We have retrieve the dependency information, now we need to recursively
		process them.  DEPEND gets processed for root = "/", {R,P}DEPEND in myroot. """
		mp = tuple(mybigkey)
		try:
			if not self.select_dep("/", edepend["DEPEND"], myparent=mp,
				myuse=myuse, priority=DepPriority(buildtime=True,
				satisfied=bdeps_satisfied),
				parent_arg=arg):
				return 0
			"""RDEPEND is soft by definition.  However, in order to ensure
			correct merge order, we make it a hard dependency.  Otherwise, a
			build time dependency might not be usable due to it's run time
			dependencies not being installed yet.
			"""
			if not self.select_dep(myroot,edepend["RDEPEND"], myparent=mp,
				myuse=myuse, priority=DepPriority(runtime=True),
				parent_arg=arg):
				return 0
			if edepend.has_key("PDEPEND") and edepend["PDEPEND"]:
				# Post Depend -- Add to the list without a parent, as it depends
				# on a package being present AND must be built after that package.
				if not self.select_dep(myroot, edepend["PDEPEND"], myparent=mp,
					myuse=myuse, priority=DepPriority(runtime_post=True),
					parent_arg=arg):
					return 0
		except ValueError, e:
			# select_dep raises ValueError for ambiguous (not fully
			# qualified) atoms; e.args[0] lists the candidate matches.
			pkgs = e.args[0]
			portage.writemsg("\n\n!!! An atom in the dependencies " + \
				"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
			for cpv in pkgs:
				portage.writemsg(" %s\n" % cpv, noiselevel=-1)
			portage.writemsg("\n", noiselevel=-1)
			if mytype == "binary":
				portage.writemsg(
					"!!! This binary package cannot be installed: '%s'\n" % \
					mykey, noiselevel=-1)
			elif mytype == "ebuild":
				myebuild, mylocation = portdb.findname2(mykey)
				portage.writemsg("!!! This ebuild cannot be installed: " + \
					"'%s'\n" % myebuild, noiselevel=-1)
			portage.writemsg("!!! Please notify the package maintainer " + \
				"that atoms must be fully-qualified.\n", noiselevel=-1)
			return 0
		return 1
	def select_files(self,myfiles):
		"""Given a list of .tbz2s, .ebuilds and atoms, create the
		appropriate depgraph and return a favorite list.

		@returns: (success, favorites) where favorites is the list of
			atoms to record in the world file (unless --oneshot).
		"""
		myfavorites=[]
		myroot = self.target_root
		vardb = self.trees[myroot]["vartree"].dbapi
		portdb = self.trees[myroot]["porttree"].dbapi
		bindb = self.trees[myroot]["bintree"].dbapi
		bindb_keys = list(bindb._aux_cache_keys)
		pkgsettings = self.pkgsettings[myroot]
		arg_atoms = []
		for x in myfiles:
			ext = os.path.splitext(x)[1]
			if ext==".tbz2":
				# Binary package file given on the command line.
				if not os.path.exists(x):
					# Try to resolve a bare filename inside PKGDIR.
					if os.path.exists(
						os.path.join(pkgsettings["PKGDIR"], "All", x)):
						x = os.path.join(pkgsettings["PKGDIR"], "All", x)
					elif os.path.exists(
						os.path.join(pkgsettings["PKGDIR"], x)):
						x = os.path.join(pkgsettings["PKGDIR"], x)
					else:
						print "\n\n!!! Binary package '"+str(x)+"' does not exist."
						print "!!! Please ensure the tbz2 exists as specified.\n"
						return 0, myfavorites
				mytbz2=xpak.tbz2(x)
				mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
				# The file must be the one the bintree would use.
				if os.path.realpath(x) != \
					os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
					print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
					return 0, myfavorites
				if not self.create(["binary", myroot, mykey],
					addme=("--onlydeps" not in self.myopts), arg=x):
					return (0,myfavorites)
				arg_atoms.append((x, "="+mykey))
			elif ext==".ebuild":
				# Ebuild file given on the command line; must live in a
				# valid tree that portdb knows about.
				ebuild_path = portage_util.normalize_path(os.path.abspath(x))
				pkgdir = os.path.dirname(ebuild_path)
				tree_root = os.path.dirname(os.path.dirname(pkgdir))
				cp = pkgdir[len(tree_root)+1:]
				e = portage_exception.PackageNotFound(
					("%s is not in a valid portage tree " + \
					"hierarchy or does not exist") % x)
				if not portage.isvalidatom(cp):
					raise e
				cat = portage.catsplit(cp)[0]
				mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
				if not portage.isvalidatom("="+mykey):
					raise e
				ebuild_path = portdb.findname(mykey)
				if ebuild_path:
					if ebuild_path != os.path.join(os.path.realpath(tree_root),
						cp, os.path.basename(ebuild_path)):
						print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
						return 0, myfavorites
					if mykey not in portdb.xmatch(
						"match-visible", portage.dep_getkey(mykey)):
						# Warn (with a delay) but allow merging masked
						# ebuilds that are named explicitly.
						print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
						print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
						print colorize("BAD", "*** page for details.")
						countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
							"Continuing...")
				else:
					raise portage_exception.PackageNotFound(
						"%s is not in a valid portage tree hierarchy or does not exist" % x)
				if not self.create(["ebuild", myroot, mykey],
					None, "--onlydeps" not in self.myopts, arg=x):
					return (0,myfavorites)
				arg_atoms.append((x, "="+mykey))
			else:
				# Plain package atom.
				if not is_valid_package_atom(x):
					portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
						noiselevel=-1)
					portage.writemsg("!!! Please check ebuild(5) for full details.\n")
					portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
					return (0,[])
				try:
					mykey = None
					# Expand the atom against whichever db the options
					# say packages will come from (bin, var, or port).
					if "--usepkg" in self.myopts:
						mykey = portage.dep_expand(x, mydb=bindb,
							settings=pkgsettings)
					if ("--usepkgonly" in self.myopts or mykey) and \
						not portage.dep_getkey(mykey).startswith("null/"):
						arg_atoms.append((x, mykey))
						continue
					if "--usepkgonly" in self.myopts:
						mykey = portage.dep_expand(x, mydb=vardb,
							settings=pkgsettings)
						arg_atoms.append((x, mykey))
						continue
					try:
						mykey = portage.dep_expand(x,
							mydb=portdb, settings=pkgsettings)
					except ValueError, e:
						# Ambiguous in the porttree; accept an
						# unambiguous match from the vdb instead.
						mykey = portage.dep_expand(x,
							mydb=vardb, settings=pkgsettings)
						cp = portage.dep_getkey(mykey)
						if cp.startswith("null/") or \
							cp not in e[0]:
							raise
						del e
					arg_atoms.append((x, mykey))
				except ValueError, errpkgs:
					print "\n\n!!! The short ebuild name \"" + x + "\" is ambiguous. Please specify"
					print "!!! one of the following fully-qualified ebuild names instead:\n"
					for i in errpkgs[0]:
						print " " + green(i)
					print
					sys.exit(1)
		if "--update" in self.myopts:
			"""Make sure all installed slots are updated when possible. Do this
			with --emptytree also, to ensure that all slots are remerged."""
			vardb = self.trees[self.target_root]["vartree"].dbapi
			greedy_atoms = []
			for myarg, myatom in arg_atoms:
				greedy_atoms.append((myarg, myatom))
				# Gather every SLOT that is installed or available for
				# this atom; if more than one, add slot-specific atoms.
				myslots = set()
				for cpv in vardb.match(myatom):
					myslots.add(vardb.aux_get(cpv, ["SLOT"])[0])
				if myslots:
					best_pkgs = []
					if "--usepkg" in self.myopts:
						best_pkg = None
						for cpv in reversed(bindb.match(myatom)):
							metadata = dict(izip(bindb_keys,
								bindb.aux_get(cpv, bindb_keys)))
							if visible(pkgsettings, cpv, metadata, built=True):
								best_pkg = cpv
								break
						if best_pkg:
							best_slot = bindb.aux_get(best_pkg, ["SLOT"])[0]
							best_pkgs.append(("binary", best_pkg, best_slot))
					if "--usepkgonly" not in self.myopts:
						best_pkg = portage.best(portdb.match(myatom))
						if best_pkg:
							best_slot = portdb.aux_get(best_pkg, ["SLOT"])[0]
							best_pkgs.append(("ebuild", best_pkg, best_slot))
					if best_pkgs:
						best_pkg = portage.best([x[1] for x in best_pkgs])
						best_pkgs = [x for x in best_pkgs if x[1] == best_pkg]
						best_slot = best_pkgs[0][2]
						myslots.add(best_slot)
				if len(myslots) > 1:
					for myslot in myslots:
						myslot_atom = "%s:%s" % \
							(portage.dep_getkey(myatom), myslot)
						available = False
						if "--usepkgonly" not in self.myopts and \
							self.trees[self.target_root][
							"porttree"].dbapi.match(myslot_atom):
							available = True
						elif "--usepkg" in self.myopts:
							for cpv in bindb.match(myslot_atom):
								metadata = dict(izip(bindb_keys,
									bindb.aux_get(cpv, bindb_keys)))
								if visible(pkgsettings, cpv, metadata, built=True):
									available = True
									break
						if available:
							greedy_atoms.append((myarg, myslot_atom))
			arg_atoms = greedy_atoms
		oneshot = "--oneshot" in self.myopts or \
			"--onlydeps" in self.myopts
		""" These are used inside self.create() in order to ensure packages
		that happen to match arguments are not incorrectly marked as nomerge."""
		args_set = self._sets["args"]
		for myarg, myatom in arg_atoms:
			if myatom in args_set:
				continue
			args_set.add(myatom)
			self._set_atoms.add(myatom)
			if not oneshot:
				myfavorites.append(myatom)
		for myarg, myatom in arg_atoms:
			try:
				self.mysd = self.select_dep(myroot, myatom, arg=myarg)
			except portage_exception.MissingSignature, e:
				portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
				portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
				portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
				portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
				portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
				sys.exit(1)
			except portage_exception.InvalidSignature, e:
				portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
				portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
				portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
				portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
				portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
				sys.exit(1)
			except SystemExit, e:
				raise # Needed else can't exit
			except Exception, e:
				print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % mykey
				print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
				raise
			if not self.mysd:
				return (0,myfavorites)
		missing=0
		if "--usepkgonly" in self.myopts:
			# With --usepkgonly every "merge" node must be a binary.
			for xs in self.digraph.all_nodes():
				if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
					if missing == 0:
						print
					missing += 1
					print "Missing binary for:",xs[2]
		if not self.validate_blockers():
			return False, myfavorites
		# We're true here unless we are missing binaries.
		return (not missing,myfavorites)
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
xinfo = '"%s"' % atom
if arg:
xinfo='"%s"' % arg
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
if myparent:
xfrom = '(dependency required by '+ \
green('"%s"' % myparent[2]) + \
red(' [%s]' % myparent[0]) + ')'
masked_packages = []
missing_licenses = []
have_eapi_mask = False
pkgsettings = self.pkgsettings[root]
root_config = self.roots[root]
portdb = self.roots[root].trees["porttree"].dbapi
dbs = []
portdb = self.trees[root]["porttree"].dbapi
bindb = self.trees[root]["bintree"].dbapi
vardb = self.trees[root]["vartree"].dbapi
# (db, pkg_type, built, installed, db_keys)
if "--usepkgonly" not in self.myopts:
db_keys = list(portdb._aux_cache_keys)
dbs.append((portdb, "ebuild", False, False, db_keys))
if "--usepkg" in self.myopts:
db_keys = list(bindb._aux_cache_keys)
dbs.append((bindb, "binary", True, False, db_keys))
db_keys = self._mydbapi_keys
dbs.append((vardb, "installed", True, True, db_keys))
for db, pkg_type, built, installed, db_keys in dbs:
match = db.match
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all", atom)
else:
cpv_list = db.match(atom)
# descending order
cpv_list.reverse()
for cpv in cpv_list:
metadata, mreasons = get_mask_info(root_config, cpv,
pkgsettings, db, pkg_type, built, installed, db_keys)
masked_packages.append(
(root_config, pkgsettings, cpv, metadata, mreasons))
if masked_packages:
print "\n!!! "+red("All ebuilds that could satisfy ")+green(xinfo)+red(" have been masked.")
print "!!! One of the following masked packages is required to complete your request:"
have_eapi_mask = show_masked_packages(masked_packages)
if have_eapi_mask:
print
msg = ("The current version of portage supports " + \
"EAPI '%s'. You must upgrade to a newer version" + \
" of portage before EAPI masked packages can" + \
" be installed.") % portage_const.EAPI
from textwrap import wrap
for line in wrap(msg, 75):
print line
print
print "For more information, see MASKED PACKAGES section in the emerge man page or "
print "refer to the Gentoo Handbook."
else:
print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
if myparent:
print xfrom
print
def select_dep(self, myroot, depstring, myparent=None, arg=None,
    myuse=None, raise_on_missing=False, priority=DepPriority(),
    rev_deps=False, parent_arg=None):
    """ Given a depstring, create the depgraph such that all dependencies are satisfied.
    myroot = $ROOT from environment, where {R,P}DEPENDs are merged to.
    myparent = the node whose depstring is being passed in
    arg = package was specified on the command line, merge even if it's already installed
    myuse = USE flags at present
    raise_on_missing = Given that the depgraph is not proper, raise an exception if true
    else continue trying.
    priority = DepPriority instance propagated (via copy) to child nodes
    rev_deps = forwarded to self.create() as rev_dep
    parent_arg = set when the parent node itself matched a command-line
    argument; affects the discard heuristic for --update
    return 1 on success, 0 for failure

    NOTE(review): priority=DepPriority() is a mutable default argument
    shared across calls; it appears safe here only because it is always
    copied via priority.copy() before mutation -- confirm before reuse.
    """
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    vardb = self.trees[myroot]["vartree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    if myparent:
        p_type, p_root, p_key, p_status = myparent
    if "--debug" in self.myopts:
        print
        print "Parent: ",myparent
        print "Depstring:",depstring
        if rev_deps:
            print "Reverse:", rev_deps
        print "Priority:", priority
    #processing dependencies
    """ Call portage.dep_check to evaluate the use? conditionals and make sure all
    dependencies are satisfiable. """
    if arg:
        # Command-line atom: use it verbatim, unless it is satisfied by
        # package.provided, in which case there is nothing to merge.
        mymerge = [depstring]
        pprovided = pkgsettings.pprovideddict.get(
            portage.dep_getkey(depstring))
        if pprovided and portage.match_from_list(depstring, pprovided):
            mymerge = []
    else:
        try:
            # Relax strict dep-string checking for installed ("nomerge")
            # parents, whose metadata may predate current syntax rules.
            if myparent and p_status == "nomerge":
                portage_dep._dep_check_strict = False
            mycheck = portage.dep_check(depstring, None,
                pkgsettings, myuse=myuse,
                use_binaries=("--usepkgonly" in self.myopts),
                myroot=myroot, trees=self.trees)
        finally:
            # Always restore strict mode (module-level flag).
            portage_dep._dep_check_strict = True
        if not mycheck[0]:
            if myparent:
                show_invalid_depstring_notice(
                    myparent, depstring, mycheck[1])
            else:
                sys.stderr.write("\n%s\n%s\n" % (depstring, mycheck[1]))
            return 0
        mymerge = mycheck[1]
    if not mymerge and arg:
        # A provided package has been specified on the command line. The
        # package will not be merged and a warning will be displayed.
        if depstring in self._set_atoms:
            self._pprovided_args.append((arg, depstring))
    if "--debug" in self.myopts:
        print "Candidates:",mymerge
    for x in mymerge:
        selected_pkg = None
        if x.startswith("!"):
            # Blocker atom: record it against the parent node rather
            # than selecting a package for it.
            if "--buildpkgonly" not in self.myopts and \
                "--nodeps" not in self.myopts and \
                myparent not in self._slot_collision_nodes:
                p_type, p_root, p_key, p_status = myparent
                if p_type != "installed" and p_status != "merge":
                    # It's safe to ignore blockers from --onlydeps nodes.
                    continue
                self.blocker_parents.setdefault(
                    ("blocks", p_root, x[1:]), set()).add(myparent)
            continue
        else:
            # List of acceptable packages, ordered by type preference.
            matched_packages = []
            myeb_matches = portdb.xmatch("match-visible", x)
            myeb = None
            myeb_pkg = None
            metadata = None
            existing_node = None
            if myeb_matches:
                myeb = portage.best(myeb_matches)
                # For best performance, try to reuse an exising node
                # and it's cached metadata. The portdbapi caches SLOT
                # metadata in memory so it's really only pulled once.
                slot_atom = "%s:%s" % (portage.dep_getkey(myeb),
                    portdb.aux_get(myeb, ["SLOT"])[0])
                existing_node = self._slot_node_map[myroot].get(slot_atom)
                if existing_node:
                    e_type, myroot, e_cpv, e_status = existing_node
                    metadata = dict(izip(self._mydbapi_keys,
                        self.mydbapi[myroot].aux_get(e_cpv, self._mydbapi_keys)))
                    cpv_slot = "%s:%s" % (e_cpv, metadata["SLOT"])
                    if portage.match_from_list(x, [cpv_slot]):
                        matched_packages.append(
                            ([e_type, myroot, e_cpv], metadata))
                    else:
                        # Existing node occupies the slot but does not
                        # satisfy this atom; fall through to other dbs.
                        existing_node = None
            if not existing_node and \
                "--usepkg" in self.myopts:
                # The next line assumes the binarytree has been populated.
                # XXX: Need to work out how we use the binary tree with roots.
                # NOTE(review): usepkgonly and chost below appear unused
                # within this method -- possibly leftovers; confirm.
                usepkgonly = "--usepkgonly" in self.myopts
                chost = pkgsettings["CHOST"]
                myeb_pkg_matches = []
                bindb_keys = list(bindb._aux_cache_keys)
                for pkg in bindb.match(x):
                    metadata = dict(izip(bindb_keys,
                        bindb.aux_get(pkg, bindb_keys)))
                    if not visible(pkgsettings, pkg, metadata, built=True):
                        continue
                    myeb_pkg_matches.append(pkg)
                if myeb_pkg_matches:
                    myeb_pkg = portage.best(myeb_pkg_matches)
                    # For best performance, try to reuse an exising node
                    # and it's cached metadata. The bindbapi caches SLOT
                    # metadata in memory so it's really only pulled once.
                    slot_atom = "%s:%s" % (portage.dep_getkey(myeb_pkg),
                        bindb.aux_get(myeb_pkg, ["SLOT"])[0])
                    existing_node = self._slot_node_map[myroot].get(slot_atom)
                    if existing_node:
                        e_type, myroot, e_cpv, e_status = existing_node
                        metadata = dict(izip(self._mydbapi_keys,
                            self.mydbapi[myroot].aux_get(e_cpv, self._mydbapi_keys)))
                        cpv_slot = "%s:%s" % (e_cpv, metadata["SLOT"])
                        if portage.match_from_list(x, [cpv_slot]):
                            # Reuse the existing node instead of the
                            # freshly matched binary package.
                            myeb_pkg = None
                            matched_packages.append(
                                ([e_type, myroot, e_cpv], metadata))
                        else:
                            existing_node = None
                    if not existing_node:
                        # For best performance, avoid pulling
                        # metadata whenever possible.
                        metadata = dict(izip(self._mydbapi_keys,
                            bindb.aux_get(myeb_pkg, self._mydbapi_keys)))
            if not existing_node and \
                myeb_pkg and \
                ("--newuse" in self.myopts or \
                "--reinstall" in self.myopts):
                # Decide whether changed USE flags force a rebuild from
                # source instead of using the binary package.
                iuses = set(filter_iuse_defaults(metadata["IUSE"].split()))
                old_use = metadata["USE"].split()
                mydb = None
                if "--usepkgonly" not in self.myopts and myeb:
                    mydb = portdb
                if myeb:
                    pkgsettings.setcpv(myeb, mydb=mydb)
                else:
                    pkgsettings.setcpv(myeb_pkg, mydb=mydb)
                now_use = pkgsettings["USE"].split()
                forced_flags = set()
                forced_flags.update(pkgsettings.useforce)
                forced_flags.update(pkgsettings.usemask)
                cur_iuse = iuses
                if "--usepkgonly" not in self.myopts and myeb:
                    cur_iuse = set(filter_iuse_defaults(
                        portdb.aux_get(myeb, ["IUSE"])[0].split()))
                if self._reinstall_for_flags(
                    forced_flags, old_use, iuses, now_use, cur_iuse):
                    # Discard the binary; the ebuild branch below will
                    # provide the candidate instead.
                    myeb_pkg = None
            if myeb_pkg:
                matched_packages.append(
                    (["binary", myroot, myeb_pkg], metadata))
            if not existing_node and \
                myeb and \
                "--usepkgonly" not in self.myopts:
                metadata = dict(izip(self._mydbapi_keys,
                    portdb.aux_get(myeb, self._mydbapi_keys)))
                pkgsettings.setcpv(myeb, mydb=portdb)
                # Record the USE flags the ebuild would be built with.
                metadata["USE"] = pkgsettings["USE"]
                matched_packages.append(
                    (["ebuild", myroot, myeb], metadata))
            if not matched_packages and \
                not (arg and "selective" not in self.myparams):
                """Fall back to the installed package database. This is a
                last resort because the metadata tends to diverge from that
                of the ebuild in the tree."""
                myeb_inst_matches = vardb.match(x)
                myeb_inst = None
                if myeb_inst_matches:
                    myeb_inst = portage.best(myeb_inst_matches)
                if myeb_inst:
                    metadata = dict(izip(self._mydbapi_keys,
                        vardb.aux_get(myeb_inst, self._mydbapi_keys)))
                    matched_packages.append(
                        (["installed", myroot, myeb_inst], metadata))
            if not matched_packages:
                if raise_on_missing:
                    raise portage_exception.PackageNotFound(x)
                self._show_unsatisfied_dep(myroot, x,
                    myparent=myparent)
                return 0
            if "--debug" in self.myopts:
                for pkg, metadata in matched_packages:
                    print (pkg[0] + ":").rjust(10), pkg[2]
            if len(matched_packages) > 1:
                # Keep only the entries for the best version; type
                # preference below decides among equal versions.
                bestmatch = portage.best(
                    [pkg[2] for pkg, metadata in matched_packages])
                matched_packages = [pkg for pkg in matched_packages \
                    if pkg[0][2] == bestmatch]
            # ordered by type preference ("ebuild" type is the last resort)
            selected_pkg = matched_packages[0]
        # In some cases, dep_check will return deps that shouldn't
        # be processed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if myparent and not arg and vardb.match(x) and \
            not existing_node and \
            "empty" not in self.myparams and \
            "deep" not in self.myparams and \
            not ("--update" in self.myopts and parent_arg):
            (mytype, myroot, mykey), metadata = selected_pkg
            myarg = None
            if myroot == self.target_root:
                try:
                    myarg = self._set_atoms.findAtomForPackage(
                        mykey, metadata)
                except portage_exception.InvalidDependString:
                    # This is already handled inside
                    # self.create() when necessary.
                    pass
            if not myarg:
                continue
        if myparent:
            #we are a dependency, so we want to be unconditionally added
            mypriority = priority.copy()
            if vardb.match(x):
                # Already installed: lower-urgency ("satisfied") dep.
                mypriority.satisfied = True
            if not self.create(selected_pkg[0], myparent=myparent,
                metadata=selected_pkg[1], priority=mypriority,
                rev_dep=rev_deps, arg=arg):
                return 0
        else:
            #if mysource is not set, then we are a command-line dependency and should not be added
            #if --onlydeps is specified.
            if not self.create(selected_pkg[0], myparent=myparent,
                addme=("--onlydeps" not in self.myopts),
                metadata=selected_pkg[1], rev_dep=rev_deps, arg=arg):
                return 0
    if "--debug" in self.myopts:
        print "Exiting...",myparent
    return 1
def validate_blockers(self):
"""Remove any blockers from the digraph that do not match any of the
packages within the graph. If necessary, create hard deps to ensure
correct merge order such that mutually blocking packages are never
installed simultaneously."""
if "--buildpkgonly" in self.myopts or \
"--nodeps" in self.myopts:
return True
modified_slots = {}
for myroot in self.trees:
myslots = {}
modified_slots[myroot] = myslots
final_db = self.mydbapi[myroot]
slot_node_map = self._slot_node_map[myroot]
for slot_atom, mynode in slot_node_map.iteritems():
mytype, myroot, mycpv, mystatus = mynode
if mystatus == "merge":
myslots[slot_atom] = mycpv
#if "deep" in self.myparams:
if True:
# Pull in blockers from all installed packages that haven't already
# been pulled into the depgraph. This is not enabled by default
# due to the performance penalty that is incurred by all the
# additional dep_check calls that are required.
# Optimization hack for dep_check calls that minimizes the
# available matches by replacing the portdb with a fakedbapi
# instance.
class FakePortageTree(object):
def __init__(self, mydb):
self.dbapi = mydb
dep_check_trees = {}
for myroot in self.trees:
dep_check_trees[myroot] = self.trees[myroot].copy()
dep_check_trees[myroot]["porttree"] = \
FakePortageTree(self.mydbapi[myroot])
dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
for myroot in self.trees:
pkg_node_map = self.pkg_node_map[myroot]
vardb = self.trees[myroot]["vartree"].dbapi
portdb = self.trees[myroot]["porttree"].dbapi
pkgsettings = self.pkgsettings[myroot]
final_db = self.mydbapi[myroot]
cpv_all_installed = self.trees[myroot]["vartree"].dbapi.cpv_all()
blocker_cache = BlockerCache(myroot, vardb)
for pkg in cpv_all_installed:
blocker_atoms = None
matching_node = pkg_node_map.get(pkg, None)
if matching_node and \
matching_node[3] == "nomerge":
continue
# If this node has any blockers, create a "nomerge"
# node for it so that they can be enforced.
self.spinner.update()
blocker_data = blocker_cache.get(pkg)
if blocker_data:
blocker_atoms = blocker_data.atoms
else:
dep_vals = vardb.aux_get(pkg, dep_keys)
myuse = vardb.aux_get(pkg, ["USE"])[0].split()
depstr = " ".join(dep_vals)
# It is crucial to pass in final_db here in order to
# optimize dep_check calls by eliminating atoms via
# dep_wordreduce and dep_eval calls.
try:
portage_dep._dep_check_strict = False
try:
success, atoms = portage.dep_check(depstr,
final_db, pkgsettings, myuse=myuse,