# Copyright 1998-2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id$
__all__ = ["portdbapi", "close_portdbapi_caches", "portagetree"]
from portage.cache.cache_errors import CacheError
from portage.cache.mappings import slot_dict_class
from portage.const import REPO_NAME_LOC
from portage.data import portage_gid, secpass
from portage.dbapi import dbapi
from portage.dep import use_reduce, paren_reduce, dep_getkey, match_from_list
from portage.exception import PortageException, \
UntrustedSignature, SecurityViolation, InvalidSignature, MissingSignature, \
FileNotFound, InvalidDependString, InvalidPackageName
from portage.manifest import Manifest
from portage.util import ensure_dirs, writemsg
from portage.versions import pkgsplit, catpkgsplit, best, ver_regexp
import portage.gpg, portage.checksum
from portage import eclass_cache, auxdbkeys, doebuild, flatten, \
listdir, dep_expand, eapi_is_supported, key_expand, dep_check, \
_eapi_is_deprecated
import os, stat
from itertools import izip
def _src_uri_validate(cpv, eapi, src_uri):
"""
Take a SRC_URI structure as returned by paren_reduce or use_reduce
and validate it. Raises InvalidDependString if a problem is detected,
such as missing operand for a -> operator.
"""
uri = None
operator = None
for x in src_uri:
if isinstance(x, list):
if operator is not None:
raise portage.exception.InvalidDependString(
("getFetchMap(): '%s' SRC_URI arrow missing " + \
"right operand") % (cpv,))
uri = None
_src_uri_validate(cpv, eapi, x)
continue
		if x[-1:] == "?":
if operator is not None:
raise portage.exception.InvalidDependString(
("getFetchMap(): '%s' SRC_URI arrow missing " + \
"right operand") % (cpv,))
uri = None
continue
if uri is None:
if x == "->":
raise portage.exception.InvalidDependString(
("getFetchMap(): '%s' SRC_URI arrow missing " + \
"left operand") % (cpv,))
uri = x
continue
if x == "->":
if eapi in ("0", "1"):
raise portage.exception.InvalidDependString(
("getFetchMap(): '%s' SRC_URI arrows are not " + \
"supported with EAPI='%s'") % (cpv, eapi))
operator = x
continue
if operator is not None:
if "/" in x:
raise portage.exception.InvalidDependString(
("getFetchMap(): '%s' SRC_URI '/' character in " + \
"file name: '%s'") % (cpv, x))
if x[-1] == "?":
raise portage.exception.InvalidDependString(
("getFetchMap(): '%s' SRC_URI arrow missing " + \
"right operand") % (cpv,))
uri = None
operator = None
if operator is not None:
raise portage.exception.InvalidDependString(
"getFetchMap(): '%s' SRC_URI arrow missing right operand" % \
(cpv,))
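# Illustrative sketch (hypothetical ebuild and URI, not taken from the tree):
# for SRC_URI="http://example.org/bar-1.0.tgz -> bar-1.0-renamed.tgz",
# paren_reduce() yields the token list
#     ["http://example.org/bar-1.0.tgz", "->", "bar-1.0-renamed.tgz"]
# which _src_uri_validate() accepts for any EAPI other than "0" or "1". A
# trailing "->" without a file name, an arrow with no left operand, or a "/"
# in the renamed file raises InvalidDependString as shown above. USE
# conditionals appear as a "flag?" token followed by a nested list, which is
# validated recursively.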
class portdbapi(dbapi):
"""this tree will scan a portage directory located at root (passed to init)"""
portdbapi_instances = []
_use_mutable = True
def __init__(self, porttree_root, mysettings=None):
portdbapi.portdbapi_instances.append(self)
from portage import config
if mysettings:
self.mysettings = mysettings
else:
from portage import settings
self.mysettings = config(clone=settings)
self._iuse_implicit = self.mysettings._get_implicit_iuse()
self._categories = set(self.mysettings.categories)
# This is strictly for use in aux_get() doebuild calls when metadata
# is generated by the depend phase. It's safest to use a clone for
# this purpose because doebuild makes many changes to the config
# instance that is passed in.
self.doebuild_settings = config(clone=self.mysettings)
self.manifestVerifyLevel = None
self.manifestVerifier = None
self.manifestCache = {} # {location: [stat, md5]}
self.manifestMissingCache = []
if "gpg" in self.mysettings.features:
self.manifestVerifyLevel = portage.gpg.EXISTS
if "strict" in self.mysettings.features:
self.manifestVerifyLevel = portage.gpg.MARGINAL
self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
elif "severe" in self.mysettings.features:
self.manifestVerifyLevel = portage.gpg.TRUSTED
self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
else:
self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
#self.root=settings["PORTDIR"]
self.porttree_root = porttree_root
if porttree_root:
self.porttree_root = os.path.realpath(porttree_root)
self.depcachedir = os.path.realpath(self.mysettings.depcachedir)
if os.environ.get("SANDBOX_ON") == "1":
# Make api consumers exempt from sandbox violations
# when doing metadata cache updates.
sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
if self.depcachedir not in sandbox_write:
sandbox_write.append(self.depcachedir)
os.environ["SANDBOX_WRITE"] = \
":".join(filter(None, sandbox_write))
self.eclassdb = eclass_cache.cache(self.porttree_root,
overlays=self.mysettings["PORTDIR_OVERLAY"].split())
# This is used as sanity check for aux_get(). If there is no
# root eclass dir, we assume that PORTDIR is invalid or
# missing. This check allows aux_get() to detect a missing
# portage tree and return early by raising a KeyError.
self._have_root_eclass_dir = os.path.isdir(
os.path.join(self.porttree_root, "eclass"))
self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
#if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
self.xcache = {}
self.frozen = 0
self.porttrees = [self.porttree_root] + \
[os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
self.treemap = {}
self._repository_map = {}
for path in self.porttrees:
repo_name_path = os.path.join(path, REPO_NAME_LOC)
try:
repo_name = open(repo_name_path, 'r').readline().strip()
self.treemap[repo_name] = path
self._repository_map[path] = repo_name
except (OSError,IOError):
# warn about missing repo_name at some other time, since we
# don't want to see a warning every time the portage module is
# imported.
pass
self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
self.auxdb = {}
self._pregen_auxdb = {}
self._init_cache_dirs()
# XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
# ~harring
filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
filtered_auxdbkeys.sort()
if secpass < 1:
from portage.cache import metadata_overlay, volatile
for x in self.porttrees:
db_ro = self.auxdbmodule(self.depcachedir, x,
filtered_auxdbkeys, gid=portage_gid, readonly=True)
self.auxdb[x] = metadata_overlay.database(
self.depcachedir, x, filtered_auxdbkeys,
gid=portage_gid, db_rw=volatile.database,
db_ro=db_ro)
else:
for x in self.porttrees:
# location, label, auxdbkeys
self.auxdb[x] = self.auxdbmodule(
self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
if "metadata-transfer" not in self.mysettings.features:
for x in self.porttrees:
if os.path.isdir(os.path.join(x, "metadata", "cache")):
self._pregen_auxdb[x] = self.metadbmodule(
x, "metadata/cache", filtered_auxdbkeys, readonly=True)
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
"PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
"RESTRICT", "SLOT"])
# Repoman modifies _aux_cache_keys, so delay _aux_cache_slot_dict
# initialization until the first aux_get call.
self._aux_cache_slot_dict = None
self._aux_cache = {}
self._broken_ebuilds = set()
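		# Summary of the state built above (descriptive comment): self.porttrees
		# holds PORTDIR followed by every PORTDIR_OVERLAY entry; self.treemap and
		# self._repository_map map repo_name values to tree paths and back;
		# self.auxdb holds one metadata cache per tree (a read-only cache layered
		# over a volatile in-memory cache when running without write access,
		# i.e. secpass < 1). Pre-generated metadata/cache directories are
		# consulted only when the "metadata-transfer" feature is disabled.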
def _init_cache_dirs(self):
"""Create /var/cache/edb/dep and adjust permissions for the portage
group."""
dirmode = 02070
filemode = 060
modemask = 02
try:
ensure_dirs(self.depcachedir, gid=portage_gid,
mode=dirmode, mask=modemask)
except PortageException, e:
pass
def close_caches(self):
if not hasattr(self, "auxdb"):
# unhandled exception thrown from constructor
return
for x in self.auxdb:
self.auxdb[x].sync()
self.auxdb.clear()
def flush_cache(self):
for x in self.auxdb.values():
x.sync()
def findLicensePath(self, license_name):
mytrees = self.porttrees[:]
mytrees.reverse()
for x in mytrees:
license_path = os.path.join(x, "licenses", license_name)
if os.access(license_path, os.R_OK):
return license_path
return None
def findname(self,mycpv):
return self.findname2(mycpv)[0]
def getRepositoryPath(self, repository_id):
"""
This function is required for GLEP 42 compliance; given a valid repository ID
it must return a path to the repository
TreeMap = { id:path }
"""
if repository_id in self.treemap:
return self.treemap[repository_id]
return None
def getRepositories(self):
"""
		This function is required for GLEP 42 compliance; it will return a list of
		repository IDs
TreeMap = {id: path}
"""
return [k for k in self.treemap if k]
def findname2(self, mycpv, mytree=None):
"""
Returns the location of the CPV, and what overlay it was in.
Searches overlays first, then PORTDIR; this allows us to return the first
		matching file. If we started in PORTDIR and checked overlays second, we
		would have to exhaustively search the overlays to be sure the correct
		(overriding) file was returned.
"""
if not mycpv:
return "",0
mysplit = mycpv.split("/")
psplit = pkgsplit(mysplit[1])
if psplit is None or len(mysplit) != 2:
raise InvalidPackageName(mycpv)
if mytree:
mytrees = [mytree]
else:
mytrees = self.porttrees[:]
mytrees.reverse()
if psplit:
for x in mytrees:
file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
if os.access(file, os.R_OK):
					return [file, x]
return None, 0
def _metadata_process(self, cpv, ebuild_path, repo_path):
"""
Create an EbuildMetadataPhase instance to generate metadata for the
		given ebuild.
@rtype: EbuildMetadataPhase
@returns: A new EbuildMetadataPhase instance, or None if the
metadata cache is already valid.
"""
metadata, st, emtime = self._pull_valid_cache(cpv, ebuild_path, repo_path)
if metadata is not None:
return None
import _emerge
process = _emerge.EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
ebuild_mtime=emtime, metadata_callback=self._metadata_callback,
portdb=self, repo_path=repo_path, settings=self.doebuild_settings)
return process
def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata, mtime):
i = metadata
if hasattr(metadata, "iteritems"):
i = metadata.iteritems()
metadata = dict(i)
if "EAPI" not in metadata or not metadata["EAPI"].strip():
metadata["EAPI"] = "0"
if not eapi_is_supported(metadata["EAPI"]):
			# If the EAPI is unsupported (e.g. from a newer portage), wipe all
			# metadata values and store the EAPI with a leading "-" so the entry
			# stays cached but is recognized as unusable.
eapi = metadata["EAPI"]
metadata = {}
for x in self._known_keys:
metadata.setdefault(x, "")
metadata["EAPI"] = "-" + eapi
if metadata.get("INHERITED", False):
metadata["_eclasses_"] = \
self.eclassdb.get_eclass_data(metadata["INHERITED"].split())
else:
metadata["_eclasses_"] = {}
metadata.pop("INHERITED", None)
metadata["_mtime_"] = mtime
self.auxdb[repo_path][cpv] = metadata
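		# Behavior notes for _metadata_callback() (descriptive only): an empty or
		# missing EAPI is stored as "0"; an unsupported EAPI blanks all known
		# keys and stores the EAPI with a leading "-" so the entry stays cached
		# but is recognized as unusable; INHERITED is replaced by the _eclasses_
		# mapping from eclassdb, and the ebuild mtime is recorded under _mtime_
		# before the entry is written to self.auxdb[repo_path].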
def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
try:
st = os.stat(ebuild_path)
emtime = st[stat.ST_MTIME]
except OSError:
writemsg("!!! aux_get(): ebuild for " + \
"'%s' does not exist at:\n" % (cpv,), noiselevel=-1)
writemsg("!!! %s\n" % ebuild_path, noiselevel=-1)
raise KeyError(cpv)
# Pull pre-generated metadata from the metadata/cache/
# directory if it exists and is valid, otherwise fall
# back to the normal writable cache.
auxdbs = []
pregen_auxdb = self._pregen_auxdb.get(repo_path)
if pregen_auxdb is not None:
auxdbs.append(pregen_auxdb)
auxdbs.append(self.auxdb[repo_path])
doregen = True
for auxdb in auxdbs:
try:
metadata = auxdb[cpv]
eapi = metadata.get("EAPI","").strip()
if not eapi:
eapi = "0"
if eapi.startswith("-") and eapi_is_supported(eapi[1:]):
pass
elif emtime != int(metadata.get("_mtime_", 0)):
pass
elif len(metadata.get("_eclasses_", [])) > 0:
if self.eclassdb.is_eclass_data_valid(
metadata["_eclasses_"]):
doregen = False
else:
doregen = False
except KeyError:
pass
except CacheError:
if auxdb is not pregen_auxdb:
try:
del auxdb[cpv]
except KeyError:
pass
if not doregen:
break
if doregen:
metadata = None
return (metadata, st, emtime)
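		# Cache validity rules applied above (summary): an entry is reused only
		# when its stored _mtime_ matches the ebuild's current mtime and, if
		# _eclasses_ is non-empty, eclassdb still considers that eclass data
		# valid. Entries whose EAPI was negated (leading "-") for an EAPI that
		# has since become supported are treated as stale and regenerated.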
def aux_get(self, mycpv, mylist, mytree=None):
"stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
cache_me = False
if not mytree:
cache_me = True
if not mytree and not self._known_keys.intersection(
mylist).difference(self._aux_cache_keys):
aux_cache = self._aux_cache.get(mycpv)
if aux_cache is not None:
return [aux_cache.get(x, "") for x in mylist]
cache_me = True
global auxdbkeys, auxdbkeylen
cat,pkg = mycpv.split("/", 1)
myebuild, mylocation = self.findname2(mycpv, mytree)
if not myebuild:
writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
noiselevel=1)
writemsg("!!! %s\n" % myebuild, noiselevel=1)
raise KeyError(mycpv)
myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
if "gpg" in self.mysettings.features:
try:
mys = portage.gpg.fileStats(myManifestPath)
if (myManifestPath in self.manifestCache) and \
(self.manifestCache[myManifestPath] == mys):
pass
elif self.manifestVerifier:
if not self.manifestVerifier.verify(myManifestPath):
# Verification failed the desired level.
raise UntrustedSignature(
"Untrusted Manifest: %(manifest)s" % \
{"manifest" : myManifestPath})
if ("severe" in self.mysettings.features) and \
(mys != portage.gpg.fileStats(myManifestPath)):
raise SecurityViolation(
"Manifest changed: %(manifest)s" % \
{"manifest":myManifestPath})
except InvalidSignature, e:
if ("strict" in self.mysettings.features) or \
("severe" in self.mysettings.features):
raise
writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
except MissingSignature, e:
if ("severe" in self.mysettings.features):
raise
if ("strict" in self.mysettings.features):
if myManifestPath not in self.manifestMissingCache:
writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
self.manifestMissingCache.insert(0,myManifestPath)
except (OSError, FileNotFound), e:
if ("strict" in self.mysettings.features) or \
("severe" in self.mysettings.features):
raise SecurityViolation(
"Error in verification of signatures: " + \
"%(errormsg)s" % {"errormsg" : str(e)})
writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
noiselevel=-1)
mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation)
doregen = mydata is None
writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
if doregen:
if myebuild in self._broken_ebuilds:
raise KeyError(mycpv)
if not self._have_root_eclass_dir:
raise KeyError(mycpv)
writemsg("doregen: %s %s\n" % (doregen, mycpv), 2)
writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n", 1)
self.doebuild_settings.reset()
mydata = {}
myret = doebuild(myebuild, "depend",
self.doebuild_settings["ROOT"], self.doebuild_settings,
dbkey=mydata, tree="porttree", mydbapi=self)
if myret != os.EX_OK:
self._broken_ebuilds.add(myebuild)
raise KeyError(mycpv)
self._metadata_callback(
mycpv, myebuild, mylocation, mydata, emtime)
if mydata.get("INHERITED", False):
mydata["_eclasses_"] = \
self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
else:
mydata["_eclasses_"] = {}
if not mydata.setdefault("EAPI", "0"):
mydata["EAPI"] = "0"
		# do we have an origin repository name for the current package
mydata["repository"] = self._repository_map.get(
os.path.sep.join(myebuild.split(os.path.sep)[:-3]), "")
mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", []))
mydata["_mtime_"] = st.st_mtime
#finally, we look at our internal cache entry and return the requested data.
returnme = [mydata.get(x, "") for x in mylist]
if cache_me:
if self._aux_cache_slot_dict is None:
self._aux_cache_slot_dict = \
slot_dict_class(self._aux_cache_keys)
aux_cache = self._aux_cache_slot_dict()
for x in self._aux_cache_keys:
aux_cache[x] = mydata.get(x, "")
self._aux_cache[mycpv] = aux_cache
return returnme
def getFetchMap(self, mypkg, useflags=None, mytree=None):
"""
Get the SRC_URI metadata as a dict which maps each file name to a
set of alternative URIs.
@param mypkg: cpv for an ebuild
@type mypkg: String
@param useflags: a collection of enabled USE flags, for evaluation of
conditionals
@type useflags: set, or None to enable all conditionals
@param mytree: The canonical path of the tree in which the ebuild
is located, or None for automatic lookup
@type mypkg: String
@returns: A dict which maps each file name to a set of alternative
URIs.
@rtype: dict
"""
try:
eapi, myuris = self.aux_get(mypkg,
["EAPI", "SRC_URI"], mytree=mytree)
except KeyError:
# Convert this to an InvalidDependString exception since callers
# already handle it.
raise portage.exception.InvalidDependString(
"getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")
if not eapi_is_supported(eapi):
# Convert this to an InvalidDependString exception
# since callers already handle it.
raise portage.exception.InvalidDependString(
"getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
(mypkg, eapi.lstrip("-")))
myuris = paren_reduce(myuris)
_src_uri_validate(mypkg, eapi, myuris)
myuris = use_reduce(myuris, uselist=useflags,
matchall=(useflags is None))
myuris = flatten(myuris)
uri_map = {}
myuris.reverse()
while myuris:
uri = myuris.pop()
if myuris and myuris[-1] == "->":
operator = myuris.pop()
distfile = myuris.pop()
else:
distfile = os.path.basename(uri)
if not distfile:
raise portage.exception.InvalidDependString(
("getFetchMap(): '%s' SRC_URI has no file " + \
"name: '%s'") % (mypkg, uri))
uri_set = uri_map.get(distfile)
if uri_set is None:
uri_set = set()
uri_map[distfile] = uri_set
uri_set.add(uri)
uri = None
operator = None
return uri_map
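		# Illustrative (hypothetical) result: for the SRC_URI arrow example near
		# the top of this file, getFetchMap() would return something like
		#     {"bar-1.0-renamed.tgz": set(["http://example.org/bar-1.0.tgz"])}
		# i.e. each distfile name maps to the set of URIs it may be fetched
		# from; without an arrow the file name is the basename of the URI.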
def getfetchlist(self, mypkg, useflags=None, mysettings=None,
all=0, mytree=None):
writemsg("!!! pordbapi.getfetchlist() is deprecated, " + \
"use getFetchMap() instead.\n", noiselevel=-1)
if all:
useflags = None
elif useflags is None:
if mysettings is None:
mysettings = self.doebuild_settings
mysettings.setcpv(mypkg, mydb=self)
useflags = mysettings["PORTAGE_USE"].split()
uri_map = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
uris = set()
for uri_set in uri_map.itervalues():
uris.update(uri_set)
return [list(uris), uri_map.keys()]
def getfetchsizes(self, mypkg, useflags=None, debug=0):
		# returns a filename:size dictionary of remaining downloads
myebuild = self.findname(mypkg)
pkgdir = os.path.dirname(myebuild)
mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
checksums = mf.getDigests()
if not checksums:
if debug:
print "[empty/missing/bad digest]: "+mypkg
return None
filesdict={}
myfiles = self.getFetchMap(mypkg, useflags=useflags)
#XXX: maybe this should be improved: take partial downloads
# into account? check checksums?
for myfile in myfiles:
if myfile not in checksums:
if debug:
writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
continue
file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
mystat = None
try:
mystat = os.stat(file_path)
except OSError, e:
pass
if mystat is None:
existing_size = 0
else:
existing_size = mystat.st_size
remaining_size = int(checksums[myfile]["size"]) - existing_size
if remaining_size > 0:
# Assume the download is resumable.
filesdict[myfile] = remaining_size
elif remaining_size < 0:
# The existing file is too large and therefore corrupt.
filesdict[myfile] = int(checksums[myfile]["size"])
return filesdict
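		# Worked example (hypothetical numbers): if the Manifest records a size
		# of 1000 bytes for a distfile and a 400 byte partial download exists in
		# DISTDIR, remaining_size is 600 and the download is assumed resumable;
		# if the existing file were 1200 bytes it is considered corrupt and the
		# full 1000 bytes is reported instead.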
def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
if all:
useflags = None
elif useflags is None:
if mysettings:
useflags = mysettings["USE"].split()
myfiles = self.getFetchMap(mypkg, useflags=useflags)
myebuild = self.findname(mypkg)
pkgdir = os.path.dirname(myebuild)
mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
mysums = mf.getDigests()
failures = {}
for x in myfiles:
if not mysums or x not in mysums:
ok = False
reason = "digest missing"
else:
try:
ok, reason = portage.checksum.verify_all(
os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
except FileNotFound, e:
ok = False
reason = "File Not Found: '%s'" % str(e)
if not ok:
failures[x] = reason
if failures:
return False
return True
def cpv_exists(self, mykey):
"Tells us whether an actual ebuild exists on disk (no masking)"
cps2 = mykey.split("/")
cps = catpkgsplit(mykey, silent=0)
if not cps:
#invalid cat/pkg-v
return 0
if self.findname(cps[0] + "/" + cps2[1]):
return 1
else:
return 0
def cp_all(self):
"returns a list of all keys in our tree"
d = {}
for x in self.mysettings.categories:
for oroot in self.porttrees:
for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
if not self._pkg_dir_name_re.match(y) or \
y == "CVS":
continue
d[x+"/"+y] = None
l = d.keys()
l.sort()
return l
def cp_list(self, mycp, use_cache=1, mytree=None):
if self.frozen and mytree is None:
cachelist = self.xcache["cp-list"].get(mycp)
if cachelist is not None:
# Try to propagate this to the match-all cache here for
				# repoman, since it uses separate match-all caches for each
# profile (due to old-style virtuals). Do not propagate
# old-style virtuals since cp_list() doesn't expand them.
if not (not cachelist and mycp.startswith("virtual/")):
self.xcache["match-all"][mycp] = cachelist
return cachelist[:]
mysplit = mycp.split("/")
invalid_category = mysplit[0] not in self._categories
d={}
if mytree:
mytrees = [mytree]
else:
mytrees = self.porttrees
for oroot in mytrees:
try:
file_list = os.listdir(os.path.join(oroot, mycp))
except OSError:
continue
for x in file_list:
if x.endswith(".ebuild"):
pf = x[:-7]
ps = pkgsplit(pf)
if not ps:
writemsg("\nInvalid ebuild name: %s\n" % \
os.path.join(oroot, mycp, x), noiselevel=-1)
continue
if ps[0] != mysplit[1]:
writemsg("\nInvalid ebuild name: %s\n" % \
os.path.join(oroot, mycp, x), noiselevel=-1)
continue
ver_match = ver_regexp.match("-".join(ps[1:]))
if ver_match is None or not ver_match.groups():
writemsg("\nInvalid ebuild version: %s\n" % \
os.path.join(oroot, mycp, x), noiselevel=-1)
continue
d[mysplit[0]+"/"+pf] = None
if invalid_category and d:
writemsg(("\n!!! '%s' has a category that is not listed in " + \
"/etc/portage/categories\n") % mycp, noiselevel=-1)
mylist = []
else:
mylist = d.keys()
# Always sort in ascending order here since it's handy
# and the result can be easily cached and reused.
self._cpv_sort_ascending(mylist)
if self.frozen and mytree is None:
cachelist = mylist[:]
self.xcache["cp-list"][mycp] = cachelist
# Do not propagate old-style virtuals since
# cp_list() doesn't expand them.
if not (not cachelist and mycp.startswith("virtual/")):
self.xcache["match-all"][mycp] = cachelist
return mylist
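		# Illustrative (hypothetical package): cp_list("sys-apps/foo") scans each
		# configured tree for sys-apps/foo/*.ebuild and returns cpvs such as
		# ["sys-apps/foo-1.0", "sys-apps/foo-1.1-r1"] in ascending version order;
		# a category missing from the configured categories list yields an empty
		# result together with the warning above.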
def freeze(self):
for x in "bestmatch-visible", "cp-list", "list-visible", "match-all", \
"match-visible", "minimum-all", "minimum-visible":
self.xcache[x]={}
self.frozen=1
def melt(self):
self.xcache = {}
self.frozen = 0
def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
"caching match function; very trick stuff"
#if no updates are being made to the tree, we can consult our xcache...
if self.frozen:
try:
return self.xcache[level][origdep][:]
except KeyError:
pass
if not mydep:
#this stuff only runs on first call of xmatch()
#create mydep, mykey from origdep
mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
mykey = dep_getkey(mydep)
if level == "list-visible":
#a list of all visible packages, not called directly (just by xmatch())
#myval = self.visible(self.cp_list(mykey))
myval = self.gvisible(self.visible(self.cp_list(mykey)))
elif level == "minimum-all":
# Find the minimum matching version. This is optimized to
# minimize the number of metadata accesses (improves performance
# especially in cases where metadata needs to be generated).
cpv_iter = iter(self.cp_list(mykey))
if mydep != mykey:
cpv_iter = self._iter_match(mydep, cpv_iter)
try:
myval = cpv_iter.next()
except StopIteration:
myval = ""
elif level in ("minimum-visible", "bestmatch-visible"):
# Find the minimum matching visible version. This is optimized to
# minimize the number of metadata accesses (improves performance
# especially in cases where metadata needs to be generated).
if mydep == mykey:
mylist = self.cp_list(mykey)
else:
mylist = match_from_list(mydep, self.cp_list(mykey))
myval = ""
settings = self.mysettings
local_config = settings.local_config
aux_keys = list(self._aux_cache_keys)
if level == "minimum-visible":
iterfunc = iter
else:
iterfunc = reversed
for cpv in iterfunc(mylist):
try:
metadata = dict(izip(aux_keys,
self.aux_get(cpv, aux_keys)))
except KeyError:
# ebuild masked by corruption
continue
if not eapi_is_supported(metadata["EAPI"]):
continue
if mydep.slot and mydep.slot != metadata["SLOT"]:
continue
if settings._getMissingKeywords(cpv, metadata):
continue
if settings._getMaskAtom(cpv, metadata):
continue
if settings._getProfileMaskAtom(cpv, metadata):
continue
if local_config:
metadata["USE"] = ""
if "?" in metadata["LICENSE"]:
self.doebuild_settings.setcpv(cpv, mydb=metadata)
metadata["USE"] = self.doebuild_settings.get("USE", "")
try:
if settings._getMissingLicenses(cpv, metadata):
continue
except InvalidDependString:
continue
if mydep.use:
has_iuse = False
for has_iuse in self._iter_match_use(mydep, [cpv]):
break
if not has_iuse:
continue
myval = cpv
break
elif level == "bestmatch-list":
#dep match -- find best match but restrict search to sublist
#no point in calling xmatch again since we're not caching list deps
myval = best(list(self._iter_match(mydep, mylist)))
elif level == "match-list":
#dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
myval = list(self._iter_match(mydep, mylist))
elif level == "match-visible":
#dep match -- find all visible matches
#get all visible packages, then get the matching ones
myval = list(self._iter_match(mydep,
self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey)))
elif level == "match-all":
#match *all* visible *and* masked packages
if mydep == mykey:
myval = self.cp_list(mykey)
else:
myval = list(self._iter_match(mydep, self.cp_list(mykey)))
else:
print "ERROR: xmatch doesn't handle", level, "query!"
raise KeyError
if self.frozen and (level not in ["match-list", "bestmatch-list"]):
self.xcache[level][mydep] = myval
if origdep and origdep != mydep:
self.xcache[level][origdep] = myval
return myval[:]
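		# Usage sketch (hypothetical atom): xmatch("bestmatch-visible",
		# ">=sys-apps/foo-1.0") returns the highest visible cpv matching the
		# atom, or "" when nothing matches, while "match-all" returns every
		# matching cpv, visible or masked. While the dbapi is frozen, results
		# are cached in self.xcache for all levels except "match-list" and
		# "bestmatch-list".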
def match(self, mydep, use_cache=1):
return self.xmatch("match-visible", mydep)
def visible(self, mylist):
"""two functions in one. Accepts a list of cpv values and uses the package.mask *and*
packages file to remove invisible entries, returning remaining items. This function assumes
that all entries in mylist have the same category and package name."""
if not mylist:
return []
db_keys = ["SLOT"]
visible = []
getMaskAtom = self.mysettings._getMaskAtom
getProfileMaskAtom = self.mysettings._getProfileMaskAtom
for cpv in mylist:
try:
metadata = dict(izip(db_keys, self.aux_get(cpv, db_keys)))
except KeyError:
# masked by corruption
continue
if not metadata["SLOT"]:
continue
if getMaskAtom(cpv, metadata):
continue
if getProfileMaskAtom(cpv, metadata):
continue
visible.append(cpv)
return visible
def gvisible(self,mylist):
"strip out group-masked (not in current group) entries"
if mylist is None:
return []
newlist=[]
aux_keys = list(self._aux_cache_keys)
metadata = {}
local_config = self.mysettings.local_config
for mycpv in mylist:
metadata.clear()
try:
metadata.update(izip(aux_keys, self.aux_get(mycpv, aux_keys)))
except KeyError:
continue
except PortageException, e:
writemsg("!!! Error: aux_get('%s', %s)\n" % (mycpv, aux_keys),
noiselevel=-1)
writemsg("!!! %s\n" % str(e), noiselevel=-1)
del e
continue
eapi = metadata["EAPI"]
if not eapi_is_supported(eapi):
continue
if _eapi_is_deprecated(eapi):
continue
if self.mysettings._getMissingKeywords(mycpv, metadata):
continue
if local_config:
metadata["USE"] = ""
if "?" in metadata["LICENSE"]:
self.doebuild_settings.setcpv(mycpv, mydb=metadata)
metadata["USE"] = self.doebuild_settings.get("USE", "")
try:
if self.mysettings._getMissingLicenses(mycpv, metadata):
continue
except InvalidDependString:
continue
newlist.append(mycpv)
return newlist
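		# Descriptive note: visible() filters on SLOT presence, package.mask and
		# profile mask entries, while gvisible() additionally drops packages with
		# unsupported or deprecated EAPIs, missing keywords, or unaccepted
		# licenses; the "list-visible" level of xmatch() composes the two.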
def close_portdbapi_caches():
for i in portdbapi.portdbapi_instances:
i.close_caches()
class portagetree(object):
def __init__(self, root="/", virtual=None, clone=None, settings=None):
"""
Constructor for a PortageTree
@param root: ${ROOT}, defaults to '/', see make.conf(5)
@type root: String/Path
@param virtual: UNUSED
@type virtual: No Idea
@param clone: Set this if you want a copy of Clone
@type clone: Existing portagetree Instance
@param settings: Portage Configuration object (portage.settings)
@type settings: Instance of portage.config
"""
if clone:
writemsg("portagetree.__init__(): deprecated " + \
"use of clone parameter\n", noiselevel=-1)
self.root = clone.root
self.portroot = clone.portroot
self.pkglines = clone.pkglines
else:
self.root = root
if settings is None:
from portage import settings
self.settings = settings
self.portroot = settings["PORTDIR"]
self.virtual = virtual
self.dbapi = portdbapi(
settings["PORTDIR"], mysettings=settings)
def dep_bestmatch(self,mydep):
"compatibility method"
mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
if mymatch is None:
return ""
return mymatch
def dep_match(self,mydep):
"compatibility method"
mymatch = self.dbapi.xmatch("match-visible",mydep)
if mymatch is None:
return []
return mymatch
def exists_specific(self,cpv):
return self.dbapi.cpv_exists(cpv)
def getallnodes(self):
"""new behavior: these are all *unmasked* nodes. There may or may not be available
masked package for nodes in this nodes list."""
return self.dbapi.cp_all()
def getname(self, pkgname):
"returns file location for this particular package (DEPRECATED)"
if not pkgname:
return ""
mysplit = pkgname.split("/")
psplit = pkgsplit(mysplit[1])
return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
def resolve_specific(self, myspec):
cps = catpkgsplit(myspec)
if not cps:
return None
mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
settings=self.settings)
mykey = mykey + "-" + cps[2]
if cps[3] != "r0":
mykey = mykey + "-" + cps[3]
return mykey
def depcheck(self, mycheck, use="yes", myusesplit=None):
return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit)
def getslot(self,mycatpkg):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
except SystemExit, e:
raise
except Exception, e:
pass
return myslot
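# Usage sketch for this module (hypothetical atom and variable names; real
# consumers normally obtain the portdbapi instance from
# portage.db[portage.root]["porttree"].dbapi):
#
#	import portage
#	ptree = portage.portagetree(settings=portage.settings)
#	matches = ptree.dbapi.xmatch("match-visible", "sys-apps/portage")
#	if matches:
#		slot = ptree.dbapi.aux_get(matches[-1], ["SLOT"])[0]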