| #!/usr/bin/python -O |
| # Copyright 1999-2006 Gentoo Foundation |
| # Distributed under the terms of the GNU General Public License v2 |
| # $Id: emerge 5976 2007-02-17 09:14:53Z genone $ |
| |
| import sys |
| # This block ensures that ^C interrupts are handled quietly. |
| try: |
| import signal |
| |
| def exithandler(signum,frame): |
| signal.signal(signal.SIGINT, signal.SIG_IGN) |
| signal.signal(signal.SIGTERM, signal.SIG_IGN) |
| sys.exit(1) |
| |
| signal.signal(signal.SIGINT, exithandler) |
| signal.signal(signal.SIGTERM, exithandler) |
| signal.signal(signal.SIGPIPE, signal.SIG_DFL) |
| |
| except KeyboardInterrupt: |
| sys.exit(1) |
| |
| import array |
| from collections import deque |
| import fcntl |
| import formatter |
| import fpformat |
| import logging |
| import select |
| import shlex |
| import shutil |
| import textwrap |
| import urlparse |
| import weakref |
| import gc |
| import os, stat |
| import platform |
| |
| try: |
| import portage |
| except ImportError: |
| from os import path as osp |
| sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")) |
| import portage |
| |
| from portage import digraph, portdbapi |
| from portage.const import NEWS_LIB_PATH, CACHE_PATH, PRIVATE_PATH, USER_CONFIG_PATH, GLOBAL_CONFIG_PATH |
| |
| import _emerge.help |
| import portage.xpak, commands, errno, re, socket, time, types |
| from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \ |
| havecolor, nc_len, nocolor, red, teal, turquoise, white, xtermTitle, \ |
| xtermTitleReset, yellow |
| from portage.output import create_color_func |
| good = create_color_func("GOOD") |
| bad = create_color_func("BAD") |
| # white looks bad on terminals with white background |
| from portage.output import bold as white |
| |
| import portage.elog |
| import portage.dep |
| portage.dep._dep_check_strict = True |
| import portage.util |
| import portage.locks |
| import portage.exception |
| from portage.data import secpass |
| from portage.elog.messages import eerror |
| from portage.util import normalize_path as normpath |
| from portage.util import writemsg, writemsg_level |
| from portage.sets import load_default_config, SETPREFIX |
| from portage.sets.base import InternalPackageSet |
| |
| from itertools import chain, izip |
| from UserDict import DictMixin |
| |
| try: |
| import cPickle |
| except ImportError: |
| import pickle as cPickle |
| |
| try: |
| import cStringIO as StringIO |
| except ImportError: |
| import StringIO |
| |
| class stdout_spinner(object): |
| scroll_msgs = [ |
| "Gentoo Rocks ("+platform.system()+")", |
| "Thank you for using Gentoo. :)", |
| "Are you actually trying to read this?", |
| "How many times have you stared at this?", |
| "We are generating the cache right now", |
| "You are paying too much attention.", |
| "A theory is better than its explanation.", |
| "Phasers locked on target, Captain.", |
| "Thrashing is just virtual crashing.", |
| "To be is to program.", |
| "Real Users hate Real Programmers.", |
| "When all else fails, read the instructions.", |
| "Functionality breeds Contempt.", |
| "The future lies ahead.", |
| "3.1415926535897932384626433832795028841971694", |
| "Sometimes insanity is the only alternative.", |
| "Inaccuracy saves a world of explanation.", |
| ] |
| |
| twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|" |
| |
| def __init__(self): |
| self.spinpos = 0 |
| self.update = self.update_twirl |
| self.scroll_sequence = self.scroll_msgs[ |
| int(time.time() * 100) % len(self.scroll_msgs)] |
| self.last_update = 0 |
| self.min_display_latency = 0.05 |
| |
| def _return_early(self): |
| """ |
		Flushing output to the tty too frequently wastes CPU time. Therefore,
| each update* method should return without doing any output when this |
| method returns True. |
| """ |
| cur_time = time.time() |
| if cur_time - self.last_update < self.min_display_latency: |
| return True |
| self.last_update = cur_time |
| return False |
| |
| def update_basic(self): |
| self.spinpos = (self.spinpos + 1) % 500 |
| if self._return_early(): |
| return |
| if (self.spinpos % 100) == 0: |
| if self.spinpos == 0: |
| sys.stdout.write(". ") |
| else: |
| sys.stdout.write(".") |
| sys.stdout.flush() |
| |
| def update_scroll(self): |
| if self._return_early(): |
| return |
		if self.spinpos >= len(self.scroll_sequence):
| sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[ |
| len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))])) |
| else: |
| sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos])) |
| sys.stdout.flush() |
| self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence)) |
| |
| def update_twirl(self): |
| self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence) |
| if self._return_early(): |
| return |
| sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos]) |
| sys.stdout.flush() |
| |
| def update_quiet(self): |
| return |
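
# Usage sketch (illustrative; assumes stdout is a tty). A spinner is driven by
# repeatedly calling its update() method from a work loop; update_quiet is
# typically substituted when spinner output is unwanted (e.g. --nospinner):
#
#	spinner = stdout_spinner()
#	spinner.update = spinner.update_scroll  # or update_basic / update_twirl
#	for item in work_items:  # work_items is hypothetical
#		spinner.update()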
| |
| def userquery(prompt, responses=None, colours=None): |
| """Displays a prompt and a set of responses, then waits for a response |
| which is checked against the responses and the first to match is |
| returned. An empty response will match the first value in responses. The |
| input buffer is *not* cleared prior to the prompt! |
| |
| prompt: a String. |
| responses: a List of Strings. |
| colours: a List of Functions taking and returning a String, used to |
| process the responses for display. Typically these will be functions |
| like red() but could be e.g. lambda x: "DisplayString". |
| If responses is omitted, defaults to ["Yes", "No"], [green, red]. |
| If only colours is omitted, defaults to [bold, ...]. |
| |
| Returns a member of the List responses. (If called without optional |
| arguments, returns "Yes" or "No".) |
| KeyboardInterrupt is converted to SystemExit to avoid tracebacks being |
| printed.""" |
| if responses is None: |
| responses = ["Yes", "No"] |
| colours = [ |
| create_color_func("PROMPT_CHOICE_DEFAULT"), |
| create_color_func("PROMPT_CHOICE_OTHER") |
| ] |
| elif colours is None: |
| colours=[bold] |
| colours=(colours*len(responses))[:len(responses)] |
| print bold(prompt), |
| try: |
| while True: |
| response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ") |
| for key in responses: |
| # An empty response will match the first value in responses. |
| if response.upper()==key[:len(response)].upper(): |
| return key |
| print "Sorry, response '%s' not understood." % response, |
| except (EOFError, KeyboardInterrupt): |
| print "Interrupted." |
| sys.exit(1) |
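
# Call sketch (illustrative): with the default responses, an empty response
# (just pressing Enter) matches the first entry, "Yes":
#
#	if userquery("Do you want to continue?") == "Yes":
#		pass  # confirmed (or Enter was pressed)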
| |
| actions=[ |
| "clean", "config", "depclean", |
| "info", "metadata", |
| "prune", "regen", "search", |
| "sync", "unmerge", |
| ] |
| options=[ |
| "--ask", "--alphabetical", |
| "--buildpkg", "--buildpkgonly", |
| "--changelog", "--columns", |
| "--complete-graph", |
| "--debug", "--deep", |
| "--digest", |
| "--emptytree", |
| "--fetchonly", "--fetch-all-uri", |
| "--getbinpkg", "--getbinpkgonly", |
| "--help", "--ignore-default-opts", |
| "--keep-going", |
| "--noconfmem", |
| "--newuse", "--nocolor", |
| "--nodeps", "--noreplace", |
| "--nospinner", "--oneshot", |
| "--onlydeps", "--pretend", |
| "--quiet", "--resume", |
| "--searchdesc", "--selective", |
| "--skipfirst", |
| "--tree", |
| "--update", |
| "--usepkg", "--usepkgonly", |
| "--verbose", "--version" |
| ] |
| |
| shortmapping={ |
| "1":"--oneshot", |
| "a":"--ask", |
| "b":"--buildpkg", "B":"--buildpkgonly", |
| "c":"--clean", "C":"--unmerge", |
| "d":"--debug", "D":"--deep", |
| "e":"--emptytree", |
| "f":"--fetchonly", "F":"--fetch-all-uri", |
| "g":"--getbinpkg", "G":"--getbinpkgonly", |
| "h":"--help", |
| "k":"--usepkg", "K":"--usepkgonly", |
| "l":"--changelog", |
| "n":"--noreplace", "N":"--newuse", |
| "o":"--onlydeps", "O":"--nodeps", |
| "p":"--pretend", "P":"--prune", |
| "q":"--quiet", |
| "s":"--search", "S":"--searchdesc", |
| "t":"--tree", |
| "u":"--update", |
| "v":"--verbose", "V":"--version" |
| } |
| |
| def emergelog(xterm_titles, mystr, short_msg=None): |
| if xterm_titles and short_msg: |
| if "HOSTNAME" in os.environ: |
| short_msg = os.environ["HOSTNAME"]+": "+short_msg |
| xtermTitle(short_msg) |
| try: |
| file_path = "/var/log/emerge.log" |
| mylogfile = open(file_path, "a") |
| portage.util.apply_secpass_permissions(file_path, |
| uid=portage.portage_uid, gid=portage.portage_gid, |
| mode=0660) |
| mylock = None |
| try: |
| mylock = portage.locks.lockfile(mylogfile) |
| # seek because we may have gotten held up by the lock. |
| # if so, we may not be positioned at the end of the file. |
| mylogfile.seek(0, 2) |
| mylogfile.write(str(time.time())[:10]+": "+mystr+"\n") |
| mylogfile.flush() |
| finally: |
| if mylock: |
| portage.locks.unlockfile(mylock) |
| mylogfile.close() |
| except (IOError,OSError,portage.exception.PortageException), e: |
| if secpass >= 1: |
| print >> sys.stderr, "emergelog():",e |
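
# Call sketch (message text is illustrative): appends a timestamped line to
# /var/log/emerge.log and, when xterm titles are enabled, mirrors short_msg
# to the terminal title:
#
#	emergelog(xterm_titles, " >>> emerge (1 of 1) app-foo/bar-1.0",
#		short_msg="emerge: app-foo/bar-1.0")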
| |
| def countdown(secs=5, doing="Starting"): |
| if secs: |
| print ">>> Waiting",secs,"seconds before starting..." |
| print ">>> (Control-C to abort)...\n"+doing+" in: ", |
| ticks=range(secs) |
| ticks.reverse() |
| for sec in ticks: |
| sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" ")) |
| sys.stdout.flush() |
| time.sleep(1) |
| print |
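
# Call sketch: a five second abort window before an unmerge might look like
#
#	countdown(secs=5, doing="Unmerging")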
| |
| # formats a size given in bytes nicely |
| def format_size(mysize): |
| if type(mysize) not in [types.IntType,types.LongType]: |
| return str(mysize) |
| if 0 != mysize % 1024: |
| # Always round up to the next kB so that it doesn't show 0 kB when |
| # some small file still needs to be fetched. |
| mysize += 1024 - mysize % 1024 |
| mystr=str(mysize/1024) |
| mycount=len(mystr) |
| while (mycount > 3): |
| mycount-=3 |
| mystr=mystr[:mycount]+","+mystr[mycount:] |
| return mystr+" kB" |
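
# Worked examples (these follow from the round-up and grouping rules above):
#
#	format_size(1)         # -> "1 kB" (rounded up to the next kB)
#	format_size(1024)      # -> "1 kB"
#	format_size(10485760)  # -> "10,240 kB" (digits grouped in threes)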
| |
| |
| def getgccversion(chost): |
| """ |
	@rtype: C{str}
	@return: the current in-use gcc version
| """ |
| |
| gcc_ver_command = 'gcc -dumpversion' |
| gcc_ver_prefix = 'gcc-' |
| |
| gcc_not_found_error = red( |
| "!!! No gcc found. You probably need to 'source /etc/profile'\n" + |
| "!!! to update the environment of this terminal and possibly\n" + |
| "!!! other terminals also.\n" |
| ) |
| |
| mystatus, myoutput = commands.getstatusoutput("gcc-config -c") |
| if mystatus == os.EX_OK and myoutput.startswith(chost + "-"): |
| return myoutput.replace(chost + "-", gcc_ver_prefix, 1) |
| |
| mystatus, myoutput = commands.getstatusoutput( |
| chost + "-" + gcc_ver_command) |
| if mystatus == os.EX_OK: |
| return gcc_ver_prefix + myoutput |
| |
| mystatus, myoutput = commands.getstatusoutput(gcc_ver_command) |
| if mystatus == os.EX_OK: |
| return gcc_ver_prefix + myoutput |
| |
| portage.writemsg(gcc_not_found_error, noiselevel=-1) |
| return "[unavailable]" |
| |
| def getportageversion(portdir, target_root, profile, chost, vardb): |
| profilever = "unavailable" |
| if profile: |
| realpath = os.path.realpath(profile) |
| basepath = os.path.realpath(os.path.join(portdir, "profiles")) |
| if realpath.startswith(basepath): |
| profilever = realpath[1 + len(basepath):] |
| else: |
| try: |
| profilever = "!" + os.readlink(profile) |
			except OSError:
| pass |
| del realpath, basepath |
| |
	libcver = []
	libclist = vardb.match("virtual/libc")
	libclist += vardb.match("virtual/glibc")
	libclist = portage.util.unique_array(libclist)
	for x in libclist:
		xs = portage.catpkgsplit(x)
		libcver.append("-".join(xs[1:]))
	if libcver:
		libcver = ",".join(libcver)
	else:
		libcver = "unavailable"
| |
| gccver = getgccversion(chost) |
| unameout=platform.release()+" "+platform.machine() |
| |
| return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")" |
| |
| def create_depgraph_params(myopts, myaction): |
| #configure emerge engine parameters |
| # |
	# self: include _this_ package regardless of whether it is merged.
| # selective: exclude the package if it is merged |
| # recurse: go into the dependencies |
| # deep: go into the dependencies of already merged packages |
| # empty: pretend nothing is merged |
| # complete: completely account for all known dependencies |
| # remove: build graph for use in removing packages |
| myparams = set(["recurse"]) |
| |
| if myaction == "remove": |
| myparams.add("remove") |
| myparams.add("complete") |
| return myparams |
| |
| if "--update" in myopts or \ |
| "--newuse" in myopts or \ |
| "--reinstall" in myopts or \ |
| "--noreplace" in myopts: |
| myparams.add("selective") |
| if "--emptytree" in myopts: |
| myparams.add("empty") |
| myparams.discard("selective") |
| if "--nodeps" in myopts: |
| myparams.discard("recurse") |
| if "--deep" in myopts: |
| myparams.add("deep") |
| if "--complete-graph" in myopts: |
| myparams.add("complete") |
| return myparams |
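
# Examples (these follow directly from the rules above):
#
#	create_depgraph_params({"--update": True, "--deep": True}, None)
#	# -> set(["recurse", "selective", "deep"])
#	create_depgraph_params({"--nodeps": True}, None)
#	# -> set([])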
| |
| # search functionality |
| class search(object): |
| |
| # |
| # class constants |
| # |
| VERSION_SHORT=1 |
| VERSION_RELEASE=2 |
| |
| # |
| # public interface |
| # |
| def __init__(self, root_config, spinner, searchdesc, |
| verbose, usepkg, usepkgonly): |
| """Searches the available and installed packages for the supplied search key. |
| The list of available and installed packages is created at object instantiation. |
| This makes successive searches faster.""" |
| self.settings = root_config.settings |
| self.vartree = root_config.trees["vartree"] |
| self.spinner = spinner |
| self.verbose = verbose |
| self.searchdesc = searchdesc |
| self.root_config = root_config |
| self.setconfig = root_config.setconfig |
| |
| def fake_portdb(): |
| pass |
| self.portdb = fake_portdb |
| for attrib in ("aux_get", "cp_all", |
| "xmatch", "findname", "getfetchlist"): |
| setattr(fake_portdb, attrib, getattr(self, "_"+attrib)) |
| |
| self._dbs = [] |
| |
| portdb = root_config.trees["porttree"].dbapi |
| bindb = root_config.trees["bintree"].dbapi |
| vardb = root_config.trees["vartree"].dbapi |
| |
| if not usepkgonly and portdb._have_root_eclass_dir: |
| self._dbs.append(portdb) |
| |
| if (usepkg or usepkgonly) and bindb.cp_all(): |
| self._dbs.append(bindb) |
| |
| self._dbs.append(vardb) |
| self._portdb = portdb |
| |
| def _cp_all(self): |
| cp_all = set() |
| for db in self._dbs: |
| cp_all.update(db.cp_all()) |
| return list(sorted(cp_all)) |
| |
| def _aux_get(self, *args, **kwargs): |
| for db in self._dbs: |
| try: |
| return db.aux_get(*args, **kwargs) |
| except KeyError: |
| pass |
| raise |
| |
| def _findname(self, *args, **kwargs): |
| for db in self._dbs: |
| if db is not self._portdb: |
| # We don't want findname to return anything |
| # unless it's an ebuild in a portage tree. |
| # Otherwise, it's already built and we don't |
| # care about it. |
| continue |
| func = getattr(db, "findname", None) |
| if func: |
| value = func(*args, **kwargs) |
| if value: |
| return value |
| return None |
| |
| def _getfetchlist(self, *args, **kwargs): |
| for db in self._dbs: |
| func = getattr(db, "getfetchlist", None) |
| if func: |
| value = func(*args, **kwargs) |
| if value: |
| return value |
| return [], [] |
| |
| def _visible(self, db, cpv, metadata): |
| installed = db is self.vartree.dbapi |
| built = installed or db is not self._portdb |
| pkg_type = "ebuild" |
| if installed: |
| pkg_type = "installed" |
| elif built: |
| pkg_type = "binary" |
| return visible(self.settings, |
| Package(type_name=pkg_type, root_config=self.root_config, |
| cpv=cpv, built=built, installed=installed, metadata=metadata)) |
| |
| def _xmatch(self, level, atom): |
| """ |
| This method does not expand old-style virtuals because it |
| is restricted to returning matches for a single ${CATEGORY}/${PN} |
		and old-style virtual matches are unreliable for that when querying
		multiple package databases. If necessary, old-style virtual
		expansion can be performed on atoms prior to calling this method.
| """ |
| cp = portage.dep_getkey(atom) |
| if level == "match-all": |
| matches = set() |
| for db in self._dbs: |
| if hasattr(db, "xmatch"): |
| matches.update(db.xmatch(level, atom)) |
| else: |
| matches.update(db.match(atom)) |
| result = list(x for x in matches if portage.cpv_getkey(x) == cp) |
| db._cpv_sort_ascending(result) |
| elif level == "match-visible": |
| matches = set() |
| for db in self._dbs: |
| if hasattr(db, "xmatch"): |
| matches.update(db.xmatch(level, atom)) |
| else: |
| db_keys = list(db._aux_cache_keys) |
| for cpv in db.match(atom): |
| metadata = izip(db_keys, |
| db.aux_get(cpv, db_keys)) |
| if not self._visible(db, cpv, metadata): |
| continue |
| matches.add(cpv) |
| result = list(x for x in matches if portage.cpv_getkey(x) == cp) |
| db._cpv_sort_ascending(result) |
| elif level == "bestmatch-visible": |
| result = None |
| for db in self._dbs: |
| if hasattr(db, "xmatch"): |
| cpv = db.xmatch("bestmatch-visible", atom) |
| if not cpv or portage.cpv_getkey(cpv) != cp: |
| continue |
| if not result or cpv == portage.best([cpv, result]): |
| result = cpv |
| else: |
| db_keys = list(db._aux_cache_keys) |
| # break out of this loop with highest visible |
| # match, checked in descending order |
| for cpv in reversed(db.match(atom)): |
| if portage.cpv_getkey(cpv) != cp: |
| continue |
| metadata = izip(db_keys, |
| db.aux_get(cpv, db_keys)) |
| if not self._visible(db, cpv, metadata): |
| continue |
| if not result or cpv == portage.best([cpv, result]): |
| result = cpv |
| break |
| else: |
| raise NotImplementedError(level) |
| return result |
| |
| def execute(self,searchkey): |
| """Performs the search for the supplied search key""" |
| match_category = 0 |
| self.searchkey=searchkey |
| self.packagematches = [] |
| if self.searchdesc: |
| self.searchdesc=1 |
| self.matches = {"pkg":[], "desc":[], "set":[]} |
| else: |
| self.searchdesc=0 |
| self.matches = {"pkg":[], "set":[]} |
| print "Searching... ", |
| |
| regexsearch = False |
| if self.searchkey.startswith('%'): |
| regexsearch = True |
| self.searchkey = self.searchkey[1:] |
| if self.searchkey.startswith('@'): |
| match_category = 1 |
| self.searchkey = self.searchkey[1:] |
| if regexsearch: |
| self.searchre=re.compile(self.searchkey,re.I) |
| else: |
| self.searchre=re.compile(re.escape(self.searchkey), re.I) |
| for package in self.portdb.cp_all(): |
| self.spinner.update() |
| |
| if match_category: |
| match_string = package[:] |
| else: |
| match_string = package.split("/")[-1] |
| |
| masked=0 |
| if self.searchre.search(match_string): |
| if not self.portdb.xmatch("match-visible", package): |
| masked=1 |
| self.matches["pkg"].append([package,masked]) |
| elif self.searchdesc: # DESCRIPTION searching |
| full_package = self.portdb.xmatch("bestmatch-visible", package) |
| if not full_package: |
| #no match found; we don't want to query description |
| full_package = portage.best( |
| self.portdb.xmatch("match-all", package)) |
| if not full_package: |
| continue |
| else: |
| masked=1 |
| try: |
| full_desc = self.portdb.aux_get( |
| full_package, ["DESCRIPTION"])[0] |
| except KeyError: |
| print "emerge: search: aux_get() failed, skipping" |
| continue |
| if self.searchre.search(full_desc): |
| self.matches["desc"].append([full_package,masked]) |
| |
| self.sdict = self.setconfig.getSets() |
| for setname in self.sdict: |
| self.spinner.update() |
| if match_category: |
| match_string = setname |
| else: |
| match_string = setname.split("/")[-1] |
| |
| if self.searchre.search(match_string): |
| self.matches["set"].append([setname, False]) |
| elif self.searchdesc: |
| if self.searchre.search( |
| self.sdict[setname].getMetadata("DESCRIPTION")): |
| self.matches["set"].append([setname, False]) |
| |
| self.mlen=0 |
| for mtype in self.matches: |
| self.matches[mtype].sort() |
| self.mlen += len(self.matches[mtype]) |
| |
| def output(self): |
| """Outputs the results of the search.""" |
| print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]" |
| print "[ Applications found : "+white(str(self.mlen))+" ]" |
| print " " |
| vardb = self.vartree.dbapi |
| for mtype in self.matches: |
| for match,masked in self.matches[mtype]: |
| full_package = None |
| if mtype == "pkg": |
| catpack = match |
| full_package = self.portdb.xmatch( |
| "bestmatch-visible", match) |
| if not full_package: |
| #no match found; we don't want to query description |
| masked=1 |
| full_package = portage.best( |
| self.portdb.xmatch("match-all",match)) |
| elif mtype == "desc": |
| full_package = match |
| match = portage.cpv_getkey(match) |
| elif mtype == "set": |
| print green("*")+" "+white(match) |
| print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION") |
| print |
| if full_package: |
| try: |
| desc, homepage, license = self.portdb.aux_get( |
| full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"]) |
| except KeyError: |
| print "emerge: search: aux_get() failed, skipping" |
| continue |
| if masked: |
| print green("*")+" "+white(match)+" "+red("[ Masked ]") |
| else: |
| print green("*")+" "+white(match) |
| myversion = self.getVersion(full_package, search.VERSION_RELEASE) |
| |
| mysum = [0,0] |
| file_size_str = None |
| mycat = match.split("/")[0] |
| mypkg = match.split("/")[1] |
| mycpv = match + "-" + myversion |
| myebuild = self.portdb.findname(mycpv) |
| if myebuild: |
| pkgdir = os.path.dirname(myebuild) |
| from portage import manifest |
| mf = manifest.Manifest( |
| pkgdir, self.settings["DISTDIR"]) |
| fetchlist = self.portdb.getfetchlist(mycpv, |
| mysettings=self.settings, all=True)[1] |
| try: |
| mysum[0] = mf.getDistfilesSize(fetchlist) |
| except KeyError, e: |
| file_size_str = "Unknown (missing digest for %s)" % \ |
| str(e) |
| |
| available = False |
| for db in self._dbs: |
| if db is not vardb and \ |
| db.cpv_exists(mycpv): |
| available = True |
| if not myebuild and hasattr(db, "bintree"): |
| myebuild = db.bintree.getname(mycpv) |
| try: |
| mysum[0] = os.stat(myebuild).st_size |
| except OSError: |
| myebuild = None |
| break |
| |
| if myebuild and file_size_str is None: |
| mystr = str(mysum[0] / 1024) |
| mycount = len(mystr) |
| while (mycount > 3): |
| mycount -= 3 |
| mystr = mystr[:mycount] + "," + mystr[mycount:] |
| file_size_str = mystr + " kB" |
| |
| if self.verbose: |
| if available: |
| print " ", darkgreen("Latest version available:"),myversion |
| print " ", self.getInstallationStatus(mycat+'/'+mypkg) |
| if myebuild: |
| print " %s %s" % \ |
| (darkgreen("Size of files:"), file_size_str) |
| print " ", darkgreen("Homepage:")+" ",homepage |
| print " ", darkgreen("Description:")+" ",desc |
| print " ", darkgreen("License:")+" ",license |
| print |
| print |
| # |
| # private interface |
| # |
| def getInstallationStatus(self,package): |
| installed_package = self.vartree.dep_bestmatch(package) |
| result = "" |
| version = self.getVersion(installed_package,search.VERSION_RELEASE) |
| if len(version) > 0: |
| result = darkgreen("Latest version installed:")+" "+version |
| else: |
| result = darkgreen("Latest version installed:")+" [ Not Installed ]" |
| return result |
| |
| def getVersion(self,full_package,detail): |
| if len(full_package) > 1: |
| package_parts = portage.catpkgsplit(full_package) |
| if detail == search.VERSION_RELEASE and package_parts[3] != 'r0': |
| result = package_parts[2]+ "-" + package_parts[3] |
| else: |
| result = package_parts[2] |
| else: |
| result = "" |
| return result |
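
# Usage sketch (hypothetical; a configured root_config and spinner are assumed
# from the caller's setup). A leading '%' makes the key a regex and a leading
# '@' matches against the full category/package name:
#
#	s = search(root_config, spinner, searchdesc=True,
#		verbose=False, usepkg=False, usepkgonly=False)
#	s.execute("python")
#	s.output()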
| |
| class RootConfig(object): |
| """This is used internally by depgraph to track information about a |
| particular $ROOT.""" |
| |
| pkg_tree_map = { |
| "ebuild" : "porttree", |
| "binary" : "bintree", |
| "installed" : "vartree" |
| } |
| |
| tree_pkg_map = {} |
| for k, v in pkg_tree_map.iteritems(): |
| tree_pkg_map[v] = k |
| |
| def __init__(self, settings, trees, setconfig): |
| self.trees = trees |
| self.settings = settings |
| self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse())) |
| self.root = self.settings["ROOT"] |
| self.setconfig = setconfig |
| self.sets = self.setconfig.getSets() |
| self.visible_pkgs = PackageVirtualDbapi(self.settings) |
| |
| def create_world_atom(pkg, args_set, root_config): |
| """Create a new atom for the world file if one does not exist. If the |
| argument atom is precise enough to identify a specific slot then a slot |
| atom will be returned. Atoms that are in the system set may also be stored |
| in world since system atoms can only match one slot while world atoms can |
| be greedy with respect to slots. Unslotted system packages will not be |
| stored in world.""" |
| |
| arg_atom = args_set.findAtomForPackage(pkg) |
| if not arg_atom: |
| return None |
| cp = portage.dep_getkey(arg_atom) |
| new_world_atom = cp |
| sets = root_config.sets |
| portdb = root_config.trees["porttree"].dbapi |
| vardb = root_config.trees["vartree"].dbapi |
| available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \ |
| for cpv in portdb.match(cp)) |
| slotted = len(available_slots) > 1 or \ |
| (len(available_slots) == 1 and "0" not in available_slots) |
| if not slotted: |
| # check the vdb in case this is multislot |
| available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \ |
| for cpv in vardb.match(cp)) |
| slotted = len(available_slots) > 1 or \ |
| (len(available_slots) == 1 and "0" not in available_slots) |
| if slotted and arg_atom != cp: |
| # If the user gave a specific atom, store it as a |
| # slot atom in the world file. |
| slot_atom = pkg.slot_atom |
| |
| # For USE=multislot, there are a couple of cases to |
| # handle here: |
| # |
| # 1) SLOT="0", but the real SLOT spontaneously changed to some |
| # unknown value, so just record an unslotted atom. |
| # |
| # 2) SLOT comes from an installed package and there is no |
| # matching SLOT in the portage tree. |
| # |
| # Make sure that the slot atom is available in either the |
| # portdb or the vardb, since otherwise the user certainly |
| # doesn't want the SLOT atom recorded in the world file |
| # (case 1 above). If it's only available in the vardb, |
| # the user may be trying to prevent a USE=multislot |
| # package from being removed by --depclean (case 2 above). |
| |
| mydb = portdb |
| if not portdb.match(slot_atom): |
| # SLOT seems to come from an installed multislot package |
| mydb = vardb |
| # If there is no installed package matching the SLOT atom, |
| # it probably changed SLOT spontaneously due to USE=multislot, |
| # so just record an unslotted atom. |
| if vardb.match(slot_atom): |
| # Now verify that the argument is precise |
| # enough to identify a specific slot. |
| matches = mydb.match(arg_atom) |
| matched_slots = set() |
| for cpv in matches: |
| matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0]) |
| if len(matched_slots) == 1: |
| new_world_atom = slot_atom |
| |
| if new_world_atom == sets["world"].findAtomForPackage(pkg): |
| # Both atoms would be identical, so there's nothing to add. |
| return None |
| if not slotted: |
| # Unlike world atoms, system atoms are not greedy for slots, so they |
| # can't be safely excluded from world if they are slotted. |
| system_atom = sets["system"].findAtomForPackage(pkg) |
| if system_atom: |
| if not portage.dep_getkey(system_atom).startswith("virtual/"): |
| return None |
| # System virtuals aren't safe to exclude from world since they can |
| # match multiple old-style virtuals but only one of them will be |
| # pulled in by update or depclean. |
| providers = portdb.mysettings.getvirtuals().get( |
| portage.dep_getkey(system_atom)) |
| if providers and len(providers) == 1 and providers[0] == cp: |
| return None |
| return new_world_atom |
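
# Example (sketch): for an argument atom like ">=dev-lang/python-2.5" that
# matches exactly one slot of a slotted package, the returned atom would be
# the slot atom "dev-lang/python:2.5"; an unslotted package yields the plain
# "dev-lang/python", and None means there is nothing to add to world.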
| |
| def filter_iuse_defaults(iuse): |
| for flag in iuse: |
| if flag.startswith("+") or flag.startswith("-"): |
| yield flag[1:] |
| else: |
| yield flag |
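
# Example: the +/- default markers are stripped while plain flags pass through:
#
#	list(filter_iuse_defaults(["+berkdb", "-gtk", "ssl"]))
#	# -> ["berkdb", "gtk", "ssl"]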
| |
| class SlotObject(object): |
| __slots__ = ("__weakref__",) |
| |
| def __init__(self, **kwargs): |
| classes = [self.__class__] |
| while classes: |
| c = classes.pop() |
| if c is SlotObject: |
| continue |
| classes.extend(c.__bases__) |
| slots = getattr(c, "__slots__", None) |
| if not slots: |
| continue |
| for myattr in slots: |
| myvalue = kwargs.get(myattr, None) |
| setattr(self, myattr, myvalue) |
| |
| def copy(self): |
| """ |
| Create a new instance and copy all attributes |
| defined from __slots__ (including those from |
| inherited classes). |
| """ |
| obj = self.__class__() |
| |
| classes = [self.__class__] |
| while classes: |
| c = classes.pop() |
| if c is SlotObject: |
| continue |
| classes.extend(c.__bases__) |
| slots = getattr(c, "__slots__", None) |
| if not slots: |
| continue |
| for myattr in slots: |
| setattr(obj, myattr, getattr(self, myattr)) |
| |
| return obj |
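
# Subclassing sketch (class name is illustrative): attributes are declared via
# __slots__ and populated from keyword arguments by SlotObject.__init__();
# anything not passed defaults to None:
#
#	class MyTask(SlotObject):
#		__slots__ = ("name", "value")
#
#	t = MyTask(name="foo")  # t.value is None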
| |
| class AbstractDepPriority(SlotObject): |
| __slots__ = ("buildtime", "runtime", "runtime_post") |
| |
| def __lt__(self, other): |
| return self.__int__() < other |
| |
| def __le__(self, other): |
| return self.__int__() <= other |
| |
| def __eq__(self, other): |
| return self.__int__() == other |
| |
| def __ne__(self, other): |
| return self.__int__() != other |
| |
| def __gt__(self, other): |
| return self.__int__() > other |
| |
| def __ge__(self, other): |
| return self.__int__() >= other |
| |
| def copy(self): |
| import copy |
| return copy.copy(self) |
| |
| class DepPriority(AbstractDepPriority): |
| """ |
	This class generates an integer priority level based on various
| attributes of the dependency relationship. Attributes can be assigned |
| at any time and the new integer value will be generated on calls to the |
| __int__() method. Rich comparison operators are supported. |
| |
| The boolean attributes that affect the integer value are "satisfied", |
| "buildtime", "runtime", and "system". Various combinations of |
| attributes lead to the following priority levels: |
| |
| Combination of properties Priority Category |
| |
| not satisfied and buildtime 0 HARD |
| not satisfied and runtime -1 MEDIUM |
| not satisfied and runtime_post -2 MEDIUM_SOFT |
| satisfied and buildtime and rebuild -3 SOFT |
| satisfied and buildtime -4 SOFT |
| satisfied and runtime -5 SOFT |
| satisfied and runtime_post -6 SOFT |
| (none of the above) -6 SOFT |
| |
| Several integer constants are defined for categorization of priority |
| levels: |
| |
| MEDIUM The upper boundary for medium dependencies. |
| MEDIUM_SOFT The upper boundary for medium-soft dependencies. |
| SOFT The upper boundary for soft dependencies. |
| MIN The lower boundary for soft dependencies. |
| """ |
| __slots__ = ("satisfied", "rebuild") |
| MEDIUM = -1 |
| MEDIUM_SOFT = -2 |
| SOFT = -3 |
| MIN = -6 |
| |
| def __int__(self): |
| if not self.satisfied: |
| if self.buildtime: |
| return 0 |
| if self.runtime: |
| return -1 |
| if self.runtime_post: |
| return -2 |
| if self.buildtime: |
| if self.rebuild: |
| return -3 |
| return -4 |
| if self.runtime: |
| return -5 |
| if self.runtime_post: |
| return -6 |
| return -6 |
| |
| def __str__(self): |
| myvalue = self.__int__() |
| if myvalue > self.MEDIUM: |
| return "hard" |
| if myvalue > self.MEDIUM_SOFT: |
| return "medium" |
| if myvalue > self.SOFT: |
| return "medium-soft" |
| return "soft" |
| |
| class BlockerDepPriority(DepPriority): |
| __slots__ = () |
| def __int__(self): |
| return 0 |
| |
| BlockerDepPriority.instance = BlockerDepPriority() |
| |
| class UnmergeDepPriority(AbstractDepPriority): |
| __slots__ = ("satisfied",) |
| """ |
| Combination of properties Priority Category |
| |
| runtime 0 HARD |
| runtime_post -1 HARD |
| buildtime -2 SOFT |
| (none of the above) -2 SOFT |
| """ |
| |
| MAX = 0 |
| SOFT = -2 |
| MIN = -2 |
| |
| def __int__(self): |
| if self.runtime: |
| return 0 |
| if self.runtime_post: |
| return -1 |
| if self.buildtime: |
| return -2 |
| return -2 |
| |
| def __str__(self): |
| myvalue = self.__int__() |
| if myvalue > self.SOFT: |
| return "hard" |
| return "soft" |
| |
| class FakeVartree(portage.vartree): |
| """This is implements an in-memory copy of a vartree instance that provides |
| all the interfaces required for use by the depgraph. The vardb is locked |
| during the constructor call just long enough to read a copy of the |
	installed package information. This allows the depgraph to do its
| dependency calculations without holding a lock on the vardb. It also |
| allows things like vardb global updates to be done in memory so that the |
| user doesn't necessarily need write access to the vardb in cases where |
| global updates are necessary (updates are performed when necessary if there |
| is not a matching ebuild in the tree).""" |
| def __init__(self, root_config, pkg_cache=None, acquire_lock=1): |
| self._root_config = root_config |
| if pkg_cache is None: |
| pkg_cache = {} |
| real_vartree = root_config.trees["vartree"] |
| portdb = root_config.trees["porttree"].dbapi |
| self.root = real_vartree.root |
| self.settings = real_vartree.settings |
| mykeys = list(real_vartree.dbapi._aux_cache_keys) |
| self._pkg_cache = pkg_cache |
| self.dbapi = PackageVirtualDbapi(real_vartree.settings) |
| vdb_path = os.path.join(self.root, portage.VDB_PATH) |
| try: |
| # At least the parent needs to exist for the lock file. |
| portage.util.ensure_dirs(vdb_path) |
| except portage.exception.PortageException: |
| pass |
| vdb_lock = None |
| try: |
| if acquire_lock and os.access(vdb_path, os.W_OK): |
| vdb_lock = portage.locks.lockdir(vdb_path) |
| real_dbapi = real_vartree.dbapi |
| slot_counters = {} |
| for cpv in real_dbapi.cpv_all(): |
| cache_key = ("installed", self.root, cpv, "nomerge") |
| pkg = self._pkg_cache.get(cache_key) |
| if pkg is not None: |
| metadata = pkg.metadata |
| else: |
| metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys))) |
| myslot = metadata["SLOT"] |
| mycp = portage.dep_getkey(cpv) |
| myslot_atom = "%s:%s" % (mycp, myslot) |
| try: |
| mycounter = long(metadata["COUNTER"]) |
| except ValueError: |
| mycounter = 0 |
| metadata["COUNTER"] = str(mycounter) |
| other_counter = slot_counters.get(myslot_atom, None) |
| if other_counter is not None: |
| if other_counter > mycounter: |
| continue |
| slot_counters[myslot_atom] = mycounter |
| if pkg is None: |
| pkg = Package(built=True, cpv=cpv, |
| installed=True, metadata=metadata, |
| root_config=root_config, type_name="installed") |
| self._pkg_cache[pkg] = pkg |
| self.dbapi.cpv_inject(pkg) |
| real_dbapi.flush_cache() |
| finally: |
| if vdb_lock: |
| portage.locks.unlockdir(vdb_lock) |
| # Populate the old-style virtuals using the cached values. |
| if not self.settings.treeVirtuals: |
| self.settings.treeVirtuals = portage.util.map_dictlist_vals( |
| portage.getCPFromCPV, self.get_all_provides()) |
| |
		# Initialize variables needed for lazy cache pulls of the live ebuild
| # metadata. This ensures that the vardb lock is released ASAP, without |
| # being delayed in case cache generation is triggered. |
| self._aux_get = self.dbapi.aux_get |
| self.dbapi.aux_get = self._aux_get_wrapper |
| self._match = self.dbapi.match |
| self.dbapi.match = self._match_wrapper |
| self._aux_get_history = set() |
| self._portdb_keys = ["DEPEND", "RDEPEND", "PDEPEND"] |
| self._portdb = portdb |
| self._global_updates = None |
| |
| def _match_wrapper(self, cpv, use_cache=1): |
| """ |
| Make sure the metadata in Package instances gets updated for any |
| cpv that is returned from a match() call, since the metadata can |
| be accessed directly from the Package instance instead of via |
| aux_get(). |
| """ |
| matches = self._match(cpv, use_cache=use_cache) |
| for cpv in matches: |
| if cpv in self._aux_get_history: |
| continue |
| self._aux_get_wrapper(cpv, []) |
| return matches |
| |
| def _aux_get_wrapper(self, pkg, wants): |
| if pkg in self._aux_get_history: |
| return self._aux_get(pkg, wants) |
| self._aux_get_history.add(pkg) |
| try: |
| # Use the live ebuild metadata if possible. |
| live_metadata = dict(izip(self._portdb_keys, |
| self._portdb.aux_get(pkg, self._portdb_keys))) |
| self.dbapi.aux_update(pkg, live_metadata) |
| except (KeyError, portage.exception.PortageException): |
| if self._global_updates is None: |
| self._global_updates = \ |
| grab_global_updates(self._portdb.porttree_root) |
| perform_global_updates( |
| pkg, self.dbapi, self._global_updates) |
| return self._aux_get(pkg, wants) |
| |
| def sync(self, acquire_lock=1): |
| """ |
| Call this method to synchronize state with the real vardb |
| after one or more packages may have been installed or |
| uninstalled. |
| """ |
| vdb_path = os.path.join(self.root, portage.VDB_PATH) |
| try: |
| # At least the parent needs to exist for the lock file. |
| portage.util.ensure_dirs(vdb_path) |
| except portage.exception.PortageException: |
| pass |
| vdb_lock = None |
| try: |
| if acquire_lock and os.access(vdb_path, os.W_OK): |
| vdb_lock = portage.locks.lockdir(vdb_path) |
| self._sync() |
| finally: |
| if vdb_lock: |
| portage.locks.unlockdir(vdb_lock) |
| |
| def _sync(self): |
| |
| real_vardb = self._root_config.trees["vartree"].dbapi |
| current_cpv_set = frozenset(real_vardb.cpv_all()) |
| pkg_vardb = self.dbapi |
| aux_get_history = self._aux_get_history |
| |
| # Remove any packages that have been uninstalled. |
| for pkg in list(pkg_vardb): |
| if pkg.cpv not in current_cpv_set: |
| pkg_vardb.cpv_remove(pkg) |
| aux_get_history.discard(pkg.cpv) |
| |
| # Validate counters and timestamps. |
| slot_counters = {} |
| root = self.root |
| validation_keys = ["COUNTER", "_mtime_"] |
| for cpv in current_cpv_set: |
| |
| pkg_hash_key = ("installed", root, cpv, "nomerge") |
| pkg = pkg_vardb.get(pkg_hash_key) |
| if pkg is not None: |
| counter, mtime = real_vardb.aux_get(cpv, validation_keys) |
| |
| if counter != pkg.metadata["COUNTER"] or \ |
| mtime != pkg.mtime: |
| pkg_vardb.cpv_remove(pkg) |
| aux_get_history.discard(pkg.cpv) |
| pkg = None |
| |
| if pkg is None: |
| pkg = self._pkg(cpv) |
| |
| other_counter = slot_counters.get(pkg.slot_atom) |
| if other_counter is not None: |
| if other_counter > pkg.counter: |
| continue |
| |
| slot_counters[pkg.slot_atom] = pkg.counter |
| pkg_vardb.cpv_inject(pkg) |
| |
| real_vardb.flush_cache() |
| |
| def _pkg(self, cpv): |
| root_config = self._root_config |
| real_vardb = root_config.trees["vartree"].dbapi |
| db_keys = list(real_vardb._aux_cache_keys) |
| pkg = Package(cpv=cpv, installed=True, |
| metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)), |
| root_config=root_config, |
| type_name="installed") |
| return pkg |
| |
| def grab_global_updates(portdir): |
| from portage.update import grab_updates, parse_updates |
| updpath = os.path.join(portdir, "profiles", "updates") |
| try: |
| rawupdates = grab_updates(updpath) |
| except portage.exception.DirectoryNotFound: |
| rawupdates = [] |
| upd_commands = [] |
| for mykey, mystat, mycontent in rawupdates: |
| commands, errors = parse_updates(mycontent) |
| upd_commands.extend(commands) |
| return upd_commands |
| |
| def perform_global_updates(mycpv, mydb, mycommands): |
| from portage.update import update_dbentries |
| aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"] |
| aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys))) |
| updates = update_dbentries(mycommands, aux_dict) |
| if updates: |
| mydb.aux_update(mycpv, updates) |
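
# Sketch of the interaction (mirrors FakeVartree._aux_get_wrapper() above;
# portdb, cpv and the target dbapi are assumed from the caller's context):
#
#	upd_commands = grab_global_updates(portdb.porttree_root)
#	perform_global_updates(cpv, pkg_dbapi, upd_commands)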
| |
| def visible(pkgsettings, pkg): |
| """ |
	Check if a package is visible. An InvalidDependString exception
	raised by an invalid LICENSE is caught internally and the package
	is treated as not visible.
| TODO: optionally generate a list of masking reasons |
| @rtype: Boolean |
| @returns: True if the package is visible, False otherwise. |
| """ |
| if not pkg.metadata["SLOT"]: |
| return False |
| if pkg.built and not pkg.installed and "CHOST" in pkg.metadata: |
| if not pkgsettings._accept_chost(pkg): |
| return False |
| if not portage.eapi_is_supported(pkg.metadata["EAPI"]): |
| return False |
| if not pkg.installed and \ |
| pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata): |
| return False |
| if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata): |
| return False |
| if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata): |
| return False |
| try: |
| if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata): |
| return False |
| except portage.exception.InvalidDependString: |
| return False |
| return True |
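
# Typical call pattern inside a match loop (sketch; pkgsettings and pkg are
# assumed from the caller):
#
#	if not visible(pkgsettings, pkg):
#		continue  # masked by keywords, package.mask, license, etc.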
| |
| def get_masking_status(pkg, pkgsettings, root_config): |
| |
| mreasons = portage.getmaskingstatus( |
| pkg, settings=pkgsettings, |
| portdb=root_config.trees["porttree"].dbapi) |
| |
| if pkg.built and not pkg.installed and "CHOST" in pkg.metadata: |
| if not pkgsettings._accept_chost(pkg): |
| mreasons.append("CHOST: %s" % \ |
| pkg.metadata["CHOST"]) |
| |
| if not pkg.metadata["SLOT"]: |
| mreasons.append("invalid: SLOT is undefined") |
| |
| return mreasons |
| |
| def get_mask_info(root_config, cpv, pkgsettings, |
| db, pkg_type, built, installed, db_keys): |
| eapi_masked = False |
| try: |
| metadata = dict(izip(db_keys, |
| db.aux_get(cpv, db_keys))) |
| except KeyError: |
| metadata = None |
| if metadata and not built: |
| pkgsettings.setcpv(cpv, mydb=metadata) |
| metadata["USE"] = pkgsettings["PORTAGE_USE"] |
| if metadata is None: |
| mreasons = ["corruption"] |
| else: |
| pkg = Package(type_name=pkg_type, root_config=root_config, |
| cpv=cpv, built=built, installed=installed, metadata=metadata) |
| mreasons = get_masking_status(pkg, pkgsettings, root_config) |
| return metadata, mreasons |
| |
| def show_masked_packages(masked_packages): |
| shown_licenses = set() |
| shown_comments = set() |
| # Maybe there is both an ebuild and a binary. Only |
| # show one of them to avoid redundant appearance. |
| shown_cpvs = set() |
| have_eapi_mask = False |
| for (root_config, pkgsettings, cpv, |
| metadata, mreasons) in masked_packages: |
| if cpv in shown_cpvs: |
| continue |
| shown_cpvs.add(cpv) |
| comment, filename = None, None |
| if "package.mask" in mreasons: |
| comment, filename = \ |
| portage.getmaskingreason( |
| cpv, metadata=metadata, |
| settings=pkgsettings, |
| portdb=root_config.trees["porttree"].dbapi, |
| return_location=True) |
| missing_licenses = [] |
| if metadata: |
| if not portage.eapi_is_supported(metadata["EAPI"]): |
| have_eapi_mask = True |
| try: |
| missing_licenses = \ |
| pkgsettings._getMissingLicenses( |
| cpv, metadata) |
| except portage.exception.InvalidDependString: |
| # This will have already been reported |
| # above via mreasons. |
| pass |
| |
| print "- "+cpv+" (masked by: "+", ".join(mreasons)+")" |
| if comment and comment not in shown_comments: |
| print filename+":" |
| print comment |
| shown_comments.add(comment) |
| portdb = root_config.trees["porttree"].dbapi |
| for l in missing_licenses: |
| l_path = portdb.findLicensePath(l) |
| if l in shown_licenses: |
| continue |
| msg = ("A copy of the '%s' license" + \ |
| " is located at '%s'.") % (l, l_path) |
| print msg |
| print |
| shown_licenses.add(l) |
| return have_eapi_mask |
| |
| class Task(SlotObject): |
| __slots__ = ("_hash_key", "_hash_value") |
| |
| def _get_hash_key(self): |
| hash_key = getattr(self, "_hash_key", None) |
| if hash_key is None: |
| raise NotImplementedError(self) |
| return hash_key |
| |
| def __eq__(self, other): |
| return self._get_hash_key() == other |
| |
| def __ne__(self, other): |
| return self._get_hash_key() != other |
| |
| def __hash__(self): |
| hash_value = getattr(self, "_hash_value", None) |
| if hash_value is None: |
| self._hash_value = hash(self._get_hash_key()) |
| return self._hash_value |
| |
| def __len__(self): |
| return len(self._get_hash_key()) |
| |
| def __getitem__(self, key): |
| return self._get_hash_key()[key] |
| |
| def __iter__(self): |
| return iter(self._get_hash_key()) |
| |
| def __contains__(self, key): |
| return key in self._get_hash_key() |
| |
| def __str__(self): |
| return str(self._get_hash_key()) |
| |
| class Blocker(Task): |
| |
| __hash__ = Task.__hash__ |
| __slots__ = ("root", "atom", "cp", "satisfied") |
| |
| def __init__(self, **kwargs): |
| Task.__init__(self, **kwargs) |
| self.cp = portage.dep_getkey(self.atom) |
| |
| def _get_hash_key(self): |
| hash_key = getattr(self, "_hash_key", None) |
| if hash_key is None: |
| self._hash_key = \ |
| ("blocks", self.root, self.atom) |
| return self._hash_key |
| |
| class Package(Task): |
| |
| __hash__ = Task.__hash__ |
| __slots__ = ("built", "cpv", "depth", |
| "installed", "metadata", "onlydeps", "operation", |
| "root_config", "type_name", |
| "category", "counter", "cp", "cpv_split", |
| "inherited", "iuse", "mtime", |
| "pf", "pv_split", "root", "slot", "slot_atom", "use") |
| |
| metadata_keys = [ |
| "CHOST", "COUNTER", "DEPEND", "EAPI", |
| "INHERITED", "IUSE", "KEYWORDS", |
| "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND", |
| "repository", "RESTRICT", "SLOT", "USE", "_mtime_"] |
| |
| def __init__(self, **kwargs): |
| Task.__init__(self, **kwargs) |
| self.root = self.root_config.root |
| self.metadata = _PackageMetadataWrapper(self, self.metadata) |
| self.cp = portage.cpv_getkey(self.cpv) |
| self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot)) |
| self.category, self.pf = portage.catsplit(self.cpv) |
| self.cpv_split = portage.catpkgsplit(self.cpv) |
| self.pv_split = self.cpv_split[1:] |
| |
| class _use(object): |
| |
| __slots__ = ("__weakref__", "enabled") |
| |
| def __init__(self, use): |
| self.enabled = frozenset(use) |
| |
| class _iuse(object): |
| |
| __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens") |
| |
| def __init__(self, tokens, iuse_implicit): |
| self.tokens = tuple(tokens) |
| self.iuse_implicit = iuse_implicit |
| enabled = [] |
| disabled = [] |
| other = [] |
| for x in tokens: |
| prefix = x[:1] |
| if prefix == "+": |
| enabled.append(x[1:]) |
| elif prefix == "-": |
| disabled.append(x[1:]) |
| else: |
| other.append(x) |
| self.enabled = frozenset(enabled) |
| self.disabled = frozenset(disabled) |
| self.all = frozenset(chain(enabled, disabled, other)) |
| |
| def __getattribute__(self, name): |
| if name == "regex": |
| try: |
| return object.__getattribute__(self, "regex") |
| except AttributeError: |
| all = object.__getattribute__(self, "all") |
| iuse_implicit = object.__getattribute__(self, "iuse_implicit") |
| # Escape anything except ".*" which is supposed |
| # to pass through from _get_implicit_iuse() |
| regex = (re.escape(x) for x in chain(all, iuse_implicit)) |
| regex = "^(%s)$" % "|".join(regex) |
| regex = regex.replace("\\.\\*", ".*") |
| self.regex = re.compile(regex) |
| return object.__getattribute__(self, name) |
| |
| def _get_hash_key(self): |
| hash_key = getattr(self, "_hash_key", None) |
| if hash_key is None: |
| if self.operation is None: |
| self.operation = "merge" |
| if self.onlydeps or self.installed: |
| self.operation = "nomerge" |
| self._hash_key = \ |
| (self.type_name, self.root, self.cpv, self.operation) |
| return self._hash_key |
| |
| def __cmp__(self, other): |
| if self > other: |
| return 1 |
| elif self < other: |
| return -1 |
| return 0 |
| |
| def __lt__(self, other): |
| if other.cp != self.cp: |
| return False |
| if portage.pkgcmp(self.pv_split, other.pv_split) < 0: |
| return True |
| return False |
| |
| def __le__(self, other): |
| if other.cp != self.cp: |
| return False |
| if portage.pkgcmp(self.pv_split, other.pv_split) <= 0: |
| return True |
| return False |
| |
| def __gt__(self, other): |
| if other.cp != self.cp: |
| return False |
| if portage.pkgcmp(self.pv_split, other.pv_split) > 0: |
| return True |
| return False |
| |
| def __ge__(self, other): |
| if other.cp != self.cp: |
| return False |
| if portage.pkgcmp(self.pv_split, other.pv_split) >= 0: |
| return True |
| return False |
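
	# Note: the rich comparisons above are only meaningful for packages that
	# share the same category/package name (cp); when cp differs, each of
	# them returns False.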
| |
| _all_metadata_keys = set(x for x in portage.auxdbkeys \ |
| if not x.startswith("UNUSED_")) |
| _all_metadata_keys.discard("CDEPEND") |
| _all_metadata_keys.update(Package.metadata_keys) |
| |
| from portage.cache.mappings import slot_dict_class |
| _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys) |
| |
| class _PackageMetadataWrapper(_PackageMetadataWrapperBase): |
| """ |
| Detect metadata updates and synchronize Package attributes. |
| """ |
| |
| __slots__ = ("_pkg",) |
| _wrapped_keys = frozenset( |
| ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"]) |
| |
| def __init__(self, pkg, metadata): |
| _PackageMetadataWrapperBase.__init__(self) |
| self._pkg = pkg |
| self.update(metadata) |
| |
| def __setitem__(self, k, v): |
| _PackageMetadataWrapperBase.__setitem__(self, k, v) |
| if k in self._wrapped_keys: |
| getattr(self, "_set_" + k.lower())(k, v) |
| |
| def _set_inherited(self, k, v): |
| if isinstance(v, basestring): |
| v = frozenset(v.split()) |
| self._pkg.inherited = v |
| |
| def _set_iuse(self, k, v): |
| self._pkg.iuse = self._pkg._iuse( |
| v.split(), self._pkg.root_config.iuse_implicit) |
| |
| def _set_slot(self, k, v): |
| self._pkg.slot = v |
| |
| def _set_use(self, k, v): |
| self._pkg.use = self._pkg._use(v.split()) |
| |
| def _set_counter(self, k, v): |
| if isinstance(v, basestring): |
| try: |
| v = int(v.strip()) |
| except ValueError: |
| v = 0 |
| self._pkg.counter = v |
| |
| def _set__mtime_(self, k, v): |
| if isinstance(v, basestring): |
| try: |
| v = float(v.strip()) |
| except ValueError: |
| v = 0 |
| self._pkg.mtime = v |
| |
| class EbuildFetchPretend(SlotObject): |
| |
| __slots__ = ("fetch_all", "pkg", "settings") |
| |
| def execute(self): |
| portdb = self.pkg.root_config.trees["porttree"].dbapi |
| ebuild_path = portdb.findname(self.pkg.cpv) |
| debug = self.settings.get("PORTAGE_DEBUG") == "1" |
| |
| retval = portage.doebuild(ebuild_path, "fetch", |
| self.settings["ROOT"], self.settings, debug=debug, |
| listonly=1, fetchonly=1, fetchall=self.fetch_all, |
| mydbapi=portdb, tree="porttree") |
| return retval |
| |
| class AsynchronousTask(SlotObject): |
| """ |
| Subclasses override _wait() and _poll() so that calls |
| to public methods can be wrapped for implementing |
| hooks such as exit listener notification. |
| |
	Subclasses should call self.wait() to notify exit listeners after
| the task is complete and self.returncode has been set. |
| """ |
| |
| __slots__ = ("background", "cancelled", "returncode") + \ |
| ("_exit_listeners", "_exit_listener_stack", "_start_listeners") |
| |
| def start(self): |
| """ |
| Start an asynchronous task and then return as soon as possible. |
| """ |
| self._start() |
| self._start_hook() |
| |
| def _start(self): |
| raise NotImplementedError(self) |
| |
| def isAlive(self): |
| return self.returncode is None |
| |
| def poll(self): |
| self._wait_hook() |
| return self._poll() |
| |
| def _poll(self): |
| return self.returncode |
| |
| def wait(self): |
| if self.returncode is None: |
| self._wait() |
| self._wait_hook() |
| return self.returncode |
| |
| def _wait(self): |
| return self.returncode |
| |
| def cancel(self): |
| self.cancelled = True |
| self.wait() |
| |
| def addStartListener(self, f): |
| """ |
| The function will be called with one argument, a reference to self. |
| """ |
| if self._start_listeners is None: |
| self._start_listeners = [] |
| self._start_listeners.append(f) |
| |
| def removeStartListener(self, f): |
| if self._start_listeners is None: |
| return |
| self._start_listeners.remove(f) |
| |
| def _start_hook(self): |
| if self._start_listeners is not None: |
| start_listeners = self._start_listeners |
| self._start_listeners = None |
| |
| for f in start_listeners: |
| f(self) |
| |
| def addExitListener(self, f): |
| """ |
| The function will be called with one argument, a reference to self. |
| """ |
| if self._exit_listeners is None: |
| self._exit_listeners = [] |
| self._exit_listeners.append(f) |
| |
| def removeExitListener(self, f): |
| if self._exit_listeners is None: |
| if self._exit_listener_stack is not None: |
| self._exit_listener_stack.remove(f) |
| return |
| self._exit_listeners.remove(f) |
| |
| def _wait_hook(self): |
| """ |
| Call this method after the task completes, just before returning |
| the returncode from wait() or poll(). This hook is |
| used to trigger exit listeners when the returncode first |
| becomes available. |
| """ |
| if self.returncode is not None and \ |
| self._exit_listeners is not None: |
| |
| # This prevents recursion, in case one of the |
| # exit handlers triggers this method again by |
| # calling wait(). Use a stack that gives |
| # removeExitListener() an opportunity to consume |
| # listeners from the stack, before they can get |
| # called below. This is necessary because a call |
| # to one exit listener may result in a call to |
| # removeExitListener() for another listener on |
| # the stack. That listener needs to be removed |
| # from the stack since it would be inconsistent |
			# to call it after it has been passed into
| # removeExitListener(). |
| self._exit_listener_stack = self._exit_listeners |
| self._exit_listeners = None |
| |
| self._exit_listener_stack.reverse() |
| while self._exit_listener_stack: |
| self._exit_listener_stack.pop()(self) |
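
# Listener sketch (illustrative): exit listeners receive the task itself once
# its returncode is available:
#
#	def on_exit(task):
#		print "task finished with returncode", task.returncode
#
#	task.addExitListener(on_exit)
#	task.start()
#	task.wait()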
| |
| class PipeReader(AsynchronousTask): |
| |
| """ |
| Reads output from one or more files and saves it in memory, |
| for retrieval via the getvalue() method. This is driven by |
| the scheduler's poll() loop, so it runs entirely within the |
| current process. |
| """ |
| |
| __slots__ = ("input_files", "scheduler",) + \ |
| ("pid", "_read_data", "_registered", "_reg_ids") |
| |
| _bufsize = 4096 |
| |
| def _start(self): |
| self._reg_ids = set() |
| self._read_data = [] |
| for k, f in self.input_files.iteritems(): |
| fcntl.fcntl(f.fileno(), fcntl.F_SETFL, |
| fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK) |
| self._reg_ids.add(self.scheduler.register(f.fileno(), |
| PollConstants.POLLIN, self._output_handler)) |
| self._registered = True |
| |
| def isAlive(self): |
| return self._registered |
| |
| def _wait(self): |
| if self.returncode is not None: |
| return self.returncode |
| |
| if self._registered: |
| self.scheduler.schedule(self._reg_ids) |
| self._unregister() |
| |
| self.returncode = os.EX_OK |
| return self.returncode |
| |
| def getvalue(self): |
| """Retrieve the entire contents""" |
| return "".join(self._read_data) |
| |
| def close(self): |
| """Free the memory buffer.""" |
| self._read_data = None |
| |
| def _output_handler(self, fd, event): |
| files = self.input_files |
| for f in files.itervalues(): |
| if fd == f.fileno(): |
| break |
| |
| buf = array.array('B') |
| try: |
| buf.fromfile(f, self._bufsize) |
| except EOFError: |
| pass |
| |
| if buf: |
| self._read_data.append(buf.tostring()) |
| else: |
| self._unregister() |
| self.wait() |
| |
| return self._registered |
| |
| def _unregister(self): |
| """ |
| Unregister from the scheduler and close open files. |
| """ |
| |
| self._registered = False |
| |
| if self._reg_ids is not None: |
| for reg_id in self._reg_ids: |
| self.scheduler.unregister(reg_id) |
| self._reg_ids = None |
| |
| if self.input_files is not None: |
| for f in self.input_files.itervalues(): |
| f.close() |
| self.input_files = None |
| |
| class CompositeTask(AsynchronousTask): |
| |
| __slots__ = ("scheduler",) + ("_current_task",) |
| |
| def isAlive(self): |
| return self._current_task is not None |
| |
| def cancel(self): |
| self.cancelled = True |
| if self._current_task is not None: |
| self._current_task.cancel() |
| |
| def _poll(self): |
| """ |
| This does a loop calling self._current_task.poll() |
| repeatedly as long as the value of self._current_task |
| keeps changing. It calls poll() a maximum of one time |
| for a given self._current_task instance. This is useful |
		since calling poll() on a task can trigger advance to the
		next task, which could eventually lead to the returncode
		being set in cases when polling only a single task would
| not have the same effect. |
| """ |
| |
| prev = None |
| while True: |
| task = self._current_task |
| if task is None or task is prev: |
| # don't poll the same task more than once |
| break |
| task.poll() |
| prev = task |
| |
| return self.returncode |
| |
| def _wait(self): |
| |
| prev = None |
| while True: |
| task = self._current_task |
| if task is None: |
				# no current task remains; all tasks are complete
| break |
| if task is prev: |
| # Before the task.wait() method returned, an exit |
| # listener should have set self._current_task to either |
| # a different task or None. Something is wrong. |
| raise AssertionError("self._current_task has not " + \ |
| "changed since calling wait", self, task) |
| task.wait() |
| prev = task |
| |
| return self.returncode |
| |
| def _assert_current(self, task): |
| """ |
| Raises an AssertionError if the given task is not the |
| same one as self._current_task. This can be useful |
| for detecting bugs. |
| """ |
| if task is not self._current_task: |
| raise AssertionError("Unrecognized task: %s" % (task,)) |
| |
| def _default_exit(self, task): |
| """ |
| Calls _assert_current() on the given task and then sets the |
| composite returncode attribute if task.returncode != os.EX_OK. |
| If the task failed then self._current_task will be set to None. |
| Subclasses can use this as a generic task exit callback. |
| |
| @rtype: int |
| @returns: The task.returncode attribute. |
| """ |
| self._assert_current(task) |
| if task.returncode != os.EX_OK: |
| self.returncode = task.returncode |
| self._current_task = None |
| return task.returncode |
| |
| def _final_exit(self, task): |
| """ |
| Assumes that the given task is the final task of this
| composite task. Calls _default_exit(), sets self.returncode
| to the task's returncode, and sets self._current_task to None.
| """ |
| self._default_exit(task) |
| self._current_task = None |
| self.returncode = task.returncode |
| return self.returncode |
| |
| def _default_final_exit(self, task): |
| """ |
| This calls _final_exit() and then wait(). |
| |
| Subclasses can use this as a generic final task exit callback. |
| |
| """ |
| self._final_exit(task) |
| return self.wait() |
| |
| def _start_task(self, task, exit_handler): |
| """ |
| Register exit handler for the given task, set it |
| as self._current_task, and call task.start(). |
| |
| Subclasses can use this as a generic way to start |
| a task. |
| |
| """ |
| task.addExitListener(exit_handler) |
| self._current_task = task |
| task.start() |
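| 
| # A minimal subclass sketch (illustrative only; FooTask and BarTask
| # stand in for hypothetical AsynchronousTask subclasses). It shows
| # the intended pattern: each exit handler checks _default_exit()
| # and either aborts via wait() or starts the next task:
| #
| #     class FooThenBar(CompositeTask):
| #         def _start(self):
| #             self._start_task(FooTask(), self._foo_exit)
| #         def _foo_exit(self, foo_task):
| #             if self._default_exit(foo_task) != os.EX_OK:
| #                 self.wait()
| #                 return
| #             self._start_task(BarTask(), self._default_final_exit)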
| |
| class TaskSequence(CompositeTask): |
| """ |
| A collection of tasks that executes sequentially. Each task |
| must have an addExitListener() method that can be used as
| a means to trigger movement from one task to the next. |
| """ |
| |
| __slots__ = ("_task_queue",) |
| |
| def __init__(self, **kwargs): |
| AsynchronousTask.__init__(self, **kwargs) |
| self._task_queue = deque() |
| |
| def add(self, task): |
| self._task_queue.append(task) |
| |
| def _start(self): |
| self._start_next_task() |
| |
| def cancel(self): |
| self._task_queue.clear() |
| CompositeTask.cancel(self) |
| |
| def _start_next_task(self): |
| self._start_task(self._task_queue.popleft(), |
| self._task_exit_handler) |
| |
| def _task_exit_handler(self, task): |
| if self._default_exit(task) != os.EX_OK: |
| self.wait() |
| elif self._task_queue: |
| self._start_next_task() |
| else: |
| self._final_exit(task) |
| self.wait() |
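| 
| # Usage sketch (illustrative only; "my_scheduler" and the task
| # instances are assumed to exist elsewhere): queued tasks run
| # strictly one after another, and the sequence stops at the
| # first failure.
| #
| #     seq = TaskSequence(scheduler=my_scheduler)
| #     seq.add(first_task)
| #     seq.add(second_task)
| #     seq.start()
| #     if seq.wait() != os.EX_OK:
| #         writemsg("task sequence failed\n", noiselevel=-1)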
| |
| class SubProcess(AsynchronousTask): |
| |
| __slots__ = ("scheduler",) + ("pid", "_files", "_registered", "_reg_id") |
| |
| # A file descriptor is required for the scheduler to monitor changes from |
| # inside a poll() loop. When logging is not enabled, create a pipe just to |
| # serve this purpose alone. |
| _dummy_pipe_fd = 9 |
| |
| def _poll(self): |
| if self.returncode is not None: |
| return self.returncode |
| if self.pid is None: |
| return self.returncode |
| if self._registered: |
| return self.returncode |
| |
| try:
| retval = os.waitpid(self.pid, os.WNOHANG)
| except OSError, e:
| if e.errno != errno.ECHILD:
| raise
| del e
| # ECHILD means the child was already reaped elsewhere, so
| # its real exit status is lost; record a generic nonzero
| # status so the task is treated as a failure.
| retval = (self.pid, 1)
| |
| if retval == (0, 0): |
| return None |
| self._set_returncode(retval) |
| return self.returncode |
| |
| def cancel(self): |
| if self.isAlive(): |
| try: |
| os.kill(self.pid, signal.SIGTERM) |
| except OSError, e: |
| if e.errno != errno.ESRCH: |
| raise |
| del e |
| |
| self.cancelled = True |
| if self.pid is not None: |
| self.wait() |
| return self.returncode |
| |
| def isAlive(self): |
| return self.pid is not None and \ |
| self.returncode is None |
| |
| def _wait(self): |
| |
| if self.returncode is not None: |
| return self.returncode |
| |
| if self._registered: |
| self.scheduler.schedule(self._reg_id) |
| self._unregister() |
| if self.returncode is not None: |
| return self.returncode |
| |
| try: |
| wait_retval = os.waitpid(self.pid, 0) |
| except OSError, e: |
| if e.errno != errno.ECHILD: |
| raise |
| del e |
| self._set_returncode((self.pid, 1)) |
| else: |
| self._set_returncode(wait_retval) |
| |
| return self.returncode |
| |
| def _unregister(self): |
| """ |
| Unregister from the scheduler and close open files. |
| """ |
| |
| self._registered = False |
| |
| if self._reg_id is not None: |
| self.scheduler.unregister(self._reg_id) |
| self._reg_id = None |
| |
| if self._files is not None: |
| for f in self._files.itervalues(): |
| f.close() |
| self._files = None |
| |
| def _set_returncode(self, wait_retval): |
| |
| retval = wait_retval[1] |
| |
| if retval != os.EX_OK: |
| if retval & 0xff: |
| retval = (retval & 0xff) << 8 |
| else: |
| retval = retval >> 8 |
| |
| self.returncode = retval |
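| 
| # Worked example of the decoding above: a child that exits
| # normally with code 2 yields a waitpid() status of 0x0200; the
| # low byte is zero, so returncode = 0x0200 >> 8 = 2. A child
| # killed by SIGTERM (signal 15) yields status 0x000f; the low
| # byte is nonzero, so returncode = (15 & 0xff) << 8 = 3840,
| # which is nonzero and therefore still counts as a failure.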
| |
| class SpawnProcess(SubProcess): |
| |
| """ |
| Constructor keyword args are passed into portage.process.spawn(). |
| The required "args" keyword argument will be passed as the first |
| spawn() argument. |
| """ |
| |
| _spawn_kwarg_names = ("env", "opt_name", "fd_pipes", |
| "uid", "gid", "groups", "umask", "logfile", |
| "path_lookup", "pre_exec") |
| |
| __slots__ = ("args",) + \ |
| _spawn_kwarg_names |
| |
| _file_names = ("log", "process", "stdout") |
| _files_dict = slot_dict_class(_file_names, prefix="") |
| _bufsize = 4096 |
| |
| def _start(self): |
| |
| if self.cancelled: |
| return |
| |
| if self.fd_pipes is None: |
| self.fd_pipes = {} |
| fd_pipes = self.fd_pipes |
| fd_pipes.setdefault(0, sys.stdin.fileno()) |
| fd_pipes.setdefault(1, sys.stdout.fileno()) |
| fd_pipes.setdefault(2, sys.stderr.fileno()) |
| |
| # flush any pending output |
| for fd in fd_pipes.itervalues(): |
| if fd == sys.stdout.fileno(): |
| sys.stdout.flush() |
| if fd == sys.stderr.fileno(): |
| sys.stderr.flush() |
| |
| logfile = self.logfile |
| self._files = self._files_dict() |
| files = self._files |
| |
| master_fd, slave_fd = self._pipe(fd_pipes) |
| fcntl.fcntl(master_fd, fcntl.F_SETFL, |
| fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK) |
| |
| null_input = None |
| fd_pipes_orig = fd_pipes.copy() |
| if self.background: |
| # TODO: Use job control functions like tcsetpgrp() to control |
| # access to stdin. Until then, use /dev/null so that any |
| # attempts to read from stdin will immediately return EOF |
| # instead of blocking indefinitely. |
| null_input = open('/dev/null', 'rb') |
| fd_pipes[0] = null_input.fileno() |
| else: |
| fd_pipes[0] = fd_pipes_orig[0] |
| |
| files.process = os.fdopen(master_fd, 'r') |
| if logfile is not None: |
| |
| fd_pipes[1] = slave_fd |
| fd_pipes[2] = slave_fd |
| |
| files.log = open(logfile, "a") |
| portage.util.apply_secpass_permissions(logfile, |
| uid=portage.portage_uid, gid=portage.portage_gid, |
| mode=0660) |
| |
| if not self.background: |
| files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w') |
| |
| output_handler = self._output_handler |
| |
| else: |
| |
| # Create a dummy pipe so the scheduler can monitor |
| # the process from inside a poll() loop. |
| fd_pipes[self._dummy_pipe_fd] = slave_fd |
| if self.background: |
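| # Without a logfile, background mode has nowhere to
| # show output, so point stdout and stderr at the dummy
| # pipe; _dummy_handler() simply drains and discards it.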
| fd_pipes[1] = slave_fd |
| fd_pipes[2] = slave_fd |
| output_handler = self._dummy_handler |
| |
| kwargs = {} |
| for k in self._spawn_kwarg_names: |
| v = getattr(self, k) |
| if v is not None: |
| kwargs[k] = v |
| |
| kwargs["fd_pipes"] = fd_pipes |
| kwargs["returnpid"] = True |
| kwargs.pop("logfile", None) |
| |
| retval = self._spawn(self.args, **kwargs) |
| |
| os.close(slave_fd) |
| if null_input is not None: |
| null_input.close() |
| |
| if isinstance(retval, int): |
| # spawn failed |
| os.close(master_fd) |
| for f in files.values():
| f.close() |
| self.returncode = retval |
| self.wait() |
| return |
| |
| self.pid = retval[0] |
| portage.process.spawned_pids.remove(self.pid) |
| |
| self._reg_id = self.scheduler.register(files.process.fileno(), |
| PollConstants.POLLIN, output_handler) |
| self._registered = True |
| |
| def _pipe(self, fd_pipes): |
| """ |
| @type fd_pipes: dict |
| @param fd_pipes: pipes from which to copy terminal size if desired. |
| """ |
| return os.pipe() |
| |
| def _spawn(self, args, **kwargs): |
| return portage.process.spawn(args, **kwargs) |
| |
| def _output_handler(self, fd, event): |
| files = self._files |
| buf = array.array('B') |
| try: |
| buf.fromfile(files.process, self._bufsize) |
| except EOFError: |
| pass |
| if buf:
| if not self.background:
| # Tee the output to the real stdout in addition
| # to the log file.
| buf.tofile(files.stdout)
| files.stdout.flush()
| buf.tofile(files.log)
| files.log.flush()
| else: |
| self._unregister() |
| self.wait() |
| return self._registered |
| |
| def _dummy_handler(self, fd, event): |
| """ |
| This method is mainly interested in detecting EOF, since |
| the only purpose of the pipe is to allow the scheduler to |
| monitor the process from inside a poll() loop. |
| """ |
| files = self._files |
| buf = array.array('B') |
| try: |
| buf.fromfile(files.process, self._bufsize) |
| except EOFError: |
| pass |
| if buf: |
| pass |
| else: |
| self._unregister() |
| self.wait() |
| return self._registered |
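| 
| # Usage sketch (illustrative only; "my_scheduler" is assumed to be
| # the poll-based scheduler whose register()/schedule()/unregister()
| # methods are used above):
| #
| #     proc = SpawnProcess(args=["/bin/true"],
| #         background=False, scheduler=my_scheduler)
| #     proc.start()
| #     if proc.wait() != os.EX_OK:
| #         writemsg("command failed\n", noiselevel=-1)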
| |
| class MiscFunctionsProcess(SpawnProcess): |
| """ |
| Spawns misc-functions.sh with an existing ebuild environment. |
| """ |
| |
| __slots__ = ("commands", "phase", "pkg", "settings") |
| |
| def _start(self): |
| settings = self.settings |
| portage_bin_path = settings["PORTAGE_BIN_PATH"] |
| misc_sh_binary = os.path.join(portage_bin_path, |
| os.path.basename(portage.const.MISC_SH_BINARY)) |
| |
| self.args = [portage._shell_quote(misc_sh_binary)] + self.commands |
| self.logfile = settings.get("PORTAGE_LOG_FILE") |
| |
| portage._doebuild_exit_status_unlink( |
| settings.get("EBUILD_EXIT_STATUS_FILE")) |
| |
| SpawnProcess._start(self) |
| |
| def _spawn(self, args, **kwargs): |
| settings = self.settings |
| debug = settings.get("PORTAGE_DEBUG") == "1" |
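| # The argv list is collapsed into a single string because
| # portage.spawn() runs the command through the ebuild shell
| # environment (hence the _shell_quote() call in _start() above).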
| return portage.spawn(" ".join(args), settings, |
| debug=debug, **kwargs) |
| |
| def _set_returncode(self, wait_retval): |
| SpawnProcess._set_returncode(self, wait_retval) |
| self.returncode = portage._doebuild_exit_status_check_and_log( |
| self.settings, self.phase, self.returncode) |
| |
| class EbuildFetcher(SpawnProcess): |
| |
| __slots__ = ("fetchonly", "fetchall", "pkg",) |
| |
| def _start(self): |
| |
| root_config = self.pkg.root_config |
| portdb = root_config.trees["porttree"].dbapi |
| ebuild_path = portdb.findname(self.pkg.cpv) |
| settings = root_config.settings |
| phase = "fetch" |
| if self.fetchall: |
| phase = "fetchall" |
| |
| # If any incremental variables have been overridden |
| # via the environment, those values need to be passed |
| # along here so that they are correctly considered by |
| the config instance in the subprocess.
| fetch_env = os.environ.copy() |
| |
| fetch_env["PORTAGE_NICENESS"] = "0" |
| if self.fetchonly: |
| fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1" |
| |
| ebuild_binary = os.path.join( |
| settings["PORTAGE_BIN_PATH"], "ebuild") |
| |
| fetch_args = [ebuild_binary, ebuild_path, phase] |
| debug = settings.get("PORTAGE_DEBUG") == "1" |
| if debug: |
| fetch_args.append("--debug") |
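| # The resulting command is just the ebuild(1) helper, e.g.
| # (with purely illustrative paths):
| #     ["/usr/lib/portage/bin/ebuild",
| #         "/usr/portage/app-misc/foo/foo-1.0.ebuild", "fetch"]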
| |
| self.args = fetch_args |
| self.env = fetch_env |
| SpawnProcess._start(self) |
| |
| class EbuildBuildDir(SlotObject): |
| |
| __slots__ = ("dir_path", "pkg", "settings", |
| "locked", "_catdir", "_lock_obj") |
| |
| def __init__(self, **kwargs): |
| SlotObject.__init__(self, **kwargs) |
| self.locked = False |
| |
| def lock(self): |
| """ |
| This raises an AlreadyLocked exception if lock() is called |
| while a lock is already held. To avoid this, call unlock()
| first or check the "locked" attribute before calling lock().
| """ |
| if self._lock_obj is not None: |
| raise self.AlreadyLocked((self._lock_obj,)) |
| |
| dir_path = self.dir_path |
| if dir_path is None: |
| root_config = self.pkg.root_config |
| portdb = root_config.trees["porttree"].dbapi |
| ebuild_path = portdb.findname(self.pkg.cpv) |
| settings = self.settings |
| debug = settings.get("PORTAGE_DEBUG") == "1" |
| use_cache = 1 # always true |
| portage.doebuild_environment(ebuild_path, "setup", root_config.root, |
| self.settings, debug, use_cache, portdb) |
| dir_path = self.settings["PORTAGE_BUILDDIR"] |
| |
| catdir = os.path.dirname(dir_path) |
| self._catdir = catdir |
| |
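| # Note that 070 below is an octal literal (group rwx only), so
| # these directories end up accessible to the portage group.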
| portage.util.ensure_dirs(os.path.dirname(catdir), |
| uid=portage.portage_uid, gid=portage.portage_gid, |
| mode=070, mask=0) |
| catdir_lock = None |
| try: |
| catdir_lock = portage.locks.lockdir(catdir) |
| portage.util.ensure_dirs(catdir, |
| gid=portage.portage_gid, |
| mode=070, mask=0) |
| self._lock_obj = portage.locks.lockdir(dir_path) |
| finally: |
| self.locked = self._lock_obj is not None |
| if catdir_lock is not None: |
| portage.locks.unlockdir(catdir_lock) |
| |
| def unlock(self): |
| if self._lock_obj is None: |
| return |
| |
| portage.locks.unlockdir(self._lock_obj) |
| self._lock_obj = None |
| self.locked = False |
| |
| catdir = self._catdir |
| catdir_lock = None |
| try: |
| catdir_lock = portage.locks.lockdir(catdir) |
| finally: |
| if catdir_lock: |
| try: |
| os.rmdir(catdir) |
| except OSError, e: |
| if e.errno not in (errno.ENOENT, |
| errno.ENOTEMPTY, errno.EEXIST): |
| raise |
| del e |
| portage.locks.unlockdir(catdir_lock) |
| |
| class AlreadyLocked(portage.exception.PortageException): |
| pass |
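| 
| # Typical locking pattern (sketch; mirrors how EbuildBuild uses
| # this class below): always pair lock() with unlock(), e.g. via
| # try/finally, so that the build dir lock cannot leak.
| #
| #     build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
| #     build_dir.lock()
| #     try:
| #         pass  # work inside settings["PORTAGE_BUILDDIR"]
| #     finally:
| #         build_dir.unlock()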
| |
| class EbuildBuild(CompositeTask): |
| |
| __slots__ = ("args_set", "background", "find_blockers", |
| "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count", |
| "prefetcher", "settings", "world_atom") + \ |
| ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree") |
| |
| def _start(self): |
| |
| logger = self.logger |
| opts = self.opts |
| pkg = self.pkg |
| settings = self.settings |
| world_atom = self.world_atom |
| root_config = pkg.root_config |
| tree = "porttree" |
| self._tree = tree |
| portdb = root_config.trees[tree].dbapi |
| settings["EMERGE_FROM"] = pkg.type_name |
| settings.backup_changes("EMERGE_FROM") |
| settings.reset() |
| ebuild_path = portdb.findname(self.pkg.cpv) |
| self._ebuild_path = ebuild_path |
| |
| prefetcher = self.prefetcher |
| if prefetcher is None: |
| pass |
| elif not prefetcher.isAlive(): |
| prefetcher.cancel() |
| elif prefetcher.poll() is None: |
| |
| waiting_msg = "Fetching files " + \ |
| "in the background. " + \ |
| "To view fetch progress, run `tail -f " + \ |
| "/var/log/emerge-fetch.log` in another " + \ |
| "terminal." |
| msg_prefix = colorize("GOOD", " * ") |
| waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
| for line in textwrap.wrap(waiting_msg, 65))
| if not self.background: |
| writemsg(waiting_msg, noiselevel=-1) |
| |
| self._current_task = prefetcher |
| prefetcher.addExitListener(self._prefetch_exit) |
| return |
| |
| self._prefetch_exit(prefetcher) |
| |
| def _prefetch_exit(self, prefetcher): |
| |
| opts = self.opts |
| pkg = self.pkg |
| settings = self.settings |
| |
| if opts.fetchonly and opts.pretend: |
| fetcher = EbuildFetchPretend( |
| fetch_all=opts.fetch_all_uri, |
| pkg=pkg, settings=settings) |
| retval = fetcher.execute() |
| self.returncode = retval |
| self.wait() |
| return |
| |
| fetch_log = None |
| if self.background: |
| fetch_log = self.scheduler.fetch.log_file |
| |
| fetcher = EbuildFetcher(fetchall=opts.fetch_all_uri, |
| fetchonly=opts.fetchonly, |
| background=self.background, logfile=fetch_log, |
| pkg=pkg, scheduler=self.scheduler) |
| |
| if self.background: |
| fetcher.addExitListener(self._fetch_exit) |
| self._current_task = fetcher |
| self.scheduler.fetch.schedule(fetcher) |
| else: |
| self._start_task(fetcher, self._fetch_exit) |
| |
| def _fetch_exit(self, fetcher): |
| |
| opts = self.opts |
| pkg = self.pkg |
| |
| if opts.fetchonly: |
| if self._final_exit(fetcher) != os.EX_OK: |
| if not self.background: |
| eerror("Fetch for %s failed, continuing..." % pkg.cpv, |
| phase="unpack", key=pkg.cpv) |
| self.wait() |
| return |
| |
| if self._default_exit(fetcher) != os.EX_OK: |
| self.wait() |
| return |
| |
| logger = self.logger |
| opts = self.opts |
| pkg_count = self.pkg_count |
| scheduler = self.scheduler |
| settings = self.settings |
| features = settings.features |
| ebuild_path = self._ebuild_path |
| system_set = pkg.root_config.sets["system"] |
| |
| self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings) |
| self._build_dir.lock() |
| |
| # Cleaning is triggered before the setup |
| # phase, in portage.doebuild(). |
| msg = " === (%s of %s) Cleaning (%s::%s)" % \ |
| (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path) |
| short_msg = "emerge: (%s of %s) %s Clean" % \ |
| (pkg_count.curval, pkg_count.maxval, pkg.cpv) |
| logger.log(msg, short_msg=short_msg) |
| |
| #buildsyspkg: Check if we need to _force_ binary package creation |
| self._issyspkg = "buildsyspkg" in features and \ |
| system_set.findAtomForPackage(pkg) and \ |
| not opts.buildpkg |
| |
| if opts.buildpkg or self._issyspkg: |
| |
| self._buildpkg = True |
| |
| msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \ |
| (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path) |
| short_msg = "emerge: (%s of %s) %s Compile" % \ |
| (pkg_count.curval, pkg_count.maxval, pkg.cpv) |
| logger.log(msg, short_msg=short_msg) |
| |
| else: |
| msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \ |
| (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path) |
| short_msg = "emerge: (%s of %s) %s Compile" % \ |
| (pkg_count.curval, pkg_count.maxval, pkg.cpv) |
| logger.log(msg, short_msg=short_msg) |
| |
| build = EbuildExecuter(background=self.background, pkg=pkg, |
| scheduler=scheduler, settings=settings) |
| self._start_task(build, self._build_exit) |
| |
| def _unlock_builddir(self): |
| portage.elog.elog_process(self.pkg.cpv, self.settings) |
| self._build_dir.unlock() |
| |
| def _build_exit(self, build): |
| if self._default_exit(build) != os.EX_OK: |
| self._unlock_builddir() |
| self.wait() |
| return |
| |
| opts = self.opts |
| buildpkg = self._buildpkg |
| |
| if not buildpkg: |
| self._final_exit(build) |
| self.wait() |
| return |
| |
| if self._issyspkg: |
| msg = ">>> This is a system package, " + \ |
| "let's pack a rescue tarball.\n" |
| |
| log_path = self.settings.get("PORTAGE_LOG_FILE") |
| if log_path is not None: |
| log_file = open(log_path, 'a') |
| try: |
| log_file.write(msg) |
| finally: |
| log_file.close() |
| |
| if not self.background: |
| portage.writemsg_stdout(msg, noiselevel=-1) |
| |
| packager = EbuildBinpkg(background=self.background, pkg=self.pkg, |
| scheduler=self.scheduler, settings=self.settings) |
| |
| self._start_task(packager, self._buildpkg_exit) |
| |
| def _buildpkg_exit(self, packager): |
| """ |
| Release the build dir lock when there is a failure or
| when in buildpkgonly mode. Otherwise, the lock will
| be released when merge() is called.
| """ |
| |
| if self._default_exit(packager) == os.EX_OK and \ |
| self.opts.buildpkgonly: |
| # Need to call "clean" phase for buildpkgonly mode |
| phase = "clean" |
| clean_phase = EbuildPhase(background=self.background, |
| pkg=self.pkg, phase=phase, |
| scheduler=self.scheduler, settings=self.settings, |
| tree=self._tree) |
| self._start_task(clean_phase, self._clean_exit) |
| return |
| |
| if self._final_exit(packager) != os.EX_OK or \ |
| self.opts.buildpkgonly: |
| self._unlock_builddir() |
| self.wait() |
| |
| def _clean_exit(self, clean_phase): |
| if self._final_exit(clean_phase) != os.EX_OK or \ |
| self.opts.buildpkgonly: |
| self._unlock_builddir() |
| self.wait() |
| |
| def install(self): |
| """ |
| Install the package and then clean up and release locks. |
| Only call this after the build has completed successfully |
| and neither fetchonly nor buildpkgonly mode is enabled.
| """ |
| |
| find_blockers = self.find_blockers |
| ldpath_mtimes = self.ldpath_mtimes |
| logger = self.logger |
| pkg = self.pkg |
| pkg_count = self.pkg_count |
| settings = self.settings |
| world_atom = self.world_atom |
| ebuild_path = self._ebuild_path |
| tree = self._tree |
| |
| merge = EbuildMerge(find_blockers=self.find_blockers, |
| ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg, |
| pkg_count=pkg_count, pkg_path=ebuild_path, |
| scheduler=self.scheduler, |
| settings=settings, tree=tree, world_atom=world_atom) |
| |
| msg = " === (%s of %s) Merging (%s::%s)" % \ |
| (pkg_count.curval, pkg_count.maxval, |
| pkg.cpv, ebuild_path) |
| short_msg = "emerge: (%s of %s) %s Merge" % \ |
| (pkg_count.curval, pkg_count.maxval, pkg.cpv) |
| logger.log(msg, short_msg=short_msg) |
| |
| try: |
| rval = merge.execute() |
| finally: |
| self._unlock_builddir() |
| |
| return rval |
| |
| class EbuildExecuter(CompositeTask): |
| |
| __slots__ = ("pkg", "scheduler", "settings") + ("_tree",) |
| |
| _phases = ("configure", "compile", "test", "install") |
| |
| _live_eclasses = frozenset([ |
| "cvs", |
| "darcs", |
| "git", |
| "mercurial", |
| "subversion" |
| ]) |
| |
| def _start(self): |
| self._tree = "porttree" |
| pkg = self.pkg |
| phase = "clean" |
| clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase, |
| scheduler=self.scheduler, settings=self.settings, tree=self._tree) |
| self._start_task(clean_phase, self._clean_phase_exit) |
| |
| def _clean_phase_exit(self, clean_phase): |
| |
| if self._default_exit(clean_phase) != os.EX_OK: |
| self.wait() |
| return |
| |
| pkg = self.pkg |
| scheduler = self.scheduler |
| settings = self.settings |
| cleanup = 1 |
| |
| # This initializes PORTAGE_LOG_FILE. |
| portage.prepare_build_dirs(pkg.root, settings, cleanup) |
| |
| setup_phase = EbuildPhase(background=self.background, |
| pkg=pkg, phase="setup", scheduler=scheduler, |
| settings=settings, tree=self._tree) |
| |
| setup_phase.addExitListener(self._setup_exit) |
| self._current_task = setup_phase |
| self.scheduler.scheduleSetup(setup_phase) |
| |
| def _setup_exit(self, setup_phase): |
| |
| if self._default_exit(setup_phase) != os.EX_OK: |
| self.wait() |
| return |
| |
| unpack_phase = EbuildPhase(background=self.background, |
| pkg=self.pkg, phase="unpack", scheduler=self.scheduler, |
| settings=self.settings, tree=self._tree) |
| |
| if self._live_eclasses.intersection(self.pkg.inherited): |
| # Serialize $DISTDIR access for live ebuilds since |
| # otherwise they can interfere with each other.
| |
| unpack_phase.addExitListener(self._unpack_exit) |
| self._current_task = unpack_phase |
| self.scheduler.scheduleUnpack(unpack_phase) |
| |
| else: |
| self._start_task(unpack_phase, self._unpack_exit) |
| |
| def _unpack_exit(self, unpack_phase): |
| |