| # portage.py -- core Portage functionality |
| # Copyright 1998-2004 Gentoo Foundation |
| # Distributed under the terms of the GNU General Public License v2 |
| # $Id$ |
| |
| |
| VERSION="$Rev$"[6:-2] + "-svn" |
| |
| # =========================================================================== |
| # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT |
| # =========================================================================== |
| |
| try: |
| import sys |
| except ImportError: |
| print "Failed to import sys! Something is _VERY_ wrong with python." |
| raise |
| |
| try: |
| import copy, errno, os, re, shutil, time, types |
| try: |
| import cPickle |
| except ImportError: |
| import pickle as cPickle |
| |
| import stat |
| import commands |
| from time import sleep |
| from random import shuffle |
| import UserDict |
| if getattr(__builtins__, "set", None) is None: |
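| # Compatibility for Python < 2.4, where the set builtin does not exist. |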
| from sets import Set as set |
| from itertools import chain, izip |
| except ImportError, e: |
| sys.stderr.write("\n\n") |
| sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n") |
| sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n") |
| sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n") |
| |
| sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n") |
| sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n") |
| sys.stderr.write(" "+str(e)+"\n\n") |
| raise |
| |
| try: |
| # XXX: This should get renamed to bsd_chflags, I think. |
| import chflags |
| bsd_chflags = chflags |
| except ImportError: |
| bsd_chflags = None |
| |
| try: |
| from cache.cache_errors import CacheError |
| import cvstree |
| import xpak |
| import getbinpkg |
| import portage_dep |
| from portage_dep import dep_getcpv, dep_getkey, get_operator, \ |
| isjustname, isspecific, isvalidatom, \ |
| match_from_list, match_to_list, best_match_to_list |
| |
| # XXX: This needs to get cleaned up. |
| import output |
| from output import bold, colorize, green, red, yellow |
| |
| import portage_const |
| from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \ |
| USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \ |
| PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \ |
| EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \ |
| MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \ |
| DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \ |
| INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\ |
| INCREMENTALS, EAPI, MISC_SH_BINARY |
| |
| from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \ |
| portage_uid, portage_gid, userpriv_groups |
| from portage_manifest import Manifest |
| |
| import portage_util |
| from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \ |
| dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \ |
| map_dictlist_vals, new_protect_filename, normalize_path, \ |
| pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \ |
| unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic |
| import portage_exception |
| import portage_gpg |
| import portage_locks |
| import portage_exec |
| from portage_exec import atexit_register, run_exitfuncs |
| from portage_locks import unlockfile,unlockdir,lockfile,lockdir |
| import portage_checksum |
| from portage_checksum import perform_md5,perform_checksum,prelink_capable |
| import eclass_cache |
| from portage_localization import _ |
| from portage_update import dep_transform, fixdbentries, grab_updates, \ |
| parse_updates, update_config_files, update_dbentries |
| |
| # Need these functions directly in portage namespace to not break every external tool in existence |
| from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \ |
| pkgsplit, vercmp, ververify |
| |
| # endversion and endversion_keys are for backward compatibility only. |
| from portage_versions import endversion_keys |
| from portage_versions import suffix_value as endversion |
| |
| except ImportError, e: |
| sys.stderr.write("\n\n") |
| sys.stderr.write("!!! Failed to complete portage imports. These are internal modules for\n") |
| sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n") |
| sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n") |
| sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n") |
| sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n") |
| sys.stderr.write("!!! a recovery of portage.\n") |
| sys.stderr.write(" "+str(e)+"\n\n") |
| raise |
| |
| |
| try: |
| import portage_selinux as selinux |
| except OSError, e: |
| writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1) |
| del e |
| except ImportError: |
| pass |
| |
| # =========================================================================== |
| # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END |
| # =========================================================================== |
| |
| |
| def load_mod(name): |
| modname = ".".join(name.split(".")[:-1]) |
| mod = __import__(modname) |
| components = name.split('.') |
| for comp in components[1:]: |
| mod = getattr(mod, comp) |
| return mod |
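| # Illustrative usage: load_mod("cache.flat_hash.database") imports the |
| # cache.flat_hash module and returns its "database" attribute. |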
| |
| def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1): |
| for x in key_order: |
| if top_dict.has_key(x) and top_dict[x].has_key(key): |
| if FullCopy: |
| return copy.deepcopy(top_dict[x][key]) |
| else: |
| return top_dict[x][key] |
| if EmptyOnError: |
| return "" |
| else: |
| raise KeyError, "Key not found in list; '%s'" % key |
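| |
| # Illustrative example, mirroring how load_best_module() uses this below: |
| # best_from_dict("portdbapi.auxdbmodule", |
| # {"user": {}, "default": {"portdbapi.auxdbmodule": "cache.flat_hash.database"}}, |
| # ["user", "default"]) |
| # returns "cache.flat_hash.database": key_order is searched left to right |
| # and the first dict that contains the key wins. |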
| |
| def getcwd(): |
| "this fixes situations where the current directory doesn't exist" |
| try: |
| return os.getcwd() |
| except OSError: #dir doesn't exist |
| os.chdir("/") |
| return "/" |
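| # Run once at import time so that a missing current directory is corrected |
| # before any other portage code executes. |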
| getcwd() |
| |
| def abssymlink(symlink): |
| "Read a symlink, resolving a relative target against the symlink's directory, and return the absolute path." |
| mylink=os.readlink(symlink) |
| if mylink[0] != '/': |
| mydir=os.path.dirname(symlink) |
| mylink=mydir+"/"+mylink |
| return os.path.normpath(mylink) |
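| |
| # Illustrative example: if /usr/lib/foo.so is a symlink to "../lib64/foo.so", |
| # abssymlink("/usr/lib/foo.so") returns "/usr/lib64/foo.so". |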
| |
| dircache = {} |
| cacheHit=0 |
| cacheMiss=0 |
| cacheStale=0 |
| def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True): |
| global cacheHit,cacheMiss,cacheStale |
| mypath = normalize_path(my_original_path) |
| if dircache.has_key(mypath): |
| cacheHit += 1 |
| cached_mtime, list, ftype = dircache[mypath] |
| else: |
| cacheMiss += 1 |
| cached_mtime, list, ftype = -1, [], [] |
| try: |
| pathstat = os.stat(mypath) |
| if stat.S_ISDIR(pathstat[stat.ST_MODE]): |
| mtime = pathstat[stat.ST_MTIME] |
| else: |
| raise portage_exception.DirectoryNotFound(mypath) |
| except (IOError,OSError,portage_exception.PortageException): |
| if EmptyOnError: |
| return [], [] |
| return None, None |
| # Python returns mtime in whole seconds, so a dir changed within the last few seconds may not be detected; treat it as stale |
| if mtime != cached_mtime or time.time() - mtime < 4: |
| if dircache.has_key(mypath): |
| cacheStale += 1 |
| list = os.listdir(mypath) |
| ftype = [] |
| for x in list: |
| try: |
| if followSymlinks: |
| pathstat = os.stat(mypath+"/"+x) |
| else: |
| pathstat = os.lstat(mypath+"/"+x) |
| |
| if stat.S_ISREG(pathstat[stat.ST_MODE]): |
| ftype.append(0) |
| elif stat.S_ISDIR(pathstat[stat.ST_MODE]): |
| ftype.append(1) |
| elif stat.S_ISLNK(pathstat[stat.ST_MODE]): |
| ftype.append(2) |
| else: |
| ftype.append(3) |
| except (IOError, OSError): |
| ftype.append(3) |
| dircache[mypath] = mtime, list, ftype |
| |
| ret_list = [] |
| ret_ftype = [] |
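| # When ignorecvs is set, any name longer than two characters that does not |
| # begin with ".#" (a CVS conflict-backup prefix) is kept without consulting |
| # ignorelist; every other name is kept only if it is not in ignorelist. |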
| for x in range(0, len(list)): |
| if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")): |
| ret_list.append(list[x]) |
| ret_ftype.append(ftype[x]) |
| elif (list[x] not in ignorelist): |
| ret_list.append(list[x]) |
| ret_ftype.append(ftype[x]) |
| |
| writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10) |
| return ret_list, ret_ftype |
| |
| def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True, |
| EmptyOnError=False, dirsonly=False): |
| """ |
| Portage-specific implementation of os.listdir |
| |
| @param mypath: Path whose contents you wish to list |
| @type mypath: String |
| @param recursive: Recursively scan directories contained within mypath |
| @type recursive: Boolean |
| @param filesonly: Only return files, not more directories |
| @type filesonly: Boolean |
| @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS') |
| @type ignorecvs: Boolean |
| @param ignorelist: List of filenames/directories to exclude |
| @type ignorelist: List |
| @param followSymlinks: Follow symlinked files and directories |
| @type followSymlinks: Boolean |
| @param EmptyOnError: Return [] if an error occurs. |
| @type EmptyOnError: Boolean |
| @param dirsonly: Only return directories. |
| @type dirsonly: Boolean |
| @rtype: List |
| @returns: A list of files and directories (or just files or just directories) or an empty list. |
| """ |
| |
| list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks) |
| |
| if list is None: |
| list=[] |
| if ftype is None: |
| ftype=[] |
| |
| if not (filesonly or dirsonly or recursive): |
| return list |
| |
| if recursive: |
| x=0 |
| while x<len(ftype): |
| if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')): |
| l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError, |
| followSymlinks) |
| |
| l=l[:] |
| for y in range(0,len(l)): |
| l[y]=list[x]+"/"+l[y] |
| list=list+l |
| ftype=ftype+f |
| x+=1 |
| if filesonly: |
| rlist=[] |
| for x in range(0,len(ftype)): |
| if ftype[x]==0: |
| rlist=rlist+[list[x]] |
| elif dirsonly: |
| rlist = [] |
| for x in range(0, len(ftype)): |
| if ftype[x] == 1: |
| rlist = rlist + [list[x]] |
| else: |
| rlist=list |
| |
| return rlist |
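| |
| # Illustrative usage (paths are hypothetical): |
| # listdir("/etc/env.d", EmptyOnError=1) -> all entries, or [] on error |
| # listdir("/usr/portage/profiles", recursive=True, dirsonly=True, |
| # ignorecvs=True) -> nested dir paths, skipping CVS/.svn/SCCS |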
| |
| def flatten(mytokens): |
| """Recursively turns a nested list such as [1,[2,3]] into |
| a flat [1,2,3] list and returns it.""" |
| newlist=[] |
| for x in mytokens: |
| if type(x)==types.ListType: |
| newlist.extend(flatten(x)) |
| else: |
| newlist.append(x) |
| return newlist |
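| |
| # Example: flatten([1, [2, [3, 4]]]) returns [1, 2, 3, 4]. |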
| |
| #beautiful directed graph object |
| |
| class digraph: |
| def __init__(self): |
| """Create an empty digraph""" |
| |
| # { node : ( { child : priority } , { parent : priority } ) } |
| self.nodes = {} |
| self.order = [] |
| |
| def add(self, node, parent, priority=0): |
| """Adds the specified node with the specified parent. |
| |
| If the relationship already exists, its priority is only ever |
| raised, so a hard relationship is never downgraded to a soft one.""" |
| |
| if node not in self.nodes: |
| self.nodes[node] = ({}, {}) |
| self.order.append(node) |
| |
| if not parent: |
| return |
| |
| if parent not in self.nodes: |
| self.nodes[parent] = ({}, {}) |
| self.order.append(parent) |
| |
| if parent in self.nodes[node][1]: |
| if priority > self.nodes[node][1][parent]: |
| self.nodes[node][1][parent] = priority |
| else: |
| self.nodes[node][1][parent] = priority |
| |
| if node in self.nodes[parent][0]: |
| if priority > self.nodes[parent][0][node]: |
| self.nodes[parent][0][node] = priority |
| else: |
| self.nodes[parent][0][node] = priority |
| |
| def remove(self, node): |
| """Removes the specified node from the digraph, also removing |
| any ties to other nodes in the digraph. Raises KeyError if the |
| node doesn't exist.""" |
| |
| if node not in self.nodes: |
| raise KeyError(node) |
| |
| for parent in self.nodes[node][1]: |
| del self.nodes[parent][0][node] |
| for child in self.nodes[node][0]: |
| del self.nodes[child][1][node] |
| |
| del self.nodes[node] |
| self.order.remove(node) |
| |
| def contains(self, node): |
| """Checks if the digraph contains the given node""" |
| return node in self.nodes |
| |
| def all_nodes(self): |
| """Return a list of all nodes in the graph""" |
| return self.order[:] |
| |
| def child_nodes(self, node, ignore_priority=None): |
| """Return all children of the specified node, omitting children |
| whose priority is less than or equal to ignore_priority""" |
| if ignore_priority is None: |
| return self.nodes[node][0].keys() |
| children = [] |
| for child, priority in self.nodes[node][0].iteritems(): |
| if priority > ignore_priority: |
| children.append(child) |
| return children |
| |
| def parent_nodes(self, node): |
| """Return all parents of the specified node""" |
| return self.nodes[node][1].keys() |
| |
| def leaf_nodes(self, ignore_priority=None): |
| """Return all nodes that have no children |
| |
| Children with a priority less than or equal to ignore_priority |
| are not counted.""" |
| |
| leaf_nodes = [] |
| for node in self.order: |
| is_leaf_node = True |
| for child in self.nodes[node][0]: |
| if self.nodes[node][0][child] > ignore_priority: |
| is_leaf_node = False |
| break |
| if is_leaf_node: |
| leaf_nodes.append(node) |
| return leaf_nodes |
| |
| def root_nodes(self, ignore_priority=None): |
| """Return all nodes that have no parents. |
| |
| Parents with a priority less than or equal to ignore_priority |
| are not counted.""" |
| |
| root_nodes = [] |
| for node in self.order: |
| is_root_node = True |
| for parent in self.nodes[node][1]: |
| if self.nodes[node][1][parent] > ignore_priority: |
| is_root_node = False |
| break |
| if is_root_node: |
| root_nodes.append(node) |
| return root_nodes |
| |
| def is_empty(self): |
| """Checks if the digraph is empty""" |
| return len(self.nodes) == 0 |
| |
| def clone(self): |
| clone = digraph() |
| clone.nodes = copy.deepcopy(self.nodes) |
| clone.order = self.order[:] |
| return clone |
| |
| # Backward compatibility |
| addnode = add |
| allnodes = all_nodes |
| allzeros = leaf_nodes |
| hasnode = contains |
| empty = is_empty |
| copy = clone |
| |
| def delnode(self, node): |
| try: |
| self.remove(node) |
| except KeyError: |
| pass |
| |
| def firstzero(self): |
| leaf_nodes = self.leaf_nodes() |
| if leaf_nodes: |
| return leaf_nodes[0] |
| return None |
| |
| def hasallzeros(self, ignore_priority=None): |
| return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \ |
| len(self.order) |
| |
| def debug_print(self): |
| for node in self.nodes: |
| print node, |
| if self.nodes[node][0]: |
| print "depends on" |
| else: |
| print "(no children)" |
| for child in self.nodes[node][0]: |
| print " ",child, |
| print "(%s)" % self.nodes[node][0][child] |
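| |
| # Illustrative digraph usage (package names are hypothetical): |
| # g = digraph() |
| # g.add("dev-libs/b", "dev-libs/a") # "dev-libs/a" depends on "dev-libs/b" |
| # g.leaf_nodes() # -> ["dev-libs/b"] (no children; safe to merge first) |
| # g.remove("dev-libs/b") |
| # g.leaf_nodes() # -> ["dev-libs/a"] |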
| |
| |
| _elog_atexit_handlers = [] |
| def elog_process(cpv, mysettings): |
| mylogfiles = listdir(mysettings["T"]+"/logging/") |
| # shortcut for packages without any messages |
| if len(mylogfiles) == 0: |
| return |
| # exploit listdir() file order so we process log entries in chronological order |
| mylogfiles.reverse() |
| mylogentries = {} |
| my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split()) |
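| # Each log file is named "<ebuild phase>.<message class>", e.g. a |
| # hypothetical "postinst.info", so the split below recovers both parts. |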
| for f in mylogfiles: |
| msgfunction, msgtype = f.split(".") |
| if msgtype.upper() not in my_elog_classes \ |
| and msgtype.lower() not in my_elog_classes: |
| continue |
| if msgfunction not in portage_const.EBUILD_PHASES: |
| writemsg("!!! can't process invalid log file: %s\n" % f, |
| noiselevel=-1) |
| continue |
| if msgfunction not in mylogentries: |
| mylogentries[msgfunction] = [] |
| msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines() |
| mylogentries[msgfunction].append((msgtype, msgcontent)) |
| |
| # in case the filters matched all messages |
| if len(mylogentries) == 0: |
| return |
| |
| # generate a single string with all log messages |
| fulllog = "" |
| for phase in portage_const.EBUILD_PHASES: |
| if phase not in mylogentries: |
| continue |
| for msgtype,msgcontent in mylogentries[phase]: |
| fulllog += "%s: %s\n" % (msgtype, phase) |
| for line in msgcontent: |
| fulllog += line |
| fulllog += "\n" |
| |
| # pass the processing to the individual modules |
| logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split() |
| for s in logsystems: |
| # - is nicer than _ for module names, so allow people to use it. |
| s = s.replace("-", "_") |
| try: |
| # FIXME: ugly ad-hoc import code |
| # TODO: implement a common portage module loader |
| logmodule = __import__("elog_modules.mod_"+s) |
| m = getattr(logmodule, "mod_"+s) |
| def timeout_handler(signum, frame): |
| raise portage_exception.PortageException( |
| "Timeout in elog_process for system '%s'" % s) |
| import signal |
| signal.signal(signal.SIGALRM, timeout_handler) |
| # Timeout after one minute (in case something like the mail |
| # module gets hung). |
| signal.alarm(60) |
| try: |
| m.process(mysettings, cpv, mylogentries, fulllog) |
| finally: |
| signal.alarm(0) |
| if hasattr(m, "finalize") and m.finalize not in _elog_atexit_handlers: |
| _elog_atexit_handlers.append(m.finalize) |
| atexit_register(m.finalize, mysettings) |
| except (ImportError, AttributeError), e: |
| writemsg("!!! Error while importing logging module " + \ |
| "\"mod_%s\":\n" % str(s), noiselevel=-1) |
| writemsg("%s\n" % str(e), noiselevel=-1) |
| except portage_exception.PortageException, e: |
| writemsg("%s\n" % str(e), noiselevel=-1) |
| |
| # clean logfiles to avoid repetitions |
| for f in mylogfiles: |
| try: |
| os.unlink(os.path.join(mysettings["T"], "logging", f)) |
| except OSError: |
| pass |
| |
| #parse /etc/env.d and generate /etc/profile.env |
| |
| def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None): |
| if target_root is None: |
| global root |
| target_root = root |
| if prev_mtimes is None: |
| global mtimedb |
| prev_mtimes = mtimedb["ldpath"] |
| envd_dir = os.path.join(target_root, "etc", "env.d") |
| portage_util.ensure_dirs(envd_dir, mode=0755) |
| fns = listdir(envd_dir, EmptyOnError=1) |
| fns.sort() |
| templist = [] |
| for x in fns: |
| if len(x) < 3: |
| continue |
| if not x[0].isdigit() or not x[1].isdigit(): |
| continue |
| if x.startswith(".") or x.endswith("~") or x.endswith(".bak"): |
| continue |
| templist.append(x) |
| fns = templist |
| del templist |
| |
| space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"]) |
| colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH", |
| "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH", |
| "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK", |
| "PYTHONPATH", "ROOTPATH"]) |
| |
| config_list = [] |
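| |
| # Each env.d file holds simple VAR="value" assignments; a hypothetical |
| # /etc/env.d/20java might contain: |
| # PATH="/opt/jdk/bin" |
| # LDPATH="/opt/jdk/jre/lib" |
| # MANPATH="/opt/jdk/man" |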
| |
| for x in fns: |
| file_path = os.path.join(envd_dir, x) |
| try: |
| myconfig = getconfig(file_path, expand=False) |
| except portage_exception.ParseError, e: |
| writemsg("!!! '%s'\n" % str(e), noiselevel=-1) |
| del e |
| continue |
| if myconfig is None: |
| # broken symlink or file removed by a concurrent process |
| writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1) |
| continue |
| config_list.append(myconfig) |
| if "SPACE_SEPARATED" in myconfig: |
| space_separated.update(myconfig["SPACE_SEPARATED"].split()) |
| del myconfig["SPACE_SEPARATED"] |
| if "COLON_SEPARATED" in myconfig: |
| colon_separated.update(myconfig["COLON_SEPARATED"].split()) |
| del myconfig["COLON_SEPARATED"] |
| |
| env = {} |
| specials = {} |
| for var in space_separated: |
| mylist = [] |
| for myconfig in config_list: |
| if var in myconfig: |
| mylist.extend(filter(None, myconfig[var].split())) |
| del myconfig[var] # prepare for env.update(myconfig) |
| if mylist: |
| env[var] = " ".join(mylist) |
| specials[var] = mylist |
| |
| for var in colon_separated: |
| mylist = [] |
| for myconfig in config_list: |
| if var in myconfig: |
| mylist.extend(filter(None, myconfig[var].split(":"))) |
| del myconfig[var] # prepare for env.update(myconfig) |
| if mylist: |
| env[var] = ":".join(mylist) |
| specials[var] = mylist |
| |
| for myconfig in config_list: |
| """Cumulative variables have already been deleted from myconfig so that |
| they won't be overwritten by this dict.update call.""" |
| env.update(myconfig) |
| |
| ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf") |
| try: |
| myld = open(ldsoconf_path) |
| myldlines=myld.readlines() |
| myld.close() |
| oldld=[] |
| for x in myldlines: |
| #each line has at least one char (a newline) |
| if x[0]=="#": |
| continue |
| oldld.append(x[:-1]) |
| except (IOError, OSError), e: |
| if e.errno != errno.ENOENT: |
| raise |
| oldld = None |
| |
| ld_cache_update=False |
| |
| newld = specials["LDPATH"] |
| if (oldld!=newld): |
| #ld.so.conf needs updating and ldconfig needs to be run |
| myfd = atomic_ofstream(ldsoconf_path) |
| myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n") |
| myfd.write("# contents of /etc/env.d directory\n") |
| for x in specials["LDPATH"]: |
| myfd.write(x+"\n") |
| myfd.close() |
| ld_cache_update=True |
| |
| # Update prelink.conf if we are prelink-enabled |
| if prelink_capable: |
| newprelink = atomic_ofstream( |
| os.path.join(target_root, "etc", "prelink.conf")) |
| newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n") |
| newprelink.write("# contents of /etc/env.d directory\n") |
| |
| for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]: |
| newprelink.write("-l "+x+"\n") |
| for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]: |
| if not x: |
| continue |
| if x[-1]!='/': |
| x=x+"/" |
| plmasked=0 |
| for y in specials["PRELINK_PATH_MASK"]: |
| if not y: |
| continue |
| if y[-1]!='/': |
| y=y+"/" |
| if y==x[0:len(y)]: |
| plmasked=1 |
| break |
| if not plmasked: |
| newprelink.write("-h "+x+"\n") |
| for x in specials["PRELINK_PATH_MASK"]: |
| newprelink.write("-b "+x+"\n") |
| newprelink.close() |
| |
| mtime_changed = False |
| lib_dirs = set() |
| for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']): |
| x = os.path.join(target_root, lib_dir.lstrip(os.sep)) |
| try: |
| newldpathtime = os.stat(x)[stat.ST_MTIME] |
| lib_dirs.add(normalize_path(x)) |
| except OSError, oe: |
| if oe.errno == errno.ENOENT: |
| try: |
| del prev_mtimes[x] |
| except KeyError: |
| pass |
| # ignore this path because it doesn't exist |
| continue |
| raise |
| if x in prev_mtimes: |
| if prev_mtimes[x] == newldpathtime: |
| pass |
| else: |
| prev_mtimes[x] = newldpathtime |
| mtime_changed = True |
| else: |
| prev_mtimes[x] = newldpathtime |
| mtime_changed = True |
| |
| if mtime_changed: |
| ld_cache_update = True |
| |
| if makelinks and \ |
| not ld_cache_update and \ |
| contents is not None: |
| libdir_contents_changed = False |
| for mypath, mydata in contents.iteritems(): |
| if mydata[0] not in ("obj","sym"): |
| continue |
| head, tail = os.path.split(mypath) |
| if head in lib_dirs: |
| libdir_contents_changed = True |
| break |
| if not libdir_contents_changed: |
| makelinks = False |
| |
| # Only run ldconfig as needed |
| if (ld_cache_update or makelinks): |
| # ldconfig has very different behaviour between FreeBSD and Linux |
| if ostype=="Linux" or ostype.lower().endswith("gnu"): |
| # We can't update links if we haven't cleaned other versions first, as |
| # an older package installed ON TOP of a newer version will cause ldconfig |
| # to overwrite the symlinks we just made. -X means no links. After 'clean' |
| # we can safely create links. |
| writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root) |
| if makelinks: |
| commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root) |
| else: |
| commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root) |
| elif ostype in ("FreeBSD","DragonFly"): |
| writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root) |
| commands.getstatusoutput( |
| "cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \ |
| (target_root, target_root)) |
| |
| del specials["LDPATH"] |
| |
| penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n" |
| penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n" |
| cenvnotice = penvnotice[:] |
| penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n" |
| cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n" |
| |
| #create /etc/profile.env for bash support |
| outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env")) |
| outfile.write(penvnotice) |
| |
| env_keys = [ x for x in env if x != "LDPATH" ] |
| env_keys.sort() |
| for x in env_keys: |
| outfile.write("export %s='%s'\n" % (x, env[x])) |
| outfile.close() |
| |
| #create /etc/csh.env for (t)csh support |
| outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env")) |
| outfile.write(cenvnotice) |
| for x in env_keys: |
| outfile.write("setenv %s '%s'\n" % (x, env[x])) |
| outfile.close() |
| |
| def ExtractKernelVersion(base_dir): |
| """ |
| Try to figure out what kernel version we are running |
| @param base_dir: Path to sources (usually /usr/src/linux) |
| @type base_dir: string |
| @rtype: tuple( version[string], error[string]) |
| @returns: |
| 1. tuple( version[string], error[string]) |
| Either version or error is populated (but never both) |
| |
| """ |
| lines = [] |
| pathname = os.path.join(base_dir, 'Makefile') |
| try: |
| f = open(pathname, 'r') |
| except (IOError, OSError), details: |
| return (None, str(details)) |
| |
| try: |
| for i in range(4): |
| lines.append(f.readline()) |
| except (IOError, OSError), details: |
| return (None, str(details)) |
| |
| lines = [l.strip() for l in lines] |
| |
| version = '' |
| |
| #XXX: The following code relies on the ordering of vars within the Makefile |
| for line in lines: |
| # split on the '=' then remove annoying whitespace |
| items = line.split("=") |
| items = [i.strip() for i in items] |
| if items[0] == 'VERSION' or \ |
| items[0] == 'PATCHLEVEL': |
| version += items[1] |
| version += "." |
| elif items[0] == 'SUBLEVEL': |
| version += items[1] |
| elif items[0] == 'EXTRAVERSION' and \ |
| items[-1] != items[0]: |
| version += items[1] |
| |
| # Grab a list of files named localversion* and sort them |
| localversions = os.listdir(base_dir) |
| for x in range(len(localversions)-1,-1,-1): |
| if localversions[x][:12] != "localversion": |
| del localversions[x] |
| localversions.sort() |
| |
| # Append the contents of each to the version string, stripping ALL whitespace |
| for lv in localversions: |
| version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() ) |
| |
| # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace |
| kernelconfig = getconfig(base_dir+"/.config") |
| if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"): |
| version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split()) |
| |
| return (version,None) |
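| |
| # Illustrative example: for a kernel Makefile that begins with |
| # VERSION = 2 |
| # PATCHLEVEL = 6 |
| # SUBLEVEL = 17 |
| # EXTRAVERSION = -gentoo |
| # this returns ("2.6.17-gentoo", None), assuming no localversion* files |
| # and no CONFIG_LOCALVERSION in .config. |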
| |
| def autouse(myvartree, use_cache=1, mysettings=None): |
| """ |
| autouse returns a string of USE variables that are auto-enabled for packages being installed |
| |
| @param myvartree: Instance of the vartree class (from /var/db/pkg...) |
| @type myvartree: vartree |
| @param use_cache: read values from cache |
| @type use_cache: Boolean |
| @param mysettings: Instance of config |
| @type mysettings: config |
| @rtype: string |
| @returns: A string containing a list of USE variables that are enabled via use.defaults |
| """ |
| if mysettings is None: |
| global settings |
| mysettings = settings |
| if mysettings.profile_path is None: |
| return "" |
| myusevars="" |
| usedefaults = mysettings.use_defs |
| for myuse in usedefaults: |
| dep_met = True |
| for mydep in usedefaults[myuse]: |
| if not myvartree.dep_match(mydep,use_cache=use_cache): |
| dep_met = False |
| break |
| if dep_met: |
| myusevars += " "+myuse |
| return myusevars |
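| |
| # use.defaults maps a USE flag to the atoms that trigger it; a profile line |
| # such as "gnome gnome-base/gnome-session" auto-enables "gnome" once |
| # gnome-base/gnome-session is installed. |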
| |
| def check_config_instance(test): |
| if not test or (str(test.__class__) != 'portage.config'): |
| raise TypeError, "Invalid type for config object: %s" % test.__class__ |
| |
| class config: |
| """ |
| This class encompasses the main portage configuration. Data is pulled from |
| ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all |
| parent profiles, as well as from ROOT/PORTAGE_CONFIGROOT/* for user-specified |
| overrides. |
| |
| Generally if you need data like USE flags, FEATURES, environment variables, |
| virtuals ...etc you look in here. |
| """ |
| |
| def __init__(self, clone=None, mycpv=None, config_profile_path=None, |
| config_incrementals=None, config_root=None, target_root=None, |
| local_config=True): |
| """ |
| @param clone: If provided, init will use deepcopy to copy by value the instance. |
| @type clone: Instance of config class. |
| @param mycpv: CPV to load up (see setcpv); passing it here is the same as calling __init__ with mycpv=None |
| and then calling instance.setcpv(mycpv). |
| @type mycpv: String |
| @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const) |
| @type config_profile_path: String |
| @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS) |
| @type config_incrementals: List |
| @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT) |
| @type config_root: String |
| @param target_root: __init__ override of $ROOT env variable. |
| @type target_root: String |
| @param local_config: Enables loading of local config (/etc/portage); used most by repoman to |
| ignore local config (keywording and unmasking) |
| @type local_config: Boolean |
| """ |
| |
| debug = os.environ.get("PORTAGE_DEBUG") == "1" |
| |
| self.already_in_regenerate = 0 |
| |
| self.locked = 0 |
| self.mycpv = None |
| self.puse = [] |
| self.modifiedkeys = [] |
| self.uvlist = [] |
| |
| self.virtuals = {} |
| self.virts_p = {} |
| self.dirVirtuals = None |
| self.v_count = 0 |
| |
| # Virtuals obtained from the vartree |
| self.treeVirtuals = {} |
| # Virtuals by user specification. Includes negatives. |
| self.userVirtuals = {} |
| # Virtual negatives from user specifications. |
| self.negVirtuals = {} |
| |
| self.user_profile_dir = None |
| self.local_config = local_config |
| |
| if clone: |
| self.incrementals = copy.deepcopy(clone.incrementals) |
| self.profile_path = copy.deepcopy(clone.profile_path) |
| self.user_profile_dir = copy.deepcopy(clone.user_profile_dir) |
| self.local_config = copy.deepcopy(clone.local_config) |
| |
| self.module_priority = copy.deepcopy(clone.module_priority) |
| self.modules = copy.deepcopy(clone.modules) |
| |
| self.depcachedir = copy.deepcopy(clone.depcachedir) |
| |
| self.packages = copy.deepcopy(clone.packages) |
| self.virtuals = copy.deepcopy(clone.virtuals) |
| |
| self.treeVirtuals = copy.deepcopy(clone.treeVirtuals) |
| self.userVirtuals = copy.deepcopy(clone.userVirtuals) |
| self.negVirtuals = copy.deepcopy(clone.negVirtuals) |
| |
| self.use_defs = copy.deepcopy(clone.use_defs) |
| self.usemask = copy.deepcopy(clone.usemask) |
| self.usemask_list = copy.deepcopy(clone.usemask_list) |
| self.pusemask_list = copy.deepcopy(clone.pusemask_list) |
| self.useforce = copy.deepcopy(clone.useforce) |
| self.useforce_list = copy.deepcopy(clone.useforce_list) |
| self.puseforce_list = copy.deepcopy(clone.puseforce_list) |
| self.puse = copy.deepcopy(clone.puse) |
| self.make_defaults_use = copy.deepcopy(clone.make_defaults_use) |
| self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse) |
| self.mycpv = copy.deepcopy(clone.mycpv) |
| |
| self.configlist = copy.deepcopy(clone.configlist) |
| self.lookuplist = self.configlist[:] |
| self.lookuplist.reverse() |
| self.configdict = { |
| "env.d": self.configlist[0], |
| "pkginternal": self.configlist[1], |
| "globals": self.configlist[2], |
| "defaults": self.configlist[3], |
| "conf": self.configlist[4], |
| "pkg": self.configlist[5], |
| "auto": self.configlist[6], |
| "backupenv": self.configlist[7], |
| "env": self.configlist[8] } |
| self.profiles = copy.deepcopy(clone.profiles) |
| self.backupenv = self.configdict["backupenv"] |
| self.pusedict = copy.deepcopy(clone.pusedict) |
| self.categories = copy.deepcopy(clone.categories) |
| self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict) |
| self.pmaskdict = copy.deepcopy(clone.pmaskdict) |
| self.punmaskdict = copy.deepcopy(clone.punmaskdict) |
| self.prevmaskdict = copy.deepcopy(clone.prevmaskdict) |
| self.pprovideddict = copy.deepcopy(clone.pprovideddict) |
| self.dirVirtuals = copy.deepcopy(clone.dirVirtuals) |
| self.treeVirtuals = copy.deepcopy(clone.treeVirtuals) |
| self.features = copy.deepcopy(clone.features) |
| else: |
| |
| # backupenv is for calculated incremental variables. |
| self.backupenv = os.environ.copy() |
| |
| def check_var_directory(varname, var): |
| if not os.path.isdir(var): |
| writemsg(("!!! Error: %s='%s' is not a directory. " + \ |
| "Please correct this.\n") % (varname, var), |
| noiselevel=-1) |
| raise portage_exception.DirectoryNotFound(var) |
| |
| if config_root is None: |
| config_root = "/" |
| |
| config_root = \ |
| normalize_path(config_root).rstrip(os.path.sep) + os.path.sep |
| |
| check_var_directory("PORTAGE_CONFIGROOT", config_root) |
| |
| self.depcachedir = DEPCACHE_PATH |
| |
| if not config_profile_path: |
| config_profile_path = \ |
| os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep)) |
| if os.path.isdir(config_profile_path): |
| self.profile_path = config_profile_path |
| else: |
| self.profile_path = None |
| else: |
| self.profile_path = config_profile_path[:] |
| |
| if not config_incrementals: |
| writemsg("incrementals not specified to class config\n") |
| self.incrementals = copy.deepcopy(portage_const.INCREMENTALS) |
| else: |
| self.incrementals = copy.deepcopy(config_incrementals) |
| |
| self.module_priority = ["user","default"] |
| self.modules = {} |
| self.modules["user"] = getconfig( |
| os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep))) |
| if self.modules["user"] is None: |
| self.modules["user"] = {} |
| self.modules["default"] = { |
| "portdbapi.metadbmodule": "cache.metadata.database", |
| "portdbapi.auxdbmodule": "cache.flat_hash.database", |
| } |
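| # The user modules file (MODULES_FILE_PATH) can override these defaults; |
| # load_best_module() consults "user" before "default". |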
| |
| self.usemask=[] |
| self.configlist=[] |
| |
| # back up our incremental variables: |
| self.configdict={} |
| # configlist will contain: [ env.d, pkginternal, globals, defaults, conf, pkg, auto, backupenv, env ] |
| self.configlist.append({}) |
| self.configdict["env.d"] = self.configlist[-1] |
| |
| self.configlist.append({}) |
| self.configdict["pkginternal"] = self.configlist[-1] |
| |
| # The symlink might not exist or might not be a symlink. |
| if self.profile_path is None: |
| self.profiles = [] |
| else: |
| self.profiles = [] |
| def addProfile(currentPath): |
| parentsFile = os.path.join(currentPath, "parent") |
| if os.path.exists(parentsFile): |
| parents = grabfile(parentsFile) |
| if not parents: |
| raise portage_exception.ParseError( |
| "Empty parent file: '%s'" % parentsFile) |
| for parentPath in parents: |
| parentPath = normalize_path(os.path.join( |
| currentPath, parentPath)) |
| if os.path.exists(parentPath): |
| addProfile(parentPath) |
| else: |
| raise portage_exception.ParseError( |
| "Parent '%s' not found: '%s'" % \ |
| (parentPath, parentsFile)) |
| self.profiles.append(currentPath) |
| addProfile(os.path.realpath(self.profile_path)) |
| if local_config: |
| custom_prof = os.path.join( |
| config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep)) |
| if os.path.exists(custom_prof): |
| self.user_profile_dir = custom_prof |
| self.profiles.append(custom_prof) |
| del custom_prof |
| |
| self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles] |
| self.packages = stack_lists(self.packages_list, incremental=1) |
| del self.packages_list |
| #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1) |
| |
| # prevmaskdict |
| self.prevmaskdict={} |
| for x in self.packages: |
| mycatpkg=dep_getkey(x) |
| if not self.prevmaskdict.has_key(mycatpkg): |
| self.prevmaskdict[mycatpkg]=[x] |
| else: |
| self.prevmaskdict[mycatpkg].append(x) |
| |
| # get profile-masked use flags -- INCREMENTAL Child over parent |
| self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \ |
| for x in self.profiles] |
| self.usemask = set(stack_lists( |
| self.usemask_list, incremental=True)) |
| use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles] |
| self.use_defs = stack_dictlist(use_defs_lists, incremental=True) |
| del use_defs_lists |
| |
| self.pusemask_list = [] |
| rawpusemask = [grabdict_package( |
| os.path.join(x, "package.use.mask")) \ |
| for x in self.profiles] |
| for i in xrange(len(self.profiles)): |
| cpdict = {} |
| for k, v in rawpusemask[i].iteritems(): |
| cpdict.setdefault(dep_getkey(k), {})[k] = v |
| self.pusemask_list.append(cpdict) |
| del rawpusemask |
| |
| self.pkgprofileuse = [] |
| rawprofileuse = [grabdict_package( |
| os.path.join(x, "package.use"), juststrings=True) \ |
| for x in self.profiles] |
| for i in xrange(len(self.profiles)): |
| cpdict = {} |
| for k, v in rawprofileuse[i].iteritems(): |
| cpdict.setdefault(dep_getkey(k), {})[k] = v |
| self.pkgprofileuse.append(cpdict) |
| del rawprofileuse |
| |
| self.useforce_list = [grabfile(os.path.join(x, "use.force")) \ |
| for x in self.profiles] |
| self.useforce = set(stack_lists( |
| self.useforce_list, incremental=True)) |
| |
| self.puseforce_list = [] |
| rawpuseforce = [grabdict_package( |
| os.path.join(x, "package.use.force")) \ |
| for x in self.profiles] |
| for i in xrange(len(self.profiles)): |
| cpdict = {} |
| for k, v in rawpuseforce[i].iteritems(): |
| cpdict.setdefault(dep_getkey(k), {})[k] = v |
| self.puseforce_list.append(cpdict) |
| del rawpuseforce |
| |
| try: |
| self.mygcfg = getconfig(os.path.join(config_root, "etc", "make.globals")) |
| |
| if self.mygcfg is None: |
| self.mygcfg = {} |
| except SystemExit, e: |
| raise |
| except Exception, e: |
| if debug: |
| raise |
| writemsg("!!! %s\n" % (e), noiselevel=-1) |
| if not isinstance(e, EnvironmentError): |
| writemsg("!!! Incorrect multiline literals can cause " + \ |
| "this. Do not use them.\n", noiselevel=-1) |
| sys.exit(1) |
| self.configlist.append(self.mygcfg) |
| self.configdict["globals"]=self.configlist[-1] |
| |
| self.make_defaults_use = [] |
| self.mygcfg = {} |
| if self.profiles: |
| try: |
| mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles] |
| for cfg in mygcfg_dlists: |
| if cfg: |
| self.make_defaults_use.append(cfg.get("USE", "")) |
| else: |
| self.make_defaults_use.append("") |
| self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1) |
| #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig) |
| if self.mygcfg is None: |
| self.mygcfg = {} |
| except SystemExit, e: |
| raise |
| except Exception, e: |
| if debug: |
| raise |
| writemsg("!!! %s\n" % (e), noiselevel=-1) |
| if not isinstance(e, EnvironmentError): |
| writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \ |
| "emerge sync' may fix this. If it does\n", |
| noiselevel=-1) |
| writemsg("!!! not then please report this to " + \ |
| "bugs.gentoo.org and, if possible, a dev\n", |
| noiselevel=-1) |
| writemsg("!!! on #gentoo (irc.freenode.org)\n", |
| noiselevel=-1) |
| sys.exit(1) |
| self.configlist.append(self.mygcfg) |
| self.configdict["defaults"]=self.configlist[-1] |
| |
| try: |
| self.mygcfg = getconfig( |
| os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)), |
| allow_sourcing=True) |
| if self.mygcfg is None: |
| self.mygcfg = {} |
| except SystemExit, e: |
| raise |
| except Exception, e: |
| if debug: |
| raise |
| writemsg("!!! %s\n" % (e), noiselevel=-1) |
| if not isinstance(e, EnvironmentError): |
| writemsg("!!! Incorrect multiline literals can cause " + \ |
| "this. Do not use them.\n", noiselevel=-1) |
| sys.exit(1) |
| |
| # Allow ROOT setting to come from make.conf if it's not overridden |
| # by the constructor argument (from the calling environment). As a |
| # special exception for a very common use case, config_root == "/" |
| # implies that ROOT in make.conf should be ignored. That way, the |
| # user can chroot into $ROOT and the ROOT setting in make.conf will |
| # be automatically ignored (unless config_root is other than "/"). |
| if config_root != "/" and \ |
| target_root is None and "ROOT" in self.mygcfg: |
| target_root = self.mygcfg["ROOT"] |
| |
| self.configlist.append(self.mygcfg) |
| self.configdict["conf"]=self.configlist[-1] |
| |
| self.configlist.append({}) |
| self.configdict["pkg"]=self.configlist[-1] |
| |
| #auto-use: |
| self.configlist.append({}) |
| self.configdict["auto"]=self.configlist[-1] |
| |
| self.configlist.append(self.backupenv) # XXX Why though? |
| self.configdict["backupenv"]=self.configlist[-1] |
| |
| self.configlist.append(os.environ.copy()) |
| self.configdict["env"]=self.configlist[-1] |
| |
| |
| # make lookuplist for loading package.* |
| self.lookuplist=self.configlist[:] |
| self.lookuplist.reverse() |
| |
| # Blacklist vars that could interfere with portage internals. |
| for blacklisted in ["PKGUSE", "PORTAGE_CONFIGROOT", "ROOT"]: |
| for cfg in self.lookuplist: |
| try: |
| del cfg[blacklisted] |
| except KeyError: |
| pass |
| del blacklisted, cfg |
| |
| if target_root is None: |
| target_root = "/" |
| |
| target_root = \ |
| normalize_path(target_root).rstrip(os.path.sep) + os.path.sep |
| |
| check_var_directory("ROOT", target_root) |
| |
| env_d = getconfig( |
| os.path.join(target_root, "etc", "profile.env"), expand=False) |
| # env_d will be None if profile.env doesn't exist. |
| if env_d: |
| self.configdict["env.d"].update(env_d) |
| # Remove duplicate values so they don't override updated |
| # profile.env values later (profile.env is reloaded in each |
| # call to self.regenerate). |
| for cfg in (self.configdict["backupenv"], |
| self.configdict["env"]): |
| for k, v in env_d.iteritems(): |
| try: |
| if cfg[k] == v: |
| del cfg[k] |
| except KeyError: |
| pass |
| del cfg, k, v |
| |
| self["PORTAGE_CONFIGROOT"] = config_root |
| self.backup_changes("PORTAGE_CONFIGROOT") |
| self["ROOT"] = target_root |
| self.backup_changes("ROOT") |
| |
| self.pusedict = {} |
| self.pkeywordsdict = {} |
| self.punmaskdict = {} |
| abs_user_config = os.path.join(config_root, |
| USER_CONFIG_PATH.lstrip(os.path.sep)) |
| |
| # locations for "categories" and "arch.list" files |
| locations = [os.path.join(self["PORTDIR"], "profiles")] |
| pmask_locations = [os.path.join(self["PORTDIR"], "profiles")] |
| pmask_locations.extend(self.profiles) |
| |
| """ repoman controls PORTDIR_OVERLAY via the environment, so no |
| special cases are needed here.""" |
| overlay_profiles = [] |
| for ov in self["PORTDIR_OVERLAY"].split(): |
| ov = normalize_path(ov) |
| profiles_dir = os.path.join(ov, "profiles") |
| if os.path.isdir(profiles_dir): |
| overlay_profiles.append(profiles_dir) |
| locations += overlay_profiles |
| |
| pmask_locations.extend(overlay_profiles) |
| |
| if local_config: |
| locations.append(abs_user_config) |
| pmask_locations.append(abs_user_config) |
| pusedict = grabdict_package( |
| os.path.join(abs_user_config, "package.use"), recursive=1) |
| for key in pusedict.keys(): |
| cp = dep_getkey(key) |
| if not self.pusedict.has_key(cp): |
| self.pusedict[cp] = {} |
| self.pusedict[cp][key] = pusedict[key] |
| |
| #package.keywords |
| pkgdict = grabdict_package( |
| os.path.join(abs_user_config, "package.keywords"), |
| recursive=1) |
| for key in pkgdict.keys(): |
| # default to ~arch if no specific keyword is given |
| if not pkgdict[key]: |
| mykeywordlist = [] |
| if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"): |
| groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split() |
| else: |
| groups = [] |
| for keyword in groups: |
| if not keyword[0] in "~-": |
| mykeywordlist.append("~"+keyword) |
| pkgdict[key] = mykeywordlist |
| cp = dep_getkey(key) |
| if not self.pkeywordsdict.has_key(cp): |
| self.pkeywordsdict[cp] = {} |
| self.pkeywordsdict[cp][key] = pkgdict[key] |
| |
| #package.unmask |
| pkgunmasklines = grabfile_package( |
| os.path.join(abs_user_config, "package.unmask"), |
| recursive=1) |
| for x in pkgunmasklines: |
| mycatpkg=dep_getkey(x) |
| if self.punmaskdict.has_key(mycatpkg): |
| self.punmaskdict[mycatpkg].append(x) |
| else: |
| self.punmaskdict[mycatpkg]=[x] |
| |
| #getting categories from an external file now |
| categories = [grabfile(os.path.join(x, "categories")) for x in locations] |
| self.categories = stack_lists(categories, incremental=1) |
| del categories |
| |
| archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations] |
| archlist = stack_lists(archlist, incremental=1) |
| self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist) |
| |
| #package.mask |
| pkgmasklines = [] |
| for x in pmask_locations: |
| pkgmasklines.append(grabfile_package( |
| os.path.join(x, "package.mask"), recursive=1)) |
| pkgmasklines = stack_lists(pkgmasklines, incremental=1) |
| |
| self.pmaskdict = {} |
| for x in pkgmasklines: |
| mycatpkg=dep_getkey(x) |
| if self.pmaskdict.has_key(mycatpkg): |
| self.pmaskdict[mycatpkg].append(x) |
| else: |
| self.pmaskdict[mycatpkg]=[x] |
| |
| pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles] |
| pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1) |
| has_invalid_data = False |
| for x in range(len(pkgprovidedlines)-1, -1, -1): |
| myline = pkgprovidedlines[x] |
| if not isvalidatom("=" + myline): |
| writemsg("Invalid package name in package.provided:" + \ |
| " %s\n" % myline, noiselevel=-1) |
| has_invalid_data = True |
| del pkgprovidedlines[x] |
| continue |
| cpvr = catpkgsplit(pkgprovidedlines[x]) |
| if not cpvr or cpvr[0] == "null": |
| writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n", |
| noiselevel=-1) |
| has_invalid_data = True |
| del pkgprovidedlines[x] |
| continue |
| if cpvr[0] == "virtual": |
| writemsg("Virtual package in package.provided: %s\n" % \ |
| myline, noiselevel=-1) |
| has_invalid_data = True |
| del pkgprovidedlines[x] |
| continue |
| if has_invalid_data: |
| writemsg("See portage(5) for correct package.provided usage.\n", |
| noiselevel=-1) |
| self.pprovideddict = {} |
| for x in pkgprovidedlines: |
| if not x: |
| continue |
| mycatpkg=dep_getkey(x) |
| if self.pprovideddict.has_key(mycatpkg): |
| self.pprovideddict[mycatpkg].append(x) |
| else: |
| self.pprovideddict[mycatpkg]=[x] |
| |
| # reasonable defaults; this is important as without USE_ORDER, |
| # USE will always be "" (nothing set)! |
| if "USE_ORDER" not in self: |
| self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal" |
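| # Leftmost entries in USE_ORDER take precedence: with this default, the |
| # environment overrides package.use, which overrides make.conf, which |
| # overrides the profile defaults. |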
| |
| self["PORTAGE_GID"] = str(portage_gid) |
| self.backup_changes("PORTAGE_GID") |
| |
| if self.get("PORTAGE_DEPCACHEDIR", None): |
| self.depcachedir = self["PORTAGE_DEPCACHEDIR"] |
| self["PORTAGE_DEPCACHEDIR"] = self.depcachedir |
| self.backup_changes("PORTAGE_DEPCACHEDIR") |
| |
| overlays = self.get("PORTDIR_OVERLAY","").split() |
| if overlays: |
| new_ov = [] |
| for ov in overlays: |
| ov = normalize_path(ov) |
| if os.path.isdir(ov): |
| new_ov.append(ov) |
| else: |
| writemsg("!!! Invalid PORTDIR_OVERLAY" + \ |
| " (not a dir): '%s'\n" % ov, noiselevel=-1) |
| self["PORTDIR_OVERLAY"] = " ".join(new_ov) |
| self.backup_changes("PORTDIR_OVERLAY") |
| |
| if "CBUILD" not in self and "CHOST" in self: |
| self["CBUILD"] = self["CHOST"] |
| self.backup_changes("CBUILD") |
| |
| self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH |
| self.backup_changes("PORTAGE_BIN_PATH") |
| self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH |
| self.backup_changes("PORTAGE_PYM_PATH") |
| |
| for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"): |
| try: |
| self[var] = str(int(self.get(var, "0"))) |
| except ValueError: |
| writemsg(("!!! %s='%s' is not a valid integer. " + \ |
| "Falling back to '0'.\n") % (var, self[var]), |
| noiselevel=-1) |
| self[var] = "0" |
| self.backup_changes(var) |
| |
| self.regenerate() |
| self.features = portage_util.unique_array(self["FEATURES"].split()) |
| |
| if "gpg" in self.features: |
| if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \ |
| not os.path.isdir(self["PORTAGE_GPG_DIR"]): |
| writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \ |
| " Removing gpg from FEATURES.\n"), noiselevel=-1) |
| self.features.remove("gpg") |
| |
| if not portage_exec.sandbox_capable and \ |
| ("sandbox" in self.features or "usersandbox" in self.features): |
| if self.profile_path is not None and \ |
| os.path.realpath(self.profile_path) == \ |
| os.path.realpath(PROFILE_PATH): |
| """ Don't show this warning when running repoman and the |
| sandbox feature came from a profile that doesn't belong to |
| the user.""" |
| writemsg(colorize("BAD", "!!! Problem with sandbox" + \ |
| " binary. Disabling...\n\n"), noiselevel=-1) |
| if "sandbox" in self.features: |
| self.features.remove("sandbox") |
| if "usersandbox" in self.features: |
| self.features.remove("usersandbox") |
| |
| self.features.sort() |
| self["FEATURES"] = " ".join(self.features) |
| self.backup_changes("FEATURES") |
| |
| self._init_dirs() |
| |
| if mycpv: |
| self.setcpv(mycpv) |
| |
| def _init_dirs(self): |
| """ |
| Create a few directories that are critical to portage operation |
| """ |
| if not os.access(self["ROOT"], os.W_OK): |
| return |
| |
| dir_mode_map = { |
| "tmp" :(-1, 01777, 0), |
| "var/tmp" :(-1, 01777, 0), |
| "var/lib/portage" :(portage_gid, 02750, 02), |
| "var/cache/edb" :(portage_gid, 0755, 02) |
| } |
| |
| for mypath, (gid, mode, modemask) in dir_mode_map.iteritems(): |
| try: |
| mydir = os.path.join(self["ROOT"], mypath) |
| portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask) |
| except portage_exception.PortageException, e: |
| writemsg("!!! Directory initialization failed: '%s'\n" % mydir, |
| noiselevel=-1) |
| writemsg("!!! %s\n" % str(e), |
| noiselevel=-1) |
| |
| def validate(self): |
| """Validate miscellaneous settings and display warnings if necessary. |
| (This code was previously in the global scope of portage.py)""" |
| |
| groups = self["ACCEPT_KEYWORDS"].split() |
| archlist = self.archlist() |
| if not archlist: |
| writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n") |
| else: |
| for group in groups: |
| if group not in archlist and group[0] != '-': |
| writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group), |
| noiselevel=-1) |
| |
| abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"], |
| PROFILE_PATH.lstrip(os.path.sep)) |
| if not os.path.islink(abs_profile_path) and \ |
| not os.path.exists(os.path.join(abs_profile_path, "parent")) and \ |
| os.path.exists(os.path.join(self["PORTDIR"], "profiles")): |
| writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path, |
| noiselevel=-1) |
| writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"]) |
| writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n") |
| |
| abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"], |
| USER_VIRTUALS_FILE.lstrip(os.path.sep)) |
| if os.path.exists(abs_user_virtuals): |
| writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n") |
| writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n") |
| writemsg("!!! this new location.\n\n") |
| |
| def loadVirtuals(self,root): |
| """Not currently used by portage.""" |
| writemsg("DEPRECATED: portage.config.loadVirtuals\n") |
| self.getvirtuals(root) |
| |
| def load_best_module(self,property_string): |
| best_mod = best_from_dict(property_string,self.modules,self.module_priority) |
| try: |
| mod = load_mod(best_mod) |
| except ImportError: |
| dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0) |
| sys.exit(1) |
| return mod |
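| |
| # Illustrative usage: |
| # mod = settings.load_best_module("portdbapi.auxdbmodule") |
| # returns cache.flat_hash.database unless the user modules file overrides it. |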
| |
| def lock(self): |
| self.locked = 1 |
| |
| def unlock(self): |
| self.locked = 0 |
| |
| def modifying(self): |
| if self.locked: |
| raise Exception, "Configuration is locked." |
| |
| def backup_changes(self,key=None): |
| self.modifying() |
| if key and self.configdict["env"].has_key(key): |
| self.backupenv[key] = copy.deepcopy(self.configdict["env"][key]) |
| else: |
| raise KeyError, "No such key defined in environment: %s" % key |
| |
| def reset(self,keeping_pkg=0,use_cache=1): |
| """ |
| Restore environment from self.backupenv, call self.regenerate() |
| @param keeping_pkg: Should we keep the setcpv() data or delete it. |
| @type keeping_pkg: Boolean |
| @param use_cache: Should self.regenerate use the cache or not |
| @type use_cache: Boolean |
| @rtype: None |
| """ |
| self.modifying() |
| self.configdict["env"].clear() |
| self.configdict["env"].update(self.backupenv) |
| |
| self.modifiedkeys = [] |
| if not keeping_pkg: |
| self.mycpv = None |
| self.puse = "" |
| self.configdict["pkg"].clear() |
| self.configdict["pkginternal"].clear() |
| self.configdict["defaults"]["USE"] = \ |
| " ".join(self.make_defaults_use) |
| self.usemask = set(stack_lists( |
| self.usemask_list, incremental=True)) |
| self.useforce = set(stack_lists( |
| self.useforce_list, incremental=True)) |
| self.regenerate(use_cache=use_cache) |
| |
| def load_infodir(self,infodir): |
| self.modifying() |
| if self.configdict.has_key("pkg"): |
| for x in self.configdict["pkg"].keys(): |
| del self.configdict["pkg"][x] |
| else: |
| writemsg("No pkg setup for settings instance?\n", |
| noiselevel=-1) |
| sys.exit(17) |
| |
| if os.path.exists(infodir): |
| if os.path.exists(infodir+"/environment"): |
| self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment" |
| |
| myre = re.compile('^[A-Z]+$') |
| null_byte = "\0" |
| for filename in listdir(infodir,filesonly=1,EmptyOnError=1): |
| if myre.match(filename): |
| try: |
| file_path = os.path.join(infodir, filename) |
| mydata = open(file_path).read().strip() |
| if len(mydata) < 2048 or filename == "USE": |
| if null_byte in mydata: |
| writemsg("!!! Null byte found in metadata " + \ |
| "file: '%s'\n" % file_path, noiselevel=-1) |
| continue |
| if filename == "USE": |
| binpkg_flags = "-* " + mydata |
| self.configdict["pkg"][filename] = binpkg_flags |
| self.configdict["env"][filename] = mydata |
| else: |
| self.configdict["pkg"][filename] = mydata |
| self.configdict["env"][filename] = mydata |
| # CATEGORY is important because it's used in doebuild |
| # to infer the cpv. If it's corrupted, it leads to |
| # strange errors later on, so we'll validate it and |
| # print a warning if necessary. |
| if filename == "CATEGORY": |
| matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata) |
| if not matchobj or matchobj.start() != 0 or \ |
| matchobj.end() != len(mydata): |
| writemsg("!!! CATEGORY file is corrupt: %s\n" % \ |
| os.path.join(infodir, filename), noiselevel=-1) |
| except (OSError, IOError): |
| writemsg("!!! Unable to read file: %s\n" % \ |
| os.path.join(infodir, filename), noiselevel=-1) |
| return 1 |
| return 0 |
| |
| def setcpv(self, mycpv, use_cache=1, mydb=None): |
| """ |
| Load a particular CPV into the config, this lets us see the |
| Default USE flags for a particular ebuild as well as the USE |
| flags from package.use. |
| |
| @param mycpv: A cpv to load |
| @type mycpv: string |
| @param use_cache: Enables caching |
| @type use_cache: Boolean |
| @param mydb: a dbapi instance that supports aux_get with the IUSE key. |
| @type mydb: dbapi or derivative. |
| @rtype: None |
| """ |
| |
| self.modifying() |
| if self.mycpv == mycpv: |
| return |
| has_changed = False |
| self.mycpv = mycpv |
| cp = dep_getkey(mycpv) |
| pkginternaluse = "" |
| if mydb: |
| pkginternaluse = " ".join([x[1:] \ |
| for x in mydb.aux_get(mycpv, ["IUSE"])[0].split() \ |
| if x.startswith("+")]) |
| if pkginternaluse != self.configdict["pkginternal"].get("USE", ""): |
| self.configdict["pkginternal"]["USE"] = pkginternaluse |
| has_changed = True |
| defaults = [] |
| for i in xrange(len(self.profiles)): |
| defaults.append(self.make_defaults_use[i]) |
| cpdict = self.pkgprofileuse[i].get(cp, None) |
| if cpdict: |
| best_match = best_match_to_list(self.mycpv, cpdict.keys()) |
| if best_match: |
| defaults.append(cpdict[best_match]) |
| defaults = " ".join(defaults) |
| if defaults != self.configdict["defaults"].get("USE",""): |
| self.configdict["defaults"]["USE"] = defaults |
| has_changed = True |
| useforce = [] |
| for i in xrange(len(self.profiles)): |
| useforce.append(self.useforce_list[i]) |
| cpdict = self.puseforce_list[i].get(cp, None) |
| if cpdict: |
| best_match = best_match_to_list(self.mycpv, cpdict.keys()) |
| if best_match: |
| useforce.append(cpdict[best_match]) |
| useforce = set(stack_lists(useforce, incremental=True)) |
| if useforce != self.useforce: |
| self.useforce = useforce |
| has_changed = True |
| usemask = [] |
| for i in xrange(len(self.profiles)): |
| usemask.append(self.usemask_list[i]) |
| cpdict = self.pusemask_list[i].get(cp, None) |
| if cpdict: |
| best_match = best_match_to_list(self.mycpv, cpdict.keys()) |
| if best_match: |
| usemask.append(cpdict[best_match]) |
| usemask = set(stack_lists(usemask, incremental=True)) |
| if usemask != self.usemask: |
| self.usemask = usemask |
| has_changed = True |
| oldpuse = self.puse |
| self.puse = "" |
| if self.pusedict.has_key(cp): |
| self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys()) |
| if self.pusekey: |
| self.puse = " ".join(self.pusedict[cp][self.pusekey]) |
| if oldpuse != self.puse: |
| has_changed = True |
| self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file |
| self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE |
| # CATEGORY is essential for doebuild calls |
| self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0] |
| if has_changed: |
| self.reset(keeping_pkg=1,use_cache=use_cache) |
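| 
| # Usage sketch (hypothetical cpv; assumes a ready config instance and
| # a dbapi such as a portdbapi bound to the same settings):
| #
| #   settings.setcpv("app-editors/vim-7.0", mydb=portdb)
| #   myuse = settings["USE"].split()  # now includes per-package flags
| #
| # Passing mydb enables IUSE defaults ("+flag") via pkginternaluse.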
| |
| def setinst(self,mycpv,mydbapi): |
| self.modifying() |
| if len(self.virtuals) == 0: |
| self.getvirtuals() |
| # Grab the virtuals this package provides and add them into the tree virtuals. |
| provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0] |
| if isinstance(mydbapi, portdbapi): |
| myuse = self["USE"] |
| else: |
| myuse = mydbapi.aux_get(mycpv, ["USE"])[0] |
| virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split())) |
| |
| cp = dep_getkey(mycpv) |
| for virt in virts: |
| virt = dep_getkey(virt) |
| if not self.treeVirtuals.has_key(virt): |
| self.treeVirtuals[virt] = [] |
| # XXX: Is this bad? -- It's a permanent modification |
| if cp not in self.treeVirtuals[virt]: |
| self.treeVirtuals[virt].append(cp) |
| |
| self.virtuals = self.__getvirtuals_compile() |
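| 
| # PROVIDE sketch (hypothetical metadata): given
| #   PROVIDE="virtual/editor emacs? ( virtual/emacs )" and USE="emacs",
| # use_reduce/flatten yield both virtual atoms, and this package's cp is
| # registered in treeVirtuals under each of them.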
| |
| |
| def regenerate(self,useonly=0,use_cache=1): |
| """ |
| Regenerate settings |
| This involves regenerating valid USE flags, re-expanding USE_EXPAND flags,
| re-stacking USE flags (-flag and -*), and re-stacking any other INCREMENTAL
| variables. It also updates the env.d configdict, which is useful in case an
| ebuild changes the environment.
| 
| If FEATURES has already been stacked, it is not stacked twice.
| |
| @param useonly: Only regenerate USE flags (not any other incrementals) |
| @type useonly: Boolean |
| @param use_cache: Enable Caching (only for autouse) |
| @type use_cache: Boolean |
| @rtype: None |
| """ |
| |
| self.modifying() |
| if self.already_in_regenerate: |
| # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops. |
| writemsg("!!! Looping in regenerate.\n",1) |
| return |
| else: |
| self.already_in_regenerate = 1 |
| |
| # We grab the latest profile.env here since it changes frequently. |
| self.configdict["env.d"].clear() |
| env_d = getconfig( |
| os.path.join(self["ROOT"], "etc", "profile.env"), expand=False) |
| if env_d: |
| # env_d will be None if profile.env doesn't exist. |
| self.configdict["env.d"].update(env_d) |
| |
| if useonly: |
| myincrementals=["USE"] |
| else: |
| myincrementals = self.incrementals |
| myincrementals = set(myincrementals) |
| # If self.features exists, it has already been stacked and may have |
| # been mutated, so don't stack it again or else any mutations will be |
| # reverted. |
| if "FEATURES" in myincrementals and hasattr(self, "features"): |
| myincrementals.remove("FEATURES") |
| |
| if "USE" in myincrementals: |
| # Process USE last because it depends on USE_EXPAND which is also |
| # an incremental! |
| myincrementals.remove("USE") |
| |
| for mykey in myincrementals: |
| |
| mydbs=self.configlist[:-1] |
| |
| myflags=[] |
| for curdb in mydbs: |
| if mykey not in curdb: |
| continue |
| #variables are already expanded |
| mysplit = curdb[mykey].split() |
| |
| for x in mysplit: |
| if x=="-*": |
| # "-*" is a special "minus" var that means "unset all settings". |
| # so USE="-* gnome" will have *just* gnome enabled. |
| myflags = [] |
| continue |
| |
| if x[0]=="+": |
| # Not legal. People assume too much. Complain. |
| writemsg(red("USE flags should not start with a '+': %s\n" % x), |
| noiselevel=-1) |
| x=x[1:] |
| if not x: |
| continue |
| |
| if (x[0]=="-"): |
| if (x[1:] in myflags): |
| # Unset/Remove it. |
| del myflags[myflags.index(x[1:])] |
| continue |
| |
| # We got here, so add it now. |
| if x not in myflags: |
| myflags.append(x) |
| |
| myflags.sort() |
| #store setting in last element of configlist, the original environment: |
| if myflags or mykey in self: |
| self.configlist[-1][mykey] = " ".join(myflags) |
| del myflags |
| |
| # Do the USE calculation last because it depends on USE_EXPAND. |
| if "auto" in self["USE_ORDER"].split(":"): |
| self.configdict["auto"]["USE"] = autouse( |
| vartree(root=self["ROOT"], categories=self.categories, |
| settings=self), |
| use_cache=use_cache, mysettings=self) |
| else: |
| self.configdict["auto"]["USE"] = "" |
| |
| use_expand_protected = [] |
| use_expand = self.get("USE_EXPAND", "").split() |
| for var in use_expand: |
| var_lower = var.lower() |
| for x in self.get(var, "").split(): |
| # Any incremental USE_EXPAND variables have already been |
| # processed, so leading +/- operators are invalid here. |
| if x[0] == "+": |
| writemsg(colorize("BAD", "Invalid '+' operator in " + \ |
| "non-incremental variable '%s': '%s'\n" % (var, x)), |
| noiselevel=-1) |
| x = x[1:] |
| if x[0] == "-": |
| writemsg(colorize("BAD", "Invalid '-' operator in " + \ |
| "non-incremental variable '%s': '%s'\n" % (var, x)), |
| noiselevel=-1) |
| continue |
| mystr = var_lower + "_" + x |
| if mystr not in use_expand_protected: |
| use_expand_protected.append(mystr) |
| |
| if not self.uvlist: |
| for x in self["USE_ORDER"].split(":"): |
| if x in self.configdict: |
| self.uvlist.append(self.configdict[x]) |
| self.uvlist.reverse() |
| |
| myflags = use_expand_protected[:] |
| for curdb in self.uvlist: |
| if "USE" not in curdb: |
| continue |
| mysplit = curdb["USE"].split() |
| for x in mysplit: |
| if x == "-*": |
| myflags = use_expand_protected[:] |
| continue |
| |
| if x[0] == "+": |
| writemsg(colorize("BAD", "USE flags should not start " + \ |
| "with a '+': %s\n" % x), noiselevel=-1) |
| x = x[1:] |
| if not x: |
| continue |
| |
| if x[0] == "-": |
| try: |
| myflags.remove(x[1:]) |
| except ValueError: |
| pass |
| continue |
| |
| if x not in myflags: |
| myflags.append(x) |
| |
| myflags = set(myflags) |
| myflags.update(self.useforce) |
| |
| # FEATURES=test should imply USE=test |
| if "test" in self.configlist[-1].get("FEATURES","").split(): |
| myflags.add("test") |
| |
| usesplit = [ x for x in myflags if \ |
| x not in self.usemask] |
| |
| usesplit.sort() |
| |
| # Use the calculated USE flags to regenerate the USE_EXPAND flags so |
| # that they are consistent. |
| for var in use_expand: |
| prefix = var.lower() + "_" |
| prefix_len = len(prefix) |
| expand_flags = set([ x[prefix_len:] for x in usesplit \ |
| if x.startswith(prefix) ]) |
| var_split = self.get(var, "").split() |
| # Preserve the order of var_split because it can matter for things |
| # like LINGUAS. |
| var_split = [ x for x in var_split if x in expand_flags ] |
| var_split.extend(expand_flags.difference(var_split)) |
| if var_split or var in self: |
| # Don't export empty USE_EXPAND vars unless the user config |
| # exports them as empty. This is required for vars such as |
| # LINGUAS, where unset and empty have different meanings. |
| self[var] = " ".join(var_split) |
| |
| # Prepend the ARCH variable to USE settings so '-*' in env doesn't kill arch.
| if self.configdict["defaults"].has_key("ARCH"): |
| if self.configdict["defaults"]["ARCH"]: |
| if self.configdict["defaults"]["ARCH"] not in usesplit: |
| usesplit.insert(0,self.configdict["defaults"]["ARCH"]) |
| |
| self.configlist[-1]["USE"]= " ".join(usesplit) |
| |
| self.already_in_regenerate = 0 |
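| 
| # Incremental stacking sketch (hypothetical layers, default USE_ORDER):
| #   make.defaults: USE="X gtk"
| #   make.conf:     USE="-gtk alsa"  -> effective USE="X alsa"
| #   env:           USE="-* doc"     -> effective USE="doc" (plus forced
| #                                      flags and the prepended ARCH)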
| |
| def get_virts_p(self, myroot): |
| if self.virts_p: |
| return self.virts_p |
| virts = self.getvirtuals(myroot) |
| if virts: |
| myvkeys = virts.keys() |
| for x in myvkeys: |
| vkeysplit = x.split("/") |
| if not self.virts_p.has_key(vkeysplit[1]): |
| self.virts_p[vkeysplit[1]] = virts[x] |
| return self.virts_p |
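| 
| # e.g. a "virtual/x11" entry is also exposed under the short key "x11";
| # the first definition seen for each short key wins.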
| |
| def getvirtuals(self, myroot=None): |
| """myroot is now ignored because, due to caching, it has always been |
| broken for all but the first call.""" |
| myroot = self["ROOT"] |
| if self.virtuals: |
| return self.virtuals |
| |
| virtuals_list = [] |
| for x in self.profiles: |
| virtuals_file = os.path.join(x, "virtuals") |
| virtuals_dict = grabdict(virtuals_file) |
| for k in virtuals_dict.keys(): |
| if not isvalidatom(k) or dep_getkey(k) != k: |
| writemsg("--- Invalid virtuals atom in %s: %s\n" % \ |
| (virtuals_file, k), noiselevel=-1) |
| del virtuals_dict[k] |
| continue |
| myvalues = virtuals_dict[k] |
| for x in myvalues: |
| myatom = x |
| if x.startswith("-"): |
| # allow incrementals |
| myatom = x[1:] |
| if not isvalidatom(myatom): |
| writemsg("--- Invalid atom in %s: %s\n" % \ |
| (virtuals_file, x), noiselevel=-1) |
| myvalues.remove(x) |
| if not myvalues: |
| del virtuals_dict[k] |
| if virtuals_dict: |
| virtuals_list.append(virtuals_dict) |
| |
| self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True) |
| del virtuals_list |
| |
| for virt in self.dirVirtuals: |
| # Preference for virtuals decreases from left to right. |
| self.dirVirtuals[virt].reverse() |
| |
| # Repoman does not use user or tree virtuals. |
| if self.local_config and not self.treeVirtuals: |
| temp_vartree = vartree(myroot, None, |
| categories=self.categories, settings=self) |
| # Reduce the provides into a list by CP. |
| self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides()) |
| |
| self.virtuals = self.__getvirtuals_compile() |
| return self.virtuals |
| |
| def __getvirtuals_compile(self): |
| """Stack installed and profile virtuals. Preference for virtuals |
| decreases from left to right. |
| Order of preference: |
| 1. installed and in profile |
| 2. installed only |
| 3. profile only |
| """ |
| |
| # Virtuals by profile+tree preferences. |
| ptVirtuals = {} |
| |
| for virt, installed_list in self.treeVirtuals.iteritems(): |
| profile_list = self.dirVirtuals.get(virt, None) |
| if not profile_list: |
| continue |
| for cp in installed_list: |
| if cp in profile_list: |
| ptVirtuals.setdefault(virt, []) |
| ptVirtuals[virt].append(cp) |
| |
| virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals, |
| self.dirVirtuals]) |
| return virtuals |
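| 
| # Ordering sketch (hypothetical): if virtual/editor is provided by an
| # installed app-editors/nano that the profile also lists, nano sorts
| # ahead of profile-only and installed-only providers.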
| |
| def __delitem__(self,mykey): |
| self.modifying() |
| for x in self.lookuplist: |
| if x != None: |
| if mykey in x: |
| del x[mykey] |
| |
| def __getitem__(self,mykey): |
| match = '' |
| for x in self.lookuplist: |
| if x is None: |
| writemsg("!!! lookuplist is null.\n") |
| elif x.has_key(mykey): |
| match = x[mykey] |
| break |
| return match |
| |
| def has_key(self,mykey): |
| for x in self.lookuplist: |
| if x.has_key(mykey): |
| return 1 |
| return 0 |
| |
| def __contains__(self, mykey): |
| """Called to implement membership test operators (in and not in).""" |
| return bool(self.has_key(mykey)) |
| |
| def setdefault(self, k, x=None): |
| if k in self: |
| return self[k] |
| else: |
| self[k] = x |
| return x |
| |
| def get(self, k, x=None): |
| if k in self: |
| return self[k] |
| else: |
| return x |
| |
| def keys(self): |
| return unique_array(flatten([x.keys() for x in self.lookuplist])) |
| |
| def __setitem__(self,mykey,myvalue): |
| "set a value; will be thrown away at reset() time" |
| if type(myvalue) != types.StringType: |
| raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue))) |
| self.modifying() |
| self.modifiedkeys += [mykey] |
| self.configdict["env"][mykey]=myvalue |
| |
| def environ(self): |
| "return our locally-maintained environment" |
| mydict={} |
| for x in self.keys(): |
| myvalue = self[x] |
| if not isinstance(myvalue, basestring): |
| writemsg("!!! Non-string value in config: %s=%s\n" % \ |
| (x, myvalue), noiselevel=-1) |
| continue |
| mydict[x] = myvalue |
| if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"): |
| writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n") |
| mydict["HOME"]=mydict["BUILD_PREFIX"][:] |
| |
| return mydict |
| |
| def thirdpartymirrors(self): |
| if getattr(self, "_thirdpartymirrors", None) is None: |
| profileroots = [os.path.join(self["PORTDIR"], "profiles")] |
| for x in self["PORTDIR_OVERLAY"].split(): |
| profileroots.insert(0, os.path.join(x, "profiles")) |
| thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots] |
| self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True) |
| return self._thirdpartymirrors |
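| 
| # File format sketch: each line of a profile's thirdpartymirrors file is
| #   <mirrorname> <uri> <uri> ...
| # so grabdict() yields e.g. {"gnu": ["http://ftp.gnu.org/gnu", ...]}
| # (hypothetical URI), with overlay entries considered before PORTDIR.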
| |
| def archlist(self): |
| return flatten([[myarch, "~" + myarch] \ |
| for myarch in self["PORTAGE_ARCHLIST"].split()]) |
| |
| def selinux_enabled(self): |
| if getattr(self, "_selinux_enabled", None) is None: |
| self._selinux_enabled = 0 |
| if "selinux" in self["USE"].split(): |
| if "selinux" in globals(): |
| if selinux.is_selinux_enabled() == 1: |
| self._selinux_enabled = 1 |
| else: |
| self._selinux_enabled = 0 |
| else: |
| writemsg("!!! SELinux module not found. Please verify that it was installed.\n", |
| noiselevel=-1) |
| self._selinux_enabled = 0 |
| if self._selinux_enabled == 0: |
| try: |
| del sys.modules["selinux"] |
| except KeyError: |
| pass |
| return self._selinux_enabled |
| |
| # XXX This would be to replace getstatusoutput completely. |
| # XXX Issue: cannot block execution. Deadlock condition. |
| def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords): |
| """ |
| Spawn a subprocess with extra portage-specific options. |
| Options include:
| 
| Sandbox: Sandbox means the spawned process will be limited in its ability to
| read and write files (normally this means it is restricted to ${IMAGE}/)
| SELinux sandbox: Enables sandboxing under SELinux
| Reduced Privileges: Drops privileges such that the process runs as portage:portage
| instead of as root.
| |
| Notes: os.system cannot be used because it messes with signal handling. Instead we |
| use the portage_exec spawn* family of functions. |
| |
| This function waits for the process to terminate. |
| |
| @param mystring: Command to run |
| @type mystring: String |
| @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config |
| @type mysettings: Dictionary or config instance |
| @param debug: Ignored |
| @type debug: Boolean |
| @param free: Skip the sandbox for this process; when unset, it is
| derived from FEATURES (sandbox/usersandbox) and droppriv
| @type free: Boolean
| @param droppriv: Drop to portage:portage when running this command |
| @type droppriv: Boolean |
| @param sesandbox: Enable SELinux Sandboxing (toggles a context switch) |
| @type sesandbox: Boolean |
| @param keywords: Extra options encoded as a dict, to be passed to spawn |
| @type keywords: Dictionary |
| @rtype: Integer |
| @returns:
| 1. The return code of the spawned process, or a list of pids if the
| "returnpid" keyword is enabled.
| """ |
| |
| if type(mysettings) == types.DictType: |
| env=mysettings |
| keywords["opt_name"]="[ %s ]" % "portage" |
| else: |
| check_config_instance(mysettings) |
| env=mysettings.environ() |
| keywords["opt_name"]="[%s]" % mysettings["PF"] |
| |
| # The default policy for the sesandbox domain only allows entry (via exec) |
| # from shells and from binaries that belong to portage (the number of entry |
| # points is minimized). The "tee" binary is not among the allowed entry |
| # points, so it is spawned outside of the sesandbox domain and reads from a |
| # pipe between two domains. |
| logfile = keywords.get("logfile") |
| mypids = [] |
| pw = None |
| if logfile: |
| del keywords["logfile"] |
| fd_pipes = keywords.get("fd_pipes") |
| if fd_pipes is None: |
| fd_pipes = {0:0, 1:1, 2:2} |
| elif 1 not in fd_pipes or 2 not in fd_pipes: |
| raise ValueError(fd_pipes) |
| pr, pw = os.pipe() |
| mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile), |
| returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]})) |
| os.close(pr) |
| fd_pipes[1] = pw |
| fd_pipes[2] = pw |
| keywords["fd_pipes"] = fd_pipes |
| |
| features = mysettings.features |
| # XXX: Negative RESTRICT word |
| droppriv=(droppriv and ("userpriv" in features) and not \ |
| (("nouserpriv" in mysettings["RESTRICT"].split()) or \ |
| ("userpriv" in mysettings["RESTRICT"].split()))) |
| |
| if droppriv and not uid and portage_gid and portage_uid: |
| keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002}) |
| |
| if not free: |
| free=((droppriv and "usersandbox" not in features) or \ |
| (not droppriv and "sandbox" not in features and "usersandbox" not in features)) |
| |
| if free: |
| keywords["opt_name"] += " bash" |
| spawn_func = portage_exec.spawn_bash |
| else: |
| keywords["opt_name"] += " sandbox" |
| spawn_func = portage_exec.spawn_sandbox |
| |
| if sesandbox: |
| con = selinux.getcontext() |
| con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"]) |
| selinux.setexec(con) |
| |
| returnpid = keywords.get("returnpid") |
| keywords["returnpid"] = True |
| try: |
| mypids.extend(spawn_func(mystring, env=env, **keywords)) |
| finally: |
| if pw: |
| os.close(pw) |
| if sesandbox: |
| selinux.setexec(None) |
| |
| if returnpid: |
| return mypids |
| |
| while mypids: |
| pid = mypids.pop(0) |
| retval = os.waitpid(pid, 0)[1] |
| portage_exec.spawned_pids.remove(pid) |
| if retval != os.EX_OK: |
| for pid in mypids: |
| if os.waitpid(pid, os.WNOHANG) == (0,0): |
| os.kill(pid, signal.SIGTERM) |
| os.waitpid(pid, 0) |
| portage_exec.spawned_pids.remove(pid) |
| if retval & 0xff: |
| return (retval & 0xff) << 8 |
| return retval >> 8 |
| return os.EX_OK |
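| 
| # Usage sketch (assumes a valid config instance `mysettings`; the
| # command string is hypothetical):
| #
| #   retval = spawn(BASH_BINARY + " -c 'echo hello'", mysettings, free=1)
| #   if retval != os.EX_OK:
| #       writemsg("!!! command exited with status %d\n" % retval,
| #           noiselevel=-1)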
| |
| def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1): |
| "fetch files. Will use digest file if available." |
| |
| features = mysettings.features |
| # 'nomirror' is bad/negative logic: you restrict mirroring, not no-mirroring.
| if ("mirror" in mysettings["RESTRICT"].split()) or \ |
| ("nomirror" in mysettings["RESTRICT"].split()): |
| if ("mirror" in features) and ("lmirror" not in features): |
| # lmirror should allow you to bypass mirror restrictions. |
| # XXX: This is not a good thing, and is temporary at best. |
| print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch." |
| return 1 |
| |
| thirdpartymirrors = mysettings.thirdpartymirrors() |
| |
| check_config_instance(mysettings) |
| |
| custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"], |
| CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1) |
| |
| mymirrors=[] |
| |
| if listonly or ("distlocks" not in features): |
| use_locks = 0 |
| |
| fetch_to_ro = 0 |
| if "skiprocheck" in features: |
| fetch_to_ro = 1 |
| |
| if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro: |
| if use_locks: |
| writemsg(red("!!! For fetching to a read-only filesystem, " + \ |
| "locking should be turned off.\n"), noiselevel=-1) |
| writemsg("!!! This can be done by adding -distlocks to " + \ |
| "FEATURES in /etc/make.conf\n", noiselevel=-1) |
| # use_locks = 0 |
| |
| # local mirrors are always added |
| if custommirrors.has_key("local"): |
| mymirrors += custommirrors["local"] |
| |
| if ("nomirror" in mysettings["RESTRICT"].split()) or \ |
| ("mirror" in mysettings["RESTRICT"].split()): |
| # We don't add any mirrors. |
| pass |
| else: |
| if try_mirrors: |
| mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x] |
| |
| mydigests = Manifest( |
| mysettings["O"], mysettings["DISTDIR"]).getTypeDigests("DIST") |
| |
| fsmirrors = [] |
| for x in range(len(mymirrors)-1,-1,-1): |
| if mymirrors[x] and mymirrors[x][0]=='/': |
| fsmirrors += [mymirrors[x]] |
| del mymirrors[x] |
| |
| restrict_fetch = "fetch" in mysettings["RESTRICT"].split() |
| custom_local_mirrors = custommirrors.get("local", []) |
| if restrict_fetch: |
| # With fetch restriction, a normal uri may only be fetched from |
| # custom local mirrors (if available). A mirror:// uri may also |
| # be fetched from specific mirrors (effectively overriding fetch |
| # restriction, but only for specific mirrors). |
| locations = custom_local_mirrors |
| else: |
| locations = mymirrors |
| |
| filedict={} |
| primaryuri_indexes={} |
| for myuri in myuris: |
| myfile=os.path.basename(myuri) |
| if not filedict.has_key(myfile): |
| filedict[myfile]=[] |
| for y in range(0,len(locations)): |
| filedict[myfile].append(locations[y]+"/distfiles/"+myfile) |
| if myuri[:9]=="mirror://": |
| eidx = myuri.find("/", 9) |
| if eidx != -1: |
| mirrorname = myuri[9:eidx] |
| |
| # Try user-defined mirrors first |
| if custommirrors.has_key(mirrorname): |
| for cmirr in custommirrors[mirrorname]: |
| filedict[myfile].append(cmirr+"/"+myuri[eidx+1:]) |
| # remove the mirrors we tried from the list of official mirrors |
| if cmirr.strip() in thirdpartymirrors[mirrorname]: |
| thirdpartymirrors[mirrorname].remove(cmirr) |
| # now try the official mirrors |
| if thirdpartymirrors.has_key(mirrorname): |
| shuffle(thirdpartymirrors[mirrorname]) |
| |
| for locmirr in thirdpartymirrors[mirrorname]: |
| filedict[myfile].append(locmirr+"/"+myuri[eidx+1:]) |
| |
| if not filedict[myfile]: |
| writemsg("No known mirror by the name: %s\n" % (mirrorname)) |
| else: |
| writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1) |
| writemsg(" %s\n" % (myuri), noiselevel=-1) |
| else: |
| if restrict_fetch: |
| # Only fetching from specific mirrors is allowed.
| continue |
| if "primaryuri" in mysettings["RESTRICT"].split(): |
| # Use the source site first. |
| if primaryuri_indexes.has_key(myfile): |
| primaryuri_indexes[myfile] += 1 |
| else: |
| primaryuri_indexes[myfile] = 0 |
| filedict[myfile].insert(primaryuri_indexes[myfile], myuri) |
| else: |
| filedict[myfile].append(myuri) |
| |
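| # Expansion sketch (hypothetical uri): "mirror://gnu/foo.tar.gz" is
| # expanded first against matching entries from CUSTOM_MIRRORS_FILE and
| # then against shuffled thirdpartymirrors["gnu"], each candidate being
| # the mirror root plus the path after the mirror name ("/foo.tar.gz").
| 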
| can_fetch=True |
| |
| if listonly: |
| can_fetch = False |
| |
| for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"): |
| if not mysettings.get(var_name, None): |
| can_fetch = False |
| |
| if can_fetch: |
| dirmode = 02070 |
| filemode = 060 |
| modemask = 02 |
| distdir_dirs = [""] |
| if "distlocks" in features: |
| distdir_dirs.append(".locks") |
| try: |
| |
| for x in distdir_dirs: |
| mydir = os.path.join(mysettings["DISTDIR"], x) |
| if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask): |
| writemsg("Adjusting permissions recursively: '%s'\n" % mydir, |
| noiselevel=-1) |
| def onerror(e): |
| raise # bail out on the first error that occurs during recursion |
| if not apply_recursive_permissions(mydir, |
| gid=portage_gid, dirmode=dirmode, dirmask=modemask, |
| filemode=filemode, filemask=modemask, onerror=onerror): |
| raise portage_exception.OperationNotPermitted( |
| "Failed to apply recursive permissions for the portage group.") |
| except portage_exception.PortageException, e: |
| if not os.path.isdir(mysettings["DISTDIR"]): |
| writemsg("!!! %s\n" % str(e), noiselevel=-1) |
| writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1) |
| writemsg("!!! Fetching will fail!\n", noiselevel=-1) |
| |
| if can_fetch and \ |
| not fetch_to_ro and \ |
| not os.access(mysettings["DISTDIR"], os.W_OK): |
| writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"], |
| noiselevel=-1) |
| can_fetch = False |
| |
| if can_fetch and use_locks and locks_in_subdir: |
| distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir) |
| if not os.access(distlocks_subdir, os.W_OK): |
| writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir, |
| noiselevel=-1) |
| return 0 |
| del distlocks_subdir |
| for myfile in filedict.keys(): |
| """ |
| fetched status |
| 0 nonexistent |
| 1 partially downloaded |
| 2 completely downloaded |
| """ |
| myfile_path = os.path.join(mysettings["DISTDIR"], myfile) |
| fetched=0 |
| file_lock = None |
| if listonly: |
| writemsg_stdout("\n", noiselevel=-1) |
| else: |
| if use_locks and can_fetch: |
| if locks_in_subdir: |
| file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1) |
| else: |
| file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1) |
| try: |
| if not listonly: |
| if fsmirrors and not os.path.exists(myfile_path): |
| for mydir in fsmirrors: |
| mirror_file = os.path.join(mydir, myfile) |
| try: |
| shutil.copyfile(mirror_file, myfile_path) |
| writemsg(_("Local mirror has file:" + \ |
| " %(file)s\n" % {"file":myfile})) |
| break |
| except (IOError, OSError), e: |
| if e.errno != errno.ENOENT: |
| raise |
| del e |
| |
| try: |
| mystat = os.stat(myfile_path) |
| except OSError, e: |
| if e.errno != errno.ENOENT: |
| raise |
| del e |
| else: |
| try: |
| apply_secpass_permissions( |
| myfile_path, gid=portage_gid, mode=0664, mask=02, |
| stat_cached=mystat) |
| except portage_exception.PortageException, e: |
| if not os.access(myfile_path, os.R_OK): |
| writemsg("!!! Failed to adjust permissions:" + \ |
| " %s\n" % str(e), noiselevel=-1) |
| if myfile not in mydigests: |
| # We don't have a digest, but the file exists. We must |
| # assume that it is fully downloaded. |
| continue |
| else: |
| if mystat.st_size < mydigests[myfile]["size"] and \ |
| not restrict_fetch: |
| fetched = 1 # Try to resume this download. |
| else: |
| verified_ok, reason = portage_checksum.verify_all( |
| myfile_path, mydigests[myfile]) |
| if not verified_ok: |
| writemsg("!!! Previously fetched" + \ |
| " file: '%s'\n" % myfile, noiselevel=-1) |
| writemsg("!!! Reason: %s\n" % reason[0], |
| noiselevel=-1) |
| writemsg(("!!! Got: %s\n" + \ |
| "!!! Expected: %s\n") % \ |
| (reason[1], reason[2]), noiselevel=-1) |
| if can_fetch and not restrict_fetch: |
| writemsg("Refetching...\n\n", |
| noiselevel=-1) |
| os.unlink(myfile_path) |
| else: |
| eout = output.EOutput() |
| eout.quiet = \ |
| mysettings.get("PORTAGE_QUIET", None) == "1" |
| for digest_name in mydigests[myfile]: |
| eout.ebegin( |
| "%s %s ;-)" % (myfile, digest_name)) |
| eout.eend(0) |
| continue # fetch any remaining files |
| |
| for loc in filedict[myfile]: |
| if listonly: |
| writemsg_stdout(loc+" ", noiselevel=-1) |
| continue |
| # allow different fetchcommands per protocol |
| protocol = loc[0:loc.find("://")] |
| if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()): |
| fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()] |
| else: |
| fetchcommand=mysettings["FETCHCOMMAND"] |
| if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()): |
| resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()] |
| else: |
| resumecommand=mysettings["RESUMECOMMAND"] |
| |
| fetchcommand=fetchcommand.replace("${DISTDIR}",mysettings["DISTDIR"]) |
| resumecommand=resumecommand.replace("${DISTDIR}",mysettings["DISTDIR"]) |
| |
| if not can_fetch: |
| if fetched != 2: |
| if fetched == 0: |
| writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile, |
| noiselevel=-1) |
| else: |
| writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile, |
| noiselevel=-1) |
| for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"): |
| if not mysettings.get(var_name, None): |
| writemsg(("!!! %s is unset. It should " + \ |
| "have been defined in /etc/make.globals.\n") \ |
| % var_name, noiselevel=-1) |
| return 0 |
| else: |
| continue |
| |
| if fetched != 2: |
| #we either need to resume or start the download
| if fetched==1: |
| #resume mode: |
| writemsg(">>> Resuming download...\n") |
| locfetch=resumecommand |
| else: |
| #normal mode: |
|