| # portage.py -- core Portage functionality |
| # Copyright 1998-2004 Gentoo Foundation |
| # Distributed under the terms of the GNU General Public License v2 |
| # $Id$ |
| |
| |
# "$Rev$" is expanded by svn keyword substitution to "$Rev: NNNN $";
# [6:-2] strips the "$Rev: " prefix and the trailing " $", and "-svn"
# marks this as a live checkout rather than a release.
VERSION="$Rev$"[6:-2] + "-svn"
| |
| # =========================================================================== |
| # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT |
| # =========================================================================== |
| |
| try: |
| import sys |
| import copy |
| import errno |
| import os |
| import re |
| import shutil |
| import time |
| try: |
| import cPickle |
| except ImportError: |
| import pickle as cPickle |
| |
| import stat |
| import commands |
| from time import sleep |
| from random import shuffle |
| import UserDict |
| from itertools import chain, izip |
| import platform |
| import warnings |
| except ImportError, e: |
| sys.stderr.write("\n\n") |
| sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n") |
| sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n") |
| sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n") |
| |
| sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n") |
| sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n") |
| sys.stderr.write(" "+str(e)+"\n\n"); |
| raise |
| |
# On non-FreeBSD systems bsd_chflags stays None so callers can test for
# file-flags support with a simple truth check.
bsd_chflags = None
if platform.system() in ["FreeBSD"]:
	# Dummy function object used purely as a namespace to hang the
	# chflags/lchflags callables from (mimics the old bsd_chflags
	# extension module API).
	def bsd_chflags():
		pass
	def _chflags(path, flags, opts=""):
		# Shell out to chflags(1); a clean exit status means success.
		cmd = "chflags %s %o '%s'" % (opts, flags, path)
		status, output = commands.getstatusoutput(cmd)
		if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
			return
		# Try to generate an ENOENT error if appropriate.
		# (stat/lstat will raise OSError(ENOENT) for a missing path;
		# "-h" mode operates on the link itself, hence lstat.)
		if "h" in opts:
			os.lstat(path)
		else:
			os.stat(path)
		# Make sure the binary exists.
		if not portage.process.find_binary("chflags"):
			raise portage.exception.CommandNotFound("chflags")
		# Now we're not sure exactly why it failed or what
		# the real errno was, so just report EPERM.
		e = OSError(errno.EPERM, output)
		e.errno = errno.EPERM
		e.filename = path
		e.message = output
		raise e
	def _lchflags(path, flags):
		# Like _chflags() but acts on a symlink itself ("-h").
		return _chflags(path, flags, opts="-h")
	bsd_chflags.chflags = _chflags
	bsd_chflags.lchflags = _lchflags
| |
| try: |
| from portage.cache.cache_errors import CacheError |
| import portage.cvstree |
| import portage.xpak |
| import portage.getbinpkg |
| import portage.dep |
| from portage.dep import dep_getcpv, dep_getkey, get_operator, \ |
| isjustname, isspecific, isvalidatom, \ |
| match_from_list, match_to_list, best_match_to_list |
| |
| # XXX: This needs to get cleaned up. |
| import portage.output |
| from portage.output import bold, colorize, green, red, yellow |
| |
| import portage.const |
| from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \ |
| USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \ |
| PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \ |
| EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \ |
| MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \ |
| DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \ |
| INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\ |
| INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE |
| |
| from portage.data import ostype, lchown, userland, secpass, uid, wheelgid, \ |
| portage_uid, portage_gid, userpriv_groups |
| from portage.manifest import Manifest |
| |
| import portage.util |
| from portage.util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \ |
| dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \ |
| map_dictlist_vals, new_protect_filename, normalize_path, \ |
| pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \ |
| unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic |
| import portage.exception |
| import portage.gpg |
| import portage.locks |
| import portage.process |
| from portage.process import atexit_register, run_exitfuncs |
| from portage.locks import unlockfile,unlockdir,lockfile,lockdir |
| import portage.checksum |
| from portage.checksum import perform_md5,perform_checksum,prelink_capable |
| import portage.eclass_cache |
| from portage.localization import _ |
| from portage.update import dep_transform, fixdbentries, grab_updates, \ |
| parse_updates, update_config_files, update_dbentries |
| |
| # Need these functions directly in portage namespace to not break every external tool in existence |
| from portage.versions import best, catpkgsplit, catsplit, pkgcmp, \ |
| pkgsplit, vercmp, ververify |
| |
| # endversion and endversion_keys are for backward compatibility only. |
| from portage.versions import endversion_keys |
| from portage.versions import suffix_value as endversion |
| |
| except ImportError, e: |
| sys.stderr.write("\n\n") |
| sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n") |
| sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n") |
| sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n") |
| sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n") |
| sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n") |
| sys.stderr.write("!!! a recovery of portage.\n") |
| sys.stderr.write(" "+str(e)+"\n\n") |
| raise |
| |
| |
| try: |
| import portage.selinux as selinux |
| except OSError, e: |
| writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1) |
| del e |
| except ImportError: |
| pass |
| |
| # =========================================================================== |
| # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END |
| # =========================================================================== |
| |
| |
def load_mod(name):
	"""Import and return the object named by the dotted path *name*.

	The parent module (everything before the last dot) is imported
	first; the remaining path components are then resolved one at a
	time via attribute access, so the result may be a submodule, a
	class, or a function.
	"""
	parts = name.split(".")
	# __import__ returns the top-level package, so walk down from there.
	obj = __import__(".".join(parts[:-1]))
	for attr in parts[1:]:
		obj = getattr(obj, attr)
	return obj
| |
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
	"""Look *key* up in the sub-dicts of *top_dict*, honoring *key_order*.

	The sub-dicts named in key_order are consulted in order and the
	first one containing key wins.  With FullCopy a deep copy of the
	value is returned, otherwise the stored object itself.  When no
	sub-dict holds the key, an empty string is returned if EmptyOnError
	is true; otherwise KeyError is raised.  (AllowEmpty is accepted for
	backward compatibility and is unused.)
	"""
	for layer in key_order:
		if layer not in top_dict:
			continue
		if key not in top_dict[layer]:
			continue
		value = top_dict[layer][key]
		if FullCopy:
			return copy.deepcopy(value)
		return value
	if not EmptyOnError:
		raise KeyError("Key not found in list; '%s'" % key)
	return ""
| |
def getcwd():
	"""Return the current working directory, recovering if it vanished.

	If the directory the process is in has been deleted, os.getcwd()
	raises OSError; in that case chdir to "/" and report that instead.
	"""
	try:
		cwd = os.getcwd()
	except OSError:
		# Our cwd no longer exists -- fall back to the root directory.
		os.chdir("/")
		cwd = "/"
	return cwd
# Normalize the process cwd right away, in case it is already gone.
getcwd()
| |
def abssymlink(symlink):
	"""Resolve the target of *symlink* to an absolute, normalized path.

	Reads the link with os.readlink; a relative target is interpreted
	relative to the directory containing the symlink itself.

	@param symlink: path to an existing symbolic link
	@type symlink: String
	@rtype: String
	@returns: absolute, normalized path of the link target
	"""
	mylink = os.readlink(symlink)
	if not os.path.isabs(mylink):
		# Relative target: anchor it at the symlink's own directory.
		# (os.path.isabs also tolerates an empty target, which the old
		# mylink[0] check would crash on.)
		mylink = os.path.join(os.path.dirname(symlink), mylink)
	return os.path.normpath(mylink)
| |
# Cache of directory listings used by cacheddir()/listdir():
# { normalized_path : (mtime_at_scan, entry_names, entry_types) }
dircache = {}
# Hit/miss/stale counters for the cache above; reported by cacheddir()
# through writemsg() at noise level 10.
cacheHit=0
cacheMiss=0
cacheStale=0
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""mtime-cached os.listdir() variant backing listdir().

	Returns a (names, types) pair for the directory, where types[i] is
	0 = regular file, 1 = directory, 2 = symlink, 3 = other/stat failed.
	Results are memoized in the module-level dircache keyed by the
	normalized path, and refreshed when the directory mtime changes.

	@param my_original_path: directory to list
	@param ignorecvs: if true, treat ".#*" names as CVS/SCCS lock debris
	@param ignorelist: names to exclude from the result
	@param EmptyOnError: return ([], []) instead of (None, None) on error
	@param followSymlinks: use stat() for entries instead of lstat()
	"""
	global cacheHit,cacheMiss,cacheStale
	mypath = normalize_path(my_original_path)
	if mypath in dircache:
		cacheHit += 1
		cached_mtime, list, ftype = dircache[mypath]
	else:
		cacheMiss += 1
		cached_mtime, list, ftype = -1, [], []
	try:
		pathstat = os.stat(mypath)
		if stat.S_ISDIR(pathstat[stat.ST_MODE]):
			mtime = pathstat.st_mtime
		else:
			raise portage.exception.DirectoryNotFound(mypath)
	except EnvironmentError, e:
		if e.errno == portage.exception.PermissionDenied.errno:
			raise portage.exception.PermissionDenied(mypath)
		del e
		if EmptyOnError:
			return [], []
		return None, None
	except portage.exception.PortageException:
		if EmptyOnError:
			return [], []
		return None, None
	# Python returns mtime in seconds, so if it was changed in the last few
	# seconds, it could be invalid -- rescan in that case even on a cache hit.
	if mtime != cached_mtime or time.time() - mtime < 4:
		if mypath in dircache:
			cacheStale += 1
		try:
			list = os.listdir(mypath)
		except EnvironmentError, e:
			if e.errno != errno.EACCES:
				raise
			del e
			raise portage.exception.PermissionDenied(mypath)
		# Classify each entry; 3 is the catch-all for special files and
		# entries that disappear between listdir() and stat().
		ftype = []
		for x in list:
			try:
				if followSymlinks:
					pathstat = os.stat(mypath+"/"+x)
				else:
					pathstat = os.lstat(mypath+"/"+x)

				if stat.S_ISREG(pathstat[stat.ST_MODE]):
					ftype.append(0)
				elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
					ftype.append(1)
				elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
					ftype.append(2)
				else:
					ftype.append(3)
			except (IOError, OSError):
				ftype.append(3)
		dircache[mypath] = mtime, list, ftype

	# Filter the cached listing.  NOTE(review): when ignorecvs is set,
	# any name longer than 2 chars not starting with ".#" is accepted
	# without consulting ignorelist at all -- confirm that bypass is
	# intentional before changing it.
	ret_list = []
	ret_ftype = []
	for x in range(0, len(list)):
		if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
		elif (list[x] not in ignorelist):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])

	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
	return ret_list, ret_ftype
| |
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=None, followSymlinks=True,
	EmptyOnError=False, dirsonly=False):
	"""
	Portage-specific implementation of os.listdir

	@param mypath: Path whose contents you wish to list
	@type mypath: String
	@param recursive: Recursively scan directories contained within mypath
	@type recursive: Boolean
	@param filesonly: Only return files, not more directories
	@type filesonly: Boolean
	@param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
	@type ignorecvs: Boolean
	@param ignorelist: List of filenames/directories to exclude
	@type ignorelist: List
	@param followSymlinks: Follow Symlink'd files and directories
	@type followSymlinks: Boolean
	@param EmptyOnError: Return [] if an error occurs.
	@type EmptyOnError: Boolean
	@param dirsonly: Only return directories.
	@type dirsonly: Boolean
	@rtype: List
	@returns: A list of files and directories (or just files or just directories) or an empty list.
	"""

	# Use a None sentinel instead of a shared mutable [] default (classic
	# Python pitfall); semantics are unchanged since the list was only read.
	if ignorelist is None:
		ignorelist = []

	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)

	if list is None:
		list=[]
	if ftype is None:
		ftype=[]

	if not (filesonly or dirsonly or recursive):
		return list

	if recursive:
		# list/ftype grow while this loop runs, so newly appended
		# subdirectory entries are themselves scanned in later passes.
		x=0
		while x<len(ftype):
			if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
				l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
					followSymlinks)

				l=l[:]
				# Prefix each child with its parent's relative path.
				for y in range(0,len(l)):
					l[y]=list[x]+"/"+l[y]
				list=list+l
				ftype=ftype+f
			x+=1
	if filesonly:
		rlist=[]
		for x in range(0,len(ftype)):
			if ftype[x]==0:
				rlist=rlist+[list[x]]
	elif dirsonly:
		rlist = []
		for x in range(0, len(ftype)):
			if ftype[x] == 1:
				rlist = rlist + [list[x]]
	else:
		rlist=list

	return rlist
| |
def flatten(mytokens):
	"""Flatten an arbitrarily nested list structure.

	Turns e.g. [1, [2, [3]]] into [1, 2, 3] and returns the new list;
	the input is left untouched.
	"""
	flat = []
	for token in mytokens:
		if not isinstance(token, list):
			flat.append(token)
		else:
			# Recurse into nested lists and splice their contents in.
			flat.extend(flatten(token))
	return flat
| |
| #beautiful directed graph object |
| |
class digraph(object):
	"""Directed graph with prioritized parent/child edges.

	Each node maps to a (children, parents) pair of dicts in self.nodes;
	edge priorities are plain comparable values and only the highest
	priority seen for a given edge is retained.  Node insertion order is
	preserved in self.order.
	"""
	def __init__(self):
		"""Create an empty digraph"""

		# { node : ( { child : priority } , { parent : priority } ) }
		self.nodes = {}
		self.order = []

	def add(self, node, parent, priority=0):
		"""Adds the specified node with the specified parent.

		If the dep is a soft-dep and the node already has a hard
		relationship to the parent, the relationship is left as hard."""

		if node not in self.nodes:
			self.nodes[node] = ({}, {})
			self.order.append(node)

		# A falsy parent just registers the node with no edge.
		if not parent:
			return

		if parent not in self.nodes:
			self.nodes[parent] = ({}, {})
			self.order.append(parent)

		# Record the edge in both directions, keeping the highest
		# priority when the edge already exists.
		if parent in self.nodes[node][1]:
			if priority > self.nodes[node][1][parent]:
				self.nodes[node][1][parent] = priority
		else:
			self.nodes[node][1][parent] = priority

		if node in self.nodes[parent][0]:
			if priority > self.nodes[parent][0][node]:
				self.nodes[parent][0][node] = priority
		else:
			self.nodes[parent][0][node] = priority

	def remove(self, node):
		"""Removes the specified node from the digraph, also removing
		and ties to other nodes in the digraph. Raises KeyError if the
		node doesn't exist."""

		if node not in self.nodes:
			raise KeyError(node)

		# Drop the back-references held by all neighbors first.
		for parent in self.nodes[node][1]:
			del self.nodes[parent][0][node]
		for child in self.nodes[node][0]:
			del self.nodes[child][1][node]

		del self.nodes[node]
		self.order.remove(node)

	def difference_update(self, t):
		"""
		Remove all given nodes from node_set. This is more efficient
		than multiple calls to the remove() method.
		"""
		# Ensure O(1) membership tests for the removal set.
		if isinstance(t, (list, tuple)) or \
			not hasattr(t, "__contains__"):
			t = frozenset(t)
		order = []
		for node in self.order:
			if node not in t:
				order.append(node)
				continue
			for parent in self.nodes[node][1]:
				del self.nodes[parent][0][node]
			for child in self.nodes[node][0]:
				del self.nodes[child][1][node]
			del self.nodes[node]
		self.order = order

	def remove_edge(self, child, parent):
		"""
		Remove edge in the direction from child to parent. Note that it is
		possible for a remaining edge to exist in the opposite direction.
		Any endpoint vertices that become isolated will remain in the graph.
		"""

		# Nothing should be modified when a KeyError is raised.
		for k in parent, child:
			if k not in self.nodes:
				raise KeyError(k)

		# Make sure the edge exists.
		if child not in self.nodes[parent][0]:
			raise KeyError(child)
		if parent not in self.nodes[child][1]:
			raise KeyError(parent)

		# Remove the edge.
		del self.nodes[child][1][parent]
		del self.nodes[parent][0][child]

	def __iter__(self):
		# Iterate nodes in insertion order.
		return iter(self.order)

	def contains(self, node):
		"""Checks if the digraph contains mynode"""
		return node in self.nodes

	def get(self, key, default=None):
		"""Return the (children, parents) entry for key, or default."""
		return self.nodes.get(key, default)

	def all_nodes(self):
		"""Return a list of all nodes in the graph"""
		return self.order[:]

	def child_nodes(self, node, ignore_priority=None):
		"""Return all children of the specified node"""
		if ignore_priority is None:
			return self.nodes[node][0].keys()
		children = []
		# Skip edges at or below the ignored priority cutoff.
		for child, priority in self.nodes[node][0].iteritems():
			if priority > ignore_priority:
				children.append(child)
		return children

	def parent_nodes(self, node):
		"""Return all parents of the specified node"""
		return self.nodes[node][1].keys()

	def leaf_nodes(self, ignore_priority=None):
		"""Return all nodes that have no children

		If ignore_soft_deps is True, soft deps are not counted as
		children in calculations."""

		leaf_nodes = []
		for node in self.order:
			is_leaf_node = True
			for child in self.nodes[node][0]:
				# NOTE: with the default ignore_priority=None this
				# relies on the python-2 rule that numbers compare
				# greater than None, so every edge counts.
				if self.nodes[node][0][child] > ignore_priority:
					is_leaf_node = False
					break
			if is_leaf_node:
				leaf_nodes.append(node)
		return leaf_nodes

	def root_nodes(self, ignore_priority=None):
		"""Return all nodes that have no parents.

		If ignore_soft_deps is True, soft deps are not counted as
		parents in calculations."""

		root_nodes = []
		for node in self.order:
			is_root_node = True
			for parent in self.nodes[node][1]:
				# See leaf_nodes() regarding comparison against None.
				if self.nodes[node][1][parent] > ignore_priority:
					is_root_node = False
					break
			if is_root_node:
				root_nodes.append(node)
		return root_nodes

	def is_empty(self):
		"""Checks if the digraph is empty"""
		return len(self.nodes) == 0

	def clone(self):
		"""Return a copy with per-node edge dicts duplicated; the node
		objects themselves are shared with the original."""
		clone = digraph()
		clone.nodes = {}
		for k, v in self.nodes.iteritems():
			clone.nodes[k] = (v[0].copy(), v[1].copy())
		clone.order = self.order[:]
		return clone

	# Backward compatibility
	addnode = add
	allnodes = all_nodes
	allzeros = leaf_nodes
	hasnode = contains
	__contains__ = contains
	empty = is_empty
	copy = clone

	def delnode(self, node):
		"""remove() variant that ignores missing nodes (backward compat)."""
		try:
			self.remove(node)
		except KeyError:
			pass

	def firstzero(self):
		"""Return the first leaf node, or None (backward compat)."""
		leaf_nodes = self.leaf_nodes()
		if leaf_nodes:
			return leaf_nodes[0]
		return None

	def hasallzeros(self, ignore_priority=None):
		"""True when every node is a leaf at the given priority cutoff."""
		return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
			len(self.order)

	def debug_print(self):
		"""Dump the graph to stdout (python-2 print statements)."""
		for node in self.nodes:
			print node,
			if self.nodes[node][0]:
				print "depends on"
			else:
				print "(no children)"
			for child in self.nodes[node][0]:
				print " ",child,
				print "(%s)" % self.nodes[node][0][child]
| |
| #parse /etc/env.d and generate /etc/profile.env |
| |
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
	env=None, writemsg_level=portage.util.writemsg_level):
	"""Regenerate env.d-derived configuration under *target_root*.

	Parses ROOT/etc/env.d/*, then rewrites /etc/ld.so.conf,
	/etc/prelink.conf (when prelink is available), /etc/profile.env and
	/etc/csh.env, and runs ldconfig when library paths have changed.

	@param makelinks: allow ldconfig to (re)create library symlinks
	@param target_root: filesystem root (defaults to settings["ROOT"])
	@param prev_mtimes: mtime cache dict (defaults to mtimedb["ldpath"]);
		updated in place with current library-dir mtimes
	@param contents: optional package contents map used to decide whether
		library symlinks may have changed
	@param env: calling environment.  NOTE(review): this parameter is
		rebound to the merged env.d dict below ("env = {}"), so any value
		passed in is effectively unused -- confirm intent.
	@param writemsg_level: logging callback
	"""
	if target_root is None:
		global settings
		target_root = settings["ROOT"]
	if prev_mtimes is None:
		global mtimedb
		prev_mtimes = mtimedb["ldpath"]
	if env is None:
		env = os.environ
	envd_dir = os.path.join(target_root, "etc", "env.d")
	portage.util.ensure_dirs(envd_dir, mode=0755)
	fns = listdir(envd_dir, EmptyOnError=1)
	fns.sort()
	# Only "NNname"-style files count; skip hidden files and backups.
	templist = []
	for x in fns:
		if len(x) < 3:
			continue
		if not x[0].isdigit() or not x[1].isdigit():
			continue
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
			continue
		templist.append(x)
	fns = templist
	del templist

	# Variables whose values accumulate across env.d files, rather than
	# the last file winning.  env.d files may extend these sets via
	# SPACE_SEPARATED / COLON_SEPARATED entries.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		"PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		"PYTHONPATH", "ROOTPATH"])

	config_list = []

	for x in fns:
		file_path = os.path.join(envd_dir, x)
		try:
			myconfig = getconfig(file_path, expand=False)
		except portage.exception.ParseError, e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			del e
			continue
		if myconfig is None:
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
			continue
		config_list.append(myconfig)
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]

	# Merge the cumulative variables, preserving first-seen order of the
	# individual items and dropping duplicates.
	env = {}
	specials = {}
	for var in space_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split():
					if item and not item in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = " ".join(mylist)
		specials[var] = mylist

	for var in colon_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split(":"):
					if item and not item in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = ":".join(mylist)
		specials[var] = mylist

	for myconfig in config_list:
		"""Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
		env.update(myconfig)

	# Read the existing ld.so.conf so we only rewrite it on change.
	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
	try:
		myld = open(ldsoconf_path)
		myldlines=myld.readlines()
		myld.close()
		oldld=[]
		for x in myldlines:
			#each line has at least one char (a newline)
			if x[0]=="#":
				continue
			oldld.append(x[:-1])
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
			raise
		oldld = None

	ld_cache_update=False

	newld = specials["LDPATH"]
	if (oldld!=newld):
		#ld.so.conf needs updating and ldconfig needs to be run
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:
			myfd.write(x+"\n")
		myfd.close()
		ld_cache_update=True

	# Update prelink.conf if we are prelink-enabled
	if prelink_capable:
		newprelink = atomic_ofstream(
			os.path.join(target_root, "etc", "prelink.conf"))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")

		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
			newprelink.write("-l "+x+"\n");
		for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
			if not x:
				continue
			if x[-1]!='/':
				x=x+"/"
			# Suppress (-h) entries covered by PRELINK_PATH_MASK prefixes.
			plmasked=0
			for y in specials["PRELINK_PATH_MASK"]:
				if not y:
					continue
				if y[-1]!='/':
					y=y+"/"
				if y==x[0:len(y)]:
					plmasked=1
					break
			if not plmasked:
				newprelink.write("-h "+x+"\n")
		for x in specials["PRELINK_PATH_MASK"]:
			newprelink.write("-b "+x+"\n")
		newprelink.close()

	# Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
	# granularity is possible. In order to avoid the potential ambiguity of
	# mtimes that differ by less than 1 second, sleep here if any of the
	# directories have been modified during the current second.
	sleep_for_mtime_granularity = False
	current_time = long(time.time())
	mtime_changed = False
	lib_dirs = set()
	for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
		try:
			newldpathtime = long(os.stat(x).st_mtime)
			lib_dirs.add(normalize_path(x))
		except OSError, oe:
			if oe.errno == errno.ENOENT:
				try:
					del prev_mtimes[x]
				except KeyError:
					pass
				# ignore this path because it doesn't exist
				continue
			raise
		if newldpathtime == current_time:
			sleep_for_mtime_granularity = True
		if x in prev_mtimes:
			if prev_mtimes[x] == newldpathtime:
				pass
			else:
				prev_mtimes[x] = newldpathtime
				mtime_changed = True
		else:
			prev_mtimes[x] = newldpathtime
			mtime_changed = True

	if mtime_changed:
		ld_cache_update = True

	# If nothing in the library dirs changed, skip symlink regeneration.
	if makelinks and \
		not ld_cache_update and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.iteritems():
			if mydata[0] not in ("obj","sym"):
				continue
			head, tail = os.path.split(mypath)
			if head in lib_dirs:
				libdir_contents_changed = True
				break
		if not libdir_contents_changed:
			makelinks = False

	# Cross-compile: prefer a CHOST-prefixed ldconfig when CHOST != CBUILD.
	ldconfig = "/sbin/ldconfig"
	if "CHOST" in env and "CBUILD" in env and \
		env["CHOST"] != env["CBUILD"]:
		from portage.process import find_binary
		ldconfig = find_binary("%s-ldconfig" % env["CHOST"])

	# Only run ldconfig as needed
	if (ld_cache_update or makelinks) and ldconfig:
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype=="Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg_level(">>> Regenerating %setc/ld.so.cache...\n" % \
				(target_root,))
			if makelinks:
				os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
			else:
				os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg_level(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % \
				target_root)
			os.system(("cd / ; %s -elf -i " + \
				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
				(ldconfig, target_root, target_root))

	del specials["LDPATH"]

	penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
	outfile.write(penvnotice)

	env_keys = [ x for x in env if x != "LDPATH" ]
	env_keys.sort()
	for k in env_keys:
		v = env[k]
		if v.startswith('$') and not v.startswith('${'):
			outfile.write("export %s=$'%s'\n" % (k, v[1:]))
		else:
			outfile.write("export %s='%s'\n" % (k, v))
	outfile.close()

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
	outfile.write(cenvnotice)
	for x in env_keys:
		outfile.write("setenv %s '%s'\n" % (x, env[x]))
	outfile.close()

	# Wait out the current second so recorded mtimes are unambiguous.
	if sleep_for_mtime_granularity:
		while current_time == long(time.time()):
			sleep(1)
| |
| def ExtractKernelVersion(base_dir): |
| """ |
| Try to figure out what kernel version we are running |
| @param base_dir: Path to sources (usually /usr/src/linux) |
| @type base_dir: string |
| @rtype: tuple( version[string], error[string]) |
| @returns: |
| 1. tuple( version[string], error[string]) |
| Either version or error is populated (but never both) |
| |
| """ |
| lines = [] |
| pathname = os.path.join(base_dir, 'Makefile') |
| try: |
| f = open(pathname, 'r') |
| except OSError, details: |
| return (None, str(details)) |
| except IOError, details: |
| return (None, str(details)) |
| |
| try: |
| for i in range(4): |
| lines.append(f.readline()) |
| except OSError, details: |
| return (None, str(details)) |
| except IOError, details: |
| return (None, str(details)) |
| |
| lines = [l.strip() for l in lines] |
| |
| version = '' |
| |
| #XXX: The following code relies on the ordering of vars within the Makefile |
| for line in lines: |
| # split on the '=' then remove annoying whitespace |
| items = line.split("=") |
| items = [i.strip() for i in items] |
| if items[0] == 'VERSION' or \ |
| items[0] == 'PATCHLEVEL': |
| version += items[1] |
| version += "." |
| elif items[0] == 'SUBLEVEL': |
| version += items[1] |
| elif items[0] == 'EXTRAVERSION' and \ |
| items[-1] != items[0]: |
| version += items[1] |
| |
| # Grab a list of files named localversion* and sort them |
| localversions = os.listdir(base_dir) |
| for x in range(len(localversions)-1,-1,-1): |
| if localversions[x][:12] != "localversion": |
| del localversions[x] |
| localversions.sort() |
| |
| # Append the contents of each to the version string, stripping ALL whitespace |
| for lv in localversions: |
| version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() ) |
| |
| # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace |
| kernelconfig = getconfig(base_dir+"/.config") |
| if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig: |
| version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split()) |
| |
| return (version,None) |
| |
def autouse(myvartree, use_cache=1, mysettings=None):
	"""
	autouse returns a list of USE variables auto-enabled to packages being installed

	@param myvartree: Instance of the vartree class (from /var/db/pkg...)
	@type myvartree: vartree
	@param use_cache: read values from cache
	@type use_cache: Boolean
	@param mysettings: Instance of config
	@type mysettings: config
	@rtype: string
	@returns: A string containing a list of USE variables that are enabled via use.defaults
	"""
	if mysettings is None:
		global settings
		mysettings = settings
	if mysettings.profile_path is None:
		return ""
	myusevars = ""
	usedefaults = mysettings.use_defs
	for myuse in usedefaults:
		# A USE flag is auto-enabled only when every dep it is
		# conditioned on is satisfied by an installed package.
		dep_met = True
		for mydep in usedefaults[myuse]:
			# Pass use_cache through; previously the parameter was
			# accepted but ignored (the call hardcoded use_cache=True).
			if not myvartree.dep_match(mydep, use_cache=use_cache):
				dep_met = False
				break
		if dep_met:
			myusevars += " " + myuse
	return myusevars
| |
def check_config_instance(test):
	"""Raise TypeError unless *test* is an instance of the config class."""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
| |
| class config(object): |
| """ |
| This class encompasses the main portage configuration. Data is pulled from |
| ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all |
| parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified |
| overrides. |
| |
| Generally if you need data like USE flags, FEATURES, environment variables, |
| virtuals ...etc you look in here. |
| """ |
| |
| _environ_whitelist = [] |
| |
| # Whitelisted variables are always allowed to enter the ebuild |
| # environment. Generally, this only includes special portage |
| # variables. Ebuilds can unset variables that are not whitelisted |
| # and rely on them remaining unset for future phases, without them |
| # leaking back in from various locations (bug #189417). It's very |
| # important to set our special BASH_ENV variable in the ebuild |
| # environment in order to prevent sandbox from sourcing /etc/profile |
| # in it's bashrc (causing major leakage). |
| _environ_whitelist += [ |
| "BASH_ENV", "BUILD_PREFIX", "D", |
| "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD", |
| "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST", |
| "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM", |
| "FEATURES", "FILESDIR", "HOME", "PATH", |
| "PKGDIR", |
| "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR", |
| "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST", |
| "PORTAGE_BASHRC", |
| "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS", |
| "PORTAGE_BINPKG_TMPFILE", |
| "PORTAGE_BIN_PATH", |
| "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP", |
| "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR", |
| "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID", |
| "PORTAGE_IUSE", |
| "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID", |
| "PORTAGE_PYM_PATH", "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT", |
| "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_WORKDIR_MODE", |
| "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS", |
| "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR", |
| "USE_EXPAND", "USE_ORDER", "WORKDIR", |
| "XARGS", |
| ] |
| |
| # user config variables |
| _environ_whitelist += [ |
| "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK" |
| ] |
| |
| _environ_whitelist += [ |
| "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR" |
| ] |
| |
| # misc variables inherited from the calling environment |
| _environ_whitelist += [ |
| "COLORTERM", "DISPLAY", "EDITOR", "LESS", |
| "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER", |
| "TERM", "TERMCAP", "USER", |
| ] |
| |
| # other variables inherited from the calling environment |
| _environ_whitelist += [ |
| "CVS_RSH", "ECHANGELOG_USER", |
| "GPG_AGENT_INFO", |
| "SSH_AGENT_PID", "SSH_AUTH_SOCK", |
| "STY", "WINDOW", "XAUTHORITY", |
| ] |
| |
| _environ_whitelist = frozenset(_environ_whitelist) |
| |
| _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*') |
| |
| # Filter selected variables in the config.environ() method so that |
| # they don't needlessly propagate down into the ebuild environment. |
| _environ_filter = [] |
| |
| # misc variables inherited from the calling environment |
| _environ_filter += [ |
| "INFOPATH", "MANPATH", |
| ] |
| |
| # portage config variables and variables set directly by portage |
| _environ_filter += [ |
| "ACCEPT_KEYWORDS", "AUTOCLEAN", |
| "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT", |
| "CONFIG_PROTECT_MASK", "EMERGE_DEFAULT_OPTS", |
| "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP", |
| "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP", |
| "GENTOO_MIRRORS", "NOCONFMEM", "O", |
| "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER", |
| "PORTAGE_ECLASS_WARNING_ENABLE", "PORTAGE_ELOG_CLASSES", |
| "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT", |
| "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM", |
| "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE", |
| "PORTAGE_GPG_DIR", |
| "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND", |
| "PORTAGE_PACKAGE_EMPTY_ABORT", |
| "PORTAGE_RO_DISTDIRS", |
| "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS", |
| "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR", |
| "QUICKPKG_DEFAULT_OPTS", |
| "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP", |
| "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER", |
| ] |
| |
| _environ_filter = frozenset(_environ_filter) |
| |
| def __init__(self, clone=None, mycpv=None, config_profile_path=None, |
| config_incrementals=None, config_root=None, target_root=None, |
| local_config=True): |
| """ |
| @param clone: If provided, init will use deepcopy to copy by value the instance. |
| @type clone: Instance of config class. |
| @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None |
| and then calling instance.setcpv(mycpv). |
| @type mycpv: String |
| @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const) |
| @type config_profile_path: String |
| @param config_incrementals: List of incremental variables (usually portage.const.INCREMENTALS) |
| @type config_incrementals: List |
| @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT) |
| @type config_root: String |
| @param target_root: __init__ override of $ROOT env variable. |
| @type target_root: String |
| @param local_config: Enables loading of local config (/etc/portage); used most by repoman to |
| ignore local config (keywording and unmasking) |
| @type local_config: Boolean |
| """ |
| |
| # When initializing the global portage.settings instance, avoid |
| # raising exceptions whenever possible since exceptions thrown |
| # from 'import portage' or 'import portage.exceptions' statements |
| # can practically render the api unusable for api consumers. |
| tolerant = "_initializing_globals" in globals() |
| |
| self.already_in_regenerate = 0 |
| |
| self.locked = 0 |
| self.mycpv = None |
| self.puse = [] |
| self.modifiedkeys = [] |
| self.uvlist = [] |
| self._accept_chost_re = None |
| |
| self.virtuals = {} |
| self.virts_p = {} |
| self.dirVirtuals = None |
| self.v_count = 0 |
| |
| # Virtuals obtained from the vartree |
| self.treeVirtuals = {} |
| # Virtuals by user specification. Includes negatives. |
| self.userVirtuals = {} |
| # Virtual negatives from user specifications. |
| self.negVirtuals = {} |
| # Virtuals added by the depgraph via self.setinst(). |
| self._depgraphVirtuals = {} |
| |
| self.user_profile_dir = None |
| self.local_config = local_config |
| |
| if clone: |
| self.incrementals = copy.deepcopy(clone.incrementals) |
| self.profile_path = copy.deepcopy(clone.profile_path) |
| self.user_profile_dir = copy.deepcopy(clone.user_profile_dir) |
| self.local_config = copy.deepcopy(clone.local_config) |
| |
| self.module_priority = copy.deepcopy(clone.module_priority) |
| self.modules = copy.deepcopy(clone.modules) |
| |
| self.depcachedir = copy.deepcopy(clone.depcachedir) |
| |
| self.packages = copy.deepcopy(clone.packages) |
| self.virtuals = copy.deepcopy(clone.virtuals) |
| |
| self.dirVirtuals = copy.deepcopy(clone.dirVirtuals) |
| self.treeVirtuals = copy.deepcopy(clone.treeVirtuals) |
| self.userVirtuals = copy.deepcopy(clone.userVirtuals) |
| self.negVirtuals = copy.deepcopy(clone.negVirtuals) |
| self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals) |
| |
| self.use_defs = copy.deepcopy(clone.use_defs) |
| self.usemask = copy.deepcopy(clone.usemask) |
| self.usemask_list = copy.deepcopy(clone.usemask_list) |
| self.pusemask_list = copy.deepcopy(clone.pusemask_list) |
| self.useforce = copy.deepcopy(clone.useforce) |
| self.useforce_list = copy.deepcopy(clone.useforce_list) |
| self.puseforce_list = copy.deepcopy(clone.puseforce_list) |
| self.puse = copy.deepcopy(clone.puse) |
| self.make_defaults_use = copy.deepcopy(clone.make_defaults_use) |
| self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse) |
| self.mycpv = copy.deepcopy(clone.mycpv) |
| |
| self.configlist = copy.deepcopy(clone.configlist) |
| self.lookuplist = self.configlist[:] |
| self.lookuplist.reverse() |
| self.configdict = { |
| "env.d": self.configlist[0], |
| "pkginternal": self.configlist[1], |
| "globals": self.configlist[2], |
| "defaults": self.configlist[3], |
| "conf": self.configlist[4], |
| "pkg": self.configlist[5], |
| "auto": self.configlist[6], |
| "backupenv": self.configlist[7], |
| "env": self.configlist[8] } |
| self.profiles = copy.deepcopy(clone.profiles) |
| self.backupenv = self.configdict["backupenv"] |
| self.pusedict = copy.deepcopy(clone.pusedict) |
| self.categories = copy.deepcopy(clone.categories) |
| self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict) |
| self.pmaskdict = copy.deepcopy(clone.pmaskdict) |
| self.punmaskdict = copy.deepcopy(clone.punmaskdict) |
| self.prevmaskdict = copy.deepcopy(clone.prevmaskdict) |
| self.pprovideddict = copy.deepcopy(clone.pprovideddict) |
| self.features = copy.deepcopy(clone.features) |
| |
| self._accept_license = copy.deepcopy(clone._accept_license) |
| self._plicensedict = copy.deepcopy(clone._plicensedict) |
| else: |
| |
| def check_var_directory(varname, var): |
| if not os.path.isdir(var): |
| writemsg(("!!! Error: %s='%s' is not a directory. " + \ |
| "Please correct this.\n") % (varname, var), |
| noiselevel=-1) |
| raise portage.exception.DirectoryNotFound(var) |
| |
| if config_root is None: |
| config_root = "/" |
| |
| config_root = normalize_path(os.path.abspath( |
| config_root)).rstrip(os.path.sep) + os.path.sep |
| |
| check_var_directory("PORTAGE_CONFIGROOT", config_root) |
| |
| self.depcachedir = DEPCACHE_PATH |
| |
| if not config_profile_path: |
| config_profile_path = \ |
| os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep)) |
| if os.path.isdir(config_profile_path): |
| self.profile_path = config_profile_path |
| else: |
| self.profile_path = None |
| else: |
| self.profile_path = config_profile_path[:] |
| |
| if not config_incrementals: |
| writemsg("incrementals not specified to class config\n") |
| self.incrementals = copy.deepcopy(portage.const.INCREMENTALS) |
| else: |
| self.incrementals = copy.deepcopy(config_incrementals) |
| |
| self.module_priority = ["user","default"] |
| self.modules = {} |
| self.modules["user"] = getconfig( |
| os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep))) |
| if self.modules["user"] is None: |
| self.modules["user"] = {} |
| self.modules["default"] = { |
| "portdbapi.metadbmodule": "portage.cache.metadata.database", |
| "portdbapi.auxdbmodule": "portage.cache.flat_hash.database", |
| } |
| |
| self.usemask=[] |
| self.configlist=[] |
| |
| # back up our incremental variables: |
| self.configdict={} |
| # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ] |
| self.configlist.append({}) |
| self.configdict["env.d"] = self.configlist[-1] |
| |
| self.configlist.append({}) |
| self.configdict["pkginternal"] = self.configlist[-1] |
| |
| # The symlink might not exist or might not be a symlink. |
| if self.profile_path is None: |
| self.profiles = [] |
| else: |
| self.profiles = [] |
| def addProfile(currentPath): |
| parentsFile = os.path.join(currentPath, "parent") |
| if os.path.exists(parentsFile): |
| parents = grabfile(parentsFile) |
| if not parents: |
| raise portage.exception.ParseError( |
| "Empty parent file: '%s'" % parentsFile) |
| for parentPath in parents: |
| parentPath = normalize_path(os.path.join( |
| currentPath, parentPath)) |
| if os.path.exists(parentPath): |
| addProfile(parentPath) |
| else: |
| raise portage.exception.ParseError( |
| "Parent '%s' not found: '%s'" % \ |
| (parentPath, parentsFile)) |
| self.profiles.append(currentPath) |
| try: |
| addProfile(os.path.realpath(self.profile_path)) |
| except portage.exception.ParseError, e: |
| writemsg("!!! Unable to parse profile: '%s'\n" % \ |
| self.profile_path, noiselevel=-1) |
| writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1) |
| del e |
| self.profiles = [] |
| if local_config: |
| custom_prof = os.path.join( |
| config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep)) |
| if os.path.exists(custom_prof): |
| self.user_profile_dir = custom_prof |
| self.profiles.append(custom_prof) |
| del custom_prof |
| |
| self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles] |
| self.packages = stack_lists(self.packages_list, incremental=1) |
| del self.packages_list |
| #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1) |
| |
| # revmaskdict |
| self.prevmaskdict={} |
| for x in self.packages: |
| mycatpkg=dep_getkey(x) |
| if mycatpkg not in self.prevmaskdict: |
| self.prevmaskdict[mycatpkg]=[x] |
| else: |
| self.prevmaskdict[mycatpkg].append(x) |
| |
| # get profile-masked use flags -- INCREMENTAL Child over parent |
| self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \ |
| for x in self.profiles] |
| self.usemask = set(stack_lists( |
| self.usemask_list, incremental=True)) |
| use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles] |
| self.use_defs = stack_dictlist(use_defs_lists, incremental=True) |
| del use_defs_lists |
| |
| self.pusemask_list = [] |
| rawpusemask = [grabdict_package( |
| os.path.join(x, "package.use.mask")) \ |
| for x in self.profiles] |
| for i in xrange(len(self.profiles)): |
| cpdict = {} |
| for k, v in rawpusemask[i].iteritems(): |
| cpdict.setdefault(dep_getkey(k), {})[k] = v |
| self.pusemask_list.append(cpdict) |
| del rawpusemask |
| |
| self.pkgprofileuse = [] |
| rawprofileuse = [grabdict_package( |
| os.path.join(x, "package.use"), juststrings=True) \ |
| for x in self.profiles] |
| for i in xrange(len(self.profiles)): |
| cpdict = {} |
| for k, v in rawprofileuse[i].iteritems(): |
| cpdict.setdefault(dep_getkey(k), {})[k] = v |
| self.pkgprofileuse.append(cpdict) |
| del rawprofileuse |
| |
| self.useforce_list = [grabfile(os.path.join(x, "use.force")) \ |
| for x in self.profiles] |
| self.useforce = set(stack_lists( |
| self.useforce_list, incremental=True)) |
| |
| self.puseforce_list = [] |
| rawpuseforce = [grabdict_package( |
| os.path.join(x, "package.use.force")) \ |
| for x in self.profiles] |
| for i in xrange(len(self.profiles)): |
| cpdict = {} |
| for k, v in rawpuseforce[i].iteritems(): |
| cpdict.setdefault(dep_getkey(k), {})[k] = v |
| self.puseforce_list.append(cpdict) |
| del rawpuseforce |
| |
| make_conf = getconfig( |
| os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)), |
| tolerant=tolerant, allow_sourcing=True) |
| if make_conf is None: |
| make_conf = {} |
| |
| # Allow ROOT setting to come from make.conf if it's not overridden |
| # by the constructor argument (from the calling environment). |
| if target_root is None and "ROOT" in make_conf: |
| target_root = make_conf["ROOT"] |
| if not target_root.strip(): |
| target_root = None |
| if target_root is None: |
| target_root = "/" |
| |
| target_root = normalize_path(os.path.abspath( |
| target_root)).rstrip(os.path.sep) + os.path.sep |
| |
| portage.util.ensure_dirs(target_root) |
| check_var_directory("ROOT", target_root) |
| |
| # The expand_map is used for variable substitution |
| # in getconfig() calls, and the getconfig() calls |
| # update expand_map with the value of each variable |
| # assignment that occurs. Variable substitution occurs |
| # in the following order, which corresponds to the |
| # order of appearance in self.lookuplist: |
| # |
| # * env.d |
| # * make.globals |
| # * make.defaults |
| # * make.conf |
| # |
| # Notably absent is "env", since we want to avoid any |
| # interaction with the calling environment that might |
| # lead to unexpected results. |
| expand_map = {} |
| |
| env_d = getconfig(os.path.join(target_root, "etc", "profile.env"), |
| expand=expand_map) |
| # env_d will be None if profile.env doesn't exist. |
| if env_d: |
| self.configdict["env.d"].update(env_d) |
| expand_map.update(env_d) |
| |
| # backupenv is used for calculating incremental variables. |
| self.backupenv = os.environ.copy() |
| |
| if env_d: |
| # Remove duplicate values so they don't override updated |
| # profile.env values later (profile.env is reloaded in each |
| # call to self.regenerate). |
| for k, v in env_d.iteritems(): |
| try: |
| if self.backupenv[k] == v: |
| del self.backupenv[k] |
| except KeyError: |
| pass |
| del k, v |
| |
| self.configdict["env"] = self.backupenv.copy() |
| |
| # make.globals should not be relative to config_root |
| # because it only contains constants. |
| self.mygcfg = getconfig(os.path.join("/etc", "make.globals"), |
| expand=expand_map) |
| |
| if self.mygcfg is None: |
| self.mygcfg = {} |
| |
| self.configlist.append(self.mygcfg) |
| self.configdict["globals"]=self.configlist[-1] |
| |
| self.make_defaults_use = [] |
| self.mygcfg = {} |
| if self.profiles: |
| mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"), |
| expand=expand_map) for x in self.profiles] |
| |
| for cfg in mygcfg_dlists: |
| if cfg: |
| self.make_defaults_use.append(cfg.get("USE", "")) |
| else: |
| self.make_defaults_use.append("") |
| self.mygcfg = stack_dicts(mygcfg_dlists, |
| incrementals=portage.const.INCREMENTALS, ignore_none=1) |
| if self.mygcfg is None: |
| self.mygcfg = {} |
| self.configlist.append(self.mygcfg) |
| self.configdict["defaults"]=self.configlist[-1] |
| |
| self.mygcfg = getconfig( |
| os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)), |
| tolerant=tolerant, allow_sourcing=True, expand=expand_map) |
| if self.mygcfg is None: |
| self.mygcfg = {} |
| |
| # Don't allow the user to override certain variables in make.conf |
| profile_only_variables = self.configdict["defaults"].get( |
| "PROFILE_ONLY_VARIABLES", "").split() |
| for k in profile_only_variables: |
| self.mygcfg.pop(k, None) |
| |
| self.configlist.append(self.mygcfg) |
| self.configdict["conf"]=self.configlist[-1] |
| |
| self.configlist.append({}) |
| self.configdict["pkg"]=self.configlist[-1] |
| |
| #auto-use: |
| self.configlist.append({}) |
| self.configdict["auto"]=self.configlist[-1] |
| |
| self.configlist.append(self.backupenv) # XXX Why though? |
| self.configdict["backupenv"]=self.configlist[-1] |
| |
| # Don't allow the user to override certain variables in the env |
| for k in profile_only_variables: |
| self.backupenv.pop(k, None) |
| |
| self.configlist.append(self.configdict["env"]) |
| |
| # make lookuplist for loading package.* |
| self.lookuplist=self.configlist[:] |
| self.lookuplist.reverse() |
| |
| # Blacklist vars that could interfere with portage internals. |
| for blacklisted in "CATEGORY", "EBUILD_PHASE", \ |
| "EMERGE_FROM", "PKGUSE", "PORTAGE_CONFIGROOT", \ |
| "PORTAGE_IUSE", "PORTAGE_USE", "ROOT": |
| for cfg in self.lookuplist: |
| cfg.pop(blacklisted, None) |
| del blacklisted, cfg |
| |
| self["PORTAGE_CONFIGROOT"] = config_root |
| self.backup_changes("PORTAGE_CONFIGROOT") |
| self["ROOT"] = target_root |
| self.backup_changes("ROOT") |
| |
| self.pusedict = {} |
| self.pkeywordsdict = {} |
| self._plicensedict = {} |
| self.punmaskdict = {} |
| abs_user_config = os.path.join(config_root, |
| USER_CONFIG_PATH.lstrip(os.path.sep)) |
| |
| # locations for "categories" and "arch.list" files |
| locations = [os.path.join(self["PORTDIR"], "profiles")] |
| pmask_locations = [os.path.join(self["PORTDIR"], "profiles")] |
| pmask_locations.extend(self.profiles) |
| |
| """ repoman controls PORTDIR_OVERLAY via the environment, so no |
| special cases are needed here.""" |
| overlay_profiles = [] |
| for ov in self["PORTDIR_OVERLAY"].split(): |
| ov = normalize_path(ov) |
| profiles_dir = os.path.join(ov, "profiles") |
| if os.path.isdir(profiles_dir): |
| overlay_profiles.append(profiles_dir) |
| locations += overlay_profiles |
| |
| pmask_locations.extend(overlay_profiles) |
| |
| if local_config: |
| locations.append(abs_user_config) |
| pmask_locations.append(abs_user_config) |
| pusedict = grabdict_package( |
| os.path.join(abs_user_config, "package.use"), recursive=1) |
| for key in pusedict.keys(): |
| cp = dep_getkey(key) |
| if cp not in self.pusedict: |
| self.pusedict[cp] = {} |
| self.pusedict[cp][key] = pusedict[key] |
| |
| #package.keywords |
| pkgdict = grabdict_package( |
| os.path.join(abs_user_config, "package.keywords"), |
| recursive=1) |
| for key in pkgdict.keys(): |
| # default to ~arch if no specific keyword is given |
| if not pkgdict[key]: |
| mykeywordlist = [] |
| if self.configdict["defaults"] and \ |
| "ACCEPT_KEYWORDS" in self.configdict["defaults"]: |
| groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split() |
| else: |
| groups = [] |
| for keyword in groups: |
| if not keyword[0] in "~-": |
| mykeywordlist.append("~"+keyword) |
| pkgdict[key] = mykeywordlist |
| cp = dep_getkey(key) |
| if cp not in self.pkeywordsdict: |
| self.pkeywordsdict[cp] = {} |
| self.pkeywordsdict[cp][key] = pkgdict[key] |
| |
| #package.license |
| licdict = grabdict_package(os.path.join( |
| abs_user_config, "package.license"), recursive=1) |
| for k, v in licdict.iteritems(): |
| cp = dep_getkey(k) |
| cp_dict = self._plicensedict.get(cp) |
| if not cp_dict: |
| cp_dict = {} |
| self._plicensedict[cp] = cp_dict |
| cp_dict[k] = self.expandLicenseTokens(v) |
| |
| #package.unmask |
| pkgunmasklines = grabfile_package( |
| os.path.join(abs_user_config, "package.unmask"), |
| recursive=1) |
| for x in pkgunmasklines: |
| mycatpkg=dep_getkey(x) |
| if mycatpkg in self.punmaskdict: |
| self.punmaskdict[mycatpkg].append(x) |
| else: |
| self.punmaskdict[mycatpkg]=[x] |
| |
| #getting categories from an external file now |
| categories = [grabfile(os.path.join(x, "categories")) for x in locations] |
| self.categories = stack_lists(categories, incremental=1) |
| del categories |
| |
| archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations] |
| archlist = stack_lists(archlist, incremental=1) |
| self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist) |
| |
| #package.mask |
| pkgmasklines = [] |
| for x in pmask_locations: |
| pkgmasklines.append(grabfile_package( |
| os.path.join(x, "package.mask"), recursive=1)) |
| pkgmasklines = stack_lists(pkgmasklines, incremental=1) |
| |
| self.pmaskdict = {} |
| for x in pkgmasklines: |
| mycatpkg=dep_getkey(x) |
| if mycatpkg in self.pmaskdict: |
| self.pmaskdict[mycatpkg].append(x) |
| else: |
| self.pmaskdict[mycatpkg]=[x] |
| |
| pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles] |
| pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1) |
| has_invalid_data = False |
| for x in range(len(pkgprovidedlines)-1, -1, -1): |
| myline = pkgprovidedlines[x] |
| if not isvalidatom("=" + myline): |
| writemsg("Invalid package name in package.provided:" + \ |
| " %s\n" % myline, noiselevel=-1) |
| has_invalid_data = True |
| del pkgprovidedlines[x] |
| continue |
| cpvr = catpkgsplit(pkgprovidedlines[x]) |
| if not cpvr or cpvr[0] == "null": |
| writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n", |
| noiselevel=-1) |
| has_invalid_data = True |
| del pkgprovidedlines[x] |
| continue |
| if cpvr[0] == "virtual": |
| writemsg("Virtual package in package.provided: %s\n" % \ |
| myline, noiselevel=-1) |
| has_invalid_data = True |
| del pkgprovidedlines[x] |
| continue |
| if has_invalid_data: |
| writemsg("See portage(5) for correct package.provided usage.\n", |
| noiselevel=-1) |
| self.pprovideddict = {} |
| for x in pkgprovidedlines: |
| cpv=catpkgsplit(x) |
| if not x: |
| continue |
| mycatpkg=dep_getkey(x) |
| if mycatpkg in self.pprovideddict: |
| self.pprovideddict[mycatpkg].append(x) |
| else: |
| self.pprovideddict[mycatpkg]=[x] |
| |
| # parse licensegroups |
| self._license_groups = {} |
| for x in locations: |
| self._license_groups.update( |
| grabdict(os.path.join(x, "license_groups"))) |
| |
| # reasonable defaults; this is important as without USE_ORDER, |
| # USE will always be "" (nothing set)! |
| if "USE_ORDER" not in self: |
| self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d" |
| |
| self["PORTAGE_GID"] = str(portage_gid) |
| self.backup_changes("PORTAGE_GID") |
| |
| if self.get("PORTAGE_DEPCACHEDIR", None): |
| self.depcachedir = self["PORTAGE_DEPCACHEDIR"] |
| self["PORTAGE_DEPCACHEDIR"] = self.depcachedir |
| self.backup_changes("PORTAGE_DEPCACHEDIR") |
| |
| overlays = self.get("PORTDIR_OVERLAY","").split() |
| if overlays: |
| new_ov = [] |
| for ov in overlays: |
| ov = normalize_path(ov) |
| if os.path.isdir(ov): |
| new_ov.append(ov) |
| else: |
| writemsg("!!! Invalid PORTDIR_OVERLAY" + \ |
| " (not a dir): '%s'\n" % ov, noiselevel=-1) |
| self["PORTDIR_OVERLAY"] = " ".join(new_ov) |
| self.backup_changes("PORTDIR_OVERLAY") |
| |
| if "CBUILD" not in self and "CHOST" in self: |
| self["CBUILD"] = self["CHOST"] |
| self.backup_changes("CBUILD") |
| |
| self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH |
| self.backup_changes("PORTAGE_BIN_PATH") |
| self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH |
| self.backup_changes("PORTAGE_PYM_PATH") |
| |
| # Expand license groups |
| # This has to do be done for each config layer before regenerate() |
| # in order for incremental negation to work properly. |
| if local_config: |
| for c in self.configdict.itervalues(): |
| v = c.get("ACCEPT_LICENSE") |
| if not v: |
| continue |
| v = " ".join(self.expandLicenseTokens(v.split())) |
| c["ACCEPT_LICENSE"] = v |
| del c, v |
| |
| for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"): |
| try: |
| self[var] = str(int(self.get(var, "0"))) |
| except ValueError: |
| writemsg(("!!! %s='%s' is not a valid integer. " + \ |
| "Falling back to '0'.\n") % (var, self[var]), |
| noiselevel=-1) |
| self[var] = "0" |
| self.backup_changes(var) |
| |
| # initialize self.features |
| self.regenerate() |
| |
| if local_config: |
| self._accept_license = \ |
| set(self.get("ACCEPT_LICENSE", "").split()) |
| # In order to enforce explicit acceptance for restrictive |
| # licenses that require it, "*" will not be allowed in the |
| # user config. Don't enforce this until license groups are |
| # fully implemented in the tree. |
| #self._accept_license.discard("*") |
| if not self._accept_license: |
| self._accept_license = set(["*"]) |
| else: |
| # repoman will accept any license |
| self._accept_license = set(["*"]) |
| |
| if "gpg" in self.features: |
| if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \ |
| not os.path.isdir(self["PORTAGE_GPG_DIR"]): |
| writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \ |
| " Removing gpg from FEATURES.\n"), noiselevel=-1) |
| self.features.remove("gpg") |
| |
| if not portage.process.sandbox_capable and \ |
| ("sandbox" in self.features or "usersandbox" in self.features): |
| if self.profile_path is not None and \ |
| os.path.realpath(self.profile_path) == \ |
| os.path.realpath(PROFILE_PATH): |
| """ Don't show this warning when running repoman and the |
| sandbox feature came from a profile that doesn't belong to |
| the user.""" |
| writemsg(colorize("BAD", "!!! Problem with sandbox" + \ |
| " binary. Disabling...\n\n"), noiselevel=-1) |
| if "sandbox" in self.features: |
| self.features.remove("sandbox") |
| if "usersandbox" in self.features: |
| self.features.remove("usersandbox") |
| |
| self.features.sort() |
| if "gpg" in self.features: |
| writemsg(colorize("WARN", "!!! FEATURES=gpg is unmaintained, incomplete and broken. Disabling it."), noiselevel=-1) |
| self.features.remove("gpg") |
| self["FEATURES"] = " ".join(self.features) |
| self.backup_changes("FEATURES") |
| |
| self._init_dirs() |
| |
| if mycpv: |
| self.setcpv(mycpv) |
| |
| def _init_dirs(self): |
| """ |
| Create a few directories that are critical to portage operation |
| """ |
| if not os.access(self["ROOT"], os.W_OK): |
| return |
| |
| # gid, mode, mask, preserve_perms |
| dir_mode_map = { |
| "tmp" : ( -1, 01777, 0, True), |
| "var/tmp" : ( -1, 01777, 0, True), |
| PRIVATE_PATH : ( portage_gid, 02750, 02, False), |
| CACHE_PATH.lstrip(os.path.sep) : (portage_gid, 0755, 02, False) |
| } |
| |
| for mypath, (gid, mode, modemask, preserve_perms) \ |
| in dir_mode_map.iteritems(): |
| mydir = os.path.join(self["ROOT"], mypath) |
| if preserve_perms and os.path.isdir(mydir): |
| # Only adjust permissions on some directories if |
| # they don't exist yet. This gives freedom to the |
| # user to adjust permissions to suit their taste. |
| continue |
| try: |
| portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask) |
| except portage.exception.PortageException, e: |
| writemsg("!!! Directory initialization failed: '%s'\n" % mydir, |
| noiselevel=-1) |
| writemsg("!!! %s\n" % str(e), |
| noiselevel=-1) |
| |
def expandLicenseTokens(self, tokens):
    """Expand each token from ACCEPT_LICENSE or package.license: group
    tokens (indicated by @) are expanded to their members, plain tokens
    pass through unchanged. If a group is negated then all of its
    elements are negated."""
    expanded = []
    for tok in tokens:
        expanded += self._expandLicenseToken(tok, None)
    return expanded
| |
| def _expandLicenseToken(self, token, traversed_groups): |
| negate = False |
| rValue = [] |
| if token.startswith("-"): |
| negate = True |
| license_name = token[1:] |
| else: |
| license_name = token |
| if not license_name.startswith("@"): |
| rValue.append(token) |
| return rValue |
| group_name = license_name[1:] |
| if not traversed_groups: |
| traversed_groups = set() |
| license_group = self._license_groups.get(group_name) |
| if group_name in traversed_groups: |
| writemsg(("Circular license group reference" + \ |
| " detected in '%s'\n") % group_name, noiselevel=-1) |
| rValue.append("@"+group_name) |
| elif license_group: |
| traversed_groups.add(group_name) |
| for l in license_group: |
| if l.startswith("-"): |
| writemsg(("Skipping invalid element %s" + \ |
| " in license group '%s'\n") % (l, group_name), |
| noiselevel=-1) |
| else: |
| rValue.extend(self._expandLicenseToken(l, traversed_groups)) |
| else: |
| writemsg("Undefined license group '%s'\n" % group_name, |
| noiselevel=-1) |
| rValue.append("@"+group_name) |
| if negate: |
| rValue = ["-" + token for token in rValue] |
| return rValue |
| |
def validate(self):
    """Validate miscellaneous settings and display warnings if necessary.
    (This code was previously in the global scope of portage.py)"""

    # Warn about ACCEPT_KEYWORDS entries that are not known arches.
    accept_keywords = self["ACCEPT_KEYWORDS"].split()
    known_arches = self.archlist()
    if not known_arches:
        writemsg("--- 'profiles/arch.list' is empty or " + \
            "not available. Empty portage tree?\n", noiselevel=1)
    else:
        for keyword in accept_keywords:
            if keyword in ("*", "~*", "**"):
                continue
            if keyword in known_arches:
                continue
            if keyword.startswith("-") and keyword[1:] in known_arches:
                continue
            writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(keyword),
                noiselevel=-1)

    # Warn when the make.profile symlink is missing/not a symlink while
    # a portage tree is clearly present.
    abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
        PROFILE_PATH.lstrip(os.path.sep))
    profile_broken = not self.profile_path or \
        (not os.path.islink(abs_profile_path) and \
        not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
        os.path.exists(os.path.join(self["PORTDIR"], "profiles")))
    if profile_broken:
        writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
            noiselevel=-1)
        writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
        writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")

    # Deprecation notice for the old user virtuals location.
    abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
        USER_VIRTUALS_FILE.lstrip(os.path.sep))
    if os.path.exists(abs_user_virtuals):
        writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
        writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
        writemsg("!!! this new location.\n\n")
| |
def loadVirtuals(self,root):
    """Not currently used by portage."""
    # DEPRECATED: kept for backward compatibility only; emits a notice
    # and delegates to getvirtuals().
    writemsg("DEPRECATED: portage.config.loadVirtuals\n")
    self.getvirtuals(root)
| |
def load_best_module(self,property_string):
    """Import and return the highest-priority module configured for
    property_string (e.g. "portdbapi.auxdbmodule"), resolved through
    self.modules by self.module_priority order.  A "cache." prefixed
    name is retried with a "portage." prefix for backward
    compatibility.  Raises ImportError if no candidate imports.
    """
    best_mod = best_from_dict(property_string,self.modules,self.module_priority)
    mod = None
    try:
        mod = load_mod(best_mod)
    except ImportError:
        if best_mod.startswith("cache."):
            # Legacy module path from before the portage namespace;
            # retry under "portage.".
            best_mod = "portage." + best_mod
            try:
                mod = load_mod(best_mod)
            except ImportError:
                pass
        if mod is None:
            # Bare raise re-raises the ImportError currently being
            # handled (the retried one when it occurred, otherwise the
            # original), preserving its traceback.
            raise
    return mod
| |
def lock(self):
    """Lock this config; until unlock(), modifying() raises."""
    self.locked = 1
| |
def unlock(self):
    """Release a lock() so the config may be modified again."""
    self.locked = 0
| |
def modifying(self):
    """Raise an Exception if this instance is locked; mutating methods
    call this before changing any state."""
    if self.locked:
        raise Exception("Configuration is locked.")
| |
def backup_changes(self,key=None):
    """Snapshot the current value of key from the "env" config layer
    into self.backupenv (so it survives reset()).  Raises KeyError when
    key is unset or not present in the "env" layer."""
    self.modifying()
    if not key or key not in self.configdict["env"]:
        raise KeyError("No such key defined in environment: %s" % key)
    self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
| |
def reset(self,keeping_pkg=0,use_cache=1):
    """
    Restore environment from self.backupenv, call self.regenerate()
    @param keeping_pkg: Should we keep the set_cpv() data or delete it.
    @type keeping_pkg: Boolean
    @param use_cache: Should self.regenerate use the cache or not
    @type use_cache: Boolean
    @rtype: None
    """
    self.modifying()
    self.configdict["env"].clear()
    self.configdict["env"].update(self.backupenv)

    self.modifiedkeys = []
    if not keeping_pkg:
        # Discard per-package state from a previous setcpv() and
        # restore profile-level USE defaults and mask/force sets.
        self.mycpv = None
        self.puse = ""
        self.configdict["pkg"].clear()
        self.configdict["pkginternal"].clear()
        self.configdict["defaults"]["USE"] = \
            " ".join(self.make_defaults_use)
        self.usemask = set(stack_lists(
            self.usemask_list, incremental=True))
        self.useforce = set(stack_lists(
            self.useforce_list, incremental=True))
    self.regenerate(use_cache=use_cache)
| |
def load_infodir(self,infodir):
    """Load package metadata files from a build-info directory into the
    "pkg" and "env" config layers, preserving the current CATEGORY.
    Returns 1 on success, 0 when infodir is missing or its CATEGORY
    file is absent (in which case the previous "pkg" layer is
    restored)."""
    self.modifying()
    backup_pkg_metadata = dict(self.configdict["pkg"].iteritems())
    if "pkg" in self.configdict and \
        "CATEGORY" in self.configdict["pkg"]:
        self.configdict["pkg"].clear()
        self.configdict["pkg"]["CATEGORY"] = \
            backup_pkg_metadata["CATEGORY"]
    else:
        raise portage.exception.PortageException(
            "No pkg setup for settings instance?")

    retval = 0
    found_category_file = False
    if os.path.isdir(infodir):
        if os.path.exists(infodir+"/environment"):
            self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"

        # Only all-uppercase filenames are treated as metadata.
        myre = re.compile('^[A-Z]+$')
        null_byte = "\0"
        for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
            if filename == "FEATURES":
                # FEATURES from the build host shouldn't be interpreted as
                # FEATURES on the client system.
                continue
            if filename == "CATEGORY":
                found_category_file = True
                continue
            if myre.match(filename):
                try:
                    file_path = os.path.join(infodir, filename)
                    mydata = open(file_path).read().strip()
                    # Size cap guards against pathological files; USE is
                    # exempt because its value can legitimately be large.
                    if len(mydata) < 2048 or filename == "USE":
                        if null_byte in mydata:
                            writemsg("!!! Null byte found in metadata " + \
                                "file: '%s'\n" % file_path, noiselevel=-1)
                            continue
                        if filename == "USE":
                            binpkg_flags = "-* " + mydata
                            self.configdict["pkg"][filename] = binpkg_flags
                            self.configdict["env"][filename] = mydata
                        else:
                            self.configdict["pkg"][filename] = mydata
                            self.configdict["env"][filename] = mydata
                except (OSError, IOError):
                    writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
                        noiselevel=-1)
                    pass
        retval = 1

    # Missing or corrupt CATEGORY will cause problems for
    # doebuild(), which uses it to infer the cpv. We already
    # know the category, so there's no need to trust this
    # file. Show a warning if the file is missing though,
    # because it's required (especially for binary packages).
    if not found_category_file:
        writemsg("!!! CATEGORY file is missing: %s\n" % \
            os.path.join(infodir, "CATEGORY"), noiselevel=-1)
        self.configdict["pkg"].update(backup_pkg_metadata)
        retval = 0

    # Always set known good values for these variables, since
    # corruption of these can cause problems:
    cat, pf = catsplit(self.mycpv)
    self.configdict["pkg"]["CATEGORY"] = cat
    self.configdict["pkg"]["PF"] = pf

    return retval
| |
	def setcpv(self, mycpv, use_cache=1, mydb=None):
		"""
		Load a particular CPV into the config, this lets us see the
		Default USE flags for a particular ebuild as well as the USE
		flags from package.use.

		@param mycpv: A cpv to load
		@type mycpv: string
		@param use_cache: Enables caching
		@type use_cache: Boolean
		@param mydb: a dbapi instance that supports aux_get with the IUSE key.
		@type mydb: dbapi or derivative.
		@rtype: None
		"""

		self.modifying()

		pkg = None
		if not isinstance(mycpv, basestring):
			# A Package-like instance was passed in; use its cpv and its
			# metadata mapping in place of a dbapi instance.
			pkg = mycpv
			mycpv = pkg.cpv
			mydb = pkg.metadata

		if self.mycpv == mycpv:
			# Already configured for this cpv; nothing to recalculate.
			return
		ebuild_phase = self.get("EBUILD_PHASE")
		has_changed = False
		self.mycpv = mycpv
		cp = dep_getkey(mycpv)
		cpv_slot = self.mycpv
		pkginternaluse = ""
		iuse = ""
		if mydb:
			if not hasattr(mydb, "aux_get"):
				# mydb is a plain metadata mapping.
				slot = mydb["SLOT"]
				iuse = mydb["IUSE"]
			else:
				slot, iuse = mydb.aux_get(self.mycpv, ["SLOT", "IUSE"])
			if pkg is None:
				cpv_slot = "%s:%s" % (self.mycpv, slot)
			else:
				cpv_slot = pkg
			# IUSE entries prefixed with "+" or "-" form the package's
			# internal default USE settings ("pkginternal" layer).
			pkginternaluse = []
			for x in iuse.split():
				if x.startswith("+"):
					pkginternaluse.append(x[1:])
				elif x.startswith("-"):
					pkginternaluse.append(x)
			pkginternaluse = " ".join(pkginternaluse)
		if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
			self.configdict["pkginternal"]["USE"] = pkginternaluse
			has_changed = True
		# Stack each profile's make.defaults USE together with its
		# package.use entries, in profile order; within a profile the
		# atoms chosen by best_match_to_list() are consumed best-first.
		defaults = []
		pos = 0
		for i in xrange(len(self.profiles)):
			cpdict = self.pkgprofileuse[i].get(cp, None)
			if cpdict:
				keys = cpdict.keys()
				while keys:
					bestmatch = best_match_to_list(cpv_slot, keys)
					if bestmatch:
						keys.remove(bestmatch)
						defaults.insert(pos, cpdict[bestmatch])
					else:
						break
				del keys
			if self.make_defaults_use[i]:
				defaults.insert(pos, self.make_defaults_use[i])
			pos = len(defaults)
		defaults = " ".join(defaults)
		if defaults != self.configdict["defaults"].get("USE",""):
			self.configdict["defaults"]["USE"] = defaults
			has_changed = True
		# Recalculate forced flags from use.force/package.use.force.
		useforce = []
		pos = 0
		for i in xrange(len(self.profiles)):
			cpdict = self.puseforce_list[i].get(cp, None)
			if cpdict:
				keys = cpdict.keys()
				while keys:
					best_match = best_match_to_list(cpv_slot, keys)
					if best_match:
						keys.remove(best_match)
						useforce.insert(pos, cpdict[best_match])
					else:
						break
				del keys
			if self.useforce_list[i]:
				useforce.insert(pos, self.useforce_list[i])
			pos = len(useforce)
		useforce = set(stack_lists(useforce, incremental=True))
		if useforce != self.useforce:
			self.useforce = useforce
			has_changed = True
		# Recalculate masked flags from use.mask/package.use.mask.
		usemask = []
		pos = 0
		for i in xrange(len(self.profiles)):
			cpdict = self.pusemask_list[i].get(cp, None)
			if cpdict:
				keys = cpdict.keys()
				while keys:
					best_match = best_match_to_list(cpv_slot, keys)
					if best_match:
						keys.remove(best_match)
						usemask.insert(pos, cpdict[best_match])
					else:
						break
				del keys
			if self.usemask_list[i]:
				usemask.insert(pos, self.usemask_list[i])
			pos = len(usemask)
		usemask = set(stack_lists(usemask, incremental=True))
		if usemask != self.usemask:
			self.usemask = usemask
			has_changed = True
		# Per-package USE from the user's package.use, most recently
		# matched atoms prepended so they take lower precedence.
		oldpuse = self.puse
		self.puse = ""
		cpdict = self.pusedict.get(cp)
		if cpdict:
			keys = cpdict.keys()
			while keys:
				self.pusekey = best_match_to_list(cpv_slot, keys)
				if self.pusekey:
					keys.remove(self.pusekey)
					self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
				else:
					break
			del keys
		if oldpuse != self.puse:
			has_changed = True
		self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
		self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
		previous_iuse = self.configdict["pkg"].get("IUSE")
		self.configdict["pkg"]["IUSE"] = iuse

		# Always set known good values for these variables, since
		# corruption of these can cause problems:
		cat, pf = catsplit(self.mycpv)
		self.configdict["pkg"]["CATEGORY"] = cat
		self.configdict["pkg"]["PF"] = pf

		if has_changed:
			self.reset(keeping_pkg=1,use_cache=use_cache)

		# If this is not an ebuild phase and reset() has not been called,
		# it's safe to return early here if IUSE has not changed.
		if not (has_changed or ebuild_phase) and \
			previous_iuse == iuse:
			return

		# Filter out USE flags that aren't part of IUSE. This has to
		# be done for every setcpv() call since practically every
		# package has different IUSE.
		use = set(self["USE"].split())
		iuse_implicit = self._get_implicit_iuse()
		iuse_implicit.update(x.lstrip("+-") for x in iuse.split())

		# Escape anything except ".*" which is supposed
		# to pass through from _get_implicit_iuse()
		regex = sorted(re.escape(x) for x in iuse_implicit)
		regex = "^(%s)$" % "|".join(regex)
		regex = regex.replace("\\.\\*", ".*")
		self.configdict["pkg"]["PORTAGE_IUSE"] = regex

		ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
		# Only show the "Forcing test." notice once per config instance.
		if ebuild_force_test and ebuild_phase and \
			not hasattr(self, "_ebuild_force_test_msg_shown"):
			self._ebuild_force_test_msg_shown = True
			writemsg("Forcing test.\n", noiselevel=-1)
		if "test" in self.features and "test" in iuse_implicit:
			if "test" in self.usemask and not ebuild_force_test:
				# "test" is in IUSE and USE=test is masked, so execution
				# of src_test() probably is not reliable. Therefore,
				# temporarily disable FEATURES=test just for this package.
				self["FEATURES"] = " ".join(x for x in self.features \
					if x != "test")
				use.discard("test")
			else:
				use.add("test")
				if ebuild_force_test:
					self.usemask.discard("test")

		# Use the calculated USE flags to regenerate the USE_EXPAND flags so
		# that they are consistent. For optimal performance, use slice
		# comparison instead of startswith().
		use_expand = self.get("USE_EXPAND", "").split()
		for var in use_expand:
			prefix = var.lower() + "_"
			prefix_len = len(prefix)
			expand_flags = set([ x[prefix_len:] for x in use \
				if x[:prefix_len] == prefix ])
			var_split = self.get(var, "").split()
			# Preserve the order of var_split because it can matter for things
			# like LINGUAS.
			var_split = [ x for x in var_split if x in expand_flags ]
			var_split.extend(expand_flags.difference(var_split))
			has_wildcard = "*" in var_split
			if has_wildcard:
				var_split = [ x for x in var_split if x != "*" ]
			has_iuse = set()
			for x in iuse_implicit:
				if x[:prefix_len] == prefix:
					has_iuse.add(x[prefix_len:])
			if has_wildcard:
				# * means to enable everything in IUSE that's not masked
				if has_iuse:
					for x in iuse_implicit:
						if x[:prefix_len] == prefix and x not in self.usemask:
							suffix = x[prefix_len:]
							var_split.append(suffix)
							use.add(x)
				else:
					# If there is a wildcard and no matching flags in IUSE then
					# LINGUAS should be unset so that all .mo files are
					# installed.
					var_split = []
			# Make the flags unique and filter them according to IUSE.
			# Also, continue to preserve order for things like LINGUAS
			# and filter any duplicates that variable may contain.
			filtered_var_split = []
			remaining = has_iuse.intersection(var_split)
			for x in var_split:
				if x in remaining:
					remaining.remove(x)
					filtered_var_split.append(x)
			var_split = filtered_var_split

			if var_split:
				self[var] = " ".join(var_split)
			else:
				# Don't export empty USE_EXPAND vars unless the user config
				# exports them as empty. This is required for vars such as
				# LINGUAS, where unset and empty have different meanings.
				if has_wildcard:
					# ebuild.sh will see this and unset the variable so
					# that things like LINGUAS work properly
					self[var] = "*"
				else:
					if has_iuse:
						self[var] = ""
					else:
						# It's not in IUSE, so just allow the variable content
						# to pass through if it is defined somewhere. This
						# allows packages that support LINGUAS but don't
						# declare it in IUSE to use the variable outside of the
						# USE_EXPAND context.
						pass

		# Filtered for the ebuild environment. Store this in a separate
		# attribute since we still want to be able to see global USE
		# settings for things like emerge --info.

		self.configdict["pkg"]["PORTAGE_USE"] = " ".join(sorted(
			x for x in use if \
			x in iuse_implicit))
| |
| def _get_implicit_iuse(self): |
| """ |
| Some flags are considered to |
| be implicit members of IUSE: |
| * Flags derived from ARCH |
| * Flags derived from USE_EXPAND_HIDDEN variables |
| * Masked flags, such as those from {,package}use.mask |
| * Forced flags, such as those from {,package}use.force |
| * build and bootstrap flags used by bootstrap.sh |
| """ |
| iuse_implicit = set() |
| # Flags derived from ARCH. |
| arch = self.configdict["defaults"].get("ARCH") |
| if arch: |
| iuse_implicit.add(arch) |
| iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split()) |
| |
| # Flags derived from USE_EXPAND_HIDDEN variables |
| # such as ELIBC, KERNEL, and USERLAND. |
| use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split() |
| for x in use_expand_hidden: |
| iuse_implicit.add(x.lower() + "_.*") |
| |
| # Flags that have been masked or forced. |
| iuse_implicit.update(self.usemask) |
| iuse_implicit.update(self.useforce) |
| |
| # build and bootstrap flags used by bootstrap.sh |
| iuse_implicit.add("build") |
| iuse_implicit.add("bootstrap") |
| return iuse_implicit |
| |
| def _getMaskAtom(self, cpv, metadata): |
| """ |
| Take a package and return a matching package.mask atom, or None if no |
| such atom exists or it has been cancelled by package.unmask. PROVIDE |
| is not checked, so atoms will not be found for old-style virtuals. |
| |
| @param cpv: The package name |
| @type cpv: String |
| @param metadata: A dictionary of raw package metadata |
| @type metadata: dict |
| @rtype: String |
| @return: An matching atom string or None if one is not found. |
| """ |
| |
| cp = cpv_getkey(cpv) |
| mask_atoms = self.pmaskdict.get(cp) |
| if mask_atoms: |
| pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])] |
| unmask_atoms = self.punmaskdict.get(cp) |
| for x in mask_atoms: |
| if not match_from_list(x, pkg_list): |
| continue |
| if unmask_atoms: |
| for y in unmask_atoms: |
| if match_from_list(y, pkg_list): |
| return None |
| return x |
| return None |
| |
| def _getProfileMaskAtom(self, cpv, metadata): |
| """ |
| Take a package and return a matching profile atom, or None if no |
| such atom exists. Note that a profile atom may or may not have a "*" |
| prefix. PROVIDE is not checked, so atoms will not be found for |
| old-style virtuals. |
| |
| @param cpv: The package name |
| @type cpv: String |
| @param metadata: A dictionary of raw package metadata |
| @type metadata: dict |
| @rtype: String |
| @return: An matching profile atom string or None if one is not found. |
| """ |
| |
| cp = cpv_getkey(cpv) |
| profile_atoms = self.prevmaskdict.get(cp) |
| if profile_atoms: |
| pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])] |
| for x in profile_atoms: |
| if match_from_list(x.lstrip("*"), pkg_list): |
| continue |
| return x |
| return None |
| |
	def _getMissingKeywords(self, cpv, metadata):
		"""
		Take a package and return a list of any KEYWORDS that the user
		may need to accept for the given package. If the KEYWORDS are empty
		and the ** keyword has not been accepted, the returned list will
		contain ** alone (in order to distinguish from the case of "none
		missing").

		@param cpv: The package name (for package.keywords support)
		@type cpv: String
		@param metadata: A dictionary of raw package metadata
		@type metadata: dict
		@rtype: List
		@return: A list of KEYWORDS that have not been accepted.
		"""

		# Hack: Need to check the env directly here as otherwise stacking
		# doesn't work properly as negative values are lost in the config
		# object (bug #139600)
		egroups = self.configdict["backupenv"].get(
			"ACCEPT_KEYWORDS", "").split()
		mygroups = metadata["KEYWORDS"].split()
		# Repoman may modify this attribute as necessary.
		pgroups = self["ACCEPT_KEYWORDS"].split()
		match=0
		cp = dep_getkey(cpv)
		pkgdict = self.pkeywordsdict.get(cp)
		matches = False
		if pkgdict:
			# Stack in user package.keywords entries whose atoms match.
			cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
			for atom, pkgkeywords in pkgdict.iteritems():
				if match_from_list(atom, cpv_slot_list):
					matches = True
					pgroups.extend(pkgkeywords)
		if matches or egroups:
			# Re-apply the raw environment ACCEPT_KEYWORDS, then process
			# the whole list incrementally so "-kw" and "-*" entries can
			# cancel earlier keywords.
			pgroups.extend(egroups)
			inc_pgroups = set()
			for x in pgroups:
				if x.startswith("-"):
					if x == "-*":
						inc_pgroups.clear()
					else:
						inc_pgroups.discard(x[1:])
				else:
					inc_pgroups.add(x)
			pgroups = inc_pgroups
			del inc_pgroups
		hasstable = False
		hastesting = False
		for gp in mygroups:
			# "*" and a lone "-*" in KEYWORDS are discouraged; warn.
			if gp == "*" or (gp == "-*" and len(mygroups) == 1):
				writemsg(("--- WARNING: Package '%s' uses" + \
					" '%s' keyword.\n") % (cpv, gp), noiselevel=-1)
				if gp == "*":
					match = 1
					break
			elif gp in pgroups:
				match=1
				break
			elif gp.startswith("~"):
				hastesting = True
			elif not gp.startswith("-"):
				hasstable = True
		# "~*" accepts any testing keyword, "*" any stable keyword, and
		# "**" accepts anything at all.
		if not match and \
			((hastesting and "~*" in pgroups) or \
			(hasstable and "*" in pgroups) or "**" in pgroups):
			match=1
		if match:
			missing = []
		else:
			if not mygroups:
				# If KEYWORDS is empty then we still have to return something
				# in order to distinguish from the case of "none missing".
				mygroups.append("**")
			missing = mygroups
		return missing
| |
| def _getMissingLicenses(self, cpv, metadata): |
| """ |
| Take a LICENSE string and return a list any licenses that the user may |
| may need to accept for the given package. The returned list will not |
| contain any licenses that have already been accepted. This method |
| can throw an InvalidDependString exception. |
| |
| @param cpv: The package name (for package.license support) |
| @type cpv: String |
| @param metadata: A dictionary of raw package metadata |
| @type metadata: dict |
| @rtype: List |
| @return: A list of licenses that have not been accepted. |
| """ |
| if "*" in self._accept_license: |
| return [] |
| acceptable_licenses = self._accept_license |
| cpdict = self._plicensedict.get(dep_getkey(cpv), None) |
| if cpdict: |
| acceptable_licenses = self._accept_license.copy() |
| cpv_slot = "%s:%s" % (cpv, metadata["SLOT"]) |
| for atom in match_to_list(cpv_slot, cpdict.keys()): |
| acceptable_licenses.update(cpdict[atom]) |
| license_struct = portage.dep.paren_reduce(metadata["LICENSE"]) |
| license_struct = portage.dep.use_reduce( |
| license_struct, uselist=metadata["USE"].split()) |
| license_struct = portage.dep.dep_opconvert(license_struct) |
| return self._getMaskedLicenses(license_struct, acceptable_licenses) |
| |
| def _getMaskedLicenses(self, license_struct, acceptable_licenses): |
| if not license_struct: |
| return [] |
| if license_struct[0] == "||": |
| ret = [] |
| for element in license_struct[1:]: |
| if isinstance(element, list): |
| if element: |
| ret.append(self._getMaskedLicenses( |
| element, acceptable_licenses)) |
| if not ret[-1]: |
| return [] |
| else: |
| if element in acceptable_licenses: |
| return [] |
| ret.append(element) |
| # Return all masked licenses, since we don't know which combination |
| # (if any) the user will decide to unmask. |
| return flatten(ret) |
| |
| ret = [] |
| for element in license_struct: |
| if isinstance(element, list): |
| if element: |
| ret.extend(self._getMaskedLicenses(element, |
| acceptable_licenses)) |
| else: |
| if element not in acceptable_licenses: |
| ret.append(element) |
| return ret |
| |
| def _accept_chost(self, pkg): |
| """ |
| @return True if pkg CHOST is accepted, False otherwise. |
| """ |
| if self._accept_chost_re is None: |
| accept_chost = self.get("ACCEPT_CHOSTS", "").split() |
| if not accept_chost: |
| chost = self.get("CHOST") |
| if chost: |
| accept_chost.append(chost) |
| if not accept_chost: |
| self._accept_chost_re = re.compile(".*") |
| elif len(accept_chost) == 1: |
| try: |
| self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0]) |
| except re.error, e: |
| writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \ |
| (accept_chost[0], e), noiselevel=-1) |
| self._accept_chost_re = re.compile("^$") |
| else: |
| try: |
| self._accept_chost_re = re.compile( |
| r'^(%s)$' % "|".join(accept_chost)) |
| except re.error, e: |
| writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \ |
| (" ".join(accept_chost), e), noiselevel=-1) |
| self._accept_chost_re = re.compile("^$") |
| |
| return self._accept_chost_re.match( |
| pkg.metadata.get("CHOST", "")) is not None |
| |
| def setinst(self,mycpv,mydbapi): |
| """This updates the preferences for old-style virtuals, |
| affecting the behavior of dep_expand() and dep_check() |
| calls. It can change dbapi.match() behavior since that |
| calls dep_expand(). However, dbapi instances have |
| internal match caches that are not invalidated when |
| preferences are updated here. This can potentially |
| lead to some inconsistency (relevant to bug #1343).""" |
| self.modifying() |
| if len(self.virtuals) == 0: |
| self.getvirtuals() |
| # Grab the virtuals this package provides and add them into the tree virtuals. |
| if not hasattr(mydbapi, "aux_get"): |
| provides = mydbapi["PROVIDE"] |
| else: |
| provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0] |
| if not provides: |
| return |
| if isinstance(mydbapi, portdbapi): |
| self.setcpv(mycpv, mydb=mydbapi) |
| myuse = self["PORTAGE_USE"] |
| elif not hasattr(mydbapi, "aux_get"): |
| myuse = mydbapi["USE"] |
| else: |
| myuse = mydbapi.aux_get(mycpv, ["USE"])[0] |
| virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split())) |
| |
| modified = False |
| cp = dep_getkey(mycpv) |
| for virt in virts: |
| virt = dep_getkey(virt) |
| providers = self.virtuals.get(virt) |
| if providers and cp in providers: |
| continue |
| providers = self._depgraphVirtuals.get(virt) |
| if providers is None: |
| providers = [] |
| self._depgraphVirtuals[virt] = providers |
| if cp not in providers: |
| providers.append(cp) |
| modified = True |
| |
| if modified: |
| self.virtuals = self.__getvirtuals_compile() |
| |
| def reload(self): |
| """Reload things like /etc/profile.env that can change during runtime.""" |
| env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env") |
| self.configdict["env.d"].clear() |
| env_d = getconfig(env_d_filename, expand=False) |
| if env_d: |
| # env_d will be None if profile.env doesn't exist. |
| self.configdict["env.d"].update(env_d) |
| |
| def regenerate(self,useonly=0,use_cache=1): |
| """ |
| Regenerate settings |
| This involves regenerating valid USE flags, re-expanding USE_EXPAND flags |
| re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL |
| variables. This also updates the env.d configdict; useful in case an ebuild |
| changes the environment. |
| |
| If FEATURES has already stacked, it is not stacked twice. |
| |
| @param useonly: Only regenerate USE flags (not any other incrementals) |
| @type useonly: Boolean |
| @param use_cache: Enable Caching (only for autouse) |
| @type use_cache: Boolean |
| @rtype: None |
| """ |
| |
| self.modifying() |
| if self.already_in_regenerate: |
| # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops. |
| writemsg("!!! Looping in regenerate.\n",1) |
| return |
| else: |
| self.already_in_regenerate = 1 |
| |
| if useonly: |
| myincrementals=["USE"] |
| else: |
| myincrementals = self.incrementals |
| myincrementals = set(myincrementals) |
| # If self.features exists, it has already been stacked and may have |
| # been mutated, so don't stack it again or else any mutations will be |
| # reverted. |
| if "FEATURES" in myincrementals and hasattr(self, "features"): |
| myincrementals.remove("FEATURES") |
| |
| if "USE" in myincrementals: |
| # Process USE last because it depends on USE_EXPAND which is also |
| # an incremental! |
| myincrementals.remove("USE") |
| |
| for mykey in myincrementals: |
| |
| mydbs=self.configlist[:-1] |
| |
| myflags=[] |
| for curdb in mydbs: |
| if mykey not in curdb: |
| continue |
| #variables are already expanded |
| mysplit = curdb[mykey].split() |
| |
| for x in mysplit: |
| if x=="-*": |
| # "-*" is a special "minus" var that means "unset all settings". |
| # so USE="-* gnome" will have *just* gnome enabled. |
| myflags = [] |
| continue |
| |
| if x[0]=="+": |
| # Not legal. People assume too much. Complain. |
| writemsg(red("USE flags should not start with a '+': %s\n" % x), |
| noiselevel=-1) |
| x=x[1:] |
| if not x: |
| continue |
| |
| if (x[0]=="-"): |
| if (x[1:] in myflags): |
| # Unset/Remove it. |
| del myflags[myflags.index(x[1:])] |
| continue |
| |
| # We got here, so add it now. |
| if x not in myflags: |
| myflags.append(x) |
| |
| myflags.sort() |
| #store setting in last element of configlist, the original environment: |
| if myflags or mykey in self: |
| self.configlist[-1][mykey] = " ".join(myflags) |
| del myflags |
| |
|