| # Copyright 1998-2011 Gentoo Foundation |
| # Distributed under the terms of the GNU General Public License v2 |
| |
| __all__ = [ |
| "vardbapi", "vartree", "dblink"] + \ |
| ["write_contents", "tar_contents"] |
| |
| import portage |
| portage.proxy.lazyimport.lazyimport(globals(), |
| 'portage.checksum:_perform_md5_merge@perform_md5', |
| 'portage.data:portage_gid,portage_uid,secpass', |
| 'portage.dbapi.dep_expand:dep_expand', |
| 'portage.dbapi._MergeProcess:MergeProcess', |
| 'portage.dep:dep_getkey,isjustname,match_from_list,' + \ |
| 'use_reduce,_slot_re', |
| 'portage.elog:collect_ebuild_messages,collect_messages,' + \ |
| 'elog_process,_merge_logentries', |
| 'portage.locks:lockdir,unlockdir', |
| 'portage.output:bold,colorize', |
| 'portage.package.ebuild.doebuild:doebuild_environment,' + \ |
| '_spawn_phase', |
| 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs', |
| 'portage.update:fixdbentries', |
| 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \ |
| 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \ |
| 'grabdict,normalize_path,new_protect_filename', |
| 'portage.util.digraph:digraph', |
| 'portage.util.env_update:env_update', |
| 'portage.util.listdir:dircache,listdir', |
| 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry', |
| 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap', |
| 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \ |
| '_pkgsplit@pkgsplit', |
| ) |
| |
| from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \ |
| PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH |
| from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS |
| from portage.dbapi import dbapi |
| from portage.exception import CommandNotFound, \ |
| InvalidData, InvalidPackageName, \ |
| FileNotFound, PermissionDenied, UnsupportedAPIException |
| from portage.localization import _ |
| from portage.util.movefile import movefile |
| |
| from portage import abssymlink, _movefile, bsd_chflags |
| |
| # This is a special version of the os module, wrapped for unicode support. |
| from portage import os |
| from portage import _encodings |
| from portage import _os_merge |
| from portage import _selinux_merge |
| from portage import _unicode_decode |
| from portage import _unicode_encode |
| |
| from _emerge.AsynchronousLock import AsynchronousLock |
| from _emerge.EbuildBuildDir import EbuildBuildDir |
| from _emerge.EbuildPhase import EbuildPhase |
| from _emerge.PollScheduler import PollScheduler |
| from _emerge.MiscFunctionsProcess import MiscFunctionsProcess |
| |
| import codecs |
| import fileinput |
| import gc |
| import re, shutil, stat, errno, subprocess |
| import logging |
| import os as _os |
| import sys |
| import tempfile |
| import time |
| import warnings |
| |
| try: |
| import cPickle as pickle |
| except ImportError: |
| import pickle |
| |
| if sys.hexversion >= 0x3000000: |
| basestring = str |
| long = int |
| |
| class vardbapi(dbapi): |
| |
| _excluded_dirs = ["CVS", "lost+found"] |
| _excluded_dirs = [re.escape(x) for x in _excluded_dirs] |
| _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \ |
| "|".join(_excluded_dirs) + r')$') |
| |
| _aux_cache_version = "1" |
| _owners_cache_version = "1" |
| |
| # Number of uncached packages to trigger cache update, since |
| # it's wasteful to update it for every vdb change. |
| _aux_cache_threshold = 5 |
| |
| _aux_cache_keys_re = re.compile(r'^NEEDED\..*$') |
| _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$') |
| |
| def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None): |
| """ |
| The categories parameter is unused since the dbapi class |
| now has a categories property that is generated from the |
| available packages. |
| """ |
| |
| # Used by emerge to check whether any packages |
| # have been added or removed. |
| self._pkgs_changed = False |
| |
| #cache for category directory mtimes |
| self.mtdircache = {} |
| |
| #cache for dependency checks |
| self.matchcache = {} |
| |
| #cache for cp_list results |
| self.cpcache = {} |
| |
| self.blockers = None |
| if settings is None: |
| settings = portage.settings |
| self.settings = settings |
| self.root = settings['ROOT'] |
| |
| if _unused_param is not None and _unused_param != self.root: |
| warnings.warn("The first parameter of the " + \ |
| "portage.dbapi.vartree.vardbapi" + \ |
| " constructor is now unused. Use " + \ |
| "settings['ROOT'] instead.", |
| DeprecationWarning, stacklevel=2) |
| |
| self._eroot = settings['EROOT'] |
| if vartree is None: |
| vartree = portage.db[self.root]["vartree"] |
| self.vartree = vartree |
| self._aux_cache_keys = set( |
| ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION", |
| "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS", |
| "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", |
| "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES", |
| "REQUIRED_USE"]) |
| self._aux_cache_obj = None |
| self._aux_cache_filename = os.path.join(self._eroot, |
| CACHE_PATH, "vdb_metadata.pickle") |
| self._counter_path = os.path.join(self._eroot, |
| CACHE_PATH, "counter") |
| |
| self._plib_registry = None |
| if _ENABLE_PRESERVE_LIBS: |
| try: |
| self._plib_registry = PreservedLibsRegistry(self.root, |
| os.path.join(self._eroot, PRIVATE_PATH, |
| "preserved_libs_registry")) |
| except PermissionDenied: |
| # apparently this user isn't allowed to access PRIVATE_PATH |
| pass |
| |
| self._linkmap = None |
| if _ENABLE_DYN_LINK_MAP: |
| self._linkmap = LinkageMap(self) |
| self._owners = self._owners_db(self) |
| |
| def getpath(self, mykey, filename=None): |
| # This is an optimized hotspot, so don't use unicode-wrapped |
| # os module and don't use os.path.join(). |
| rValue = self._eroot + VDB_PATH + _os.sep + mykey |
| if filename is not None: |
| # If filename is always relative, we can do just |
| # rValue += _os.sep + filename |
| rValue = _os.path.join(rValue, filename) |
| return rValue |
| |
| def _bump_mtime(self, cpv): |
| """ |
| This is called before and after any modifications, so that consumers |
| can use directory mtimes to validate caches. See bug #290428. |
| """ |
| base = self._eroot + VDB_PATH |
| cat = catsplit(cpv)[0] |
| catdir = base + _os.sep + cat |
| t = time.time() |
| t = (t, t) |
| try: |
| for x in (catdir, base): |
| os.utime(x, t) |
| except OSError: |
| ensure_dirs(catdir) |
| |
| def cpv_exists(self, mykey): |
| "Tells us whether an actual ebuild exists on disk (no masking)" |
| return os.path.exists(self.getpath(mykey)) |
| |
| def cpv_counter(self, mycpv): |
| "This method will grab the COUNTER. Returns a counter value." |
| try: |
| return long(self.aux_get(mycpv, ["COUNTER"])[0]) |
| except (KeyError, ValueError): |
| pass |
| writemsg_level(_("portage: COUNTER for %s was corrupted; " \ |
| "resetting to value of 0\n") % (mycpv,), |
| level=logging.ERROR, noiselevel=-1) |
| return 0 |
| |
| def cpv_inject(self, mycpv): |
| "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist" |
| ensure_dirs(self.getpath(mycpv)) |
| counter = self.counter_tick(mycpv=mycpv) |
| # write local package counter so that emerge clean does the right thing |
| write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter)) |
| |
| def isInjected(self, mycpv): |
| if self.cpv_exists(mycpv): |
| if os.path.exists(self.getpath(mycpv, filename="INJECTED")): |
| return True |
| if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")): |
| return True |
| return False |
| |
| def move_ent(self, mylist, repo_match=None): |
| origcp = mylist[1] |
| newcp = mylist[2] |
| |
| # sanity check |
| for atom in (origcp, newcp): |
| if not isjustname(atom): |
| raise InvalidPackageName(str(atom)) |
| origmatches = self.match(origcp, use_cache=0) |
| moves = 0 |
| if not origmatches: |
| return moves |
| for mycpv in origmatches: |
| mycpv_cp = cpv_getkey(mycpv) |
| if mycpv_cp != origcp: |
| # Ignore PROVIDE virtual match. |
| continue |
| if repo_match is not None \ |
| and not repo_match(self.aux_get(mycpv, ['repository'])[0]): |
| continue |
| mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1) |
| mynewcat = catsplit(newcp)[0] |
| origpath = self.getpath(mycpv) |
| if not os.path.exists(origpath): |
| continue |
| moves += 1 |
| if not os.path.exists(self.getpath(mynewcat)): |
| #create the directory |
| ensure_dirs(self.getpath(mynewcat)) |
| newpath = self.getpath(mynewcpv) |
| if os.path.exists(newpath): |
| #dest already exists; keep this puppy where it is. |
| continue |
| _movefile(origpath, newpath, mysettings=self.settings) |
| self._clear_pkg_cache(self._dblink(mycpv)) |
| self._clear_pkg_cache(self._dblink(mynewcpv)) |
| |
| # We need to rename the ebuild now. |
| old_pf = catsplit(mycpv)[1] |
| new_pf = catsplit(mynewcpv)[1] |
| if new_pf != old_pf: |
| try: |
| os.rename(os.path.join(newpath, old_pf + ".ebuild"), |
| os.path.join(newpath, new_pf + ".ebuild")) |
| except EnvironmentError as e: |
| if e.errno != errno.ENOENT: |
| raise |
| del e |
| write_atomic(os.path.join(newpath, "PF"), new_pf+"\n") |
| write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n") |
| fixdbentries([mylist], newpath) |
| return moves |
| |
| def cp_list(self, mycp, use_cache=1): |
| mysplit=catsplit(mycp) |
| if mysplit[0] == '*': |
| mysplit[0] = mysplit[0][1:] |
| try: |
| mystat = os.stat(self.getpath(mysplit[0])).st_mtime |
| except OSError: |
| mystat = 0 |
| if use_cache and mycp in self.cpcache: |
| cpc = self.cpcache[mycp] |
| if cpc[0] == mystat: |
| return cpc[1][:] |
| cat_dir = self.getpath(mysplit[0]) |
| try: |
| dir_list = os.listdir(cat_dir) |
| except EnvironmentError as e: |
| if e.errno == PermissionDenied.errno: |
| raise PermissionDenied(cat_dir) |
| del e |
| dir_list = [] |
| |
| returnme = [] |
| for x in dir_list: |
| if self._excluded_dirs.match(x) is not None: |
| continue |
| ps = pkgsplit(x) |
| if not ps: |
| self.invalidentry(os.path.join(self.getpath(mysplit[0]), x)) |
| continue |
| if len(mysplit) > 1: |
| if ps[0] == mysplit[1]: |
| returnme.append(mysplit[0]+"/"+x) |
| self._cpv_sort_ascending(returnme) |
| if use_cache: |
| self.cpcache[mycp] = [mystat, returnme[:]] |
| elif mycp in self.cpcache: |
| del self.cpcache[mycp] |
| return returnme |
| |
| def cpv_all(self, use_cache=1): |
| """ |
| Set use_cache=0 to bypass the portage.cachedir() cache in cases |
| when the accuracy of mtime staleness checks should not be trusted |
| (generally this is only necessary in critical sections that |
| involve merge or unmerge of packages). |
| """ |
| returnme = [] |
| basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep |
| |
| if use_cache: |
| from portage import listdir |
| else: |
| def listdir(p, **kwargs): |
| try: |
| return [x for x in os.listdir(p) \ |
| if os.path.isdir(os.path.join(p, x))] |
| except EnvironmentError as e: |
| if e.errno == PermissionDenied.errno: |
| raise PermissionDenied(p) |
| del e |
| return [] |
| |
| for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1): |
| if self._excluded_dirs.match(x) is not None: |
| continue |
| if not self._category_re.match(x): |
| continue |
| for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1): |
| if self._excluded_dirs.match(y) is not None: |
| continue |
| subpath = x + "/" + y |
| # -MERGING- should never be a cpv, nor should files. |
| try: |
| if catpkgsplit(subpath) is None: |
| self.invalidentry(self.getpath(subpath)) |
| continue |
| except InvalidData: |
| self.invalidentry(self.getpath(subpath)) |
| continue |
| returnme.append(subpath) |
| |
| return returnme |
| |
| def cp_all(self, use_cache=1): |
| mylist = self.cpv_all(use_cache=use_cache) |
| d={} |
| for y in mylist: |
| if y[0] == '*': |
| y = y[1:] |
| try: |
| mysplit = catpkgsplit(y) |
| except InvalidData: |
| self.invalidentry(self.getpath(y)) |
| continue |
| if not mysplit: |
| self.invalidentry(self.getpath(y)) |
| continue |
| d[mysplit[0]+"/"+mysplit[1]] = None |
| return list(d) |
| |
| def checkblockers(self, origdep): |
| pass |
| |
| def _clear_cache(self): |
| self.mtdircache.clear() |
| self.matchcache.clear() |
| self.cpcache.clear() |
| self._aux_cache_obj = None |
| |
| def _add(self, pkg_dblink): |
| self._pkgs_changed = True |
| self._clear_pkg_cache(pkg_dblink) |
| |
| def _remove(self, pkg_dblink): |
| self._pkgs_changed = True |
| self._clear_pkg_cache(pkg_dblink) |
| |
| def _clear_pkg_cache(self, pkg_dblink): |
| # Due to 1 second mtime granularity in <python-2.5, mtime checks |
| # are not always sufficient to invalidate vardbapi caches. Therefore, |
| # the caches need to be actively invalidated here. |
| self.mtdircache.pop(pkg_dblink.cat, None) |
| self.matchcache.pop(pkg_dblink.cat, None) |
| self.cpcache.pop(pkg_dblink.mysplit[0], None) |
| dircache.pop(pkg_dblink.dbcatdir, None) |
| |
| def match(self, origdep, use_cache=1): |
| "caching match function" |
| mydep = dep_expand( |
| origdep, mydb=self, use_cache=use_cache, settings=self.settings) |
| mykey = dep_getkey(mydep) |
| mycat = catsplit(mykey)[0] |
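| # Match results are cached per category, keyed on the mtime of the |
| # category directory under the vdb; _bump_mtime() touches that directory |
| # around every modification, which implicitly invalidates the cache. |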
| if not use_cache: |
| if mycat in self.matchcache: |
| del self.mtdircache[mycat] |
| del self.matchcache[mycat] |
| return list(self._iter_match(mydep, |
| self.cp_list(mydep.cp, use_cache=use_cache))) |
| try: |
| curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime |
| except (IOError, OSError): |
| curmtime=0 |
| |
| if mycat not in self.matchcache or \ |
| self.mtdircache[mycat] != curmtime: |
| # clear cache entry |
| self.mtdircache[mycat] = curmtime |
| self.matchcache[mycat] = {} |
| if mydep not in self.matchcache[mycat]: |
| mymatch = list(self._iter_match(mydep, |
| self.cp_list(mydep.cp, use_cache=use_cache))) |
| self.matchcache[mycat][mydep] = mymatch |
| return self.matchcache[mycat][mydep][:] |
| |
| def findname(self, mycpv): |
| return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild") |
| |
| def flush_cache(self): |
| """If the current user has permission and the internal aux_get cache has |
| been updated, save it to disk and mark it unmodified. This is called |
| by emerge after it has loaded the full vdb for use in dependency |
| calculations. Currently, the cache is only written if the user has |
| superuser privileges (since that's required to obtain a lock), but all |
| users have read access and benefit from faster metadata lookups (as |
| long as at least part of the cache is still valid).""" |
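| # The write below only happens when at least _aux_cache_threshold |
| # entries have been modified and the caller has superuser privileges |
| # (secpass >= 2); otherwise the in-memory cache is simply left alone. |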
| if self._aux_cache is not None and \ |
| len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \ |
| secpass >= 2: |
| self._owners.populate() # index any unindexed contents |
| valid_nodes = set(self.cpv_all()) |
| for cpv in list(self._aux_cache["packages"]): |
| if cpv not in valid_nodes: |
| del self._aux_cache["packages"][cpv] |
| del self._aux_cache["modified"] |
| try: |
| f = atomic_ofstream(self._aux_cache_filename, 'wb') |
| pickle.dump(self._aux_cache, f, protocol=2) |
| f.close() |
| apply_secpass_permissions( |
| self._aux_cache_filename, gid=portage_gid, mode=0o644) |
| except (IOError, OSError) as e: |
| pass |
| self._aux_cache["modified"] = set() |
| |
| @property |
| def _aux_cache(self): |
| if self._aux_cache_obj is None: |
| self._aux_cache_init() |
| return self._aux_cache_obj |
| |
| def _aux_cache_init(self): |
| aux_cache = None |
| open_kwargs = {} |
| if sys.hexversion >= 0x3000000: |
| # Buffered io triggers extreme performance issues in |
| # Unpickler.load() (problem observed with python-3.0.1). |
| # Unfortunately, performance is still poor relative to |
| # python-2.x, but buffering makes it much worse. |
| open_kwargs["buffering"] = 0 |
| try: |
| f = open(_unicode_encode(self._aux_cache_filename, |
| encoding=_encodings['fs'], errors='strict'), |
| mode='rb', **open_kwargs) |
| mypickle = pickle.Unpickler(f) |
| try: |
| mypickle.find_global = None |
| except AttributeError: |
| # TODO: If py3k, override Unpickler.find_class(). |
| pass |
| aux_cache = mypickle.load() |
| f.close() |
| del f |
| except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e: |
| if isinstance(e, pickle.UnpicklingError): |
| writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \ |
| (self._aux_cache_filename, e), noiselevel=-1) |
| del e |
| |
| if not aux_cache or \ |
| not isinstance(aux_cache, dict) or \ |
| aux_cache.get("version") != self._aux_cache_version or \ |
| not aux_cache.get("packages"): |
| aux_cache = {"version": self._aux_cache_version} |
| aux_cache["packages"] = {} |
| |
| owners = aux_cache.get("owners") |
| if owners is not None: |
| if not isinstance(owners, dict): |
| owners = None |
| elif "version" not in owners: |
| owners = None |
| elif owners["version"] != self._owners_cache_version: |
| owners = None |
| elif "base_names" not in owners: |
| owners = None |
| elif not isinstance(owners["base_names"], dict): |
| owners = None |
| |
| if owners is None: |
| owners = { |
| "base_names" : {}, |
| "version" : self._owners_cache_version |
| } |
| aux_cache["owners"] = owners |
| |
| aux_cache["modified"] = set() |
| self._aux_cache_obj = aux_cache |
| |
| def aux_get(self, mycpv, wants): |
| """This automatically caches selected keys that are frequently needed |
| by emerge for dependency calculations. The cached metadata is |
| considered valid if the mtime of the package directory has not changed |
| since the data was cached. The cache is stored in a pickled dict |
| object with the following format: |
| |
| {"version": "1", "packages": {cpv1: (mtime, {k1: v1, k2: v2, ...}), cpv2: ...}} |
| |
| If an error occurs while loading the cache pickle or the version is |
| unrecognized, the cache will simply be recreated from scratch (it is |
| completely disposable). |
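| |
| Illustrative usage (hypothetical cpv and variable names): |
| slot, repo = vardb.aux_get("app-misc/foo-1.0", ["SLOT", "repository"]) |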
| """ |
| cache_these_wants = self._aux_cache_keys.intersection(wants) |
| for x in wants: |
| if self._aux_cache_keys_re.match(x) is not None: |
| cache_these_wants.add(x) |
| |
| if not cache_these_wants: |
| return self._aux_get(mycpv, wants) |
| |
| cache_these = set(self._aux_cache_keys) |
| cache_these.update(cache_these_wants) |
| |
| mydir = self.getpath(mycpv) |
| mydir_stat = None |
| try: |
| mydir_stat = os.stat(mydir) |
| except OSError as e: |
| if e.errno != errno.ENOENT: |
| raise |
| raise KeyError(mycpv) |
| mydir_mtime = mydir_stat[stat.ST_MTIME] |
| pkg_data = self._aux_cache["packages"].get(mycpv) |
| pull_me = cache_these.union(wants) |
| mydata = {"_mtime_" : mydir_mtime} |
| cache_valid = False |
| cache_incomplete = False |
| cache_mtime = None |
| metadata = None |
| if pkg_data is not None: |
| if not isinstance(pkg_data, tuple) or len(pkg_data) != 2: |
| pkg_data = None |
| else: |
| cache_mtime, metadata = pkg_data |
| if not isinstance(cache_mtime, (long, int)) or \ |
| not isinstance(metadata, dict): |
| pkg_data = None |
| |
| if pkg_data: |
| cache_mtime, metadata = pkg_data |
| cache_valid = cache_mtime == mydir_mtime |
| if cache_valid: |
| # Migrate old metadata to unicode. |
| for k, v in metadata.items(): |
| metadata[k] = _unicode_decode(v, |
| encoding=_encodings['repo.content'], errors='replace') |
| |
| mydata.update(metadata) |
| pull_me.difference_update(mydata) |
| |
| if pull_me: |
| # pull any needed data and cache it |
| aux_keys = list(pull_me) |
| for k, v in zip(aux_keys, |
| self._aux_get(mycpv, aux_keys, st=mydir_stat)): |
| mydata[k] = v |
| if not cache_valid or cache_these.difference(metadata): |
| cache_data = {} |
| if cache_valid and metadata: |
| cache_data.update(metadata) |
| for aux_key in cache_these: |
| cache_data[aux_key] = mydata[aux_key] |
| self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data) |
| self._aux_cache["modified"].add(mycpv) |
| |
| if _slot_re.match(mydata['SLOT']) is None: |
| # Empty or invalid slot triggers InvalidAtom exceptions when |
| # generating slot atoms for packages, so translate it to '0' here. |
| mydata['SLOT'] = _unicode_decode('0') |
| |
| return [mydata[x] for x in wants] |
| |
| def _aux_get(self, mycpv, wants, st=None): |
| mydir = self.getpath(mycpv) |
| if st is None: |
| try: |
| st = os.stat(mydir) |
| except OSError as e: |
| if e.errno == errno.ENOENT: |
| raise KeyError(mycpv) |
| elif e.errno == PermissionDenied.errno: |
| raise PermissionDenied(mydir) |
| else: |
| raise |
| if not stat.S_ISDIR(st.st_mode): |
| raise KeyError(mycpv) |
| results = [] |
| for x in wants: |
| if x == "_mtime_": |
| results.append(st[stat.ST_MTIME]) |
| continue |
| try: |
| myf = codecs.open( |
| _unicode_encode(os.path.join(mydir, x), |
| encoding=_encodings['fs'], errors='strict'), |
| mode='r', encoding=_encodings['repo.content'], |
| errors='replace') |
| try: |
| myd = myf.read() |
| finally: |
| myf.close() |
| # Preserve \n for metadata that is known to |
| # contain multiple lines. |
| if self._aux_multi_line_re.match(x) is None: |
| myd = " ".join(myd.split()) |
| except IOError: |
| myd = _unicode_decode('') |
| if x == "EAPI" and not myd: |
| results.append(_unicode_decode('0')) |
| else: |
| results.append(myd) |
| return results |
| |
| def aux_update(self, cpv, values): |
| mylink = self._dblink(cpv) |
| if not mylink.exists(): |
| raise KeyError(cpv) |
| self._bump_mtime(cpv) |
| self._clear_pkg_cache(mylink) |
| for k, v in values.items(): |
| if v: |
| mylink.setfile(k, v) |
| else: |
| try: |
| os.unlink(os.path.join(self.getpath(cpv), k)) |
| except EnvironmentError: |
| pass |
| self._bump_mtime(cpv) |
| |
| def counter_tick(self, myroot=None, mycpv=None): |
| """ |
| @param myroot: ignored, self._eroot is used instead |
| """ |
| return self.counter_tick_core(incrementing=1, mycpv=mycpv) |
| |
| def get_counter_tick_core(self, myroot=None, mycpv=None): |
| """ |
| Use this method to retrieve the counter instead |
| of having to trust the value of a global counter |
| file that can lead to invalid COUNTER |
| generation. When cache is valid, the package COUNTER |
| files are not read and we rely on the timestamp of |
| the package directory to validate cache. The stat |
| calls should only take a short time, so performance |
| is sufficient without having to rely on a potentially |
| corrupt global counter file. |
| |
| The global counter file located at |
| $CACHE_PATH/counter serves to record the |
| counter of the last installed package and |
| it also corresponds to the total number of |
| installation actions that have occurred in |
| the history of this package database. |
| |
| @param myroot: ignored, self._eroot is used instead |
| """ |
| myroot = None |
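| # Scan every installed package and track the highest COUNTER seen, so |
| # that a missing or corrupt global counter file can never yield a value |
| # lower than the COUNTER of an already-installed package. |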
| cp_list = self.cp_list |
| max_counter = 0 |
| for cp in self.cp_all(): |
| for cpv in cp_list(cp): |
| try: |
| counter = int(self.aux_get(cpv, ["COUNTER"])[0]) |
| except (KeyError, OverflowError, ValueError): |
| continue |
| if counter > max_counter: |
| max_counter = counter |
| |
| new_vdb = False |
| counter = -1 |
| try: |
| cfile = codecs.open( |
| _unicode_encode(self._counter_path, |
| encoding=_encodings['fs'], errors='strict'), |
| mode='r', encoding=_encodings['repo.content'], |
| errors='replace') |
| except EnvironmentError as e: |
| new_vdb = not bool(self.cpv_all()) |
| if not new_vdb: |
| writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \ |
| self._counter_path, noiselevel=-1) |
| writemsg("!!! %s\n" % str(e), noiselevel=-1) |
| del e |
| else: |
| try: |
| try: |
| counter = long(cfile.readline().strip()) |
| finally: |
| cfile.close() |
| except (OverflowError, ValueError) as e: |
| writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \ |
| self._counter_path, noiselevel=-1) |
| writemsg("!!! %s\n" % str(e), noiselevel=-1) |
| del e |
| |
| # We must ensure that we return a counter |
| # value that is at least as large as the |
| # highest one from the installed packages, |
| # since having a corrupt value that is too low |
| # can trigger incorrect AUTOCLEAN behavior due |
| # to newly installed packages having lower |
| # COUNTERs than the previous version in the |
| # same slot. |
| if counter > max_counter: |
| max_counter = counter |
| |
| if counter < 0 and not new_vdb: |
| writemsg(_("!!! Initializing COUNTER to " \ |
| "value of %d\n") % max_counter, noiselevel=-1) |
| |
| return max_counter + 1 |
| |
| def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None): |
| """ |
| This method will grab the next COUNTER value and record it back |
| to the global file. Returns new counter value. |
| |
| @param myroot: ignored, self._eroot is used instead |
| """ |
| myroot = None |
| counter = self.get_counter_tick_core(mycpv=mycpv) - 1 |
| if incrementing: |
| #increment counter |
| counter += 1 |
| # use same permissions as config._init_dirs() |
| ensure_dirs(os.path.dirname(self._counter_path), |
| gid=portage_gid, mode=0o2750, mask=0o2) |
| # update new global counter file |
| write_atomic(self._counter_path, str(counter)) |
| return counter |
| |
| def _dblink(self, cpv): |
| category, pf = catsplit(cpv) |
| return dblink(category, pf, settings=self.settings, |
| vartree=self.vartree, treetype="vartree") |
| |
| def removeFromContents(self, pkg, paths, relative_paths=True): |
| """ |
| @param pkg: cpv for an installed package |
| @type pkg: string |
| @param paths: paths of files to remove from contents |
| @type paths: iterable |
| @param relative_paths: if True, the given paths are interpreted as |
| relative to ROOT; otherwise the ROOT prefix is stripped from them |
| @type relative_paths: bool |
| """ |
| if not hasattr(pkg, "getcontents"): |
| pkg = self._dblink(pkg) |
| root = self.settings['ROOT'] |
| root_len = len(root) - 1 |
| new_contents = pkg.getcontents().copy() |
| removed = 0 |
| |
| for filename in paths: |
| filename = _unicode_decode(filename, |
| encoding=_encodings['content'], errors='strict') |
| filename = normalize_path(filename) |
| if relative_paths: |
| relative_filename = filename |
| else: |
| relative_filename = filename[root_len:] |
| contents_key = pkg._match_contents(relative_filename) |
| if contents_key: |
| del new_contents[contents_key] |
| removed += 1 |
| |
| if removed: |
| self._bump_mtime(pkg.mycpv) |
| f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS")) |
| write_contents(new_contents, root, f) |
| f.close() |
| self._bump_mtime(pkg.mycpv) |
| pkg._clear_contents_cache() |
| |
| class _owners_cache(object): |
| """ |
| This class maintains a hash table that serves to index package |
| contents by mapping the basename of a file to a list of possible |
| packages that own it. This is used to optimize owner lookups |
| by narrowing the search down to a smaller number of packages. |
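| |
| The index is stored in the aux cache as |
| _aux_cache["owners"]["base_names"][_hash_str(basename)] -> {pkg_hash: None}, |
| where pkg_hash is the (cpv, COUNTER, mtime) tuple built by _hash_pkg(), |
| so stale entries can be detected after a package is reinstalled. |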
| """ |
| try: |
| from hashlib import md5 as _new_hash |
| except ImportError: |
| from md5 import new as _new_hash |
| |
| _hash_bits = 16 |
| _hex_chars = int(_hash_bits / 4) |
| |
| def __init__(self, vardb): |
| self._vardb = vardb |
| |
| def add(self, cpv): |
| eroot_len = len(self._vardb._eroot) |
| contents = self._vardb._dblink(cpv).getcontents() |
| pkg_hash = self._hash_pkg(cpv) |
| if not contents: |
| # Empty path is a code used to represent empty contents. |
| self._add_path("", pkg_hash) |
| |
| for x in contents: |
| self._add_path(x[eroot_len:], pkg_hash) |
| |
| self._vardb._aux_cache["modified"].add(cpv) |
| |
| def _add_path(self, path, pkg_hash): |
| """ |
| Empty path is a code that represents empty contents. |
| """ |
| if path: |
| name = os.path.basename(path.rstrip(os.path.sep)) |
| if not name: |
| return |
| else: |
| name = path |
| name_hash = self._hash_str(name) |
| base_names = self._vardb._aux_cache["owners"]["base_names"] |
| pkgs = base_names.get(name_hash) |
| if pkgs is None: |
| pkgs = {} |
| base_names[name_hash] = pkgs |
| pkgs[pkg_hash] = None |
| |
| def _hash_str(self, s): |
| h = self._new_hash() |
| # Always use a constant utf_8 encoding here, since |
| # the "default" encoding can change. |
| h.update(_unicode_encode(s, |
| encoding=_encodings['repo.content'], |
| errors='backslashreplace')) |
| h = h.hexdigest() |
| h = h[-self._hex_chars:] |
| h = int(h, 16) |
| return h |
| |
| def _hash_pkg(self, cpv): |
| counter, mtime = self._vardb.aux_get( |
| cpv, ["COUNTER", "_mtime_"]) |
| try: |
| counter = int(counter) |
| except ValueError: |
| counter = 0 |
| return (cpv, counter, mtime) |
| |
| class _owners_db(object): |
| |
| def __init__(self, vardb): |
| self._vardb = vardb |
| |
| def populate(self): |
| self._populate() |
| |
| def _populate(self): |
| owners_cache = vardbapi._owners_cache(self._vardb) |
| cached_hashes = set() |
| base_names = self._vardb._aux_cache["owners"]["base_names"] |
| |
| # Take inventory of all cached package hashes. |
| for name, hash_values in list(base_names.items()): |
| if not isinstance(hash_values, dict): |
| del base_names[name] |
| continue |
| cached_hashes.update(hash_values) |
| |
| # Create sets of valid package hashes and uncached packages. |
| uncached_pkgs = set() |
| hash_pkg = owners_cache._hash_pkg |
| valid_pkg_hashes = set() |
| for cpv in self._vardb.cpv_all(): |
| hash_value = hash_pkg(cpv) |
| valid_pkg_hashes.add(hash_value) |
| if hash_value not in cached_hashes: |
| uncached_pkgs.add(cpv) |
| |
| # Cache any missing packages. |
| for cpv in uncached_pkgs: |
| owners_cache.add(cpv) |
| |
| # Delete any stale cache. |
| stale_hashes = cached_hashes.difference(valid_pkg_hashes) |
| if stale_hashes: |
| for base_name_hash, bucket in list(base_names.items()): |
| for hash_value in stale_hashes.intersection(bucket): |
| del bucket[hash_value] |
| if not bucket: |
| del base_names[base_name_hash] |
| |
| return owners_cache |
| |
| def get_owners(self, path_iter): |
| """ |
| @return: the owners as a dblink -> set(files) mapping. |
| """ |
| owners = {} |
| for owner, f in self.iter_owners(path_iter): |
| owned_files = owners.get(owner) |
| if owned_files is None: |
| owned_files = set() |
| owners[owner] = owned_files |
| owned_files.add(f) |
| return owners |
| |
| def getFileOwnerMap(self, path_iter): |
| owners = self.get_owners(path_iter) |
| file_owners = {} |
| for pkg_dblink, files in owners.items(): |
| for f in files: |
| owner_set = file_owners.get(f) |
| if owner_set is None: |
| owner_set = set() |
| file_owners[f] = owner_set |
| owner_set.add(pkg_dblink) |
| return file_owners |
| |
| def iter_owners(self, path_iter): |
| """ |
| Iterate over tuples of (dblink, path). To avoid holding resources |
| for too long, they are only allocated for the duration of a given |
| iter_owners() call. Therefore, to maximize reuse of resources when |
| searching for multiple files, it's best to search for them all in a |
| single call. |
| """ |
| |
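| # Strategy: the basename index is consulted first and each candidate is |
| # verified against its CONTENTS; if more than 20 dblink instances end up |
| # cached, the search falls back to _iter_owners_low_mem(), which trades |
| # speed for a fixed memory footprint. |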
| if not isinstance(path_iter, list): |
| path_iter = list(path_iter) |
| owners_cache = self._populate() |
| vardb = self._vardb |
| root = vardb._eroot |
| hash_pkg = owners_cache._hash_pkg |
| hash_str = owners_cache._hash_str |
| base_names = self._vardb._aux_cache["owners"]["base_names"] |
| |
| dblink_cache = {} |
| |
| def dblink(cpv): |
| x = dblink_cache.get(cpv) |
| if x is None: |
| if len(dblink_cache) > 20: |
| # Ensure that we don't run out of memory. |
| raise StopIteration() |
| x = self._vardb._dblink(cpv) |
| dblink_cache[cpv] = x |
| return x |
| |
| while path_iter: |
| |
| path = path_iter.pop() |
| is_basename = os.sep != path[:1] |
| if is_basename: |
| name = path |
| else: |
| name = os.path.basename(path.rstrip(os.path.sep)) |
| |
| if not name: |
| continue |
| |
| name_hash = hash_str(name) |
| pkgs = base_names.get(name_hash) |
| owners = [] |
| if pkgs is not None: |
| try: |
| for hash_value in pkgs: |
| if not isinstance(hash_value, tuple) or \ |
| len(hash_value) != 3: |
| continue |
| cpv, counter, mtime = hash_value |
| if not isinstance(cpv, basestring): |
| continue |
| try: |
| current_hash = hash_pkg(cpv) |
| except KeyError: |
| continue |
| |
| if current_hash != hash_value: |
| continue |
| |
| if is_basename: |
| for p in dblink(cpv).getcontents(): |
| if os.path.basename(p) == name: |
| owners.append((cpv, p[len(root):])) |
| else: |
| if dblink(cpv).isowner(path): |
| owners.append((cpv, path)) |
| |
| except StopIteration: |
| path_iter.append(path) |
| del owners[:] |
| dblink_cache.clear() |
| gc.collect() |
| for x in self._iter_owners_low_mem(path_iter): |
| yield x |
| return |
| else: |
| for cpv, p in owners: |
| yield (dblink(cpv), p) |
| |
| def _iter_owners_low_mem(self, path_list): |
| """ |
| This implementation makes a short-lived dblink instance (and |
| parses CONTENTS) for every single installed package. This is |
| slower but uses less memory than the method which uses the |
| basename cache. |
| """ |
| |
| if not path_list: |
| return |
| |
| path_info_list = [] |
| for path in path_list: |
| is_basename = os.sep != path[:1] |
| if is_basename: |
| name = path |
| else: |
| name = os.path.basename(path.rstrip(os.path.sep)) |
| path_info_list.append((path, name, is_basename)) |
| |
| root = self._vardb._eroot |
| for cpv in self._vardb.cpv_all(): |
| dblnk = self._vardb._dblink(cpv) |
| |
| for path, name, is_basename in path_info_list: |
| if is_basename: |
| for p in dblnk.getcontents(): |
| if os.path.basename(p) == name: |
| yield dblnk, p[len(root):] |
| else: |
| if dblnk.isowner(path): |
| yield dblnk, path |
| |
| class vartree(object): |
| "this tree will scan a var/db/pkg database located at root (passed to init)" |
| def __init__(self, root=None, virtual=None, categories=None, |
| settings=None): |
| |
| if settings is None: |
| settings = portage.settings |
| self.root = settings['ROOT'] |
| |
| if root is not None and root != self.root: |
| warnings.warn("The 'root' parameter of the " + \ |
| "portage.dbapi.vartree.vartree" + \ |
| " constructor is now unused. Use " + \ |
| "settings['ROOT'] instead.", |
| DeprecationWarning, stacklevel=2) |
| |
| self.settings = settings |
| self.dbapi = vardbapi(settings=settings, vartree=self) |
| self.populated = 1 |
| |
| def getpath(self, mykey, filename=None): |
| return self.dbapi.getpath(mykey, filename=filename) |
| |
| def zap(self, mycpv): |
| return |
| |
| def inject(self, mycpv): |
| return |
| |
| def get_provide(self, mycpv): |
| myprovides = [] |
| mylines = None |
| try: |
| mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"]) |
| if mylines: |
| myuse = myuse.split() |
| mylines = use_reduce(mylines, uselist=myuse, flat=True) |
| for myprovide in mylines: |
| mys = catpkgsplit(myprovide) |
| if not mys: |
| mys = myprovide.split("/") |
| myprovides += [mys[0] + "/" + mys[1]] |
| return myprovides |
| except SystemExit as e: |
| raise |
| except Exception as e: |
| mydir = self.dbapi.getpath(mycpv) |
| writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir, |
| noiselevel=-1) |
| if mylines: |
| writemsg(_("Possibly Invalid: '%s'\n") % str(mylines), |
| noiselevel=-1) |
| writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1) |
| return [] |
| |
| def get_all_provides(self): |
| myprovides = {} |
| for node in self.getallcpv(): |
| for mykey in self.get_provide(node): |
| if mykey in myprovides: |
| myprovides[mykey] += [node] |
| else: |
| myprovides[mykey] = [node] |
| return myprovides |
| |
| def dep_bestmatch(self, mydep, use_cache=1): |
| "compatibility method -- all matches, not just visible ones" |
| #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi)) |
| mymatch = best(self.dbapi.match( |
| dep_expand(mydep, mydb=self.dbapi, settings=self.settings), |
| use_cache=use_cache)) |
| if mymatch is None: |
| return "" |
| else: |
| return mymatch |
| |
| def dep_match(self, mydep, use_cache=1): |
| "compatibility method -- we want to see all matches, not just visible ones" |
| #mymatch = match(mydep,self.dbapi) |
| mymatch = self.dbapi.match(mydep, use_cache=use_cache) |
| if mymatch is None: |
| return [] |
| else: |
| return mymatch |
| |
| def exists_specific(self, cpv): |
| return self.dbapi.cpv_exists(cpv) |
| |
| def getallcpv(self): |
| """temporary function, probably to be renamed --- Gets a list of all |
| category/package-versions installed on the system.""" |
| return self.dbapi.cpv_all() |
| |
| def getallnodes(self): |
| """new behavior: these are all *unmasked* nodes. There may or may not be available |
| masked packages for the nodes in this list.""" |
| return self.dbapi.cp_all() |
| |
| def getebuildpath(self, fullpackage): |
| cat, package = catsplit(fullpackage) |
| return self.getpath(fullpackage, filename=package+".ebuild") |
| |
| def getslot(self, mycatpkg): |
| "Get a slot for a catpkg; assume it exists." |
| try: |
| return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0] |
| except KeyError: |
| return "" |
| |
| def populate(self): |
| self.populated=1 |
| |
| class dblink(object): |
| """ |
| This class provides an interface to the installed package database. |
| At present this is implemented as a text backend in /var/db/pkg. |
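| Each installed package has a directory at |
| ${EROOT}/var/db/pkg/<category>/<pkg>-<version>/ containing flat files |
| such as CONTENTS, COUNTER, SLOT, EAPI, PF and the saved *.ebuild. |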
| """ |
| |
| import re |
| _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)') |
| |
| _contents_re = re.compile(r'^(' + \ |
| r'(?P<dir>(dev|dir|fif) (.+))|' + \ |
| r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \ |
| r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \ |
| r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \ |
| r')$' |
| ) |
| |
| def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None, |
| vartree=None, blockers=None, scheduler=None, pipe=None): |
| """ |
| Creates a DBlink object for a given CPV. |
| The given CPV need not already be present in the database. |
| |
| @param cat: Category |
| @type cat: String |
| @param pkg: Package (PV) |
| @type pkg: String |
| @param myroot: ignored, settings['ROOT'] is used instead |
| @type myroot: String (Path) |
| @param settings: Typically portage.settings |
| @type settings: portage.config |
| @param treetype: one of ['porttree','bintree','vartree'] |
| @type treetype: String |
| @param vartree: an instance of vartree corresponding to myroot. |
| @type vartree: vartree |
| """ |
| |
| if settings is None: |
| raise TypeError("settings argument is required") |
| |
| mysettings = settings |
| myroot = settings['ROOT'] |
| self.cat = cat |
| self.pkg = pkg |
| self.mycpv = self.cat + "/" + self.pkg |
| self.mysplit = list(catpkgsplit(self.mycpv)[1:]) |
| self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0]) |
| self.treetype = treetype |
| if vartree is None: |
| vartree = portage.db[myroot]["vartree"] |
| self.vartree = vartree |
| self._blockers = blockers |
| self._scheduler = scheduler |
| |
| # WARNING: EROOT support is experimental and may be incomplete |
| # for cases in which EPREFIX is non-empty. |
| self._eroot = mysettings['EROOT'] |
| self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH)) |
| self.dbcatdir = self.dbroot+"/"+cat |
| self.dbpkgdir = self.dbcatdir+"/"+pkg |
| self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg |
| self.dbdir = self.dbpkgdir |
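| # dbpkgdir is the live database entry; dbtmpdir ("-MERGING-<pkg>") is a |
| # staging location used while a merge is in progress, which is why names |
| # beginning with "-MERGING-" are excluded from vardbapi listings. |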
| |
| self._lock_vdb = None |
| |
| self.settings = mysettings |
| self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1" |
| |
| self.myroot=myroot |
| self._installed_instance = None |
| self.contentscache = None |
| self._contents_inodes = None |
| self._contents_basenames = None |
| self._linkmap_broken = False |
| self._md5_merge_map = {} |
| self._hash_key = (self.myroot, self.mycpv) |
| self._protect_obj = None |
| self._pipe = pipe |
| |
| def __hash__(self): |
| return hash(self._hash_key) |
| |
| def __eq__(self, other): |
| return isinstance(other, dblink) and \ |
| self._hash_key == other._hash_key |
| |
| def _get_protect_obj(self): |
| |
| if self._protect_obj is None: |
| self._protect_obj = ConfigProtect(self._eroot, |
| portage.util.shlex_split( |
| self.settings.get("CONFIG_PROTECT", "")), |
| portage.util.shlex_split( |
| self.settings.get("CONFIG_PROTECT_MASK", ""))) |
| |
| return self._protect_obj |
| |
| def isprotected(self, obj): |
| return self._get_protect_obj().isprotected(obj) |
| |
| def updateprotect(self): |
| self._get_protect_obj().updateprotect() |
| |
| def lockdb(self): |
| if self._lock_vdb: |
| raise AssertionError("Lock already held.") |
| # At least the parent needs to exist for the lock file. |
| ensure_dirs(self.dbroot) |
| if os.environ.get("PORTAGE_LOCKS") != "false": |
| if self._scheduler is None: |
| self._lock_vdb = lockdir(self.dbroot) |
| else: |
| async_lock = AsynchronousLock(path=self.dbroot, |
| scheduler=self._scheduler) |
| async_lock.start() |
| async_lock.wait() |
| self._lock_vdb = async_lock |
| |
| def unlockdb(self): |
| if self._lock_vdb is not None: |
| if isinstance(self._lock_vdb, AsynchronousLock): |
| self._lock_vdb.unlock() |
| else: |
| unlockdir(self._lock_vdb) |
| self._lock_vdb = None |
| |
| def getpath(self): |
| "return path to location of db information (for >>> informational display)" |
| return self.dbdir |
| |
| def exists(self): |
| "does the db entry exist? boolean." |
| return os.path.exists(self.dbdir) |
| |
| def delete(self): |
| """ |
| Remove this entry from the database |
| """ |
| if not os.path.exists(self.dbdir): |
| return |
| |
| # Check validity of self.dbdir before attempting to remove it. |
| if not self.dbdir.startswith(self.dbroot): |
| writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \ |
| self.dbdir, noiselevel=-1) |
| return |
| |
| shutil.rmtree(self.dbdir) |
| # If empty, remove parent category directory. |
| try: |
| os.rmdir(os.path.dirname(self.dbdir)) |
| except OSError: |
| pass |
| self.vartree.dbapi._remove(self) |
| |
| def clearcontents(self): |
| """ |
| For a given db entry (self), erase the CONTENTS values. |
| """ |
| if os.path.exists(self.dbdir+"/CONTENTS"): |
| os.unlink(self.dbdir+"/CONTENTS") |
| |
| def _clear_contents_cache(self): |
| self.contentscache = None |
| self._contents_inodes = None |
| self._contents_basenames = None |
| |
| def getcontents(self): |
| """ |
| Get the installed files of a given package (aka what that package installed) |
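| |
| CONTENTS entries look like the following (paths are illustrative): |
| dir /usr/share/foo |
| obj /usr/share/foo/bar.txt <md5sum> <mtime> |
| sym /usr/bin/foo -> ../share/foo/bar.txt <mtime> |
| Entries of type "dev" and "fif" carry only a path, like "dir". |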
| """ |
| contents_file = os.path.join(self.dbdir, "CONTENTS") |
| if self.contentscache is not None: |
| return self.contentscache |
| pkgfiles = {} |
| try: |
| myc = codecs.open(_unicode_encode(contents_file, |
| encoding=_encodings['fs'], errors='strict'), |
| mode='r', encoding=_encodings['repo.content'], |
| errors='replace') |
| except EnvironmentError as e: |
| if e.errno != errno.ENOENT: |
| raise |
| del e |
| self.contentscache = pkgfiles |
| return pkgfiles |
| mylines = myc.readlines() |
| myc.close() |
| null_byte = "\0" |
| normalize_needed = self._normalize_needed |
| contents_re = self._contents_re |
| obj_index = contents_re.groupindex['obj'] |
| dir_index = contents_re.groupindex['dir'] |
| sym_index = contents_re.groupindex['sym'] |
| # The old symlink format may exist on systems that have packages |
| # which were installed many years ago (see bug #351814). |
| oldsym_index = contents_re.groupindex['oldsym'] |
| # CONTENTS files already contain EPREFIX |
| myroot = self.settings['ROOT'] |
| if myroot == os.path.sep: |
| myroot = None |
| # used to generate parent dir entries |
| dir_entry = (_unicode_decode("dir"),) |
| eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1 |
| pos = 0 |
| errors = [] |
| for pos, line in enumerate(mylines): |
| if null_byte in line: |
| # Null bytes are a common indication of corruption. |
| errors.append((pos + 1, _("Null byte found in CONTENTS entry"))) |
| continue |
| line = line.rstrip("\n") |
| m = contents_re.match(line) |
| if m is None: |
| errors.append((pos + 1, _("Unrecognized CONTENTS entry"))) |
| continue |
| |
| if m.group(obj_index) is not None: |
| base = obj_index |
| #format: type, mtime, md5sum |
| data = (m.group(base+1), m.group(base+4), m.group(base+3)) |
| elif m.group(dir_index) is not None: |
| base = dir_index |
| #format: type |
| data = (m.group(base+1),) |
| elif m.group(sym_index) is not None: |
| base = sym_index |
| if m.group(oldsym_index) is None: |
| mtime = m.group(base+5) |
| else: |
| mtime = m.group(base+8) |
| #format: type, mtime, dest |
| data = (m.group(base+1), mtime, m.group(base+3)) |
| else: |
| # This won't happen as long as the regular expression |
| # is written to only match valid entries. |
| raise AssertionError(_("required group not found " + \ |
| "in CONTENTS entry: '%s'") % line) |
| |
| path = m.group(base+2) |
| if normalize_needed.search(path) is not None: |
| path = normalize_path(path) |
| if not path.startswith(os.path.sep): |
| path = os.path.sep + path |
| |
| if myroot is not None: |
| path = os.path.join(myroot, path.lstrip(os.path.sep)) |
| |
| # Implicitly add parent directories, since we can't necessarily |
| # assume that they are explicitly listed in CONTENTS, and it's |
| # useful for callers if they can rely on parent directory entries |
| # being generated here (crucial for things like dblink.isowner()). |
| path_split = path.split(os.sep) |
| path_split.pop() |
| while len(path_split) > eroot_split_len: |
| parent = os.sep.join(path_split) |
| if parent in pkgfiles: |
| break |
| pkgfiles[parent] = dir_entry |
| path_split.pop() |
| |
| pkgfiles[path] = data |
| |
| if errors: |
| writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1) |
| for pos, e in errors: |
| writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1) |
| self.contentscache = pkgfiles |
| return pkgfiles |
| |
| def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True, |
| ldpath_mtimes=None, others_in_slot=None): |
| """ |
| Calls prerm |
| Unmerges a given package (CPV) |
| Calls postrm |
| Calls cleanrm |
| Calls env_update |
| |
| @param pkgfiles: files to unmerge (generally self.getcontents() ) |
| @type pkgfiles: Dictionary |
| @param trimworld: Unused |
| @type trimworld: Boolean |
| @param cleanup: cleanup to pass to doebuild (see doebuild) |
| @type cleanup: Boolean |
| @param ldpath_mtimes: mtimes to pass to env_update (see env_update) |
| @type ldpath_mtimes: Dictionary |
| @param others_in_slot: all dblink instances in this slot, excluding self |
| @type others_in_slot: list |
| @rtype: Integer |
| @returns: |
| 1. os.EX_OK if everything went well. |
| 2. return code of the failed phase (for prerm, postrm, cleanrm) |
| |
| Notes: |
| The caller must ensure that lockdb() and unlockdb() are called |
| before and after this method. |
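| |
| Outline: runs pkg_prerm, removes the files listed in CONTENTS, handles |
| preserved libraries, runs pkg_postrm, then runs the cleanrm phase and |
| env_update (unless disabled via FEATURES=no-env-update). |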
| """ |
| |
| if trimworld is not None: |
| warnings.warn("The trimworld parameter of the " + \ |
| "portage.dbapi.vartree.dblink.unmerge()" + \ |
| " method is now unused.", |
| DeprecationWarning, stacklevel=2) |
| |
| background = False |
| if self._scheduler is None: |
| # We create a scheduler instance and use it to |
| # log unmerge output separately from merge output. |
| self._scheduler = PollScheduler().sched_iface |
| if self.settings.get("PORTAGE_BACKGROUND") == "subprocess": |
| if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1": |
| self.settings["PORTAGE_BACKGROUND"] = "1" |
| self.settings.backup_changes("PORTAGE_BACKGROUND") |
| background = True |
| else: |
| self.settings.pop("PORTAGE_BACKGROUND", None) |
| elif self.settings.get("PORTAGE_BACKGROUND") == "1": |
| background = True |
| |
| self.vartree.dbapi._bump_mtime(self.mycpv) |
| showMessage = self._display_merge |
| if self.vartree.dbapi._categories is not None: |
| self.vartree.dbapi._categories = None |
| # When others_in_slot is supplied, the security check has already been |
| # done for this slot, so it shouldn't be repeated until the next |
| # replacement or unmerge operation. |
| if others_in_slot is None: |
| slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0] |
| slot_matches = self.vartree.dbapi.match( |
| "%s:%s" % (portage.cpv_getkey(self.mycpv), slot)) |
| others_in_slot = [] |
| for cur_cpv in slot_matches: |
| if cur_cpv == self.mycpv: |
| continue |
| others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1], |
| settings=self.settings, vartree=self.vartree, |
| treetype="vartree", pipe=self._pipe)) |
| |
| retval = self._security_check([self] + others_in_slot) |
| if retval: |
| return retval |
| |
| contents = self.getcontents() |
| # Now, don't assume that the name of the ebuild is the same as the |
| # name of the dir; the package may have been moved. |
| myebuildpath = None |
| failures = 0 |
| ebuild_phase = "prerm" |
| log_path = None |
| mystuff = os.listdir(self.dbdir) |
| for x in mystuff: |
| if x.endswith(".ebuild"): |
| myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild") |
| if x[:-7] != self.pkg: |
| # Clean up after vardbapi.move_ent() breakage in |
| # portage versions before 2.1.2 |
| os.rename(os.path.join(self.dbdir, x), myebuildpath) |
| write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n") |
| break |
| |
| self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi) |
| if myebuildpath: |
| try: |
| doebuild_environment(myebuildpath, "prerm", |
| settings=self.settings, db=self.vartree.dbapi) |
| except UnsupportedAPIException as e: |
| failures += 1 |
| # Sometimes this happens due to corruption of the EAPI file. |
| showMessage(_("!!! FAILED prerm: %s\n") % \ |
| os.path.join(self.dbdir, "EAPI"), |
| level=logging.ERROR, noiselevel=-1) |
| showMessage(_unicode_decode("%s\n") % (e,), |
| level=logging.ERROR, noiselevel=-1) |
| myebuildpath = None |
| |
| builddir_lock = None |
| scheduler = self._scheduler |
| retval = os.EX_OK |
| try: |
| builddir_lock = EbuildBuildDir( |
| scheduler=scheduler, |
| settings=self.settings) |
| builddir_lock.lock() |
| prepare_build_dirs(settings=self.settings, cleanup=True) |
| log_path = self.settings.get("PORTAGE_LOG_FILE") |
| if myebuildpath: |
| phase = EbuildPhase(background=background, |
| phase=ebuild_phase, scheduler=scheduler, |
| settings=self.settings) |
| phase.start() |
| retval = phase.wait() |
| |
| # XXX: Decide how to handle failures here. |
| if retval != os.EX_OK: |
| failures += 1 |
| showMessage(_("!!! FAILED prerm: %s\n") % retval, |
| level=logging.ERROR, noiselevel=-1) |
| |
| self._unmerge_pkgfiles(pkgfiles, others_in_slot) |
| self._clear_contents_cache() |
| |
| # Remove the registration of preserved libs for this pkg instance |
| plib_registry = self.vartree.dbapi._plib_registry |
| if plib_registry is None: |
| # preserve-libs is entirely disabled |
| pass |
| else: |
| plib_registry.unregister(self.mycpv, self.settings["SLOT"], |
| self.vartree.dbapi.cpv_counter(self.mycpv)) |
| |
| if myebuildpath: |
| ebuild_phase = "postrm" |
| phase = EbuildPhase(background=background, |
| phase=ebuild_phase, scheduler=scheduler, |
| settings=self.settings) |
| phase.start() |
| retval = phase.wait() |
| |
| # XXX: Decide how to handle failures here. |
| if retval != os.EX_OK: |
| failures += 1 |
| showMessage(_("!!! FAILED postrm: %s\n") % retval, |
| level=logging.ERROR, noiselevel=-1) |
| |
| # Skip this if another package in the same slot has just been |
| # merged on top of this package, since the other package has |
| # already called LinkageMap.rebuild() and passed its NEEDED file |
| # in as an argument. |
| if not others_in_slot: |
| self._linkmap_rebuild(exclude_pkgs=(self.mycpv,)) |
| |
| # remove preserved libraries that don't have any consumers left |
| cpv_lib_map = self._find_unused_preserved_libs() |
| if cpv_lib_map: |
| self._remove_preserved_libs(cpv_lib_map) |
| for cpv, removed in cpv_lib_map.items(): |
| if not self.vartree.dbapi.cpv_exists(cpv): |
| for dblnk in others_in_slot: |
| if dblnk.mycpv == cpv: |
| # This one just got merged so it doesn't |
| # register with cpv_exists() yet. |
| self.vartree.dbapi.removeFromContents( |
| dblnk, removed) |
| break |
| continue |
| self.vartree.dbapi.removeFromContents(cpv, removed) |
| else: |
| # Prune any preserved libs that may have |
| # been unmerged with this package. |
| if plib_registry is None: |
| # preserve-libs is entirely disabled |
| pass |
| else: |
| plib_registry.pruneNonExisting() |
| |
| finally: |
| self.vartree.dbapi._bump_mtime(self.mycpv) |
| if builddir_lock: |
| try: |
| if myebuildpath: |
| if retval != os.EX_OK: |
| msg_lines = [] |
| msg = _("The '%(ebuild_phase)s' " |
| "phase of the '%(cpv)s' package " |
| "has failed with exit value %(retval)s.") % \ |
| {"ebuild_phase":ebuild_phase, "cpv":self.mycpv, |
| "retval":retval} |
| from textwrap import wrap |
| msg_lines.extend(wrap(msg, 72)) |
| msg_lines.append("") |
| |
| ebuild_name = os.path.basename(myebuildpath) |
| ebuild_dir = os.path.dirname(myebuildpath) |
| msg = _("The problem occurred while executing " |
| "the ebuild file named '%(ebuild_name)s' " |
| "located in the '%(ebuild_dir)s' directory. " |
| "If necessary, manually remove " |
| "the environment.bz2 file and/or the " |
| "ebuild file located in that directory.") % \ |
| {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir} |
| msg_lines.extend(wrap(msg, 72)) |
| msg_lines.append("") |
| |
| msg = _("Removal " |
| "of the environment.bz2 file is " |
| "preferred since it may allow the " |
| "removal phases to execute successfully. " |
| "The ebuild will be " |
| "sourced and the eclasses " |
| "from the current portage tree will be used " |
| "when necessary. Removal of " |
| "the ebuild file will cause the " |
| "pkg_prerm() and pkg_postrm() removal " |
| "phases to be skipped entirely.") |
| msg_lines.extend(wrap(msg, 72)) |
| |
| self._eerror(ebuild_phase, msg_lines) |
| |
| self._elog_process(phasefilter=("prerm", "postrm")) |
| |
| if retval == os.EX_OK and builddir_lock is not None: |
| # myebuildpath might be None, so ensure |
| # it has a sane value for the clean phase, |
| # even though it won't really be sourced. |
| myebuildpath = os.path.join(self.dbdir, |
| self.pkg + ".ebuild") |
| doebuild_environment(myebuildpath, "cleanrm", |
| settings=self.settings, db=self.vartree.dbapi) |
| phase = EbuildPhase(background=background, |
| phase="cleanrm", scheduler=scheduler, |
| settings=self.settings) |
| phase.start() |
| retval = phase.wait() |
| finally: |
| if builddir_lock is not None: |
| builddir_lock.unlock() |
| |
| if log_path is not None: |
| |
| if not failures and 'unmerge-logs' not in self.settings.features: |
| try: |
| os.unlink(log_path) |
| except OSError: |
| pass |
| |
| try: |
| st = os.stat(log_path) |
| except OSError: |
| pass |
| else: |
| if st.st_size == 0: |
| try: |
| os.unlink(log_path) |
| except OSError: |
| pass |
| |
| if log_path is not None and os.path.exists(log_path): |
| # Restore this since it gets lost somewhere above and it |
| # needs to be set for _display_merge() to be able to log. |
| # Note that the log isn't necessarily supposed to exist |
| # since if PORT_LOGDIR is unset then it's a temp file |
| # so it gets cleaned above. |
| self.settings["PORTAGE_LOG_FILE"] = log_path |
| else: |
| self.settings.pop("PORTAGE_LOG_FILE", None) |
| |
| if 'no-env-update' not in self.settings.features: |
| env_update(target_root=self.settings['ROOT'], |
| prev_mtimes=ldpath_mtimes, |
| contents=contents, env=self.settings.environ(), |
| writemsg_level=self._display_merge) |
| return os.EX_OK |
| |
| def _display_merge(self, msg, level=0, noiselevel=0): |
| if not self._verbose and noiselevel >= 0 and level < logging.WARN: |
| return |
| if self._scheduler is None: |
| writemsg_level(msg, level=level, noiselevel=noiselevel) |
| else: |
| log_path = self.settings.get("PORTAGE_LOG_FILE") |
| background = self.settings.get("PORTAGE_BACKGROUND") == "1" |
| |
| if log_path is None: |
| if not (background and level < logging.WARN): |
| writemsg_level(msg, level=level, noiselevel=noiselevel) |
| else: |
| self._scheduler.output(msg, |
| background=background, log_path=log_path) |
| |
| def _unmerge_pkgfiles(self, pkgfiles, others_in_slot): |
| """ |
| |
| Unmerges the contents of a package from the liveFS |
| Removes the VDB entry for self |
| |
| @param pkgfiles: typically self.getcontents() |
| @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] } |
| @param others_in_slot: all dblink instances in this slot, excluding self |
| @type others_in_slot: list |
| @rtype: None |
| """ |
| |
| os = _os_merge |
| perf_md5 = perform_md5 |
| showMessage = self._display_merge |
| |
| if not pkgfiles: |
| showMessage(_("No package files given... Grabbing a set.\n")) |
| pkgfiles = self.getcontents() |
| |
| if others_in_slot is None: |
| others_in_slot = [] |
| slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0] |
| slot_matches = self.vartree.dbapi.match( |
| "%s:%s" % (portage.cpv_getkey(self.mycpv), slot)) |
| for cur_cpv in slot_matches: |
| if cur_cpv == self.mycpv: |
| continue |
| others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1], |
| settings=self.settings, |
| vartree=self.vartree, treetype="vartree", pipe=self._pipe)) |
| |
| dest_root = self._eroot |
| dest_root_len = len(dest_root) - 1 |
| |
| conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE) |
| cfgfiledict = grabdict(conf_mem_file) |
| stale_confmem = [] |
| |
| unmerge_orphans = "unmerge-orphans" in self.settings.features |
| calc_prelink = "prelink-checksums" in self.settings.features |
| |
| if pkgfiles: |
| self.updateprotect() |
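| # Process CONTENTS paths in reverse-sorted order so that files
| # inside a directory are handled before the directory itself.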
| mykeys = list(pkgfiles) |
| mykeys.sort() |
| mykeys.reverse() |
| |
| #process symlinks second-to-last, directories last. |
| mydirs = set() |
| ignored_unlink_errnos = ( |
| errno.EBUSY, errno.ENOENT, |
| errno.ENOTDIR, errno.EISDIR) |
| ignored_rmdir_errnos = ( |
| errno.EEXIST, errno.ENOTEMPTY, |
| errno.EBUSY, errno.ENOENT, |
| errno.ENOTDIR, errno.EISDIR, |
| errno.EPERM) |
| modprotect = os.path.join(self._eroot, "lib/modules/") |
| |
| def unlink(file_name, lstatobj): |
| if bsd_chflags: |
| if lstatobj.st_flags != 0: |
| bsd_chflags.lchflags(file_name, 0) |
| parent_name = os.path.dirname(file_name) |
| # Use normal stat/chflags for the parent since we want to |
| # follow any symlinks to the real parent directory. |
| pflags = os.stat(parent_name).st_flags |
| if pflags != 0: |
| bsd_chflags.chflags(parent_name, 0) |
| try: |
| if not stat.S_ISLNK(lstatobj.st_mode): |
| # Remove permissions to ensure that any hardlinks to |
| # suid/sgid files are rendered harmless. |
| os.chmod(file_name, 0) |
| os.unlink(file_name) |
| except OSError as ose: |
| # If the chmod or unlink fails, you are in trouble. |
| # With Prefix this can be because the file is owned |
| # by someone else (a screwup by root?), on a normal |
| # system maybe filesystem corruption. In any case, |
| # if we backtrace and die here, we leave the system |
| # in a totally undefined state, hence we just bleed |
| # like hell and continue to hopefully finish all our |
| # administrative and pkg_postinst stuff. |
| self._eerror("postrm", |
| ["Could not chmod or unlink '%s': %s" % \ |
| (file_name, ose)]) |
| finally: |
| if bsd_chflags and pflags != 0: |
| # Restore the parent flags we saved before unlinking |
| bsd_chflags.chflags(parent_name, pflags) |
| |
| def show_unmerge(zing, desc, file_type, file_name): |
| showMessage("%s %s %s %s\n" % \ |
| (zing, desc.ljust(8), file_type, file_name)) |
| |
| unmerge_desc = {} |
| unmerge_desc["cfgpro"] = _("cfgpro") |
| unmerge_desc["replaced"] = _("replaced") |
| unmerge_desc["!dir"] = _("!dir") |
| unmerge_desc["!empty"] = _("!empty") |
| unmerge_desc["!fif"] = _("!fif") |
| unmerge_desc["!found"] = _("!found") |
| unmerge_desc["!md5"] = _("!md5") |
| unmerge_desc["!mtime"] = _("!mtime") |
| unmerge_desc["!obj"] = _("!obj") |
| unmerge_desc["!sym"] = _("!sym") |
| |
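| # ROOT is normalized with a trailing slash, so slicing an absolute
| # path with len(ROOT) - 1 yields a relative path that keeps its
| # leading os.sep.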
| real_root = self.settings['ROOT'] |
| real_root_len = len(real_root) - 1 |
| eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1 |
| |
| for i, objkey in enumerate(mykeys): |
| |
| obj = normalize_path(objkey) |
| if os is _os_merge: |
| try: |
| _unicode_encode(obj, |
| encoding=_encodings['merge'], errors='strict') |
| except UnicodeEncodeError: |
| # The package appears to have been merged with a |
| # different value of sys.getfilesystemencoding(), |
| # so fall back to utf_8 if appropriate. |
| try: |
| _unicode_encode(obj, |
| encoding=_encodings['fs'], errors='strict') |
| except UnicodeEncodeError: |
| pass |
| else: |
| os = portage.os |
| perf_md5 = portage.checksum.perform_md5 |
| |
| file_data = pkgfiles[objkey] |
| file_type = file_data[0] |
| statobj = None |
| try: |
| statobj = os.stat(obj) |
| except OSError: |
| pass |
| lstatobj = None |
| try: |
| lstatobj = os.lstat(obj) |
| except (OSError, AttributeError): |
| pass |
| islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode) |
| if lstatobj is None: |
| show_unmerge("---", unmerge_desc["!found"], file_type, obj) |
| continue |
| # don't use EROOT, CONTENTS entries already contain EPREFIX |
| if obj.startswith(real_root): |
| relative_path = obj[real_root_len:] |
| is_owned = False |
| for dblnk in others_in_slot: |
| if dblnk.isowner(relative_path): |
| is_owned = True |
| break |
| if is_owned: |
| # A new instance of this package claims the file, so |
| # don't unmerge it. |
| show_unmerge("---", unmerge_desc["replaced"], file_type, obj) |
| continue |
| elif relative_path in cfgfiledict: |
| stale_confmem.append(relative_path) |
| # The check below protects modules from being unmerged, but we
| # don't protect modules from being overwritten if they are
| # upgraded. We effectively only want one half of the config
| # protection functionality for /lib/modules. For portage-ng both
| # capabilities should be independently configurable.
| # TODO: For rebuilds, re-parent previous modules to the new
| # installed instance (so they are not orphans). For normal
| # uninstall (not rebuild/reinstall), remove the modules along
| # with all other files (leave no orphans).
| if obj.startswith(modprotect): |
| show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj) |
| continue |
| |
| # Don't unlink symlinks to directories here since that can |
| # remove /lib and /usr/lib symlinks. |
| if unmerge_orphans and \ |
| lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \ |
| not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \ |
| not self.isprotected(obj): |
| try: |
| unlink(obj, lstatobj) |
| except EnvironmentError as e: |
| if e.errno not in ignored_unlink_errnos: |
| raise |
| del e |
| show_unmerge("<<<", "", file_type, obj) |
| continue |
| |
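| # For regular files and symlinks, CONTENTS records the mtime at
| # merge time; if it no longer matches, the file was modified after
| # the merge, so leave it in place.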
| lmtime = str(lstatobj[stat.ST_MTIME]) |
| if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]): |
| show_unmerge("---", unmerge_desc["!mtime"], file_type, obj) |
| continue |
| |
| if pkgfiles[objkey][0] == "dir": |
| if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode): |
| show_unmerge("---", unmerge_desc["!dir"], file_type, obj) |
| continue |
| mydirs.add(obj) |
| elif pkgfiles[objkey][0] == "sym": |
| if not islink: |
| show_unmerge("---", unmerge_desc["!sym"], file_type, obj) |
| continue |
| # Go ahead and unlink symlinks to directories here when |
| # they're actually recorded as symlinks in the contents. |
| # Normally, symlinks such as /lib -> lib64 are not recorded |
| # as symlinks in the contents of a package. If a package |
| # installs something into ${D}/lib/, it is recorded in the |
| # contents as a directory even if it happens to correspond |
| # to a symlink when it's merged to the live filesystem. |
| try: |
| unlink(obj, lstatobj) |
| show_unmerge("<<<", "", file_type, obj) |
| except (OSError, IOError) as e: |
| if e.errno not in ignored_unlink_errnos: |
| raise |
| del e |
| show_unmerge("!!!", "", file_type, obj) |
| elif pkgfiles[objkey][0] == "obj": |
| if statobj is None or not stat.S_ISREG(statobj.st_mode): |
| show_unmerge("---", unmerge_desc["!obj"], file_type, obj) |
| continue |
| mymd5 = None |
| try: |
| mymd5 = perf_md5(obj, calc_prelink=calc_prelink) |
| except FileNotFound as e: |
| # the file has disappeared between now and our stat call |
| show_unmerge("---", unmerge_desc["!obj"], file_type, obj) |
| continue |
| |
| # Compare in lower case for backwards compatibility, since older
| # db entries stored the MD5 digest in upper case.
| if mymd5 != pkgfiles[objkey][2].lower(): |
| show_unmerge("---", unmerge_desc["!md5"], file_type, obj) |
| continue |
| try: |
| unlink(obj, lstatobj) |
| except (OSError, IOError) as e: |
| if e.errno not in ignored_unlink_errnos: |
| raise |
| del e |
| show_unmerge("<<<", "", file_type, obj) |
| elif pkgfiles[objkey][0] == "fif": |
| if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]): |
| show_unmerge("---", unmerge_desc["!fif"], file_type, obj) |
| continue |
| show_unmerge("---", "", file_type, obj) |
| elif pkgfiles[objkey][0] == "dev": |
| show_unmerge("---", "", file_type, obj) |
| |
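| # Remove directories deepest-first so that children are removed
| # before their parents.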
| mydirs = sorted(mydirs) |
| mydirs.reverse() |
| |
| for obj in mydirs: |
| try: |
| if bsd_chflags: |
| lstatobj = os.lstat(obj) |
| if lstatobj.st_flags != 0: |
| bsd_chflags.lchflags(obj, 0) |
| parent_name = os.path.dirname(obj) |
| # Use normal stat/chflags for the parent since we want to |
| # follow any symlinks to the real parent directory. |
| pflags = os.stat(parent_name).st_flags |
| if pflags != 0: |
| bsd_chflags.chflags(parent_name, 0) |
| try: |
| os.rmdir(obj) |
| finally: |
| if bsd_chflags and pflags != 0: |
| # Restore the parent flags we saved before unlinking |
| bsd_chflags.chflags(parent_name, pflags) |
| show_unmerge("<<<", "", "dir", obj) |
| except EnvironmentError as e: |
| if e.errno not in ignored_rmdir_errnos: |
| raise |
| if e.errno != errno.ENOENT: |
| show_unmerge("---", unmerge_desc["!empty"], "dir", obj) |
| del e |
| |
| # Remove stale entries from config memory. |
| if stale_confmem: |
| for filename in stale_confmem: |
| del cfgfiledict[filename] |
| writedict(cfgfiledict, conf_mem_file) |
| |
| #remove self from vartree database so that our own virtual gets zapped if we're the last node |
| self.vartree.zap(self.mycpv) |
| |
| def isowner(self, filename, destroot=None): |
| """ |
| Check if a file belongs to this package. This may |
| result in a stat call for the parent directory of |
| every installed file, since the inode numbers are |
| used to work around the problem of ambiguous paths |
| caused by symlinked directories. The results of |
| stat calls are cached to optimize multiple calls |
| to this method. |
| |
| @param filename: path of the file to check; interpreted relative to
| ROOT, and a leading os.sep is accepted
| @type filename: String (Path)
| @param destroot: deprecated and unused; self.settings['EROOT'] is
| used instead
| @type destroot: String (Path)
| @rtype: Boolean |
| @returns: |
| 1. True if this package owns the file. |
| 2. False if this package does not own the file. |
| """ |
| |
| if destroot is not None and destroot != self._eroot: |
| warnings.warn("The second parameter of the " + \ |
| "portage.dbapi.vartree.dblink.isowner()" + \ |
| " is now unused. Instead " + \ |
| "self.settings['EROOT'] will be used.", |
| DeprecationWarning, stacklevel=2) |
| |
| return bool(self._match_contents(filename)) |
| |
| def _match_contents(self, filename, destroot=None): |
| """ |
| The matching contents entry is returned, which is useful |
| since the path may differ from the one given by the caller, |
| due to symlinks. |
| |
| @rtype: String |
| @return: the contents entry corresponding to the given path, or False |
| if the file is not owned by this package. |
| """ |
| |
| filename = _unicode_decode(filename, |
| encoding=_encodings['content'], errors='strict') |
| |
| if destroot is not None and destroot != self._eroot: |
| warnings.warn("The second parameter of the " + \ |
| "portage.dbapi.vartree.dblink._match_contents()" + \ |
| " is now unused. Instead " + \ |
| "self.settings['ROOT'] will be used.", |
| DeprecationWarning, stacklevel=2) |
| |
| # don't use EROOT here, image already contains EPREFIX |
| destroot = self.settings['ROOT'] |
| |
| # The given filename argument might have a different encoding than
| # the filenames contained in the contents, so use separate wrapped os
| # modules for each. The basename is more likely to contain non-ascii
| # characters than the directory path, so use os_filename_arg for all
| # operations involving the basename of the filename arg.
| os_filename_arg = _os_merge |
| os = _os_merge |
| |
| try: |
| _unicode_encode(filename, |
| encoding=_encodings['merge'], errors='strict') |
| except UnicodeEncodeError: |
| # The package appears to have been merged with a |
| # different value of sys.getfilesystemencoding(), |
| # so fall back to utf_8 if appropriate. |
| try: |
| _unicode_encode(filename, |
| encoding=_encodings['fs'], errors='strict') |
| except UnicodeEncodeError: |
| pass |
| else: |
| os_filename_arg = portage.os |
| |
| destfile = normalize_path( |
| os_filename_arg.path.join(destroot, |
| filename.lstrip(os_filename_arg.path.sep))) |
| |
| pkgfiles = self.getcontents() |
| if pkgfiles and destfile in pkgfiles: |
| return destfile |
| if pkgfiles: |
| basename = os_filename_arg.path.basename(destfile) |
| if self._contents_basenames is None: |
| |
| try: |
| for x in pkgfiles: |
| _unicode_encode(x, |
| encoding=_encodings['merge'], |
| errors='strict') |
| except UnicodeEncodeError: |
| # The package appears to have been merged with a |
| # different value of sys.getfilesystemencoding(), |
| # so fall back to utf_8 if appropriate. |
| try: |
| for x in pkgfiles: |
| _unicode_encode(x, |
| encoding=_encodings['fs'], |
| errors='strict') |
| except UnicodeEncodeError: |
| pass |
| else: |
| os = portage.os |
| |
| self._contents_basenames = set( |
| os.path.basename(x) for x in pkgfiles) |
| if basename not in self._contents_basenames: |
| # This is a shortcut that, in most cases, allows us to |
| # eliminate this package as an owner without the need |
| # to examine inode numbers of parent directories. |
| return False |
| |
| # Use stat rather than lstat since we want to follow |
| # any symlinks to the real parent directory. |
| parent_path = os_filename_arg.path.dirname(destfile) |
| try: |
| parent_stat = os_filename_arg.stat(parent_path) |
| except EnvironmentError as e: |
| if e.errno != errno.ENOENT: |
| raise |
| del e |
| return False |
| if self._contents_inodes is None: |
| |
| if os is _os_merge: |
| try: |
| for x in pkgfiles: |
| _unicode_encode(x, |
| encoding=_encodings['merge'], |
| errors='strict') |
| except UnicodeEncodeError: |
| # The package appears to have been merged with a |
| # different value of sys.getfilesystemencoding(), |
| # so fall back to utf_8 if appropriate. |
| try: |
| for x in pkgfiles: |
| _unicode_encode(x, |
| encoding=_encodings['fs'], |
| errors='strict') |
| except UnicodeEncodeError: |
| pass |
| else: |
| os = portage.os |
| |
| self._contents_inodes = {} |
| parent_paths = set() |
| for x in pkgfiles: |
| p_path = os.path.dirname(x) |
| if p_path in parent_paths: |
| continue |
| parent_paths.add(p_path) |
| try: |
| s = os.stat(p_path) |
| except OSError: |
| pass |
| else: |
| inode_key = (s.st_dev, s.st_ino) |
| # Use lists of paths in case multiple |
| # paths reference the same inode. |
| p_path_list = self._contents_inodes.get(inode_key) |
| if p_path_list is None: |
| p_path_list = [] |
| self._contents_inodes[inode_key] = p_path_list |
| if p_path not in p_path_list: |
| p_path_list.append(p_path) |
| |
| p_path_list = self._contents_inodes.get( |
| (parent_stat.st_dev, parent_stat.st_ino)) |
| if p_path_list: |
| for p_path in p_path_list: |
| x = os_filename_arg.path.join(p_path, basename) |
| if x in pkgfiles: |
| return x |
| |
| return False |
| |
| def _linkmap_rebuild(self, **kwargs): |
| """ |
| Rebuild the self._linkmap if it's not broken due to missing |
| scanelf binary. Also, return early if preserve-libs is disabled |
| and the preserve-libs registry is empty. |
| """ |
| if self._linkmap_broken or \ |
| self.vartree.dbapi._linkmap is None or \ |
| self.vartree.dbapi._plib_registry is None or \ |
| ("preserve-libs" not in self.settings.features and \ |
| not self.vartree.dbapi._plib_registry.hasEntries()): |
| return |
| try: |
| self.vartree.dbapi._linkmap.rebuild(**kwargs) |
| except CommandNotFound as e: |
| self._linkmap_broken = True |
| self._display_merge(_("!!! Disabling preserve-libs " \ |
| "due to error: Command Not Found: %s\n") % (e,), |
| level=logging.ERROR, noiselevel=-1) |
| |
| def _find_libs_to_preserve(self): |
| """ |
| Get set of relative paths for libraries to be preserved. The file |
| paths are selected from self._installed_instance.getcontents(). |
| """ |
| if self._linkmap_broken or \ |
| self.vartree.dbapi._linkmap is None or \ |
| self.vartree.dbapi._plib_registry is None or \ |
| self._installed_instance is None or \ |
| "preserve-libs" not in self.settings.features: |
| return None |
| |
| os = _os_merge |
| linkmap = self.vartree.dbapi._linkmap |
| installed_instance = self._installed_instance |
| old_contents = installed_instance.getcontents() |
| root = self.settings['ROOT'] |
| root_len = len(root) - 1 |
| lib_graph = digraph() |
| path_node_map = {} |
| |
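| # Map each path to a single graph node; paths that refer to the
| # same underlying file reuse one node and are recorded in its
| # alt_paths set.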
| def path_to_node(path): |
| node = path_node_map.get(path) |
| if node is None: |
| node = LinkageMap._LibGraphNode(path, root) |
| alt_path_node = lib_graph.get(node) |
| if alt_path_node is not None: |
| node = alt_path_node |
| node.alt_paths.add(path) |
| path_node_map[path] = node |
| return node |
| |
| consumer_map = {} |
| provider_nodes = set() |
| # Create provider nodes and add them to the graph. |
| for f_abs in old_contents: |
| |
| if os is _os_merge: |
| try: |
| _unicode_encode(f_abs, |
| encoding=_encodings['merge'], errors='strict') |
| except UnicodeEncodeError: |
| # The package appears to have been merged with a |
| # different value of sys.getfilesystemencoding(), |
| # so fall back to utf_8 if appropriate. |
| try: |
| _unicode_encode(f_abs, |
| encoding=_encodings['fs'], errors='strict') |
| except UnicodeEncodeError: |
| pass |
| else: |
| os = portage.os |
| |
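| # Strip ROOT but keep the leading os.sep, since isowner() and the
| # linkage map both take paths relative to ROOT.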
| f = f_abs[root_len:] |
| if self.isowner(f): |
| continue |
| try: |
| consumers = linkmap.findConsumers(f) |
| except KeyError: |
| continue |
| if not consumers: |
| continue |
| provider_node = path_to_node(f) |
| lib_graph.add(provider_node, None) |
| provider_nodes.add(provider_node) |
| consumer_map[provider_node] = consumers |
| |
| # Create consumer nodes and add them to the graph. |
| # Note that consumers can also be providers. |
| for provider_node, consumers in consumer_map.items(): |
| for c in consumers: |
| if self.isowner(c): |
| continue |
| consumer_node = path_to_node(c) |
| if installed_instance.isowner(c) and \ |
| consumer_node not in provider_nodes: |
| # This is not a provider, so it will be uninstalled. |
| continue |
| lib_graph.add(provider_node, consumer_node) |
| |
| # Locate nodes which should be preserved. They consist of all |
| # providers that are reachable from consumers that are not |
| # providers themselves. |
| preserve_nodes = set() |
| for consumer_node in lib_graph.root_nodes(): |
| if consumer_node in provider_nodes: |
| continue |
| # Preserve all providers that are reachable from this consumer. |
| node_stack = lib_graph.child_nodes(consumer_node) |
| while node_stack: |
| provider_node = node_stack.pop() |
| if provider_node in preserve_nodes: |
| continue |
| preserve_nodes.add(provider_node) |
| node_stack.extend(lib_graph.child_nodes(provider_node)) |
| |
| preserve_paths = set() |
| for preserve_node in preserve_nodes: |
| # Make sure that at least one of the paths is not a symlink. |
| # This prevents symlinks from being erroneously preserved by |
| # themselves when the old instance installed symlinks that |
| # the new instance does not install. |
| have_lib = False |
| for f in preserve_node.alt_paths: |
| f_abs = os.path.join(root, f.lstrip(os.sep)) |
| try: |
| if stat.S_ISREG(os.lstat(f_abs).st_mode): |
| have_lib = True |
| break |
| except OSError: |
| continue |
| |
| if have_lib: |
| preserve_paths.update(preserve_node.alt_paths) |
| |
| return preserve_paths |
| |
| def _add_preserve_libs_to_contents(self, preserve_paths): |
| """ |
| Preserve libs returned from _find_libs_to_preserve(). |
| """ |
| |
| if not preserve_paths: |
| return |
| |
| os = _os_merge |
| showMessage = self._display_merge |
| root = self.settings['ROOT'] |
| |
| # Copy contents entries from the old package to the new one. |
| new_contents = self.getcontents().copy() |
| old_contents = self._installed_instance.getcontents() |
| for f in sorted(preserve_paths): |
| f = _unicode_decode(f, |
| encoding=_encodings['content'], errors='strict') |
| f_abs = os.path.join(root, f.lstrip(os.sep)) |
| contents_entry = old_contents.get(f_abs) |
| if contents_entry is None: |
| # This will probably never happen, but it might if one of the |
| # paths returned from findConsumers() refers to one of the libs |
| # that should be preserved yet the path is not listed in the |
| # contents. Such a path might belong to some other package, so |
| # it shouldn't be preserved here. |
| showMessage(_("!!! File '%s' will not be preserved " |
| "due to missing contents entry\n") % (f_abs,), |
| level=logging.ERROR, noiselevel=-1) |
| preserve_paths.remove(f) |
| continue |
| new_contents[f_abs] = contents_entry |
| obj_type = contents_entry[0] |
| showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs), |
| noiselevel=-1) |
| # Add parent directories to contents if necessary. |
| parent_dir = os.path.dirname(f_abs) |
| while len(parent_dir) > len(root): |
| new_contents[parent_dir] = ["dir"] |
| prev = parent_dir |
| parent_dir = os.path.dirname(parent_dir) |
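| # Stop once dirname() reaches a fixed point (the filesystem root).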
| if prev == parent_dir: |
| break |
| outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS")) |
| write_contents(new_contents, root, outfile) |
| outfile.close() |
| self._clear_contents_cache() |
| |
| def _find_unused_preserved_libs(self): |
| """ |
| Find preserved libraries that don't have any consumers left. |
| """ |
| |
| if self._linkmap_broken or \ |
| self.vartree.dbapi._linkmap is None or \ |
| self.vartree.dbapi._plib_registry is None or \ |
| not self.vartree.dbapi._plib_registry.hasEntries(): |
| return {} |
| |
| # Since preserved libraries can be consumers of other preserved |
| # libraries, use a graph to track consumer relationships. |
| plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs() |
| lib_graph = digraph() |
| preserved_nodes = set() |
| preserved_paths = set() |
| path_cpv_map = {} |
| path_node_map = {} |
| root = self.settings['ROOT'] |
| |
| def path_to_node(path): |
| node = path_node_map.get(path) |
| if node is None: |
| node = LinkageMap._LibGraphNode(path, root) |
| alt_path_node = lib_graph.get(node) |
| if alt_path_node is not None: |
| node = alt_path_node |
| node.alt_paths.add(path) |
| path_node_map[path] = node |
| return node |
| |
| linkmap = self.vartree.dbapi._linkmap |
| for cpv, plibs in plib_dict.items(): |
| for f in plibs: |
| path_cpv_map[f] = cpv |
| preserved_node = path_to_node(f) |
| if not preserved_node.file_exists(): |
| continue |
| lib_graph.add(preserved_node, None) |
| preserved_paths.add(f) |
| preserved_nodes.add(preserved_node) |
| for c in self.vartree.dbapi._linkmap.findConsumers(f): |
| consumer_node = path_to_node(c) |
| if not consumer_node.file_exists(): |
| continue |
| # Note that consumers may also be providers. |
| lib_graph.add(preserved_node, consumer_node) |
| |
| # Eliminate consumers having providers with the same soname as an |
| # installed library that is not preserved. This eliminates |
| # libraries that are erroneously preserved due to a move from one |
| # directory to another. |
| provider_cache = {} |
| for preserved_node in preserved_nodes: |
| soname = linkmap.getSoname(preserved_node) |
| for consumer_node in lib_graph.parent_nodes(preserved_node): |
| if consumer_node in preserved_nodes: |
| continue |
| providers = provider_cache.get(consumer_node) |
| if providers is None: |
| providers = linkmap.findProviders(consumer_node) |
| provider_cache[consumer_node] = providers |
| providers = providers.get(soname) |
| if providers is None: |
| continue |
| for provider in providers: |
| if provider in preserved_paths: |
| continue |
| provider_node = path_to_node(provider) |
| if not provider_node.file_exists(): |
| continue |
| if provider_node in preserved_nodes: |
| continue |
| # An alternative provider seems to be |
| # installed, so drop this edge. |
| lib_graph.remove_edge(preserved_node, consumer_node) |
| break |
| |
| cpv_lib_map = {} |
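| # Iteratively peel off preserved libs that have no remaining
| # consumers; removing them may expose other preserved libs whose
| # only consumers were the ones just removed.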
| while not lib_graph.empty(): |
| root_nodes = preserved_nodes.intersection(lib_graph.root_nodes()) |
| if not root_nodes: |
| break |
| lib_graph.difference_update(root_nodes) |
| unlink_list = set() |
| for node in root_nodes: |
| unlink_list.update(node.alt_paths) |
| unlink_list = sorted(unlink_list) |
| for obj in unlink_list: |
| cpv = path_cpv_map.get(obj) |
| if cpv is None: |
| # This means that a symlink is in the preserved libs |
| # registry, but the actual lib it points to is not. |
| self._display_merge(_("!!! symlink to lib is preserved, " |
| "but not the lib itself:\n!!! '%s'\n") % (obj,), |
| level=logging.ERROR, noiselevel=-1) |
| continue |
| removed = cpv_lib_map.get(cpv) |
| if removed is None: |
| removed = set() |
| cpv_lib_map[cpv] = removed |
| removed.add(obj) |
| |
| return cpv_lib_map |
| |
| def _remove_preserved_libs(self, cpv_lib_map): |
| """ |
| Remove files returned from _find_unused_preserved_libs(). |
| """ |
| |
| os = _os_merge |
| |
| files_to_remove = set() |
| for files in cpv_lib_map.values(): |
| files_to_remove.update(files) |
| files_to_remove = sorted(files_to_remove) |
| showMessage = self._display_merge |
| root = self.settings['ROOT'] |
| |
| parent_dirs = set() |
| for obj in files_to_remove: |
| obj = os.path.join(root, obj.lstrip(os.sep)) |
| parent_dirs.add(os.path.dirname(obj)) |
| if os.path.islink(obj): |
| obj_type = _("sym") |
| else: |
| obj_type = _("obj") |
| try: |
| os.unlink(obj) |
| except OSError as e: |
| if e.errno != errno.ENOENT: |
| raise |
| del e |
| else: |
| showMessage(_("<<< !needed %s %s\n") % (obj_type, obj), |
| noiselevel=-1) |
| |
| # Remove empty parent directories if possible. |
| while parent_dirs: |
| x = parent_dirs.pop() |
| while True: |
| try: |
| os.rmdir(x) |
| except OSError: |
| break |
| prev = x |
| x = os.path.dirname(x) |
| if x == prev: |
| break |
| |
| self.vartree.dbapi._plib_registry.pruneNonExisting() |
| |
| def _collision_protect(self, srcroot, destroot, mypkglist, mycontents): |
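| """
| Check the file paths in mycontents for collisions with files on the
| live filesystem that are not owned by any dblink instance in
| mypkglist, are not config-protected, and are not matched by
| COLLISION_IGNORE. Collisions with preserved libraries are reported
| separately, since the current package will assume ownership of them.
| 
| @rtype: tuple
| @returns: (collisions, plib_collisions), where collisions is a list
| of colliding paths and plib_collisions maps each cpv to the set of
| its preserved-lib paths that collide.
| """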
| |
| os = _os_merge |
| |
| collision_ignore = set([normalize_path(myignore) for myignore in \ |
| portage.util.shlex_split( |
| self.settings.get("COLLISION_IGNORE", ""))]) |
| |
| # For collisions with preserved libraries, the current package |
| # will assume ownership and the libraries will be unregistered. |
| if self.vartree.dbapi._plib_registry is None: |
| # preserve-libs is entirely disabled |
| plib_cpv_map = None |
| plib_paths = None |
| plib_inodes = {} |
| else: |
| plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs() |
| plib_cpv_map = {} |
| plib_paths = set() |
| for cpv, paths in plib_dict.items(): |
| plib_paths.update(paths) |
| for f in paths: |
| plib_cpv_map[f] = cpv |
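| # Index preserved libs by (st_dev, st_ino) so that collisions with
| # them are detected even through hardlinks or alternate paths.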
| plib_inodes = self._lstat_inode_map(plib_paths) |
| |
| plib_collisions = {} |
| |
| showMessage = self._display_merge |
| stopmerge = False |
| collisions = [] |
| destroot = self.settings['ROOT'] |
| showMessage(_(" %s checking %d files for package collisions\n") % \ |
| (colorize("GOOD", "*"), len(mycontents))) |
| for i, f in enumerate(mycontents): |
| if i % 1000 == 0 and i != 0: |
| showMessage(_("%d files checked ...\n") % i) |
| |
| dest_path = normalize_path( |
| os.path.join(destroot, f.lstrip(os.path.sep))) |
| try: |
| dest_lstat = os.lstat(dest_path) |
| except EnvironmentError as e: |
| if e.errno == errno.ENOENT: |
| del e |
| continue |
| elif e.errno == errno.ENOTDIR: |
| del e |
| # A non-directory is in a location where this package |
| # expects to have a directory. |
| dest_lstat = None |
| parent_path = dest_path |
| while len(parent_path) > len(destroot): |
| parent_path = os.path.dirname(parent_path) |
| try: |
| dest_lstat = os.lstat(parent_path) |
| break |
| except EnvironmentError as e: |
| if e.errno != errno.ENOTDIR: |
| raise |
| del e |
| if not dest_lstat: |
| raise AssertionError( |
| "unable to find non-directory " + \ |
| "parent for '%s'" % dest_path) |
| dest_path = parent_path |
| f = os.path.sep + dest_path[len(destroot):] |
| if f in collisions: |
| continue |
| else: |
| raise |
| if f[0] != "/":
| f = "/" + f
| |
| plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino)) |
| if plibs: |
| for path in plibs: |
| cpv = plib_cpv_map[path] |
| paths = plib_collisions.get(cpv) |
| if paths is None: |
| paths = set() |
| plib_collisions[cpv] = paths |
| paths.add(path) |
| # The current package will assume ownership and the |
| # libraries will be unregistered, so exclude this |
| # path from the normal collisions. |
| continue |
| |
| isowned = False |
| full_path = os.path.join(destroot, f.lstrip(os.path.sep)) |
| for ver in mypkglist: |
| if ver.isowner(f): |
| isowned = True |
| break |
| if not isowned and self.isprotected(full_path): |
| isowned = True |
| if not isowned: |
| stopmerge = True |
| if collision_ignore: |
| if f in collision_ignore: |
| stopmerge = False |
| else: |
| for myignore in collision_ignore: |
| if f.startswith(myignore + os.path.sep): |
| stopmerge = False |
| break |
| if stopmerge: |
| collisions.append(f) |
| return collisions, plib_collisions |
| |
| def _lstat_inode_map(self, path_iter): |
| """ |
| Use lstat to create a map of the form: |
| {(st_dev, st_ino) : set([path1, path2, ...])} |
| Multiple paths may reference the same inode due to hardlinks. |
| All lstat() calls are relative to self.settings['ROOT'].
| """ |
| |
| os = _os_merge |
| |
| root = self.settings['ROOT'] |
| inode_map = {} |
| for f in path_iter: |
| path = os.path.join(root, f.lstrip(os.sep)) |
| try: |
| st = os.lstat(path) |
| except OSError as e: |
| if e.errno not in (errno.ENOENT, errno.ENOTDIR): |
| raise |
| del e |
| continue |
| key = (st.st_dev, st.st_ino) |
| paths = inode_map.get(key) |
| if paths is None: |
| paths = set() |
| inode_map[key] = paths |
| paths.add(f) |
| return inode_map |
| |
| def _security_check(self, installed_instances): |
| if not installed_instances: |
| return 0 |
| |
| os = _os_merge |
| |
| showMessage = self._display_merge |
| |
| file_paths = set() |
| for dblnk in installed_instances: |
| file_paths.update(dblnk.getcontents()) |
| inode_map = {} |
| real_paths = set() |
| for i, path in enumerate(file_paths): |
| |
| if os is _os_merge: |
| try: |
| _unicode_encode(path, |
| encoding=_encodings['merge'], errors='strict') |
| except UnicodeEncodeError: |
| # The package appears to have been merged with a |
| # different value of sys.getfilesystemencoding(), |
| # so fall back to utf_8 if appropriate. |
| try: |
| _unicode_encode(path, |
| encoding=_encodings['fs'], errors='strict') |
| except UnicodeEncodeError: |
| pass |
| else: |
| os = portage.os |
| |
| try: |
| s = os.lstat(path) |
| except OSError as e: |
| if e.errno not in (errno.ENOENT, errno.ENOTDIR): |
| raise |
| del e |
| continue |
| if not stat.S_ISREG(s.st_mode): |
| continue |
| path = os.path.realpath(path) |
| if path in real_paths: |
| continue |
| real_paths.add(path) |
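| # Only regular suid/sgid files with more than one hardlink need
| # further inspection.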
| if s.st_nlink > 1 and \ |
| s.st_mode & (stat.S_ISUID | stat.S_ISGID): |
| k = (s.st_dev, s.st_ino) |
| inode_map.setdefault(k, []).append((path, s)) |
| suspicious_hardlinks = [] |
| for path_list in inode_map.values(): |
| path, s = path_list[0] |
| if len(path_list) == s.st_nlink: |
| # All hardlinks seem to be owned by this package. |
| continue |
| suspicious_hardlinks.append(path_list) |
| if not suspicious_hardlinks: |
| return 0 |
| |
| msg = [] |
| msg.append(_("suid/sgid file(s) " |
| "with suspicious hardlink(s):")) |
| msg.append("") |
| for path_list in suspicious_hardlinks: |
| for path, s in path_list: |
| msg.append("\t%s" % path) |
| msg.append("") |
| msg.append(_("See the Gentoo Security Handbook " |
| "guide for advice on how to proceed.")) |
| |
| self._eerror("preinst", msg) |
| |
| return 1 |
| |
| def _eqawarn(self, phase, lines): |
| self._elog("eqawarn", phase, lines) |
| |
| def _eerror(self, phase, lines): |
| self._elog("eerror", phase, lines) |
| |
| def _elog(self, funcname, phase, lines): |
| func = getattr(portage.elog.messages, funcname) |
| if self._scheduler is None: |
| for l in lines: |
| func(l, phase=phase, key=self.mycpv) |
| else: |
| background = self.settings.get("PORTAGE_BACKGROUND") == "1" |
| log_path = None |
| if self.settings.get("PORTAGE_BACKGROUND") != "subprocess": |
| log_path = self.settings.get("PORTAGE_LOG_FILE") |
| out = portage.StringIO() |
| for line in lines: |
| func(line, phase=phase, key=self.mycpv, out=out) |
| msg = out.getvalue() |
| self._scheduler.output(msg, |
| background=background, log_path=log_path) |
| |
| def _elog_process(self, phasefilter=None): |
| cpv = self.mycpv |
| if self._pipe is None: |
| elog_process(cpv, self.settings, phasefilter=phasefilter) |
| else: |
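| # Merging in a subprocess: serialize the log entries over the pipe
| # as "funcname phase cpv message" lines so that the parent process
| # can emit them.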
| logdir = os.path.join(self.settings["T"], "logging") |
| ebuild_logentries = collect_ebuild_messages(logdir) |
| py_logentries = collect_messages(key=cpv).get(cpv, {}) |
| logentries = _merge_logentries(py_logentries, ebuild_logentries) |
| funcnames = { |
| "INFO": "einfo", |
| "LOG": "elog", |
| "WARN": "ewarn", |
| "QA": "eqawarn", |
| "ERROR": "eerror" |
| } |
| str_buffer = [] |
| for phase, messages in logentries.items(): |
| for key, lines in messages: |
| funcname = funcnames[key] |
| if isinstance(lines, basestring): |
| lines = [lines] |
| for line in lines: |
| fields = (funcname, phase, cpv, line.rstrip('\n')) |
| str_buffer.append(' '.join(fields)) |
| str_buffer.append('\n') |
| if str_buffer: |
| os.write(self._pipe, _unicode_encode(''.join(str_buffer))) |
| |
| def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0, |
| mydbapi=None, prev_mtimes=None): |
| """ |
| |
| This function does the following: |
| |
| calls self._preserve_libs if FEATURES=preserve-libs |
| calls self._collision_protect if FEATURES=collision-protect |
| calls doebuild(mydo=pkg_preinst) |
| Merges the package to the livefs |
| unmerges old version (if required) |
| calls doebuild(mydo=pkg_postinst) |
| calls env_update |
| |
| @param srcroot: Typically this is ${D} |
| @type srcroot: String (Path) |
| @param destroot: ignored, self.settings['ROOT'] is used instead |
| @type destroot: String (Path) |
| @param inforoot: root of the vardb entry ? |
| @type inforoot: String (Path) |
| @param myebuild: path to the ebuild that we are processing |
| @type myebuild: String (Path) |
| @param mydbapi: dbapi which is handed to doebuild. |
| @type mydbapi: portdbapi instance |
| @param prev_mtimes: { Filename:mtime } mapping for env_update |
| @type prev_mtimes: Dictionary |
| @rtype: Integer
| @returns:
| 1. 0 on success
| 2. 1 on failure
| 
| Note: symlinks whose targets do not yet exist are skipped during the
| merge and collected in a "secondhand" list, to be merged in a later
| pass.
| """
| |
| os = _os_merge |
| |
| srcroot = _unicode_decode(srcroot, |
| encoding=_encodings['content'], errors='strict') |
| destroot = self.settings['ROOT'] |
| inforoot = _unicode_decode(inforoot, |
| encoding=_encodings['content'], errors='strict') |
| myebuild = _unicode_decode(myebuild, |
| encoding=_encodings['content'], errors='strict') |
| |
| showMessage = self._display_merge |
| scheduler = self._scheduler |
| |
| srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep |
| |
| if not os.path.isdir(srcroot): |
| showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot, |
| level=logging.ERROR, noiselevel=-1) |
| return 1 |
| |
| slot = '' |
| for var_name in ('CHOST', 'SLOT'): |
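| # The recorded CHOST is not meaningful for virtual packages, so
| # drop it rather than validating it below.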
| if var_name == 'CHOST' and self.cat == 'virtual': |
| try: |
| os.unlink(os.path.join(inforoot, var_name)) |
| except OSError: |
| pass |
| continue |
| |
| try: |
| val = codecs.open(_unicode_encode( |
| os.path.join(inforoot, var_name), |
| encoding=_encodings['fs'], errors='strict'), |
| mode='r', encoding=_encodings['repo.content'], |
| errors='replace').readline().strip() |
| except EnvironmentError as e: |
| if e.errno != errno.ENOENT: |
| raise |
| del e |
| val = '' |
| |
| if var_name == 'SLOT': |
| slot = val |
| |
| if not slot.strip(): |
| slot = self.settings.get(var_name, '') |
| if not slot.strip(): |
| showMessage(_("!!! SLOT is undefined\n"), |
| level=logging.ERROR, noiselevel=-1) |
| return 1 |
| write_atomic(os.path.join(inforoot, var_name), slot + '\n') |
| |
| if val != self.settings.get(var_name, ''): |
| self._eqawarn('preinst', |
| [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \ |
| {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}]) |
| |
| def eerror(lines): |
| self._eerror("preinst", lines) |
| |
| if not os.path.exists(self.dbcatdir): |
|