# portage.py -- core Portage functionality
# Copyright 1998-2004 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id: /var/cvsroot/gentoo-src/portage/pym/portage.py,v 1.524.2.76 2005/05/29 12:40:08 jstubbs Exp $
VERSION="$Rev$"[6:-2] + "-svn"
# ===========================================================================
# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
# ===========================================================================
try:
import sys
except SystemExit, e:
raise
except:
print "Failed to import sys! Something is _VERY_ wrong with python."
raise SystemExit, 127
try:
import os,string,types,atexit,signal,fcntl
import time,cPickle,traceback,copy
import re,pwd,grp,commands
import shlex,shutil
import stat
from time import sleep
from random import shuffle
from cache.cache_errors import CacheError
except SystemExit, e:
raise
except Exception, e:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
sys.stderr.write("	"+str(e)+"\n\n")
sys.exit(127)
except:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
sys.stderr.write("!!! gone wrong. The exception was non-standard and we were unable to catch it.\n\n")
sys.exit(127)
try:
# XXX: This should get renamed to bsd_chflags, I think.
import chflags
bsd_chflags = chflags
except SystemExit, e:
raise
except:
# XXX: This should get renamed to bsd_chflags, I think.
bsd_chflags = None
try:
import cvstree
import xpak
import getbinpkg
import portage_dep
# XXX: This needs to get cleaned up.
import output
from output import blue, bold, brown, darkblue, darkgreen, darkred, darkteal, \
darkyellow, fuchsia, fuscia, green, purple, red, teal, turquoise, white, \
xtermTitle, xtermTitleReset, yellow
import portage_const
from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
INCREMENTALS, STICKIES, EAPI
from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
portage_uid, portage_gid
import portage_util
from portage_util import grabdict, grabdict_package, grabfile, grabfile_package, \
map_dictlist_vals, pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
unique_array, varexpand, writedict, writemsg, getconfig, dump_traceback
import portage_exception
import portage_gpg
import portage_locks
import portage_exec
from portage_locks import unlockfile,unlockdir,lockfile,lockdir
import portage_checksum
from portage_checksum import perform_md5,perform_checksum,prelink_capable
import eclass_cache
from portage_localization import _
# Need these functions directly in portage namespace to not break every external tool in existence
from portage_versions import ververify,vercmp,catsplit,catpkgsplit,pkgsplit,pkgcmp
except SystemExit, e:
raise
except Exception, e:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
sys.stderr.write("!!! a recovery of portage.\n")
sys.stderr.write(" "+str(e)+"\n\n")
sys.exit(127)
# ===========================================================================
# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
# ===========================================================================
def exithandler(signum,frame):
"""Handles ^C interrupts in a sane manner"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
# INT/TERM are ignored above so the handler is not re-entered while exiting
sys.exit(1)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
signal.signal(signal.SIGINT, exithandler)
signal.signal(signal.SIGTERM, exithandler)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def load_mod(name):
modname = string.join(string.split(name,".")[:-1],".")
mod = __import__(modname)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
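# Example (illustrative): load_mod("cache.flat_hash.database") imports
# cache.flat_hash and returns its "database" attribute; this is how
# config.load_best_module() resolves module settings further below.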
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
for x in key_order:
if top_dict.has_key(x) and top_dict[x].has_key(key):
if FullCopy:
return copy.deepcopy(top_dict[x][key])
else:
return top_dict[x][key]
if EmptyOnError:
return ""
else:
raise KeyError, "Key not found in dictionaries: '%s'" % key
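# Example (illustrative):
#   best_from_dict("portdbapi.auxdbmodule", self.modules, ["user","default"])
# returns the value from modules["user"] when the key is defined there,
# falling back to modules["default"]; with EmptyOnError left at 1 a missing
# key yields "" instead of raising KeyError.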
def getcwd():
"this fixes situations where the current directory doesn't exist"
try:
return os.getcwd()
except SystemExit, e:
raise
except:
os.chdir("/")
return "/"
getcwd()
def abssymlink(symlink):
"Read a symlink, resolve it relative to its directory if needed, and return the absolute path."
mylink=os.readlink(symlink)
if mylink[0] != '/':
mydir=os.path.dirname(symlink)
mylink=mydir+"/"+mylink
return os.path.normpath(mylink)
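# Example (illustrative): if /usr/tmp is a symlink to "../var/tmp",
# abssymlink("/usr/tmp") returns "/var/tmp".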
def suffix_array(array,suffix,doblanks=1):
"""Appends a given suffix to each element in an Array/List/Tuple.
Returns a List."""
if type(array) not in [types.ListType, types.TupleType]:
raise TypeError, "List or Tuple expected. Got %s" % type(array)
newarray=[]
for x in array:
if x or doblanks:
newarray.append(x + suffix)
else:
newarray.append(x)
return newarray
def prefix_array(array,prefix,doblanks=1):
"""Prepends a given prefix to each element in an Array/List/Tuple.
Returns a List."""
if type(array) not in [types.ListType, types.TupleType]:
raise TypeError, "List or Tuple expected. Got %s" % type(array)
newarray=[]
for x in array:
if x or doblanks:
newarray.append(prefix + x)
else:
newarray.append(x)
return newarray
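# Example (illustrative): prefix_array(["bin","sbin"], "/usr/") returns
# ["/usr/bin", "/usr/sbin"]; with doblanks=0 empty strings are passed
# through without the prefix (suffix_array behaves the same way).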
def normalize_path(mypath):
newpath = os.path.normpath(mypath)
if len(newpath) > 1:
if newpath[:2] == "//":
newpath = newpath[1:]
return newpath
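# Example (illustrative): os.path.normpath() preserves a leading "//"
# (which is implementation-defined under POSIX), so
# normalize_path("//etc//env.d") returns "/etc/env.d", while "///etc" is
# already collapsed to "/etc" by normpath itself.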
dircache = {}
cacheHit=0
cacheMiss=0
cacheStale=0
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
global cacheHit,cacheMiss,cacheStale
mypath = normalize_path(my_original_path)
if dircache.has_key(mypath):
cacheHit += 1
cached_mtime, list, ftype = dircache[mypath]
else:
cacheMiss += 1
cached_mtime, list, ftype = -1, [], []
try:
pathstat = os.stat(mypath)
if stat.S_ISDIR(pathstat[stat.ST_MODE]):
mtime = pathstat[stat.ST_MTIME]
else:
raise Exception
except SystemExit, e:
raise
except:
if EmptyOnError:
return [], []
return None, None
# Python returns mtime in whole seconds, so if the directory was changed within the last few seconds the cached entry could be stale
if mtime != cached_mtime or time.time() - mtime < 4:
if dircache.has_key(mypath):
cacheStale += 1
list = os.listdir(mypath)
ftype = []
for x in list:
try:
if followSymlinks:
pathstat = os.stat(mypath+"/"+x)
else:
pathstat = os.lstat(mypath+"/"+x)
if stat.S_ISREG(pathstat[stat.ST_MODE]):
ftype.append(0)
elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
ftype.append(1)
elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
ftype.append(2)
else:
ftype.append(3)
except SystemExit, e:
raise
except:
ftype.append(3)
dircache[mypath] = mtime, list, ftype
ret_list = []
ret_ftype = []
for x in range(0, len(list)):
if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
ret_list.append(list[x])
ret_ftype.append(ftype[x])
elif (list[x] not in ignorelist):
ret_list.append(list[x])
ret_ftype.append(ftype[x])
writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
EmptyOnError=False, dirsonly=False):
list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
if list is None:
list=[]
if ftype is None:
ftype=[]
if not filesonly and not recursive:
return list
if recursive:
x=0
while x<len(ftype):
if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
followSymlinks)
l=l[:]
for y in range(0,len(l)):
l[y]=list[x]+"/"+l[y]
list=list+l
ftype=ftype+f
x+=1
if filesonly:
rlist=[]
for x in range(0,len(ftype)):
if ftype[x]==0:
rlist=rlist+[list[x]]
elif dirsonly:
rlist = []
for x in range(0, len(ftype)):
if ftype[x] == 1:
rlist = rlist + [list[x]]
else:
rlist=list
return rlist
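# Example (illustrative): listdir("/etc/env.d", EmptyOnError=1) returns the
# file names in that directory (as env_update() does below); with
# recursive=True and filesonly=True the result is a list of paths relative
# to mypath, e.g. "subdir/file".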
starttime=long(time.time())
features=[]
def tokenize(mystring):
"""breaks a string like 'foo? (bar) oni? (blah (blah))'
into embedded lists; returns None on paren mismatch"""
# This function is obsoleted.
# Use dep_parenreduce
newtokens=[]
curlist=newtokens
prevlists=[]
level=0
accum=""
for x in mystring:
if x=="(":
if accum:
curlist.append(accum)
accum=""
prevlists.append(curlist)
curlist=[]
level=level+1
elif x==")":
if accum:
curlist.append(accum)
accum=""
if level==0:
writemsg("!!! tokenizer: Unmatched right parenthesis in:\n'"+str(mystring)+"'\n")
return None
newlist=curlist
curlist=prevlists.pop()
curlist.append(newlist)
level=level-1
elif x in string.whitespace:
if accum:
curlist.append(accum)
accum=""
else:
accum=accum+x
if accum:
curlist.append(accum)
if (level!=0):
writemsg("!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+str(mystring)+"'\n")
return None
return newtokens
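# Example (illustrative):
#   tokenize("foo? (bar) oni? (blah (blah))")
# returns ['foo?', ['bar'], 'oni?', ['blah', ['blah']]], and None is
# returned when the parentheses do not balance.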
def flatten(mytokens):
"""this function now turns a [1,[2,3]] list into
a [1,2,3] list and returns it."""
newlist=[]
for x in mytokens:
if type(x)==types.ListType:
newlist.extend(flatten(x))
else:
newlist.append(x)
return newlist
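# Example (illustrative): flatten(['foo?', ['bar', ['baz']]]) returns
# ['foo?', 'bar', 'baz'].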
#beautiful directed graph object
class digraph:
def __init__(self):
self.dict={}
#okeys = keys, in order they were added (to optimize firstzero() ordering)
self.okeys=[]
def addnode(self,mykey,myparent):
if not self.dict.has_key(mykey):
self.okeys.append(mykey)
if myparent==None:
self.dict[mykey]=[0,[]]
else:
self.dict[mykey]=[0,[myparent]]
self.dict[myparent][0]=self.dict[myparent][0]+1
return
if myparent and (not myparent in self.dict[mykey][1]):
self.dict[mykey][1].append(myparent)
self.dict[myparent][0]=self.dict[myparent][0]+1
def delnode(self,mykey):
if not self.dict.has_key(mykey):
return
for x in self.dict[mykey][1]:
self.dict[x][0]=self.dict[x][0]-1
del self.dict[mykey]
while 1:
try:
self.okeys.remove(mykey)
except ValueError:
break
def allnodes(self):
"returns all nodes in the dictionary"
return self.dict.keys()
def firstzero(self):
"returns the first node with zero references, or None if no such node exists"
for x in self.okeys:
if self.dict[x][0]==0:
return x
return None
def depth(self, mykey):
depth=0
while (self.dict[mykey][1]):
depth=depth+1
mykey=self.dict[mykey][1][0]
return depth
def allzeros(self):
"returns a list of all nodes with zero references (empty if none exist)"
zerolist = []
for x in self.dict.keys():
mys = string.split(x)
if mys[0] != "blocks" and self.dict[x][0]==0:
zerolist.append(x)
return zerolist
def hasallzeros(self):
"returns 1 if all nodes have zero references, 0 otherwise"
for x in self.dict.keys():
if self.dict[x][0]!=0:
return 0
return 1
def empty(self):
if len(self.dict)==0:
return 1
return 0
def hasnode(self,mynode):
return self.dict.has_key(mynode)
def copy(self):
mygraph=digraph()
for x in self.dict.keys():
mygraph.dict[x]=self.dict[x][:]
mygraph.okeys=self.okeys[:]
return mygraph
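# Example (illustrative) of digraph use:
#   g = digraph()
#   g.addnode("a", None)     # dict["a"] == [0, []]
#   g.addnode("b", "a")      # "b" lists "a" as parent; "a"'s count becomes 1
#   g.firstzero()            # ==> "b", the first node that nothing references
#   g.delnode("b")           # drops "a"'s reference count back to 0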
def elog_process(cpv, mysettings):
mylogfiles = listdir(mysettings["T"]+"/logging/")
# shortcut for packages without any messages
if len(mylogfiles) == 0:
return
# exploit listdir() file order so we process log entries in chronological order
mylogfiles.reverse()
mylogentries = {}
for f in mylogfiles:
msgfunction, msgtype = f.split(".")
if not msgtype.upper() in mysettings["PORTAGE_ELOG_CLASSES"].split() \
and not msgtype.lower() in mysettings["PORTAGE_ELOG_CLASSES"].split():
continue
if msgfunction not in portage_const.EBUILD_PHASES:
print "!!! can't process invalid log file: %s" % f
continue
if not msgfunction in mylogentries:
mylogentries[msgfunction] = []
msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
mylogentries[msgfunction].append((msgtype, msgcontent))
# in case the filters matched all messages
if len(mylogentries) == 0:
return
# generate a single string with all log messages
fulllog = ""
for phase in portage_const.EBUILD_PHASES:
if not phase in mylogentries:
continue
for msgtype,msgcontent in mylogentries[phase]:
fulllog += "%s: %s\n" % (msgtype, phase)
for line in msgcontent:
fulllog += line
fulllog += "\n"
# pass the processing to the individual modules
logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
for s in logsystems:
try:
# FIXME: ugly ad-hoc import code
# TODO: implement a common portage module loader
logmodule = __import__("elog_modules.mod_"+s)
m = getattr(logmodule, "mod_"+s)
m.process(mysettings, cpv, mylogentries, fulllog)
except (ImportError, AttributeError), e:
print "!!! Error while importing logging modules while loading \"mod_%s\":" % s
print e
except portage_exception.PortageException, e:
print e
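# Example (illustrative): with PORTAGE_ELOG_SYSTEM="save mail" the loop above
# imports elog_modules.mod_save and elog_modules.mod_mail and hands each the
# collected log entries via its process() function.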
# valid end of version components; integers specify offset from release version
# pre=prerelease, p=patchlevel (should always be followed by an int), rc=release candidate
# all but _p (where it is required) can be followed by an optional trailing integer
endversion={"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1}
# as there's no reliable way to set {}.keys() order,
# endversion_keys will be used instead of endversion.keys()
# to have a fixed search order, so that "pre" is checked
# before "p"
endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
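# Example (illustrative): given the offsets above, suffixed variants of a
# version sort as
#   1.0_alpha1 < 1.0_beta1 < 1.0_pre1 < 1.0_rc1 < 1.0 < 1.0_p1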
#parse /etc/env.d and generate /etc/profile.env
def env_update(makelinks=1):
global root
if not os.path.exists(root+"etc/env.d"):
prevmask=os.umask(0)
os.makedirs(root+"etc/env.d",0755)
os.umask(prevmask)
fns=listdir(root+"etc/env.d",EmptyOnError=1)
fns.sort()
pos=0
while (pos<len(fns)):
if len(fns[pos])<=2:
del fns[pos]
continue
if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
del fns[pos]
continue
pos=pos+1
specials={
"KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],
"INFODIR":[],"INFOPATH":[],"ROOTPATH":[],"CONFIG_PROTECT":[],
"CONFIG_PROTECT_MASK":[],"PRELINK_PATH":[],"PRELINK_PATH_MASK":[],
"PYTHONPATH":[], "ADA_INCLUDE_PATH":[], "ADA_OBJECTS_PATH":[]
}
colon_separated = [
"ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
"LDPATH", "MANPATH",
"PATH", "PRELINK_PATH",
"PRELINK_PATH_MASK", "PYTHONPATH"
]
env={}
for x in fns:
# don't process backup files
if x[-1]=='~' or x[-4:]==".bak":
continue
myconfig=getconfig(root+"etc/env.d/"+x)
if myconfig==None:
writemsg("!!! Parsing error in "+str(root)+"etc/env.d/"+str(x)+"\n")
#parse error
continue
# process PATH, CLASSPATH, LDPATH
for myspec in specials.keys():
if myconfig.has_key(myspec):
if myspec in colon_separated:
specials[myspec].extend(myconfig[myspec].split(":"))
else:
specials[myspec].append(myconfig[myspec])
del myconfig[myspec]
# process all other variables
for myenv in myconfig.keys():
env[myenv]=myconfig[myenv]
if os.path.exists(root+"etc/ld.so.conf"):
myld=open(root+"etc/ld.so.conf")
myldlines=myld.readlines()
myld.close()
oldld=[]
for x in myldlines:
#each line has at least one char (a newline)
if x[0]=="#":
continue
oldld.append(x[:-1])
# os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
# Where is the new ld.so.conf generated? (achim)
else:
oldld=None
ld_cache_update=False
if os.environ.has_key("PORTAGE_CALLER") and \
os.environ["PORTAGE_CALLER"] == "env-update":
ld_cache_update = True
newld=specials["LDPATH"]
if (oldld!=newld):
#ld.so.conf needs updating and ldconfig needs to be run
myfd=open(root+"etc/ld.so.conf","w")
myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
myfd.write("# contents of /etc/env.d directory\n")
for x in specials["LDPATH"]:
myfd.write(x+"\n")
myfd.close()
ld_cache_update=True
# Update prelink.conf if we are prelink-enabled
if prelink_capable:
newprelink=open(root+"etc/prelink.conf","w")
newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
newprelink.write("# contents of /etc/env.d directory\n")
for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
newprelink.write("-l "+x+"\n");
for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
if not x:
continue
if x[-1]!='/':
x=x+"/"
plmasked=0
for y in specials["PRELINK_PATH_MASK"]:
if not y:
continue
if y[-1]!='/':
y=y+"/"
if y==x[0:len(y)]:
plmasked=1
break
if not plmasked:
newprelink.write("-h "+x+"\n")
for x in specials["PRELINK_PATH_MASK"]:
newprelink.write("-b "+x+"\n")
newprelink.close()
if not mtimedb.has_key("ldpath"):
mtimedb["ldpath"]={}
for x in specials["LDPATH"]+['/usr/lib','/lib']:
try:
newldpathtime=os.stat(x)[stat.ST_MTIME]
except SystemExit, e:
raise
except:
newldpathtime=0
if mtimedb["ldpath"].has_key(x):
if mtimedb["ldpath"][x]==newldpathtime:
pass
else:
mtimedb["ldpath"][x]=newldpathtime
ld_cache_update=True
else:
mtimedb["ldpath"][x]=newldpathtime
ld_cache_update=True
# ldconfig has very different behaviour between FreeBSD and Linux
if ostype=="Linux" or ostype.lower().endswith("gnu"):
if (ld_cache_update or makelinks):
# We can't update links if we haven't cleaned other versions first, as
# an older package installed ON TOP of a newer version will cause ldconfig
# to overwrite the symlinks we just made. -X means no links. After 'clean'
# we can safely create links.
writemsg(">>> Regenerating "+str(root)+"etc/ld.so.cache...\n")
if makelinks:
commands.getstatusoutput("cd / ; /sbin/ldconfig -r "+root)
else:
commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r "+root)
elif ostype in ("FreeBSD","DragonFly"):
if (ld_cache_update):
writemsg(">>> Regenerating "+str(root)+"var/run/ld-elf.so.hints...\n")
commands.getstatusoutput("cd / ; /sbin/ldconfig -elf -i -f "+str(root)+"var/run/ld-elf.so.hints "+str(root)+"etc/ld.so.conf")
del specials["LDPATH"]
penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
cenvnotice = penvnotice[:]
penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
#create /etc/profile.env for bash support
outfile=open(root+"/etc/profile.env","w")
outfile.write(penvnotice)
for path in specials.keys():
if len(specials[path])==0:
continue
outstring="export "+path+"='"
if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
for x in specials[path][:-1]:
outstring += x+" "
else:
for x in specials[path][:-1]:
outstring=outstring+x+":"
outstring=outstring+specials[path][-1]+"'"
outfile.write(outstring+"\n")
#write the remaining environment variables to /etc/profile.env
for x in env.keys():
if type(env[x])!=types.StringType:
continue
outfile.write("export "+x+"='"+env[x]+"'\n")
outfile.close()
#create /etc/csh.env for (t)csh support
outfile=open(root+"/etc/csh.env","w")
outfile.write(cenvnotice)
for path in specials.keys():
if len(specials[path])==0:
continue
outstring="setenv "+path+" '"
if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
for x in specials[path][:-1]:
outstring += x+" "
else:
for x in specials[path][:-1]:
outstring=outstring+x+":"
outstring=outstring+specials[path][-1]+"'"
outfile.write(outstring+"\n")
#get it out of the way
del specials[path]
#write the remaining environment variables to /etc/csh.env
for x in env.keys():
if type(env[x])!=types.StringType:
continue
outfile.write("setenv "+x+" '"+env[x]+"'\n")
outfile.close()
def new_protect_filename(mydest, newmd5=None):
"""Resolves a config-protect filename for merging, optionally
using the last filename if the md5 matches.
(dest,md5) ==> 'string' --- path_to_target_filename
(dest) ==> ('next', 'highest') --- next_target and most-recent_target
"""
# config protection filename format:
# ._cfg0000_foo
# 0123456789012
prot_num=-1
last_pfile=""
if (len(mydest) == 0):
raise ValueError, "Empty path provided where a filename is required"
if (mydest[-1]=="/"): # XXX add better directory checking
raise ValueError, "Directory provided but this function requires a filename"
if not os.path.exists(mydest):
return mydest
real_filename = os.path.basename(mydest)
real_dirname = os.path.dirname(mydest)
for pfile in listdir(real_dirname):
if pfile[0:5] != "._cfg":
continue
if pfile[10:] != real_filename:
continue
try:
new_prot_num = int(pfile[5:9])
if new_prot_num > prot_num:
prot_num = new_prot_num
last_pfile = pfile
except SystemExit, e:
raise
except:
continue
prot_num = prot_num + 1
new_pfile = os.path.normpath(real_dirname+"/._cfg"+string.zfill(prot_num,4)+"_"+real_filename)
old_pfile = os.path.normpath(real_dirname+"/"+last_pfile)
if last_pfile and newmd5:
if portage_checksum.perform_md5(real_dirname+"/"+last_pfile) == newmd5:
return old_pfile
else:
return new_pfile
elif newmd5:
return new_pfile
else:
return (new_pfile, old_pfile)
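# Example (illustrative): if /etc/make.conf and /etc/._cfg0000_make.conf
# already exist, new_protect_filename("/etc/make.conf") returns the tuple
# ("/etc/._cfg0001_make.conf", "/etc/._cfg0000_make.conf"); when newmd5 is
# given and matches the last candidate, the old name is returned instead so
# identical updates are not duplicated.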
#XXX: These two are now implemented in portage_util.py but are needed here
#XXX: until the isvalidatom() dependency is sorted out.
def grabdict_package(myfilename,juststrings=0,recursive=0):
pkgs=grabdict(myfilename, juststrings=juststrings, empty=1,recursive=recursive)
for x in pkgs.keys():
if not isvalidatom(x):
del(pkgs[x])
writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x))
return pkgs
def grabfile_package(myfilename,compatlevel=0,recursive=0):
pkgs=grabfile(myfilename,compatlevel,recursive=recursive)
for x in range(len(pkgs)-1,-1,-1):
pkg = pkgs[x]
if pkg[0] == "-":
pkg = pkg[1:]
if pkg[0] == "*":
pkg = pkg[1:]
if not isvalidatom(pkg):
writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]))
del(pkgs[x])
return pkgs
# returns a tuple: (version[string], error[string])
# They are mutually exclusive:
# either version is a string and error is None, or
# version is None and error is a string
#
def ExtractKernelVersion(base_dir):
lines = []
pathname = os.path.join(base_dir, 'Makefile')
try:
f = open(pathname, 'r')
except OSError, details:
return (None, str(details))
except IOError, details:
return (None, str(details))
try:
for i in range(4):
lines.append(f.readline())
except OSError, details:
return (None, str(details))
except IOError, details:
return (None, str(details))
lines = map(string.strip, lines)
version = ''
#XXX: The following code relies on the ordering of vars within the Makefile
for line in lines:
# split on the '=' then remove annoying whitespace
items = string.split(line, '=')
items = map(string.strip, items)
if items[0] == 'VERSION' or \
items[0] == 'PATCHLEVEL':
version += items[1]
version += "."
elif items[0] == 'SUBLEVEL':
version += items[1]
elif items[0] == 'EXTRAVERSION' and \
items[-1] != items[0]:
version += items[1]
# Grab a list of files named localversion* and sort them
localversions = os.listdir(base_dir)
for x in range(len(localversions)-1,-1,-1):
if localversions[x][:12] != "localversion":
del localversions[x]
localversions.sort()
# Append the contents of each to the version string, stripping ALL whitespace
for lv in localversions:
version += string.join(string.split(string.join(grabfile(base_dir+"/"+lv))), "")
# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
kernelconfig = getconfig(base_dir+"/.config")
if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
version += string.join(string.split(kernelconfig["CONFIG_LOCALVERSION"]), "")
return (version,None)
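# Example (illustrative): for a kernel Makefile beginning with
#   VERSION = 2
#   PATCHLEVEL = 6
#   SUBLEVEL = 11
#   EXTRAVERSION = -gentoo-r6
# ExtractKernelVersion returns ("2.6.11-gentoo-r6", None), with any
# localversion* files and CONFIG_LOCALVERSION from .config appended.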
autouse_val = None
def autouse(myvartree,use_cache=1):
"returns set of USE variables auto-enabled due to packages being installed"
global usedefaults, autouse_val
if autouse_val is not None:
return autouse_val
if profiledir==None:
autouse_val = ""
return ""
myusevars=""
for myuse in usedefaults:
dep_met = True
for mydep in usedefaults[myuse]:
if not myvartree.dep_match(mydep,use_cache=True):
dep_met = False
break
if dep_met:
myusevars += " "+myuse
autouse_val = myusevars
return myusevars
def check_config_instance(test):
if not test or (str(test.__class__) != 'portage.config'):
raise TypeError, "Invalid type for config object: %s" % test.__class__
class config:
def __init__(self, clone=None, mycpv=None, config_profile_path=None, config_incrementals=None):
self.already_in_regenerate = 0
self.locked = 0
self.mycpv = None
self.puse = ""
self.modifiedkeys = []
self.virtuals = {}
self.v_count = 0
# Virtuals obtained from the vartree
self.treeVirtuals = {}
# Virtuals by user specification. Includes negatives.
self.userVirtuals = {}
# Virtual negatives from user specifications.
self.negVirtuals = {}
self.user_profile_dir = None
if clone:
self.incrementals = copy.deepcopy(clone.incrementals)
self.profile_path = copy.deepcopy(clone.profile_path)
self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
self.module_priority = copy.deepcopy(clone.module_priority)
self.modules = copy.deepcopy(clone.modules)
self.depcachedir = copy.deepcopy(clone.depcachedir)
self.packages = copy.deepcopy(clone.packages)
self.virtuals = copy.deepcopy(clone.virtuals)
self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
self.userVirtuals = copy.deepcopy(clone.userVirtuals)
self.negVirtuals = copy.deepcopy(clone.negVirtuals)
self.use_defs = copy.deepcopy(clone.use_defs)
self.usemask = copy.deepcopy(clone.usemask)
self.configlist = copy.deepcopy(clone.configlist)
self.configlist[-1] = os.environ.copy()
self.configdict = { "globals": self.configlist[0],
"defaults": self.configlist[1],
"conf": self.configlist[2],
"pkg": self.configlist[3],
"auto": self.configlist[4],
"backupenv": self.configlist[5],
"env": self.configlist[6] }
self.profiles = copy.deepcopy(clone.profiles)
self.backupenv = copy.deepcopy(clone.backupenv)
self.pusedict = copy.deepcopy(clone.pusedict)
self.categories = copy.deepcopy(clone.categories)
self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
self.pmaskdict = copy.deepcopy(clone.pmaskdict)
self.punmaskdict = copy.deepcopy(clone.punmaskdict)
self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
self.pprovideddict = copy.deepcopy(clone.pprovideddict)
self.lookuplist = copy.deepcopy(clone.lookuplist)
self.uvlist = copy.deepcopy(clone.uvlist)
self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
else:
self.depcachedir = DEPCACHE_PATH
if not config_profile_path:
global profiledir
writemsg("config_profile_path not specified to class config\n")
self.profile_path = profiledir[:]
else:
self.profile_path = config_profile_path[:]
if not config_incrementals:
writemsg("incrementals not specified to class config\n")
self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
else:
self.incrementals = copy.deepcopy(config_incrementals)
self.module_priority = ["user","default"]
self.modules = {}
self.modules["user"] = getconfig(MODULES_FILE_PATH)
if self.modules["user"] == None:
self.modules["user"] = {}
self.modules["default"] = {
"portdbapi.metadbmodule": "cache.metadata.database",
"portdbapi.auxdbmodule": "cache.flat_hash.database",
}
self.usemask=[]
self.configlist=[]
self.backupenv={}
# back up our incremental variables:
self.configdict={}
# configlist will contain: [ globals, defaults, conf, pkg, auto, backupenv (incrementals), origenv ]
# The symlink might not exist or might not be a symlink.
try:
self.profiles=[abssymlink(self.profile_path)]
except SystemExit, e:
raise
except:
self.profiles=[self.profile_path]
mypath = self.profiles[0]
while os.path.exists(mypath+"/parent"):
mypath = os.path.normpath(mypath+"///"+grabfile(mypath+"/parent")[0])
if os.path.exists(mypath):
self.profiles.insert(0,mypath)
if os.environ.has_key("PORTAGE_CALLER") and os.environ["PORTAGE_CALLER"] == "repoman":
pass
else:
# XXX: This should depend on ROOT?
if os.path.exists("/"+CUSTOM_PROFILE_PATH):
self.user_profile_dir = os.path.normpath("/"+"///"+CUSTOM_PROFILE_PATH)
self.profiles.append(self.user_profile_dir[:])
self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
self.packages = stack_lists(self.packages_list, incremental=1)
del self.packages_list
#self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
# revmaskdict
self.prevmaskdict={}
for x in self.packages:
mycatpkg=dep_getkey(x)
if not self.prevmaskdict.has_key(mycatpkg):
self.prevmaskdict[mycatpkg]=[x]
else:
self.prevmaskdict[mycatpkg].append(x)
# get profile-masked use flags -- INCREMENTAL Child over parent
usemask_lists = [grabfile(os.path.join(x, "use.mask")) for x in self.profiles]
self.usemask = stack_lists(usemask_lists, incremental=True)
del usemask_lists
use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
del use_defs_lists
try:
mygcfg_dlists = [getconfig(os.path.join(x, "make.globals")) for x in self.profiles+["/etc"]]
self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
if self.mygcfg == None:
self.mygcfg = {}
except SystemExit, e:
raise
except Exception, e:
writemsg("!!! %s\n" % (e))
writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
writemsg("!!! Errors in this file should be reported on bugs.gentoo.org.\n")
sys.exit(1)
self.configlist.append(self.mygcfg)
self.configdict["globals"]=self.configlist[-1]
self.mygcfg = {}
if self.profiles:
try:
mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
#self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
if self.mygcfg == None:
self.mygcfg = {}
except SystemExit, e:
raise
except Exception, e:
writemsg("!!! %s\n" % (e))
writemsg("!!! 'rm -Rf /usr/portage/profiles; emerge sync' may fix this. If it does\n")
writemsg("!!! not then please report this to bugs.gentoo.org and, if possible, a dev\n")
writemsg("!!! on #gentoo (irc.freenode.org)\n")
sys.exit(1)
self.configlist.append(self.mygcfg)
self.configdict["defaults"]=self.configlist[-1]
try:
# XXX: Should depend on root?
self.mygcfg=getconfig("/"+MAKE_CONF_FILE,allow_sourcing=True)
if self.mygcfg == None:
self.mygcfg = {}
except SystemExit, e:
raise
except Exception, e:
writemsg("!!! %s\n" % (e))
writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
sys.exit(1)
self.configlist.append(self.mygcfg)
self.configdict["conf"]=self.configlist[-1]
self.configlist.append({})
self.configdict["pkg"]=self.configlist[-1]
#auto-use:
self.configlist.append({})
self.configdict["auto"]=self.configlist[-1]
#backup-env (for recording our calculated incremental variables:)
self.backupenv = os.environ.copy()
self.configlist.append(self.backupenv) # XXX Why though?
self.configdict["backupenv"]=self.configlist[-1]
self.configlist.append(os.environ.copy())
self.configdict["env"]=self.configlist[-1]
# make lookuplist for loading package.*
self.lookuplist=self.configlist[:]
self.lookuplist.reverse()
if os.environ.get("PORTAGE_CALLER","") == "repoman":
# repoman shouldn't use local settings.
locations = [self["PORTDIR"] + "/profiles"]
self.pusedict = {}
self.pkeywordsdict = {}
self.punmaskdict = {}
else:
locations = [self["PORTDIR"] + "/profiles", USER_CONFIG_PATH]
for ov in self["PORTDIR_OVERLAY"].split():
ov = os.path.normpath(ov)
if os.path.isdir(ov+"/profiles"):
locations.append(ov+"/profiles")
pusedict=grabdict_package(USER_CONFIG_PATH+"/package.use", recursive=1)
self.pusedict = {}
for key in pusedict.keys():
cp = dep_getkey(key)
if not self.pusedict.has_key(cp):
self.pusedict[cp] = {}
self.pusedict[cp][key] = pusedict[key]
#package.keywords
pkgdict=grabdict_package(USER_CONFIG_PATH+"/package.keywords", recursive=1)
self.pkeywordsdict = {}
for key in pkgdict.keys():
# default to ~arch if no specific keyword is given
if not pkgdict[key]:
mykeywordlist = []
if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
else:
groups = []
for keyword in groups:
if not keyword[0] in "~-":
mykeywordlist.append("~"+keyword)
pkgdict[key] = mykeywordlist
cp = dep_getkey(key)
if not self.pkeywordsdict.has_key(cp):
self.pkeywordsdict[cp] = {}
self.pkeywordsdict[cp][key] = pkgdict[key]
#package.unmask
pkgunmasklines = grabfile_package(USER_CONFIG_PATH+"/package.unmask",recursive=1)
self.punmaskdict = {}
for x in pkgunmasklines:
mycatpkg=dep_getkey(x)
if self.punmaskdict.has_key(mycatpkg):
self.punmaskdict[mycatpkg].append(x)
else:
self.punmaskdict[mycatpkg]=[x]
#getting categories from an external file now
categories = [grabfile(os.path.join(x, "categories")) for x in locations]
self.categories = stack_lists(categories, incremental=1)
del categories
archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
archlist = stack_lists(archlist, incremental=1)
self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
# get virtuals -- needs categories
self.loadVirtuals('/')
#package.mask
pkgmasklines = [grabfile_package(os.path.join(x, "package.mask")) for x in self.profiles]
for l in locations:
pkgmasklines.append(grabfile_package(l+os.path.sep+"package.mask", recursive=1))
pkgmasklines = stack_lists(pkgmasklines, incremental=1)
self.pmaskdict = {}
for x in pkgmasklines:
mycatpkg=dep_getkey(x)
if self.pmaskdict.has_key(mycatpkg):
self.pmaskdict[mycatpkg].append(x)
else:
self.pmaskdict[mycatpkg]=[x]
pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
for x in range(len(pkgprovidedlines)-1, -1, -1):
cpvr = catpkgsplit(pkgprovidedlines[x])
if not cpvr or cpvr[0] == "null":
writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n")
del pkgprovidedlines[x]
self.pprovideddict = {}
for x in pkgprovidedlines:
if not x:
continue
mycatpkg=dep_getkey(x)
if self.pprovideddict.has_key(mycatpkg):
self.pprovideddict[mycatpkg].append(x)
else:
self.pprovideddict[mycatpkg]=[x]
self.lookuplist=self.configlist[:]
self.lookuplist.reverse()
useorder=self["USE_ORDER"]
if not useorder:
# reasonable defaults; this is important as without USE_ORDER,
# USE will always be "" (nothing set)!
useorder="env:pkg:conf:auto:defaults"
useordersplit=useorder.split(":")
self.uvlist=[]
for x in useordersplit:
if self.configdict.has_key(x):
if "PKGUSE" in self.configdict[x].keys():
del self.configdict[x]["PKGUSE"] # Delete PkgUse, Not legal to set.
#prepend db to list to get correct order
self.uvlist[0:0]=[self.configdict[x]]
self.configdict["env"]["PORTAGE_GID"]=str(portage_gid)
self.backupenv["PORTAGE_GID"]=str(portage_gid)
if self.has_key("PORT_LOGDIR") and not self["PORT_LOGDIR"]:
# port_logdir is defined, but empty. this causes a traceback in doebuild.
writemsg(yellow("!!!")+" PORT_LOGDIR was defined, but set to nothing.\n")
writemsg(yellow("!!!")+" Disabling it. Please set it to a non-null value.\n")
del self["PORT_LOGDIR"]
if self["PORTAGE_CACHEDIR"]:
# XXX: Deprecated -- April 15 -- NJ
writemsg(yellow(">>> PORTAGE_CACHEDIR has been deprecated!")+"\n")
writemsg(">>> Please use PORTAGE_DEPCACHEDIR instead.\n")
self.depcachedir = self["PORTAGE_CACHEDIR"]
del self["PORTAGE_CACHEDIR"]
if self["PORTAGE_DEPCACHEDIR"]:
#the auxcache is the only /var/cache/edb/ entry that stays at / even when "root" changes.
# XXX: Could move with a CHROOT functionality addition.
self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
del self["PORTAGE_DEPCACHEDIR"]
overlays = string.split(self["PORTDIR_OVERLAY"])
if overlays:
new_ov=[]
for ov in overlays:
ov=os.path.normpath(ov)
if os.path.isdir(ov):
new_ov.append(ov)
else:
writemsg(red("!!! Invalid PORTDIR_OVERLAY (not a dir): "+ov+"\n"))
self["PORTDIR_OVERLAY"] = string.join(new_ov)
self.backup_changes("PORTDIR_OVERLAY")
self.regenerate()
self.features = portage_util.unique_array(self["FEATURES"].split())
#XXX: Should this be temporary? Is it possible at all to have a default?
if "gpg" in self.features:
if not os.path.exists(self["PORTAGE_GPG_DIR"]) or not os.path.isdir(self["PORTAGE_GPG_DIR"]):
writemsg("PORTAGE_GPG_DIR is invalid. Removing gpg from FEATURES.\n")
self.features.remove("gpg")
if not portage_exec.sandbox_capable and ("sandbox" in self.features or "usersandbox" in self.features):
writemsg(red("!!! Problem with sandbox binary. Disabling...\n\n"))
if "sandbox" in self.features:
self.features.remove("sandbox")
if "usersandbox" in self.features:
self.features.remove("usersandbox")
self.features.sort()
self["FEATURES"] = " ".join(["-*"]+self.features)
self.backup_changes("FEATURES")
if not len(self["CBUILD"]) and len(self["CHOST"]):
self["CBUILD"] = self["CHOST"]
self.backup_changes("CBUILD")
if mycpv:
self.setcpv(mycpv)
def loadVirtuals(self,root):
self.virtuals = self.getvirtuals(root)
def load_best_module(self,property_string):
best_mod = best_from_dict(property_string,self.modules,self.module_priority)
try:
mod = load_mod(best_mod)
except:
dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
sys.exit(1)
return mod
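# Example (illustrative): with the defaults above,
# self.load_best_module("portdbapi.auxdbmodule") imports and returns
# cache.flat_hash.database, unless the user modules file
# (MODULES_FILE_PATH) overrides that setting.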
def lock(self):
self.locked = 1
def unlock(self):
self.locked = 0
def modifying(self):
if self.locked:
raise Exception, "Configuration is locked."
def backup_changes(self,key=None):
if key and self.configdict["env"].has_key(key):
self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
else:
raise KeyError, "No such key defined in environment: %s" % key
def reset(self,keeping_pkg=0,use_cache=1):
"reset environment to original settings"
for x in self.configlist[-1].keys():
if x not in self.backupenv.keys():
del self.configlist[-1][x]
self.configdict["env"].update(self.backupenv)
self.modifiedkeys = []
if not keeping_pkg:
self.puse = ""
self.configdict["pkg"].clear()
self.regenerate(use_cache=use_cache)
def load_infodir(self,infodir):
if self.configdict.has_key("pkg"):
for x in self.configdict["pkg"].keys():
del self.configdict["pkg"][x]
else:
writemsg("No pkg setup for settings instance?\n")
sys.exit(17)
if os.path.exists(infodir):
if os.path.exists(infodir+"/environment"):
self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
myre = re.compile('^[A-Z]+$')
for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
if myre.match(filename):
try:
mydata = string.strip(open(infodir+"/"+filename).read())
if len(mydata)<2048:
if filename == "USE":
self.configdict["pkg"][filename] = "-* "+mydata
else:
self.configdict["pkg"][filename] = mydata
except SystemExit, e:
raise
except:
writemsg("!!! Unable to read file: %s\n" % (infodir+"/"+filename))
return 1
return 0
def setcpv(self,mycpv,use_cache=1):
self.modifying()
self.mycpv = mycpv
cp = dep_getkey(mycpv)
newpuse = ""
if self.pusedict.has_key(cp):
self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
if self.pusekey:
newpuse = string.join(self.pusedict[cp][self.pusekey])
if newpuse == self.puse:
return
self.puse = newpuse
self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
self.reset(keeping_pkg=1,use_cache=use_cache)
def setinst(self,mycpv,mydbapi):
# Grab the virtuals this package provides and add them into the tree virtuals.
provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
if isinstance(mydbapi, portdbapi):
myuse = self["USE"]
else:
myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
cp = dep_getkey(mycpv)
for virt in virts:
virt = dep_getkey(virt)
if not self.treeVirtuals.has_key(virt):
self.treeVirtuals[virt] = []
# XXX: Is this bad? -- It's a permanent modification
if cp not in self.treeVirtuals[virt]:
self.treeVirtuals[virt].append(cp)
self.virtuals = self.__getvirtuals_compile()
def regenerate(self,useonly=0,use_cache=1):
global usesplit,profiledir
if self.already_in_regenerate:
# XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
writemsg("!!! Looping in regenerate.\n",1)
return
else:
self.already_in_regenerate = 1
if useonly:
myincrementals=["USE"]
else:
myincrementals=portage_const.INCREMENTALS
for mykey in myincrementals:
if mykey=="USE":
mydbs=self.uvlist
# XXX Global usage of db... Needs to go away somehow.
if db.has_key(root) and db[root].has_key("vartree"):
self.configdict["auto"]["USE"]=autouse(db[root]["vartree"],use_cache=use_cache)
else:
self.configdict["auto"]["USE"]=""
else:
mydbs=self.configlist[:-1]
myflags=[]
for curdb in mydbs:
if not curdb.has_key(mykey):
continue
#variables are already expanded
mysplit=curdb[mykey].split()
for x in mysplit:
if x=="-*":
# "-*" is a special "minus" var that means "unset all settings".
# so USE="-* gnome" will have *just* gnome enabled.
myflags=[]
continue
if x[0]=="+":
# Not legal. People assume too much. Complain.
writemsg(red("USE flags should not start with a '+': %s\n" % x))
x=x[1:]
if (x[0]=="-"):
if (x[1:] in myflags):
# Unset/Remove it.
del myflags[myflags.index(x[1:])]
continue
# We got here, so add it now.
if x not in myflags:
myflags.append(x)
myflags.sort()
#store setting in last element of configlist, the original environment:
self.configlist[-1][mykey]=string.join(myflags," ")
del myflags
#cache split-up USE var in a global
usesplit=[]
for x in string.split(self.configlist[-1]["USE"]):
if x not in self.usemask:
usesplit.append(x)
if self.has_key("USE_EXPAND"):
for var in string.split(self["USE_EXPAND"]):
if self.has_key(var):
for x in string.split(self[var]):
mystr = string.lower(var)+"_"+x
if mystr not in usesplit and mystr not in self.usemask:
usesplit.append(mystr)
# Prepend ARCH variable to USE settings so '-*' in env doesn't kill arch.
if self.configdict["defaults"].has_key("ARCH"):
if self.configdict["defaults"]["ARCH"]:
if self.configdict["defaults"]["ARCH"] not in usesplit:
usesplit.insert(0,self.configdict["defaults"]["ARCH"])
self.configlist[-1]["USE"]=string.join(usesplit," ")
self.already_in_regenerate = 0
def getvirtuals(self, myroot):
if self.virtuals:
return self.virtuals
myvirts = {}
# This breaks catalyst/portage when setting to a fresh/empty root.
# Virtuals cannot be calculated because there is nothing to work
# from. So the only ROOT prefixed dir should be local configs.
#myvirtdirs = prefix_array(self.profiles,myroot+"/")
myvirtdirs = copy.deepcopy(self.profiles)
while self.user_profile_dir in myvirtdirs:
myvirtdirs.remove(self.user_profile_dir)
# Rules
# R1: Collapse profile virtuals
# R2: Extract user-negatives.
# R3: Collapse user-virtuals.
# R4: Apply user negatives to all except user settings.
# Order of preference:
# 1. user-declared that are installed
# 2. installed and in profile
# 3. installed
# 4. user-declared set
# 5. profile
self.dirVirtuals = [grabdict(os.path.join(x, "virtuals")) for x in myvirtdirs]
self.dirVirtuals.reverse()
if self.user_profile_dir and os.path.exists(self.user_profile_dir+"/virtuals"):
self.userVirtuals = grabdict(self.user_profile_dir+"/virtuals")
# Store all the negatives for later.
for x in self.userVirtuals.keys():
self.negVirtuals[x] = []
for y in self.userVirtuals[x]:
if y[0] == '-':
self.negVirtuals[x].append(y[:])
# Collapse the user virtuals so that we don't deal with negatives.
self.userVirtuals = stack_dictlist([self.userVirtuals],incremental=1)
# Collapse all the profile virtuals including user negations.
self.dirVirtuals = stack_dictlist([self.negVirtuals]+self.dirVirtuals,incremental=1)
# Repoman does not use user or tree virtuals.
if os.environ.get("PORTAGE_CALLER","") != "repoman":
# XXX: vartree does not use virtuals, does user set matter?
temp_vartree = vartree(myroot,self.dirVirtuals,categories=self.categories)
# Reduce the provides into a list by CP.
self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
return self.__getvirtuals_compile()
def __getvirtuals_compile(self):
"""Actually generate the virtuals we have collected.
The results are reversed so the list order is left to right.
Given data is [Best,Better,Good] sets of [Good, Better, Best]"""
# Virtuals by profile+tree preferences.
ptVirtuals = {}
# Virtuals by user+tree preferences.
utVirtuals = {}
# If a user virtual is already installed, we preference it.
for x in self.userVirtuals.keys():
utVirtuals[x] = []
if self.treeVirtuals.has_key(x):
for y in self.userVirtuals[x]:
if y in self.treeVirtuals[x]:
utVirtuals[x].append(y)
#print "F:",utVirtuals
#utVirtuals[x].reverse()
#print "R:",utVirtuals
# If a profile virtual is already installed, we preference it.
for x in self.dirVirtuals.keys():
ptVirtuals[x] = []
if self.treeVirtuals.has_key(x):
for y in self.dirVirtuals[x]:
if y in self.treeVirtuals[x]:
ptVirtuals[x].append(y)
# UserInstalled, ProfileInstalled, Installed, User, Profile
biglist = [utVirtuals, ptVirtuals, self.treeVirtuals,
self.userVirtuals, self.dirVirtuals]
# We reverse each dictlist so that the order matches everything
# else in portage. [-*, a, b] [b, c, d] ==> [b, a]
for dictlist in biglist:
for key in dictlist:
dictlist[key].reverse()
# User settings and profile settings take precedence over tree.
val = stack_dictlist(biglist,incremental=1)
return val
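# Example (illustrative): if a profile lists
#   virtual/jdk  dev-java/sun-jdk dev-java/blackdown-jdk
# and only dev-java/blackdown-jdk is installed, the compiled mapping puts
# blackdown-jdk first for virtual/jdk, since installed providers are
# preferenced over plain profile entries by the biglist ordering above.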
def __delitem__(self,mykey):
for x in self.lookuplist:
if x != None:
if mykey in x:
del x[mykey]
def __getitem__(self,mykey):
match = ''
for x in self.lookuplist:
if x == None:
writemsg("!!! lookuplist is null.\n")
elif x.has_key(mykey):
match = x[mykey]
break
if 0 and match and mykey in ["PORTAGE_BINHOST"]:
# These require HTTP Encoding
try:
import urllib
if urllib.unquote(match) != match:
writemsg("Note: %s already contains escape codes." % (mykey))
else:
match = urllib.quote(match)
except SystemExit, e:
raise
except:
writemsg("Failed to fix %s using urllib, attempting to continue.\n" % (mykey))
pass
elif mykey == "CONFIG_PROTECT_MASK":
match += " /etc/env.d"
return match
def has_key(self,mykey):
for x in self.lookuplist:
if x.has_key(mykey):
return 1
return 0
def keys(self):
mykeys=[]
for x in self.lookuplist:
for y in x.keys():
if y not in mykeys:
mykeys.append(y)
return mykeys
def __setitem__(self,mykey,myvalue):
"set a value; will be thrown away at reset() time"
if type(myvalue) != types.StringType:
raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
self.modifying()
self.modifiedkeys += [mykey]
self.configdict["env"][mykey]=myvalue
def environ(self):
"return our locally-maintained environment"
mydict={}
for x in self.keys():
mydict[x]=self[x]
if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
mydict["HOME"]=mydict["BUILD_PREFIX"][:]
return mydict
# XXX This would be to replace getstatusoutput completely.
# XXX Issue: cannot block execution. Deadlock condition.
def spawn(mystring,mysettings,debug=0,free=0,droppriv=0,fd_pipes=None,**keywords):
"""spawn a subprocess with optional sandbox protection,
depending on whether sandbox is enabled. The "free" argument,
when set to 1, will disable sandboxing. This allows us to
spawn processes that are supposed to modify files outside of the
sandbox. We can't use os.system anymore because it messes up
signal handling. Using spawn allows our Portage signal handler
to work."""
if type(mysettings) == types.DictType:
env=mysettings
keywords["opt_name"]="[ %s ]" % "portage"
else:
check_config_instance(mysettings)
env=mysettings.environ()
keywords["opt_name"]="[%s]" % mysettings["PF"]
# XXX: Negative RESTRICT word
droppriv=(droppriv and ("userpriv" in features) and not \
(("nouserpriv" in string.split(mysettings["RESTRICT"])) or \
("userpriv" in string.split(mysettings["RESTRICT"]))))
if droppriv and not uid and portage_gid and portage_uid:
keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
if not free:
free=((droppriv and "usersandbox" not in features) or \
(not droppriv and "sandbox" not in features and "usersandbox" not in features))
if not free:
keywords["opt_name"] += " sandbox"
return portage_exec.spawn_sandbox(mystring,env=env,**keywords)
else:
keywords["opt_name"] += " bash"
return portage_exec.spawn_bash(mystring,env=env,**keywords)
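# Example (illustrative): doebuild-style callers invoke
#   spawn(EBUILD_SH_BINARY+" clean", mysettings)
# which runs under the sandbox wrapper unless free=1 is passed or the
# "sandbox"/"usersandbox" FEATURES are unset; compare the nofetch call in
# fetch() below.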
def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
"fetch files. Will use digest file if available."
# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
if ("mirror" in mysettings["RESTRICT"].split()) or \
("nomirror" in mysettings["RESTRICT"].split()):
if ("mirror" in features) and ("lmirror" not in features):
# lmirror should allow you to bypass mirror restrictions.
# XXX: This is not a good thing, and is temporary at best.
print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
return 1
global thirdpartymirrors
check_config_instance(mysettings)
custommirrors=grabdict(CUSTOM_MIRRORS_FILE,recursive=1)
mymirrors=[]
if listonly or ("distlocks" not in features):
use_locks = 0
fetch_to_ro = 0
if "skiprocheck" in features:
fetch_to_ro = 1
if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
if use_locks:
writemsg(red("!!! You are fetching to a read-only filesystem, you should turn locking off.\n"))
writemsg("!!! This can be done by adding -distlocks to FEATURES in /etc/make.conf\n")
# use_locks = 0
# local mirrors are always added
if custommirrors.has_key("local"):
mymirrors += custommirrors["local"]
if ("nomirror" in mysettings["RESTRICT"].split()) or \
("mirror" in mysettings["RESTRICT"].split()):
# We don't add any mirrors.
pass
else:
if try_mirrors:
for x in mysettings["GENTOO_MIRRORS"].split():
if x:
if x[-1] == '/':
mymirrors += [x[:-1]]
else:
mymirrors += [x]
mydigests = {}
digestfn = mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
if os.path.exists(digestfn):
mydigests = digestParseFile(digestfn)
fsmirrors = []
for x in range(len(mymirrors)-1,-1,-1):
if mymirrors[x] and mymirrors[x][0]=='/':
fsmirrors += [mymirrors[x]]
del mymirrors[x]
for myuri in myuris:
myfile=os.path.basename(myuri)
try:
destdir = mysettings["DISTDIR"]+"/"
if not os.path.exists(destdir+myfile):
for mydir in fsmirrors:
if os.path.exists(mydir+"/"+myfile):
writemsg(_("Local mirror has file: %(file)s\n" % {"file":myfile}))
shutil.copyfile(mydir+"/"+myfile,destdir+"/"+myfile)
break
except (OSError,IOError),e:
# file does not exist
writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
gotit=0
if "fetch" in mysettings["RESTRICT"].split():
# fetch is restricted. Ensure all files have already been downloaded; otherwise,
# print message and exit.
gotit=1
for myuri in myuris:
myfile=os.path.basename(myuri)
try:
mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
except (OSError,IOError),e:
# file does not exist
writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
gotit=0
if not gotit:
print
print "!!!",mysettings["CATEGORY"]+"/"+mysettings["PF"],"has fetch restriction turned on."
print "!!! This probably means that this ebuild's files must be downloaded"
print "!!! manually. See the comments in the ebuild for more information."
print
spawn(EBUILD_SH_BINARY+" nofetch",mysettings)
return 0
return 1
locations=mymirrors[:]
filedict={}
primaryuri_indexes={}
for myuri in myuris:
myfile=os.path.basename(myuri)
if not filedict.has_key(myfile):
filedict[myfile]=[]
for y in range(0,len(locations)):
filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
if myuri[:9]=="mirror://":
eidx = myuri.find("/", 9)
if eidx != -1:
mirrorname = myuri[9:eidx]
# Try user-defined mirrors first
if custommirrors.has_key(mirrorname):
for cmirr in custommirrors[mirrorname]:
filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
# remove the mirrors we tried from the list of official mirrors
if cmirr.strip() in thirdpartymirrors[mirrorname]:
thirdpartymirrors[mirrorname].remove(cmirr)
# now try the official mirrors
if thirdpartymirrors.has_key(mirrorname):
try:
shuffle(thirdpartymirrors[mirrorname])
except SystemExit, e:
raise
except:
writemsg(red("!!! YOU HAVE A BROKEN PYTHON/GLIBC.\n"))
writemsg( "!!! You are most likely on a pentium4 box and have specified -march=pentium4\n")
writemsg( "!!! or -fpmath=sse2. GCC was generating invalid sse2 instructions in versions\n")
writemsg( "!!! prior to 3.2.3. Please merge the latest gcc or rebuild python with either\n")
writemsg( "!!! -march=pentium3 or set -mno-sse2 in your cflags.\n\n\n")
time.sleep(10)
for locmirr in thirdpartymirrors[mirrorname]:
filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
if not filedict[myfile]:
writemsg("No known mirror by the name: %s\n" % (mirrorname))
else:
writemsg("Invalid mirror definition in SRC_URI:\n")
writemsg(" %s\n" % (myuri))
else:
if "primaryuri" in mysettings["RESTRICT"].split():
# Use the source site first.
if primaryuri_indexes.has_key(myfile):
primaryuri_indexes[myfile] += 1
else:
primaryuri_indexes[myfile] = 0
filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
else:
filedict[myfile].append(myuri)
missingSourceHost = False
for myfile in filedict.keys(): # Gives a list, not just the first one
if not filedict[myfile]:
writemsg("Warning: No mirrors available for file '%s'\n" % (myfile))
missingSourceHost = True
if missingSourceHost:
return 0
del missingSourceHost
can_fetch=True
if not os.access(mysettings["DISTDIR"]+"/",os.W_OK):
if not fetch_to_ro:
print "!!! No write access to %s" % mysettings["DISTDIR"]+"/"
can_fetch=False
else:
mystat=os.stat(mysettings["DISTDIR"]+"/")
if mystat.st_gid != portage_gid:
try:
os.chown(mysettings["DISTDIR"],-1,portage_gid)
except OSError, oe:
if oe.errno == 1:
print red("!!!")+" Unable to chgrp of %s to portage, continuing\n" % mysettings["DISTDIR"]
else:
raise oe
# writable by portage_gid? This is specific to root, adjust perms if needed automatically.
if not stat.S_IMODE(mystat.st_mode) & 020:
try:
os.chmod(mysettings["DISTDIR"],stat.S_IMODE(mystat.st_mode) | 020)
except OSError, oe:
if oe.errno == 1:
print red("!!!")+" Unable to make %s group-writable. Non-root users will experience issues.\n" % mysettings["DISTDIR"]
else:
raise oe
if use_locks and locks_in_subdir:
if os.path.exists(mysettings["DISTDIR"]+"/"+locks_in_subdir):
if not os.access(mysettings["DISTDIR"]+"/"+locks_in_subdir,os.W_OK):
writemsg("!!! No write access to %s. Aborting.\n" % (mysettings["DISTDIR"]+"/"+locks_in_subdir))
return 0
else:
old_umask=os.umask(0002)
os.mkdir(mysettings["DISTDIR"]+"/"+locks_in_subdir,0775)
if os.stat(mysettings["DISTDIR"]+"/"+locks_in_subdir).st_gid != portage_gid:
try:
os.chown(mysettings["DISTDIR"]+"/"+locks_in_subdir,-1,portage_gid)
except SystemExit, e:
raise
except:
pass
os.umask(old_umask)
for myfile in filedict.keys():
fetched=0
file_lock = None
if listonly:
writemsg("\n")
else:
if use_locks and can_fetch:
if locks_in_subdir:
file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
else:
file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
try:
for loc in filedict[myfile]:
if listonly:
writemsg(loc+" ")
continue
# allow different fetchcommands per protocol
protocol = loc[0:loc.find("://")]
if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
else:
fetchcommand=mysettings["FETCHCOMMAND"]
if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
else:
resumecommand=mysettings["RESUMECOMMAND"]
fetchcommand=string.replace(fetchcommand,"${DISTDIR}",mysettings["DISTDIR"])
resumecommand=string.replace(resumecommand,"${DISTDIR}",mysettings["DISTDIR"])
try:
mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
if mydigests.has_key(myfile):
#if we have the digest file, we know the final size and can resume the download.
if mystat[stat.ST_SIZE]<mydigests[myfile]["size"]:
fetched=1
else:
#we already have it downloaded, skip.
#if our file is bigger than the recorded size, digestcheck should catch it.
if not fetchonly:
fetched=2
else:
# Verify checksums at each fetch for fetchonly.
verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
if not verified_ok:
writemsg("!!! Previously fetched file: "+str(myfile)+"\n")
writemsg("!!! Reason: "+reason[0]+"\n")
writemsg("!!! Got: "+reason[1]+"\n")
writemsg("!!! Expected: "+reason[2]+"\n")
writemsg("Refetching...\n\n")
os.unlink(mysettings["DISTDIR"]+"/"+myfile)
fetched=0
else:
for x_key in mydigests[myfile].keys():
writemsg(">>> Previously fetched file: "+str(myfile)+" "+x_key+" ;-)\n")
fetched=2
break #No need to keep looking for this file, we have it!
else:
#we don't have the digest file, but the file exists. Assume it is fully downloaded.
fetched=2
except (OSError,IOError),e:
writemsg("An exception was caught(1)...\nFailing the download: %s.\n" % (str(e)),1)
fetched=0
if not can_fetch:
if fetched != 2:
if fetched == 0:
writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile)
else:
writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile)
return 0
else:
continue
# check if we can actually write to the directory/existing file.
if fetched!=2 and os.path.exists(mysettings["DISTDIR"]+"/"+myfile) != \
os.access(mysettings["DISTDIR"]+"/"+myfile, os.W_OK) and not fetch_to_ro:
writemsg(red("***")+" Lack write access to %s, failing fetch\n" % str(mysettings["DISTDIR"]+"/"+myfile))
fetched=0
break
elif fetched!=2:
#we either need to resume or start the download
#you can't use "continue" when you're inside a "try" block
if fetched==1:
#resume mode:
writemsg(">>> Resuming download...\n")
locfetch=resumecommand
else:
#normal mode:
locfetch=fetchcommand
writemsg(">>> Downloading "+str(loc)+"\n")
myfetch=string.replace(locfetch,"${URI}",loc)
myfetch=string.replace(myfetch,"${FILE}",myfile)
try:
if selinux_enabled:
con=selinux.getcontext()
con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_FETCH_T"])
selinux.setexec(con)
myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
selinux.setexec(None)
else:
myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
finally:
#if root, -always- set the perms.
if os.path.exists(mysettings["DISTDIR"]+"/"+myfile) and (fetched != 1 or os.getuid() == 0) \
and os.access(mysettings["DISTDIR"]+"/",os.W_OK):
if os.stat(mysettings["DISTDIR"]+"/"+myfile).st_gid != portage_gid:
try:
os.chown(mysettings["DISTDIR"]+"/"+myfile,-1,portage_gid)
except SystemExit, e:
raise
except:
portage_util.writemsg("chown failed on distfile: " + str(myfile))
os.chmod(mysettings["DISTDIR"]+"/"+myfile,0664)
if mydigests!=None and mydigests.has_key(myfile):
try:
mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
# No exception means the file exists. Let digestcheck() report
# appropriately on size or checksum errors.
if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
# Fetch failed... Try the next one... Kill 404 files though.
if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
try:
if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
try:
os.unlink(mysettings["DISTDIR"]+"/"+myfile)
writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
except SystemExit, e:
raise
except:
pass
except SystemExit, e:
raise
except:
pass
continue
if not fetchonly:
fetched=2
break
else:
# File is the correct size--check the checksums for the fetched
# file NOW, for those users who don't have a stable/continuous
# net connection. This way we have a chance to try to download
# from another mirror...
verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
if not verified_ok:
writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n")
writemsg("!!! Reason: "+reason[0]+"\n")
writemsg("!!! Got: "+reason[1]+"\n")
writemsg("!!! Expected: "+reason[2]+"\n")
writemsg("Removing corrupt distfile...\n")
os.unlink(mysettings["DISTDIR"]+"/"+myfile)
fetched=0
else:
for x_key in mydigests[myfile].keys():
writemsg(">>> "+str(myfile)+" "+x_key+" ;-)\n")
fetched=2
break
except (OSError,IOError),e:
writemsg("An exception was caught(2)...\nFailing the download: %s.\n" % (str(e)),1)
fetched=0
else:
if not myret:
fetched=2
break
elif mydigests!=None:
writemsg("No digest file available and download failed.\n\n")
finally:
if use_locks and file_lock:
portage_locks.unlockfile(file_lock)
if listonly:
writemsg("\n")
if (fetched!=2) and not listonly:
writemsg("!!! Couldn't download "+str(myfile)+". Aborting.\n")
return 0
return 1
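# digestCreate() below builds the in-memory digest mapping. A minimal
# sketch of the expected result (hypothetical file name and values; the
# hash keys are whatever MANIFEST1_HASH_FUNCTIONS selects, e.g. MD5):
#   digestCreate(["foo-1.0.tar.gz"], "/usr/portage/distfiles")
#   -> {"foo-1.0.tar.gz": {"MD5": "d41d8cd9...", "size": 12345}}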
def digestCreate(myfiles,basedir,oldDigest={}):
"""Takes a list of files and the directory they are in and returns the
dict of dict[filename][CHECKSUM_KEY] = hash
returns None on error."""
mydigests={}
for x in myfiles:
print "<<<",x
myfile=os.path.normpath(basedir+"///"+x)
if os.path.exists(myfile):
if not os.access(myfile, os.R_OK):
print "!!! Given file does not appear to be readable. Does it exist?"
print "!!! File:",myfile
return None
mydigests[x] = portage_checksum.perform_multiple_checksums(myfile, hashes=portage_const.MANIFEST1_HASH_FUNCTIONS)
mysize = os.stat(myfile)[stat.ST_SIZE]
else:
if x in oldDigest:
# DeepCopy because we might not have a unique reference.
mydigests[x] = copy.deepcopy(oldDigest[x])
mysize = copy.deepcopy(oldDigest[x]["size"])
else:
print "!!! We have a source URI, but no file..."
print "!!! File:",myfile
return None
if mydigests[x].has_key("size") and (mydigests[x]["size"] != mysize):
raise portage_exception.DigestException, "Size mismatch during checksums"
mydigests[x]["size"] = copy.deepcopy(mysize)
return mydigests
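# digestCreateLines() flattens that mapping into the on-disk digest format,
# one "<checksumkey> <checksum_hex_string> <filename> <filesize>" line per
# hash, e.g. (illustrative values only):
#   MD5 d41d8cd98f00b204e9800998ecf8427e foo-1.0.tar.gz 12345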
def digestCreateLines(filelist, mydict):
mylines = []
mydigests = copy.deepcopy(mydict)
for myarchive in filelist:
# Check for an empty entry before looking up "size", which would otherwise raise KeyError.
if len(mydigests[myarchive]) == 0:
raise portage_exception.DigestException, "Unable to generate a digest for '%(file)s'" % {"file":myarchive}
mysize = mydigests[myarchive]["size"]
for sumName in mydigests[myarchive].keys():
if sumName not in portage_checksum.get_valid_checksum_keys():
continue
mysum = mydigests[myarchive][sumName]
myline = sumName[:]
myline += " "+mysum
myline += " "+myarchive
myline += " "+str(mysize)
mylines.append(myline)
return mylines
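# digestgen() ties the helpers above together: it writes the per-package
# digest file under FILESDIR and the Manifest in the package directory.
# It is normally reached via the "digest" phase (e.g. 'ebuild foo.ebuild
# digest'); treating that as the usual call path is an assumption here.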
def digestgen(myarchives,mysettings,overwrite=1,manifestonly=0):
"""generates digest file if missing. Assumes all files are available. If
overwrite=0, the digest will only be created if it doesn't already exist."""
# archive files
basedir=mysettings["DISTDIR"]+"/"
digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
# portage files -- p(ortagefiles)basedir
pbasedir=mysettings["O"]+"/"
manifestfn=pbasedir+"Manifest"
if not manifestonly:
if not os.path.isdir(mysettings["FILESDIR"]):
os.makedirs(mysettings["FILESDIR"])
mycvstree=cvstree.getentries(pbasedir, recursive=1)
if ("cvs" in features) and os.path.exists(pbasedir+"/CVS"):
if not cvstree.isadded(mycvstree,"files"):
if "autoaddcvs" in features:
print ">>> Auto-adding files/ dir to CVS..."
spawn("cd "+pbasedir+"; cvs add files",mysettings,free=1)
else:
print "--- Warning: files/ is not added to cvs."
if (not overwrite) and os.path.exists(digestfn):
return 1
print green(">>> Generating digest file...")
# Track the old digest so we can reuse its checksums without requiring
# all files to be downloaded.
myolddigest = {}
if os.path.exists(digestfn):
myolddigest = digestParseFile(digestfn)
myarchives.sort()
try:
mydigests=digestCreate(myarchives, basedir, oldDigest=myolddigest)
except portage_exception.DigestException, s:
print "!!!",s
return 0
if mydigests==None: # There was a problem, exit with an errorcode.
return 0
try:
outfile=open(digestfn, "w+")
except SystemExit, e:
raise
except Exception, e:
print "!!! Filesystem error skipping generation. (Read-Only?)"
print "!!!",e
return 0
for x in digestCreateLines(myarchives, mydigests):
outfile.write(x+"\n")
outfile.close()
try:
os.chown(digestfn,os.getuid(),portage_gid)
os.chmod(digestfn,0664)
except SystemExit, e:
raise
except Exception,e:
print e
print green(">>> Generating manifest file...")
mypfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
mypfiles=cvstree.apply_cvsignore_filter(mypfiles)
mypfiles.sort()
for x in ["Manifest"]:
if x in mypfiles:
mypfiles.remove(x)
mydigests=digestCreate(mypfiles, pbasedir)
if mydigests==None: # There was a problem, exit with an errorcode.
return 0
try:
outfile=open(manifestfn, "w+")
except SystemExit, e:
raise
except Exception, e:
print "!!! Filesystem error skipping generation. (Read-Only?)"
print "!!!",e
return 0
for x in digestCreateLines(mypfiles, mydigests):
outfile.write(x+"\n")
outfile.close()
try:
os.chown(manifestfn,os.getuid(),portage_gid)
os.chmod(manifestfn,0664)
except SystemExit, e:
raise
except Exception,e:
print e
if "cvs" in features and os.path.exists(pbasedir+"/CVS"):
mycvstree=cvstree.getentries(pbasedir, recursive=1)
myunaddedfiles=""
if not manifestonly and not cvstree.isadded(mycvstree,digestfn):
if digestfn[:len(pbasedir)]==pbasedir:
myunaddedfiles=digestfn[len(pbasedir):]+" "
else:
myunaddedfiles=digestfn+" "
if not cvstree.isadded(mycvstree,manifestfn[len(pbasedir):]):
if manifestfn[:len(pbasedir)]==pbasedir:
myunaddedfiles+=manifestfn[len(pbasedir):]+" "
else:
myunaddedfiles+=manifestfn
if myunaddedfiles:
if "autoaddcvs" in features:
print blue(">>> Auto-adding digest file(s) to CVS...")
spawn("cd "+pbasedir+"; cvs add "+myunaddedfiles,mysettings,free=1)
else:
print "--- Warning: digests are not yet added into CVS."
print darkgreen(">>> Computed message digests.")
print
return 1
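# digestParseFile() below is the inverse of digestCreateLines(). For
# example, a line such as (hypothetical hash value):
#   MD5 d41d8cd98f00b204e9800998ecf8427e foo-1.0.tar.gz 0
# parses into {"foo-1.0.tar.gz": {"MD5": "d41d8cd9...", "size": 0L}}.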
def digestParseFile(myfilename):
"""(filename) -- Parses a given file for entries matching:
<checksumkey> <checksum_hex_string> <filename> <filesize>
Ignores lines that don't start with a valid checksum identifier
and returns a dict with the filenames as keys and {checksumkey:checksum}
as the values."""
if not os.path.exists(myfilename):
return None
mylines = portage_util.grabfile(myfilename, compat_level=1)
mydigests={}
for x in mylines:
myline=string.split(x)
if len(myline) < 4:
#invalid line
continue
if myline[0] not in portage_checksum.get_valid_checksum_keys():
continue
mykey = myline.pop(0)
myhash = myline.pop(0)
mysize = long(myline.pop())
myfn = string.join(myline, " ")
if myfn not in mydigests:
mydigests[myfn] = {}
mydigests[myfn][mykey] = myhash
if "size" in mydigests[myfn]:
if mydigests[myfn]["size"] != mysize:
raise portage_exception.DigestException, "Conflicting sizes in digest: %(filename)s" % {"filename":myfilename}
else:
mydigests[myfn]["size"] = mysize
return mydigests
# XXXX strict was added here to fix a missing name error.
# XXXX It's used below, but we're not paying attention to how we get it?
def digestCheckFiles(myfiles, mydigests, basedir, note="", strict=0):
"""(fileslist, digestdict, basedir) -- Takes a list of files and a dict
of their digests and checks the digests against the indicated files in
the basedir given. Returns 1 only if all files exist and match the checksums.
"""
for x in myfiles:
if not mydigests.has_key(x):
print
print red("!!! No message digest entry found for file \""+x+".\"")
print "!!! Most likely a temporary problem. Try 'emerge sync' again later."
print "!!! If you are certain of the authenticity of the file then you may type"
print "!!! the following to generate a new digest:"
print "!!! ebuild /usr/portage/category/package/package-version.ebuild digest"
return 0
myfile=os.path.normpath(basedir+"/"+x)
if not os.path.exists(myfile):
if strict:
print "!!! File does not exist:",myfile
return 0
continue
ok,reason = portage_checksum.verify_all(myfile,mydigests[x])
if not ok:
print
print red("!!! Digest verification Failed:")
print red("!!!")+" "+str(myfile)
print red("!!! Reason: ")+reason[0]
print red("!!! Got: ")+str(reason[1])
print red("!!! Expected: ")+str(reason[2])
print
return 0
else:
print ">>> checksums "+note+" ;-)",x
return 1
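# digestcheck() verifies in two stages: the Manifest covers the
# portage-controlled files in the package directory, while the digest
# file covers the distfiles listed in SRC_URI.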
def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
"""Verifies checksums. Assumes all files have been downloaded."""
# archive files
basedir=mysettings["DISTDIR"]+"/"
digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
# portage files -- p(ortagefiles)basedir
pbasedir=mysettings["O"]+"/"
manifestfn=pbasedir+"Manifest"
if not (os.path.exists(digestfn) and os.path.exists(manifestfn)):
if "digest" in features:
print ">>> No package digest/Manifest file found."
print ">>> \"digest\" mode enabled; auto-generating new digest..."
return digestgen(myfiles,mysettings)
else:
if not os.path.exists(manifestfn):
if strict:
print red("!!! No package manifest found:"),manifestfn
return 0
else:
print "--- No package manifest found:",manifestfn
if not os.path.exists(digestfn):
print "!!! No package digest file found:",digestfn
print "!!! Type \"ebuild foo.ebuild digest\" to generate it."
return 0
mydigests=digestParseFile(digestfn)
if mydigests==None:
print "!!! Failed to parse digest file:",digestfn
return 0
mymdigests=digestParseFile(manifestfn)
if "strict" not in features:
# XXX: Remove this when manifests become mainstream.
pass
elif mymdigests==None:
print "!!! Failed to parse manifest file:",manifestfn
if strict:
return 0
else:
# Check the portage-related files here.
mymfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
manifest_files = mymdigests.keys()
# Files unrelated to the build process are ignored for verification by default
for x in ["Manifest", "ChangeLog", "metadata.xml"]:
while x in mymfiles:
mymfiles.remove(x)
while x in manifest_files:
manifest_files.remove(x)
for x in range(len(mymfiles)-1,-1,-1):
if mymfiles[x] in manifest_files:
manifest_files.remove(mymfiles[x])
elif len(cvstree.apply_cvsignore_filter([mymfiles[x]]))==0:
# We filter here rather than above because the manifest may legitimately
# list files that the cvsignore filter would flag. If the filter drops
# the file (nothing is returned), the manifest doesn't know about it,
# so we remove it from the list here.
del mymfiles[x]
else:
print red("!!! Security Violation: A file exists that is not in the manifest.")
print "!!! File:",mymfiles[x]
if strict:
return 0
if manifest_files and strict:
print red("!!! Files listed in the manifest do not exist!")
for x in manifest_files:
print x
return 0
if not digestCheckFiles(mymfiles, mymdigests, pbasedir, note="files ", strict=strict):
if strict:
print ">>> Please ensure you have sync'd properly. Please try '"+bold("emerge sync")+"' and"
print ">>> optionally examine the file(s) for corruption. "+bold("A sync will fix most cases.")
print
return 0
else:
print "--- Manifest check failed. 'strict' not enabled; ignoring."
print
if justmanifest:
return 1
# Just return the status, as it's the last check.
return digestCheckFiles(myfiles, mydigests, basedir, note="src_uri", strict=strict)
# parse actionmap to spawn ebuild with the appropriate args
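# Each actionmap entry is expected to have the following shape (inferred
# from the lookups below, not a documented interface):
#   actionmap["compile"] = {"dep": "unpack", "args": (free, droppriv)}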
def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
if alwaysdep or ("noauto" not in features):
# process dependency first
if "dep" in actionmap[mydo].keys():
retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
if retval:
return retval
# spawn ebuild.sh
mycommand = EBUILD_SH_BINARY + " "
if selinux_enabled and ("sesandbox" in features) and (mydo in ["unpack","compile","test","install"]):
con=selinux.getcontext()
con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_SANDBOX_T"])
selinux.setexec(con)
retval=spawn(mycommand + mydo,mysettings,debug=debug,
free=actionmap[mydo]["args"][0],
droppriv=actionmap[mydo]["args"][1],logfile=logfile)
selinux.setexec(None)
else:
retval=spawn(mycommand + mydo,mysettings, debug=debug,
free=actionmap[mydo]["args"][0],
droppriv=actionmap[mydo]["args"][1],logfile=logfile)
return retval
# chunked out deps for each phase, so that ebuild binary can use it
# to collapse targets down.
actionmap_deps={
"depend": [],
"setup": [],
"unpack": ["setup"],
"compile":["unpack"],
"test": ["compile"],
"install":["test"],
"rpm": ["install"],
"package":["install"],
}
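# For example, requesting "package" collapses to the chain
# setup -> unpack -> compile -> test -> install -> package.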
def eapi_is_supported(eapi):
return str(eapi).strip() == str(portage_const.EAPI).strip()
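# doebuild() is the main phase dispatcher: 'mydo' names the phase to run
# (see validcommands below), 'myebuild' is the path to the ebuild, and
# 'myroot' is the ROOT to operate on. Returns 0 on success and nonzero
# on failure.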
def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,fetchall=0,tree=None):
global db, actionmap_deps
if not tree:
dump_traceback("Warning: tree not specified to doebuild")
tree = "porttree"
ebuild_path = os.path.abspath(myebuild)
pkg_dir = os.path.dirname(ebuild_path)
if mysettings.configdict["pkg"].has_key("CATEGORY"):
cat = mysettings.configdict["pkg"]["CATEGORY"]
else:
cat = os.path.basename(os.path.normpath(pkg_dir+"/.."))
mypv = os.path.basename(ebuild_path)[:-7]
mycpv = cat+"/"+mypv
mysplit=pkgsplit(mypv,silent=0)
if mysplit==None:
writemsg("!!! Error: PF is null '%s'; exiting.\n" % mypv)
return 1
if mydo != "depend":
# XXX: We're doing a little hack here to curtail the gvisible locking
# XXX: that creates a deadlock... Really need to isolate that.
mysettings.reset(use_cache=use_cache)
mysettings.setcpv(mycpv,use_cache=use_cache)
validcommands = ["help","clean","prerm","postrm","preinst","postinst",
"config","setup","depend","fetch","digest",
"unpack","compile","test","install","rpm","qmerge","merge",
"package","unmerge", "manifest"]
if mydo not in validcommands:
validcommands.sort()
writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo)
for vcount in range(len(validcommands)):
if vcount%6 == 0:
writemsg("\n!!! ")
writemsg(string.ljust(validcommands[vcount], 11))
writemsg("\n")
return 1
if not os.path.exists(myebuild):
writemsg("!!! doebuild: "+str(myebuild)+" not found for "+str(mydo)+"\n")
return 1
if debug: # Otherwise it overrides emerge's settings.
# We have no other way to set debug; it can't be passed in cleanly
# due to how the code is structured. Don't overwrite this so we can use it.
mysettings["PORTAGE_DEBUG"]=str(debug)
mysettings["ROOT"] = myroot
mysettings["STARTDIR"] = getcwd()
mysettings["EBUILD"] = ebuild_path
mysettings["O"] = pkg_dir
mysettings["CATEGORY"] = cat
mysettings["FILESDIR"] = pkg_dir+"/files"
mysettings["PF"] = mypv
mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
mysettings["PROFILE_PATHS"]