blob: 4f57262e9a19e4ff8fd9a87d59b256dee19a3c34 [file] [log] [blame]
#!/bin/bash
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Load Portage's shared helper functions (__helpers_die, __vecho,
# the __multijob_* machinery, ___eapi_* checks, ...).
source "${PORTAGE_BIN_PATH}"/helper-functions.sh || exit 1
# At least one argument (an option or a directory) is required.
if [[ -z $1 ]] ; then
__helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
# On EAPIs without offset-prefix support, fall back to the
# non-prefix variables: ED mirrors D and EPREFIX is empty.
if ! ___eapi_has_prefix_variables; then
ED=${D} EPREFIX=
fi
# Minimum file size to compress; empty means no limit.  Set by --limit
# and stashed into *.ecompress.dir files by --queue.
SIZE_LIMIT=''
while [[ $# -gt 0 ]] ; do
	case $1 in
	--ignore)
		shift
		# Mark every existing path with a .ecompress.skip file; the
		# bare '>>' redirection creates the marker (without
		# truncating an existing one) and runs no command.
		for skip in "$@" ; do
			[[ -d ${ED}${skip} || -f ${ED}${skip} ]] \
				&& >> "${ED}${skip}.ecompress.skip"
		done
		exit 0
		;;
	--queue)
		shift
		# Map each dir argument to its ${ED}<dir>.ecompress.dir marker.
		set -- "${@/%/.ecompress.dir}"
		set -- "${@/#/${ED}}"
		ret=0
		for x in "$@" ; do
			# Stash the limit in the .dir file so we can reload it later.
			# Write it through an explicit '%s' format so a limit
			# containing '%' or '\' is stored verbatim instead of
			# being interpreted as a printf format string.
			printf '%s' "${SIZE_LIMIT}" > "${x}"
			((ret|=$?))
		done
		[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
		exit $ret
		;;
	--dequeue)
		[[ -n $2 ]] && __vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
		# Re-invoke ourselves on every queued directory (marker path
		# stripped back to an image-relative dir), then drop all
		# skip markers.
		find "${ED}" -name '*.ecompress.dir' -print0 \
			| sed -e 's:\.ecompress\.dir::g' -e "s:${ED}:/:g" \
			| ${XARGS} -0 ecompressdir
		find "${ED}" -name '*.ecompress.skip' -print0 | ${XARGS} -0 rm -f
		exit 0
		;;
	--limit)
		SIZE_LIMIT=$2
		shift
		;;
	--*)
		__helpers_die "${0##*/}: unknown arguments '$*'"
		exit 1
		;;
	*)
		# First non-option argument: the directory list begins here.
		break
		;;
	esac
	shift
done
# Figure out the new compression suffix (e.g. ".bz2").  An empty
# suffix disables the compress pass at the bottom of this script.
suffix=$(ecompress --suffix)
# funk_up_dir(action, suffix, binary, [size_limit])
# - action: "compress" or "decompress"
# - suffix: the compression suffix to work with (e.g. ".gz")
# - binary: the program to execute that'll compress/decompress
# - size_limit: if compressing, skip files smaller than this (bytes,
#   via find's "c" size unit)
# The directory we act on is implied in the ${dir} variable (the
# caller cd's into the target and sets dir="."); ${actual_dir} holds
# the original absolute path for skip-dir lookups and messages.
# Returns non-zero if any compression or symlink rewrite failed.
funk_up_dir() {
local act=$1 suffix=$2 binary=$3 size_limit=$4
# When compressing, act on files that do NOT yet carry the suffix;
# when decompressing, on files that DO.
local negate=""
[[ ${act} == "compress" ]] && negate="!"
local ret=0
# first we act on all the files
# (${negate} is deliberately unquoted: when empty it expands to
# nothing instead of an empty find argument)
local args=(
-type f
${negate} -iname "*${suffix}"
)
[[ -n ${size_limit} ]] && args+=( -size "+${size_limit}c" )
find "${dir}" "${args[@]}" -print0 | ${XARGS} -0 ${binary}
((ret|=$?))
# Second pass: rewrite symlinks that dangle because their target
# was renamed (gained or lost the suffix) by the pass above.
# Repeat until nothing changes, in order to handle multiple
# levels of indirection (see bug #470916).
local -i indirection=0
while true ; do
local something_changed=
while read -r -d $'\0' brokenlink ; do
# Only dangling links need fixing.
[[ -e ${brokenlink} ]] && continue
olddest=$(readlink "${brokenlink}")
# Ignore temporarily broken symlinks due to
# _relocate_skip_dirs (bug #399595), and handle
# absolute symlinks to files that aren't merged
# yet (bug #405327).
if [[ ${olddest} == /* ]] ; then
[ -e "${D}${olddest}" ] && continue
skip_dir_dest=${T}/ecompress-skip/${olddest#${EPREFIX}}
else
skip_dir_dest=${T}/ecompress-skip/${actual_dir#${ED}}/${brokenlink%/*}/${olddest}
fi
[[ -e ${skip_dir_dest} ]] && continue
# Compute the renamed target: append the suffix when compressing,
# strip it when decompressing (links whose target never had the
# suffix are left alone).
if [[ ${act} == "compress" ]] ; then
newdest=${olddest}${suffix}
else
[[ ${olddest} == *${suffix} ]] || continue
newdest=${olddest%${suffix}}
fi
# Only rewrite the link when the renamed target actually exists.
if [[ "${newdest}" == /* ]] ; then
[[ -f "${D}${newdest}" ]] || continue
else
[[ -f "${dir}/${brokenlink%/*}/${newdest}" ]] || continue
fi
something_changed=${brokenlink}
rm -f "${brokenlink}"
# Rename the link itself to match its new target.
[[ ${act} == "compress" ]] \
&& ln -snf "${newdest}" "${brokenlink}${suffix}" \
|| ln -snf "${newdest}" "${brokenlink%${suffix}}"
((ret|=$?))
done < <(find "${dir}" -type l -print0)
[[ -n ${something_changed} ]] || break
(( indirection++ ))
if (( indirection >= 100 )) ; then
# Protect against possibility of a bug triggering an endless loop.
eerror "ecompressdir: too many levels of indirection for" \
"'${actual_dir#${ED}}/${something_changed#./}'"
break
fi
done
return ${ret}
}
# _relocate_skip_dirs(srctree, dsttree)
# Relocate every path marked with a *.ecompress.skip file from
# srctree to the corresponding location under dsttree, carrying the
# marker file along, so the compression passes never touch it.
_relocate_skip_dirs() {
	local from="$1" to="$2"

	# A missing source tree simply means there is nothing to move.
	[[ -d ${from} ]] || return 0

	find "${from}" -name '*.ecompress.skip' -print0 | \
	while read -r -d $'\0' marker ; do
		# The marker file sits right next to the path it protects.
		local guarded=${marker%.ecompress.skip}
		local target="${to}${guarded#${from}}"
		mkdir -p "${target%/*}"
		mv "${guarded}" "${target}"
		mv "${marker}" "${target}.ecompress.skip"
	done
}
# Stash all skip-marked paths out of the image (${ED}) into
# ${T}/ecompress-skip before (de)compressing...
hide_skip_dirs() { _relocate_skip_dirs "${ED}" "${T}"/ecompress-skip/ ; }
# ...and move them back into the image afterwards.
restore_skip_dirs() { _relocate_skip_dirs "${T}"/ecompress-skip/ "${ED}" ; }
# Accumulated exit status across all directories processed below.
ret=0
# Start from a clean stash area for skip-marked paths.
rm -rf "${T}"/ecompress-skip
# Flat (suffix, decompress-command) pairs, consumed two at a time by
# the decompression loop below.
decompressors=(
".Z" "gunzip -f"
".gz" "gunzip -f"
".bz2" "bunzip2 -f"
".xz" "unxz -f"
".lzma" "unxz -f"
)
__multijob_init
for dir in "$@" ; do
# Resolve the argument to an absolute path inside the image dir.
dir=${dir#/}
dir="${ED}${dir}"
if [[ ! -d ${dir} ]] ; then
__vecho "${0##*/}: /${dir#${ED}} does not exist!"
continue
fi
cd "${dir}"
actual_dir=${dir}
dir=. # use relative path to avoid 'Argument list too long' errors
# hide all the stuff we want to skip
hide_skip_dirs "${dir}"
# since we've been requested to compress the whole dir,
# delete any individual queued requests
# (fall back to the limit stashed by --queue when none was passed
# via --limit; the $(<...) prints an error if no marker exists)
size_limit=${SIZE_LIMIT:-$(<"${actual_dir}.ecompress.dir")}
rm -f "${actual_dir}.ecompress.dir"
find "${dir}" -type f -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
# not uncommon for packages to compress doc files themselves
for (( i = 0; i < ${#decompressors[@]}; i += 2 )) ; do
# It's faster to parallelize at this stage than to try to
# parallelize the compressors. This is because the find|xargs
# ends up launching less compressors overall, so the overhead
# of forking children ends up dominating.
(
__multijob_child_init
funk_up_dir "decompress" "${decompressors[i]}" "${decompressors[i+1]}"
) &
__multijob_post_fork
: $(( ret |= $? ))
done
__multijob_finish
: $(( ret |= $? ))
# forcibly break all hard links as some compressors whine about it
find "${dir}" -type f -links +1 -exec env file="{}" sh -c \
'cp -p "${file}" "${file}.ecompress.break" ; mv -f "${file}.ecompress.break" "${file}"' \;
# now lets do our work
if [[ -n ${suffix} ]] ; then
__vecho "${0##*/}: $(ecompress --bin) /${actual_dir#${ED}}"
funk_up_dir "compress" "${suffix}" "ecompress" "${size_limit}"
: $(( ret |= $? ))
fi
# finally, restore the skipped stuff
restore_skip_dirs
done
# Die loudly (but still exit with the accumulated status) if any
# pass above failed.
[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit ${ret}