Get xbuddy to work with image_to_live correctly.
There's a lot of small issues here that just piled up. XBuddy pretty
much didn't work at all with image_to_live -- xbuddy:parrot/latest-canary/test
just threw errors. This CL fixes that and addresses some other issues I found
along the way.
1) Added a new flag to xbuddy to return an update_url and had image_to_live
use this specifically rather than rely on the --pregenerate_update hack that
FLAGS_image uses.
2) Killed log_thread. It always left a hanging log tail because the -t -t
ssh options effectively disconnected the thread from the main script.
3) Optimized update workflow (from 1) to try to download test / stateful
from GS if they are available as downloading the whole image + processing
an update sucks.
4) Cleaned up some docstrings and fixed board overrides.
5) Piped dev image logic through.
BUG=None
TEST=all unittests, image_to_live with various xbuddy strings and FLAGS_image
+ AU vm tests in CQ.
Change-Id: I4e60394451f7ff3e31be48167d32240160c18895
Reviewed-on: https://chromium-review.googlesource.com/171260
Tested-by: Chris Sosa <sosa@chromium.org>
Reviewed-by: Don Garrett <dgarrett@chromium.org>
Commit-Queue: Chris Sosa <sosa@chromium.org>
Reviewed-by: Gilad Arnold <garnold@chromium.org>
diff --git a/artifact_info.py b/artifact_info.py
index feb6555..89fbf3e 100644
--- a/artifact_info.py
+++ b/artifact_info.py
@@ -32,6 +32,9 @@
# The test image - the base image with both develolper and test enhancements.
TEST_IMAGE = 'test_image'
+# The developer image - the base image with developer enhancements.
+DEV_IMAGE = 'dev_image'
+
#### Autotest related packages. ####
# Autotest -- the main autotest directory without the test_suites subdir.
diff --git a/autoupdate.py b/autoupdate.py
index d2fe95d..97315d6 100644
--- a/autoupdate.py
+++ b/autoupdate.py
@@ -355,12 +355,11 @@
os.system('rm -rf "%s"' % output_dir)
raise AutoupdateError('Failed to generate update in %s' % output_dir)
- def GenerateUpdateImageWithCache(self, image_path, static_image_dir):
+ def GenerateUpdateImageWithCache(self, image_path):
"""Force generates an update payload based on the given image_path.
Args:
image_path: full path to the image.
- static_image_dir: the directory to move images to after generating.
Returns:
update directory relative to static_image_dir.
Raises:
@@ -372,13 +371,12 @@
if self.pregenerated_path:
return self.pregenerated_path
- # Which sub_dir of static_image_dir should hold our cached update image.
- cache_sub_dir = self.FindCachedUpdateImageSubDir(self.src_image,
- image_path)
+ # Which sub_dir should hold our cached update image.
+ cache_sub_dir = self.FindCachedUpdateImageSubDir(self.src_image, image_path)
_Log('Caching in sub_dir "%s"', cache_sub_dir)
# The cached payloads exist in a cache dir.
- cache_dir = os.path.join(static_image_dir, cache_sub_dir)
+ cache_dir = os.path.join(self.static_dir, cache_sub_dir)
cache_update_payload = os.path.join(cache_dir,
constants.UPDATE_FILE)
@@ -397,22 +395,23 @@
return cache_sub_dir
- def _SymlinkUpdateFiles(self, image_dir):
- """Set files in the base static_dir to link to most recent update files.
+ def _SymlinkUpdateFiles(self, target_dir, link_dir):
+ """Symlinks the update-related files from target_dir to link_dir.
Every time an update is called, clear existing files/symlinks in the
- devserver's static_dir, and replace them with symlinks.
- This allows the base of archive_dir to serve the most recent update.
+ link_dir, and replace them with symlinks to the target_dir.
Args:
- image_dir: Where update files are staged.
+ target_dir: Location of the target files.
+ link_dir: Directory where the links should exist after.
"""
- if self.static_dir == image_dir:
- _Log("Serving from static directory.")
+ _Log('Linking %s to %s', target_dir, link_dir)
+ if link_dir == target_dir:
+ _Log('Cannot symlink into the same directory.')
return
for f in UPDATE_FILES:
- link = os.path.join(self.static_dir, f)
- target = os.path.join(image_dir, f)
+ link = os.path.join(link_dir, f)
+ target = os.path.join(target_dir, f)
common_util.SymlinkFile(target, link)
def GetUpdateForLabel(self, client_version, label,
@@ -451,9 +450,11 @@
return label
elif os.path.exists(static_image_path) and common_util.IsInsideChroot():
# Image was found for the given label. Generate update if we can.
- rel_path = self.GenerateUpdateImageWithCache(
- static_image_path, static_image_dir=static_image_dir)
- return _NonePathJoin(label, rel_path)
+ rel_path = self.GenerateUpdateImageWithCache(static_image_path)
+ # Add links from the static directory to the update.
+ cache_path = _NonePathJoin(self.static_dir, rel_path)
+ self._SymlinkUpdateFiles(cache_path, static_image_dir)
+ return label
# The label didn't resolve.
return None
@@ -659,8 +660,10 @@
src_path = os.path.abspath(self.forced_image)
if os.path.exists(src_path) and common_util.IsInsideChroot():
# Image was found for the given label. Generate update if we can.
- path_to_payload = self.GenerateUpdateImageWithCache(
- src_path, static_image_dir=self.static_dir)
+ path_to_payload = self.GenerateUpdateImageWithCache(src_path)
+ # Add links from the static directory to the update.
+ cache_path = _NonePathJoin(self.static_dir, path_to_payload)
+ self._SymlinkUpdateFiles(cache_path, self.static_dir)
else:
label = label or ''
label_list = label.split('/')
@@ -674,7 +677,7 @@
if label_list[0] == 'xbuddy':
# If path explicitly calls xbuddy, pop off the tag.
label_list.pop()
- x_label, image_name = self.xbuddy.Translate(label_list, board)
+ x_label, image_name = self.xbuddy.Translate(label_list, board=board)
if image_name not in constants.ALL_IMAGES:
raise AutoupdateError(
"Use an image alias: dev, base, test, or recovery.")
@@ -691,11 +694,7 @@
if path_to_payload is None:
raise AutoupdateError('Failed to get an update for: %s' % label)
else:
- # Add links from the static directory to the update.
- self._SymlinkUpdateFiles(
- _NonePathJoin(self.static_dir, path_to_payload))
-
- return path_to_payload
+ return path_to_payload
def HandleUpdatePing(self, data, label=''):
"""Handles an update ping from an update client.
diff --git a/autoupdate_unittest.py b/autoupdate_unittest.py
index 36bc562..9a45b69 100755
--- a/autoupdate_unittest.py
+++ b/autoupdate_unittest.py
@@ -119,13 +119,12 @@
common_util.IsInsideChroot().AndReturn(True)
self._xbuddy._GetArtifact(
- [''], self.test_board, lookup_only=True).AndReturn(
+ [''], board=self.test_board, lookup_only=True).AndReturn(
(latest_label, constants.TEST_IMAGE_FILE))
au_mock.GenerateUpdateImageWithCache(
os.path.join(self.static_image_dir, self.test_board, self.latest_dir,
- constants.TEST_IMAGE_FILE),
- static_image_dir=latest_image_dir).AndReturn('update.gz')
+ constants.TEST_IMAGE_FILE)).AndReturn('update.gz')
self.mox.ReplayAll()
test_data = _TEST_REQUEST % self.test_dict
@@ -150,18 +149,17 @@
cache_image_dir = os.path.join(self.static_image_dir, 'cache')
# Mock out GenerateUpdateImageWithCache to make an update file in cache
- def mock_fn(_image, static_image_dir):
+ def mock_fn(_image):
print 'mock_fn'
# No good way to introduce an update file during execution.
- cache_dir = os.path.join(static_image_dir, 'cache')
+ cache_dir = os.path.join(self.static_image_dir, 'cache')
common_util.MkDirP(cache_dir)
update_image = os.path.join(cache_dir, constants.UPDATE_FILE)
with open(update_image, 'w') as fh:
fh.write('')
common_util.IsInsideChroot().AndReturn(True)
- au_mock.GenerateUpdateImageWithCache(forced_image,
- static_image_dir=self.static_image_dir).WithSideEffects(
+ au_mock.GenerateUpdateImageWithCache(forced_image).WithSideEffects(
mock_fn).AndReturn('cache')
common_util.GetFileSha1(os.path.join(
@@ -195,7 +193,7 @@
au_mock.forced_image = "xbuddy:b/v/a"
self._xbuddy._GetArtifact(
- ['b', 'v', 'a'], None).AndReturn(('label', constants.TEST_IMAGE_FILE))
+ ['b', 'v', 'a']).AndReturn(('label', constants.TEST_IMAGE_FILE))
au_mock.GetUpdateForLabel(
autoupdate.FORCED_UPDATE, 'b/v/a').AndReturn('p')
diff --git a/build_artifact.py b/build_artifact.py
index ea019b1..9c8e5d4 100755
--- a/build_artifact.py
+++ b/build_artifact.py
@@ -470,6 +470,9 @@
artifact_info.RECOVERY_IMAGE:
ImplDescription(ZipfileBuildArtifact, IMAGE_FILE,
files_to_extract=[devserver_constants.RECOVERY_IMAGE_FILE]),
+ artifact_info.DEV_IMAGE:
+ ImplDescription(ZipfileBuildArtifact, IMAGE_FILE,
+ files_to_extract=[devserver_constants.IMAGE_FILE]),
artifact_info.TEST_IMAGE:
ImplDescription(ZipfileBuildArtifact, IMAGE_FILE,
files_to_extract=[devserver_constants.TEST_IMAGE_FILE]),
diff --git a/common_util.py b/common_util.py
index db88772..c6a317c 100644
--- a/common_util.py
+++ b/common_util.py
@@ -271,7 +271,9 @@
atomically.
"""
if not os.path.exists(target):
+ _Log('Could not find target for symlink: %s', target)
return
+
_Log('Creating symlink: %s --> %s', link, target)
# Use the created link_base file to prevent other calls to SymlinkFile() to
diff --git a/devserver.py b/devserver.py
index 7d8fd55..be8352d 100755
--- a/devserver.py
+++ b/devserver.py
@@ -58,6 +58,7 @@
from cherrypy.process import plugins
import autoupdate
+import build_artifact
import common_util
import downloader
import log_util
@@ -691,6 +692,10 @@
return_dir: {true|false}
if set to true, returns the url to the update.gz
instead.
+ for_update: {true|false}
+ if for_update, pre-generates the update payload for the image
+ and returns the update path to pass to the
+ update_engine_client.
Example URL:
http://host:port/xbuddy/x86-generic/R26-4000.0.0/test
@@ -707,12 +712,35 @@
"""
boolean_string = kwargs.get('return_dir')
return_dir = xbuddy.XBuddy.ParseBoolean(boolean_string)
+ boolean_string = kwargs.get('for_update')
+ for_update = xbuddy.XBuddy.ParseBoolean(boolean_string)
- build_id, file_name = self._xbuddy.Get(args)
+ if for_update and return_dir:
+ raise DevServerHTTPError(500, 'Cannot specify both update and return_dir')
+
+ # For updates, we optimize downloading of test images.
+ file_name = None
+ build_id = None
+ if for_update:
+ try:
+ build_id = self._xbuddy.StageTestAritfactsForUpdate(args)
+ except build_artifact.ArtifactDownloadError:
+ build_id = None
+
+ if not build_id:
+ build_id, file_name = self._xbuddy.Get(args)
+
if return_dir:
directory = os.path.join(cherrypy.request.base, 'static', build_id)
_Log("Directory requested, returning: %s", directory)
return directory
+ elif for_update:
+      # Forces payload to be in cache and symlinked into build_id dir.
+ updater.GetUpdateForLabel(autoupdate.FORCED_UPDATE, build_id,
+ image_name=file_name)
+ update_uri = os.path.join(cherrypy.request.base, 'update', build_id)
+ _Log("Update requested, returning: %s", update_uri)
+ return update_uri
else:
build_id = '/' + os.path.join('static', build_id, file_name)
_Log("Payload requested, returning: %s", build_id)
@@ -1044,6 +1072,8 @@
options.board,
root_dir=root_dir,
static_dir=options.static_dir)
+ if options.clear_cache and options.xbuddy_manage_builds:
+ _xbuddy.CleanCache()
# We allow global use here to share with cherrypy classes.
# pylint: disable=W0603
diff --git a/downloader.py b/downloader.py
index 7bb2dd0..1177db9 100755
--- a/downloader.py
+++ b/downloader.py
@@ -124,10 +124,14 @@
non-specified artifacts in the background following the principle of
spatial locality.
- artifacts: A list of artifact names that correspond to
- artifacts defined in artifact_info.py to stage.
- files: A list of filenames to stage from an archive_url.
- async: If True, return without waiting for download to complete.
+ Args:
+ artifacts: A list of artifact names that correspond to
+ artifacts defined in artifact_info.py to stage.
+ files: A list of filenames to stage from an archive_url.
+ async: If True, return without waiting for download to complete.
+
+ Raises:
+ gsutil_util.GSUtilError: If we failed to download the artifact.
"""
common_util.MkDirP(self._build_dir)
@@ -179,10 +183,15 @@
def _DownloadArtifactsSerially(self, artifacts, no_wait):
"""Simple function to download all the given artifacts serially.
- @param artifacts: A list of build_artifact.BuildArtifact instances to
- download.
- @param no_wait: If True, don't block waiting for artifact to exist if we
- fail to immediately find it.
+ Args:
+ artifacts: A list of build_artifact.BuildArtifact instances to
+ download.
+ no_wait: If True, don't block waiting for artifact to exist if we
+ fail to immediately find it.
+
+ Raises:
+ build_artifact.ArtifactDownloadError: If we failed to download the
+ artifact.
"""
try:
diff --git a/host/image_to_live.sh b/host/image_to_live.sh
index f80591f..53f7df1 100755
--- a/host/image_to_live.sh
+++ b/host/image_to_live.sh
@@ -81,6 +81,7 @@
UPDATER_DOWNLOADING="UPDATE_STATUS_DOWNLOADING"
IMAGE_PATH=""
+UPDATE_PATH=""
ROOTFS_MOUNTPT=""
STATEFUL_MOUNTPT=""
@@ -137,19 +138,18 @@
[[ "${FLAGS_image}" == xbuddy:* ]]
}
+# Starts the devserver and returns the update url to use to get the update.
start_dev_server() {
kill_all_devservers
- local devserver_flags="--pregenerate_update"
+ local devserver_flags="--board=${FLAGS_board} --port=${FLAGS_devserver_port}"
# Parse devserver flags.
if [ -n "${FLAGS_image}" ]; then
if is_xbuddy_path; then
info "Image flag is an xBuddy path to an image."
- devserver_flags="${devserver_flags} \
- --image ${FLAGS_image}"
else
info "Forcing the devserver to serve a local image."
- devserver_flags="${devserver_flags} \
- --image $(reinterpret_path_for_chroot ${FLAGS_image})"
+ devserver_flags="${devserver_flags} --pregenerate_update \
+ --image=$(reinterpret_path_for_chroot ${FLAGS_image})"
IMAGE_PATH="${FLAGS_image}"
fi
elif [ -n "${FLAGS_archive_dir}" ]; then
@@ -162,17 +162,17 @@
IMAGE_PATH="$(${SCRIPTS_DIR}/get_latest_image.sh --board="${FLAGS_board}")"
IMAGE_PATH="${IMAGE_PATH}/chromiumos_image.bin"
devserver_flags="${devserver_flags} \
- --image $(reinterpret_path_for_chroot ${IMAGE_PATH})"
+ --image=$(reinterpret_path_for_chroot ${IMAGE_PATH})"
fi
if [ -n "${FLAGS_payload}" ]; then
devserver_flags="${devserver_flags} \
- --payload $(reinterpret_path_for_chroot ${FLAGS_payload})"
+ --payload=$(reinterpret_path_for_chroot ${FLAGS_payload})"
fi
if [ -n "${FLAGS_proxy_port}" ]; then
devserver_flags="${devserver_flags} \
- --proxy_port ${FLAGS_proxy_port}"
+ --proxy_port=${FLAGS_proxy_port}"
fi
if [ ${FLAGS_no_patch_kernel} -eq ${FLAGS_TRUE} ]; then
@@ -183,7 +183,8 @@
devserver_flags="${devserver_flags} \
--src_image=\"$(reinterpret_path_for_chroot ${FLAGS_src_image})\""
fi
-
+ # Remove any extra whitespace between words in flags.
+ devserver_flags=$(echo ${devserver_flags} | sed 's/ \+/ /g')
info "Starting devserver with flags ${devserver_flags}"
# Clobber dev_server log in case image_to_live is run with sudo previously.
@@ -192,28 +193,46 @@
fi
# Need to inherit environment variables to discover gsutil credentials.
- cros_sdk -- sudo -E start_devserver ${devserver_flags} \
- --board=${FLAGS_board} \
- --port=${FLAGS_devserver_port} > ${FLAGS_server_log} 2>&1 &
+ if cros_sdk -- cros_workon list --host |
+ grep chromeos-base/cros-devutils &> /dev/null; then
+ info "cros_workon for devserver detected. Running devserver from source."
+ cros_sdk -- sudo -E ../platform/dev/devserver.py ${devserver_flags} \
+ > ${FLAGS_server_log} 2>&1 &
+ else
+ cros_sdk -- sudo -E start_devserver ${devserver_flags} \
+ > ${FLAGS_server_log} 2>&1 &
+ fi
- info "Waiting on devserver to start"
- info "note: be patient as the server generates the update before starting."
+ info "Waiting on devserver to start. " \
+ "Be patient as the server generates the update before starting."
until netstat -lnp 2>&1 | grep :${FLAGS_devserver_port} > /dev/null; do
sleep 5
- echo -n "."
- if ! pgrep -f start_devserver > /dev/null; then
- echo "Devserver failed, see dev_server.log."
- exit 1
+ if ! pgrep -f "devserver" > /dev/null; then
+ tail -n 10 "${FLAGS_server_log}"
+ die "Devserver failed, see dev_server.log -- snippet above"
fi
done
- echo ""
+ if is_xbuddy_path; then
+ local devserver_url=$(echo $(get_devserver_url) | sed "s#/update##")
+ local xbuddy_path="xbuddy/${FLAGS_image##*xbuddy:}?for_update=true"
+ info "Xbuddy image detected. Using xbuddy RPC: " ${xbuddy_path}
+ UPDATE_PATH="$(curl -f "${devserver_url}/${xbuddy_path}")"
+ if [ -z "${UPDATE_PATH}" ]; then
+ tail -n 10 "${FLAGS_server_log}"
+ die "XBuddy failed to stage the image specified by ${FLAGS_image}."
+ fi
+ info "XBuddy returned uri to use for update: ${UPDATE_PATH}."
+ if [[ "${UPDATE_PATH}" != *"/update"* ]]; then
+ die "XBuddy did not return a valid update uri."
+ fi
+ fi
}
# Copies stateful update script which fetches the newest stateful update
# from the dev server and prepares the update. chromeos_startup finishes
# the update on next boot.
run_stateful_update() {
- local dev_url=$(get_devserver_url)
+ local dev_url="${UPDATE_PATH}"
local stateful_url=""
local stateful_update_args=""
@@ -237,18 +256,13 @@
}
get_update_args() {
- if [ -z ${1} ]; then
+ if [ -z "${1}" ]; then
die "No url provided for update."
fi
local update_args="--omaha_url ${1}"
- # Grab everything after last colon as an xbuddy path
- if is_xbuddy_path; then
- update_args="${update_args}/xbuddy/${FLAGS_image##*xbuddy:}"
- fi
-
- info "${update_args}"
+ info "Omaha URL: " ${update_args}
if [[ ${FLAGS_ignore_version} -eq ${FLAGS_TRUE} ]]; then
info "Forcing update independent of the current version"
@@ -267,7 +281,7 @@
fi
if [ ${FLAGS_ignore_hostname} -eq ${FLAGS_TRUE} ]; then
- if [ -z ${FLAGS_update_url} ]; then
+ if [ -z "${FLAGS_update_url}" ]; then
devserver_url="http://$(get_hostname):${port}/update"
else
devserver_url="${FLAGS_update_url}"
@@ -340,16 +354,6 @@
done
}
-# Dumps the update_engine log in real-time
-log_thread() {
- echo 'starting log thread'
- # Using -t -t twice forces pseudo-tty allocation on the remote end, which
- # causes tail to go into line-buffered mode.
- EXTRA_REMOTE_SH_ARGS="-t -t" remote_sh_raw \
- "tail -n +0 -f /var/log/update_engine.log"
- echo 'stopping log thread'
-}
-
# Pings the update_engine to see if it responds or a max timeout is reached.
# Returns 1 if max timeout is reached.
wait_until_update_engine_is_ready() {
@@ -366,11 +370,12 @@
done
}
+# Runs the autoupdate.
run_auto_update() {
# Truncate the update log so our log file is clean.
truncate_update_log
- local update_args="$(get_update_args "$(get_devserver_url)")"
+ local update_args="$(get_update_args "${UPDATE_PATH}")"
info "Waiting to initiate contact with the update_engine."
wait_until_update_engine_is_ready || die "Could not contact update engine."
@@ -379,17 +384,13 @@
# Sets up secondary threads to track the update progress and logs
status_thread &
local status_thread_pid=$!
- log_thread &
- local log_thread_pid=$!
- trap "kill -1 ${status_thread_pid} && kill -1 ${log_thread_pid} && cleanup" \
- EXIT INT TERM
+ trap "kill ${status_thread_pid}; cleanup" EXIT INT TERM
# Actually run the update. This is a blocking call.
remote_sh "${UPDATER_BIN} ${update_args}"
# Clean up secondary threads.
- ! kill ${status_thread_pid} 2> /dev/null
- ! kill ${log_thread_pid} 2> /dev/null
+ kill ${status_thread_pid} 2> /dev/null || warn "Failed to kill status thread"
trap cleanup EXIT INT TERM
local update_status="$(get_update_var CURRENT_OP)"
@@ -404,6 +405,16 @@
verify_image() {
info "Verifying image."
+ if [ -n "${FLAGS_update_url}" ]; then
+ warn "Verify is not compatible with setting an update url."
+ return
+ fi
+
+ if is_xbuddy_path; then
+ warn "Verify is not currently compatible with xbuddy."
+ return
+ fi
+
ROOTFS_MOUNTPT=$(mktemp -d)
STATEFUL_MOUNTPT=$(mktemp -d)
"${SCRIPTS_DIR}/mount_gpt_image.sh" --from "$(dirname "${IMAGE_PATH}")" \
@@ -445,7 +456,7 @@
fi
local initial_root_dev=$(find_root_dev)
-
+ UPDATE_PATH="$(get_devserver_url)"
if [ -z "${FLAGS_update_url}" ]; then
# Start local devserver if no update url specified.
start_dev_server
@@ -529,18 +540,6 @@
set -e
- if [ ${FLAGS_verify} -eq ${FLAGS_TRUE} ] && \
- [ -n "${FLAGS_update_url}" ]; then
- warn "Verify is not compatible with setting an update url."
- FLAGS_verify=${FLAGS_FALSE}
- fi
-
- if [ ${FLAGS_verify} -eq ${FLAGS_TRUE} ] && \
- is_xbuddy_path; then
- warn "Verify is not currently compatible with xbuddy."
- FLAGS_verify=${FLAGS_FALSE}
- fi
-
trap cleanup EXIT INT TERM
TMP=$(mktemp -d /tmp/image_to_live.XXXX)
diff --git a/xbuddy.py b/xbuddy.py
index 2659002..5fff21a 100644
--- a/xbuddy.py
+++ b/xbuddy.py
@@ -46,19 +46,28 @@
LATEST = "latest"
LOCAL = "local"
REMOTE = "remote"
+
+# TODO(sosa): Fix a lot of assumptions about these aliases. There is too much
+# implicit logic here that's unnecessary. What should be done:
+# 1) Collapse Alias logic to one set of aliases for xbuddy (not local/remote).
+# 2) Do not use zip when creating these dicts. Better to not rely on ordering.
+# 3) Move alias/artifact mapping to a central module rather than having it here.
+# 4) Be explicit when things are missing i.e. no dev images in image.zip.
+
LOCAL_ALIASES = [
TEST,
- BASE,
DEV,
+ BASE,
FULL,
ANY,
]
LOCAL_FILE_NAMES = [
devserver_constants.TEST_IMAGE_FILE,
- devserver_constants.BASE_IMAGE_FILE,
devserver_constants.IMAGE_FILE,
+ devserver_constants.BASE_IMAGE_FILE,
devserver_constants.UPDATE_FILE,
+ None, # For ANY.
]
LOCAL_ALIAS_TO_FILENAME = dict(zip(LOCAL_ALIASES, LOCAL_FILE_NAMES))
@@ -66,6 +75,7 @@
# Google Storage constants
GS_ALIASES = [
TEST,
+ DEV,
BASE,
RECOVERY,
FULL,
@@ -166,9 +176,7 @@
self.config = self._ReadConfig()
self._manage_builds = manage_builds or self._ManageBuilds()
- self._board = board or self.GetDefaultBoardID()
- _Log("Default board used by xBuddy: %s", self._board)
-
+ self._board = board
self._timestamp_folder = os.path.join(self.static_dir,
Timestamp.XBUDDY_TIMESTAMP_DIR)
common_util.MkDirP(self._timestamp_folder)
@@ -418,7 +426,7 @@
path: the path xBuddy Get was called with.
Return:
- tuple of (image_type, board, version)
+ tuple of (image_type, board, version, whether the path is local)
Raises:
XBuddyException: if the path can't be resolved into valid components
@@ -508,19 +516,23 @@
return_tup = sorted(build_dict.iteritems(), key=operator.itemgetter(1))
return return_tup
- def _Download(self, gs_url, artifact):
- """Download the single artifact from the given gs_url."""
+ def _Download(self, gs_url, artifacts):
+ """Download the artifacts from the given gs_url.
+
+ Raises:
+ build_artifact.ArtifactDownloadError: If we failed to download the
+ artifact.
+ """
with XBuddy._staging_thread_count_lock:
XBuddy._staging_thread_count += 1
try:
- _Log("Downloading '%s' from '%s'", artifact, gs_url)
- downloader.Downloader(self.static_dir, gs_url).Download(
- [artifact], [])
+ _Log("Downloading %s from %s", artifacts, gs_url)
+ downloader.Downloader(self.static_dir, gs_url).Download(artifacts, [])
finally:
with XBuddy._staging_thread_count_lock:
XBuddy._staging_thread_count -= 1
- def _CleanCache(self):
+ def CleanCache(self):
"""Delete all builds besides the newest N builds"""
if not self._manage_builds:
return
@@ -551,8 +563,13 @@
except Exception as err:
raise XBuddyException('Failed to clear %s: %s' % (clear_dir, err))
- def _GetFromGS(self, build_id, image_type, lookup_only):
- """Check if the artifact is available locally. Download from GS if not."""
+ def _GetFromGS(self, build_id, image_type):
+ """Check if the artifact is available locally. Download from GS if not.
+
+ Raises:
+ build_artifact.ArtifactDownloadError: If we failed to download the
+ artifact.
+ """
gs_url = os.path.join(devserver_constants.GS_IMAGE_DIR,
build_id)
@@ -562,25 +579,29 @@
cached = os.path.exists(file_loc)
if not cached:
- if not lookup_only:
- artifact = GS_ALIAS_TO_ARTIFACT[image_type]
- self._Download(gs_url, artifact)
+ artifact = GS_ALIAS_TO_ARTIFACT[image_type]
+ self._Download(gs_url, [artifact])
else:
_Log('Image already cached.')
- def _GetArtifact(self, path_list, board, lookup_only=False):
+ def _GetArtifact(self, path_list, board=None, lookup_only=False):
"""Interpret an xBuddy path and return directory/file_name to resource.
+  Note that a board can be passed in, but if self._board is set, it is
+  used rather than the passed-in board.
+
Returns:
build_id to the directory
file_name of the artifact
Raises:
XBuddyException: if the path could not be translated
+ build_artifact.ArtifactDownloadError: if we failed to download the
+ artifact.
"""
path = '/'.join(path_list)
# Rewrite the path if there is an appropriate default.
- path = self._LookupAlias(path, board)
+ path = self._LookupAlias(path, self._board if self._board else board)
# Parse the path.
image_type, board, version, is_local = self._InterpretPath(path)
@@ -607,12 +628,11 @@
if image_type not in GS_ALIASES:
raise XBuddyException('Bad remote image type: %s. Use one of: %s' %
(image_type, GS_ALIASES))
- file_name = GS_ALIAS_TO_FILENAME[image_type]
-
- # Interpret the version (alias), and get gs address.
build_id = self._ResolveVersionToUrl(board, version)
- _Log('Found on GS: %s', build_id)
- self._GetFromGS(build_id, image_type, lookup_only)
+ _Log('Resolved version %s to %s.', version, build_id)
+ file_name = GS_ALIAS_TO_FILENAME[image_type]
+ if not lookup_only:
+ self._GetFromGS(build_id, image_type)
return build_id, file_name
@@ -632,7 +652,7 @@
"""Returns the number of images cached by xBuddy."""
return str(self._Capacity())
- def Translate(self, path_list, board):
+ def Translate(self, path_list, board=None):
"""Translates an xBuddy path to a real path to artifact if it exists.
Equivalent to the Get call, minus downloading and updating timestamps,
@@ -651,11 +671,28 @@
XBuddyException: if the path couldn't be translated
"""
self._SyncRegistryWithBuildImages()
- build_id, file_name = self._GetArtifact(path_list, board, lookup_only=True)
+ build_id, file_name = self._GetArtifact(path_list, board=board,
+ lookup_only=True)
_Log('Returning path to payload: %s/%s', build_id, file_name)
return build_id, file_name
+ def StageTestAritfactsForUpdate(self, path_list):
+ """Stages test artifacts for update and returns build_id.
+
+ Raises:
+ XBuddyException: if the path could not be translated
+ build_artifact.ArtifactDownloadError: if we failed to download the test
+ artifacts.
+ """
+ build_id, file_name = self.Translate(path_list)
+ if file_name == devserver_constants.TEST_IMAGE_FILE:
+ gs_url = os.path.join(devserver_constants.GS_IMAGE_DIR,
+ build_id)
+ artifacts = [FULL, STATEFUL]
+ self._Download(gs_url, artifacts)
+ return build_id
+
def Get(self, path_list):
"""The full xBuddy call, returns resource specified by path_list.
@@ -675,14 +712,15 @@
specified 'test' or 'full_payload' artifacts, respectively.
Raises:
- XBuddyException: if path is invalid
+ XBuddyException: if the path could not be translated
+ build_artifact.ArtifactDownloadError: if we failed to download the
+ artifact.
"""
self._SyncRegistryWithBuildImages()
- build_id, file_name = self._GetArtifact(path_list, self._board)
-
+ build_id, file_name = self._GetArtifact(path_list)
Timestamp.UpdateTimestamp(self._timestamp_folder, build_id)
#TODO (joyc): run in sep thread
- self._CleanCache()
+ self.CleanCache()
_Log('Returning path to payload: %s/%s', build_id, file_name)
return build_id, file_name
diff --git a/xbuddy_unittest.py b/xbuddy_unittest.py
index 517c5a7..a509541 100755
--- a/xbuddy_unittest.py
+++ b/xbuddy_unittest.py
@@ -187,7 +187,7 @@
self.mox.StubOutWithMock(self.mock_xb, '_Download')
for _ in range(8):
- self.mock_xb._Download(mox.IsA(str), mox.IsA(str))
+ self.mock_xb._Download(mox.IsA(str), mox.In(mox.IsA(str)))
self.mox.ReplayAll()