# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities to create sysroots."""
import logging
import os
from pathlib import Path
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
Tuple,
TYPE_CHECKING,
Union,
)
from chromite.lib import build_target_lib
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import locking
from chromite.lib import osutils
from chromite.lib import portage_util
from chromite.lib import toolchain
from chromite.lib import toolchain_list
from chromite.lib.parser import package_info
if TYPE_CHECKING:
from chromite.lib import chroot_lib
class ConfigurationError(Exception):
"""Raised when an invalid configuration is found."""
CACHED_FIELD_PROFILE_OVERRIDE = "PROFILE_OVERRIDE"
STANDARD_FIELD_PORTDIR_OVERLAY = "PORTDIR_OVERLAY"
STANDARD_FIELD_CHOST = "CHOST"
STANDARD_FIELD_BOARD_OVERLAY = "BOARD_OVERLAY"
STANDARD_FIELD_BOARD_USE = "BOARD_USE"
STANDARD_FIELD_ARCH = "ARCH"
DEFAULT_PROFILE = "base"
_PORTAGE_WRAPPER_TEMPLATE = """#!/bin/sh
# If we try to use sudo when the sandbox is active, we get ugly warnings that
# just confuse developers. Disable the sandbox in this case by re-execing.
if [ "${{SANDBOX_ON}}" = "1" ]; then
SANDBOX_ON=0 exec "$0" "$@"
else
unset LD_PRELOAD
fi
export CHOST="{chost}"
export PORTAGE_CONFIGROOT="{sysroot}"
export SYSROOT="{sysroot}"
if [ -z "$PORTAGE_USERNAME" ]; then
export PORTAGE_USERNAME=$(basename "${{HOME}}")
fi
export ROOT="{sysroot}"
exec sudo -E {command} "$@"
"""
_PORTAGE_CHROMITE_WRAPPER_TEMPLATE = """#!/bin/sh
# Generated by chromite/lib/sysroot_lib.py.
exec "{wrapper_cmd}" \
--build-target "{build_target}" \
--chost "{chost}" \
--sysroot "{sysroot}" \
{command} \
-- \
{args} \
"$@"
"""
_BOARD_WRAPPER_TEMPLATE = """#!/bin/sh
exec {command} --board="{board}" "$@"
"""
_BOARD_WRAPPER_DEPRECATED_CMD_TEMPLATE = """#!/bin/sh
echo '{deprecated}' >&2
exec {command} --board="{board}" "$@"
"""
_BUILD_TARGET_WRAPPER_TEMPLATE = """#!/bin/sh
exec {command} --build-target="{build_target}" "$@"
"""
_PKGCONFIG_WRAPPER_TEMPLATE = """#!/bin/bash
PKG_CONFIG_LIBDIR=$(printf '%s:' "{sysroot}"/usr/*/pkgconfig)
export PKG_CONFIG_LIBDIR
export PKG_CONFIG_SYSROOT_DIR="{sysroot}"
# Portage will get confused and try to "help" us by exporting this.
# Undo that logic.
unset PKG_CONFIG_PATH
# TODO: Consider using pkgconf cross-personalities instead
# See https://github.com/pkgconf/pkgconf/issues/264
export PKG_CONFIG_SYSTEM_INCLUDE_PATH="/usr/include:{sysroot}/usr/include"
# https://github.com/pkgconf/pkgconf/issues/205
export PKG_CONFIG_FDO_SYSROOT_RULES=1
# Use full path to bypass automated wrapper checks that block `pkg-config`.
# https://crbug.com/985180
exec /usr/bin/pkg-config "$@"
"""
_wrapper_dir = "/usr/local/bin"
_IMPLICIT_SYSROOT_DEPS_KEY = "IMPLICIT_SYSROOT_DEPS"
_IMPLICIT_SYSROOT_DEPS = [
"sys-kernel/linux-headers",
"sys-libs/gcc-libs",
"sys-libs/libcxx",
]
_MAKE_CONF = "etc/make.conf"
_MAKE_CONF_BOARD_SETUP = "etc/make.conf.board_setup"
_MAKE_CONF_BOARD = "etc/make.conf.board"
_MAKE_CONF_USER = "etc/make.conf.user"
_MAKE_CONF_HOST_SETUP = "etc/make.conf.host_setup"
_BUILD_TARGET_CONFIG = "etc/portage/build_target.json"
_CACHE_PATH = "var/cache/edb/chromeos"
_CHROMIUMOS_OVERLAY = os.path.join(
constants.CHROOT_SOURCE_ROOT, constants.CHROMIUMOS_OVERLAY_DIR
)
_CHROMIUMOS_CONFIG = os.path.join(_CHROMIUMOS_OVERLAY, "chromeos", "config")
_INTERNAL_BINHOST_DIR = os.path.join(
constants.PRIVATE_BINHOST_CONF_DIR,
"target",
)
_EXTERNAL_BINHOST_DIR = os.path.join(
constants.PUBLIC_BINHOST_CONF_DIR,
"target",
)
_CHROMEOS_INTERNAL_BOTO_PATH = os.path.join(
constants.SOURCE_ROOT,
"src",
"private-overlays",
"chromeos-overlay",
"googlestorage_account.boto",
)
_ARCH_MAPPING = {
"amd64": "amd64-generic",
"x86": "x86-generic",
"arm": "arm-generic",
"arm64": "arm64-generic",
"mips": "mipsel-o32-generic",
}
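# _ARCH_MAPPING backs Sysroot.GetBaseArchBoard(): a sysroot whose ARCH
# standard field is, e.g., "arm64" resolves to the "arm64-generic" base
# board, which is also consulted when picking fallback binhost configs.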
class Error(Exception):
"""Module base error class."""
class NoBuildTargetFileError(Exception):
"""No build target config file."""
# This error is meant to be used with `cros build-packages`. This exists here
# so the setup_board (ToolchainInstallError) and `cros build-packages` errors
# exist in a common, sensible location.
class PackageInstallError(Error, cros_build_lib.RunCommandError):
"""An error installing packages."""
def __init__(
self,
msg: str,
result: "cros_build_lib.CompletedProcess",
exception: Optional[BaseException] = None,
packages: Optional[Iterable[package_info.PackageInfo]] = None,
):
"""Init method.
Args:
msg: The message.
result: The command result.
exception: An origin exception.
packages: The list of failed packages.
"""
super().__init__(msg, result, exception)
self.failed_packages = packages
self.args = (*self.args, packages)
def Stringify(self, stdout: bool = True, stderr: bool = True) -> str:
"""Stringify override to include the failed package info.
See:
cros_build_lib.RunCommandError.Stringify
"""
items = [super().Stringify(stdout, stderr)]
pkgs = []
for cpv in self.failed_packages or ():
if cpv.cpf:
pkgs.append(cpv.cpf)
elif cpv.cp:
pkgs.append(cpv.cp)
elif cpv.package:
pkgs.append(cpv.package)
if pkgs:
items.append("Failed Packages: %s" % " ".join(pkgs))
return "\n".join(items)
class ToolchainInstallError(PackageInstallError):
"""An error when installing a toolchain package.
Essentially identical to PackageInstallError, but has names that better
reflect that the packages are toolchain packages.
"""
def __init__(
self,
msg: str,
result: "cros_build_lib.CompletedProcess",
exception: Optional[BaseException] = None,
tc_info: Optional[Iterable[package_info.PackageInfo]] = None,
):
"""Init method.
Args:
msg: The message.
result: The command result.
exception: An origin exception.
tc_info: The list of failed toolchain packages.
"""
super().__init__(msg, result, exception, packages=tc_info)
@property
def failed_toolchain_info(
self,
) -> Optional[Iterable[package_info.PackageInfo]]:
return self.failed_packages
def _CreateWrapper(wrapper_path: str, template: str, **kwargs: Any) -> None:
"""Creates a wrapper from a given template.
Args:
wrapper_path: path to the wrapper.
template: wrapper template.
**kwargs: fields to be set in the template.
"""
osutils.WriteFile(
wrapper_path,
template.format(**kwargs),
makedirs=True,
sudo=True,
chmod=0o755,
)
def _NotEmpty(filepath: str) -> bool:
"""Returns True if |filepath| is not empty.
Args:
filepath: path to a file.
"""
return os.path.exists(filepath) and bool(osutils.ReadFile(filepath).strip())
def _DictToKeyValue(dictionary: Dict) -> str:
"""Formats dictionary in to a key=value string.
Args:
dictionary: a python dictionary.
Returns:
A string with one key=value pair per-line.
"""
output = []
for key in sorted(dictionary.keys()):
output.append('%s="%s"' % (key, dictionary[key]))
return "".join(f"{x}\n" for x in output)
def _GetMakeConfHostPath() -> Path:
"""Get the path to the make.conf.amd64-host file."""
return Path(_CHROMIUMOS_CONFIG) / "make.conf.amd64-host"
def _GetMakeConfGenericPath() -> str:
"""Get the path to the make.conf.generic-target file."""
return os.path.join(_CHROMIUMOS_CONFIG, "make.conf.generic-target")
def _GetChrootMakeConfUserPath() -> str:
"""Get the path to the chroot's make.conf.user file."""
return "/%s" % _MAKE_CONF_USER
class Profile:
"""Class that encapsulates the profile name for a sysroot."""
def __init__(self, name: str = ""):
self._name = name
@property
def name(self) -> str:
return self._name
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.name == other.name
class Sysroot:
"""Class that encapsulate the interaction with sysroots."""
def __init__(self, path: Union[Path, str]):
self.path = str(path)
# Read config from _MAKE_CONF which also pulls in config from
# _MAKE_CONF_BOARD_SETUP, but only write any config overrides directly
# to _MAKE_CONF_BOARD_SETUP.
self._config_file_read = self.JoinPath(_MAKE_CONF)
self._config_file_write = self.JoinPath(_MAKE_CONF_BOARD_SETUP)
self._cache_file = self.JoinPath(_CACHE_PATH)
self._cache_file_lock = self._cache_file + ".lock"
def __eq__(self, other):
"""Equality check."""
if not isinstance(other, self.__class__):
return False
return self.path == other.path
def Exists(self, chroot: Optional["chroot_lib.Chroot"] = None) -> bool:
"""Check if the sysroot exists.
Args:
chroot: Optionally check if the sysroot exists inside the specified
chroot.
Returns:
True if the sysroot exists.
"""
if chroot:
return chroot.has_path(self.path)
return os.path.exists(self.path)
def JoinPath(self, *args: str) -> str:
"""Helper to build out a path within the sysroot.
Pass args as if calling os.path.join().
TODO(build): Remove this method once self.path is a pathlib.Path.
Args:
*args: path components to join.
Returns:
The path within the sysroot.
"""
return os.path.join(self.path, *args)
def GetStandardField(self, field: str) -> Optional[Any]:
"""Returns the value of a standard field.
Args:
field: Field from the standard configuration file to get.
One of STANDARD_FIELD_* from above.
"""
# We want to source from within the config's directory as the config
# itself may source other scripts using a relative path.
with osutils.ChdirContext(Path(self._config_file_read).parent):
return osutils.SourceEnvironment(
self._config_file_read, [field], multiline=True
).get(field)
def GetCachedField(self, field: str) -> Optional[str]:
"""Returns the value of |field| in the sysroot cache file.
Access to the cache is thread-safe as long as we access it through these
methods or the bash helper in common.sh.
Args:
field: name of the field.
"""
if not os.path.exists(self._cache_file):
return None
with locking.FileLock(
self._cache_file_lock, locktype=locking.FLOCK, world_writable=True
).read_lock():
return osutils.SourceEnvironment(self._cache_file, [field]).get(
field
)
def SetCachedField(self, field: str, value: Optional[str]):
"""Sets |field| to |value| in the sysroot cache file.
Access to the cache is thread-safe as long as we access it through these
methods or the bash helper in common.sh.
Args:
field: name of the field.
value: value to set. If |value| is None, the field is unset.
"""
# TODO(bsimonnet): add support for values with quotes and newlines.
# crbug.com/476764.
for symbol in '\n`$"\\':
if value and symbol in value:
raise ValueError(
'Cannot use \\n, `, $, \\ or " in cached value.'
)
with locking.FileLock(
self._cache_file_lock, locktype=locking.FLOCK, world_writable=True
).write_lock():
lines = []
if os.path.exists(self._cache_file):
lines = osutils.ReadFile(self._cache_file).splitlines()
# Remove the old value for field if it exists.
lines = [l for l in lines if not l.startswith(field + "=")]
if value is not None:
lines.append('%s="%s"' % (field, value))
osutils.WriteFile(self._cache_file, "\n".join(lines), sudo=True)
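# A minimal sketch of the cache file format these helpers read and write
# (values are hypothetical): the file is a flat list of FIELD="value"
# lines, e.g.
#   IMPLICIT_SYSROOT_DEPS="yes"
#   PROFILE_OVERRIDE="base"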
@property
def build_target_name(self) -> str:
"""Get the name of the build target this sysroot was created for."""
return self.GetStandardField(STANDARD_FIELD_BOARD_USE)
@property
def profile_name(self) -> str:
"""Get the name of the sysroot's profile."""
return (
self.GetCachedField(CACHED_FIELD_PROFILE_OVERRIDE)
or DEFAULT_PROFILE
)
@property
def build_target(self) -> build_target_lib.BuildTarget:
"""Get the build target used to create the sysroot."""
p = Path(self.path) / _BUILD_TARGET_CONFIG
if not p.exists():
raise NoBuildTargetFileError(
"The build target file does not exist."
)
return build_target_lib.BuildTarget.from_json(
osutils.ReadFile(p, sudo=True)
)
@property
def board_overlay(self) -> List[str]:
"""The BOARD_OVERLAY standard field as a list.
The BOARD_OVERLAY field is set on creation, and stores the list of
overlays more directly associated with the build target itself. In an
ideal world, this would be the single, top level overlay for the build
target (e.g. overlay-eve-private) and everything else could be derived
from that. In practice, this is currently every available overlay that
is not in src/third_party.
"""
return self.GetStandardField(STANDARD_FIELD_BOARD_OVERLAY).split()
@property
def _build_target_overlays(self) -> List[Path]:
"""Overlays for the build target itself."""
prefix = f"overlay-{self.build_target_name}"
return [x for x in self.get_overlays() if x.name.startswith(prefix)]
@property
def build_target_overlay(self) -> Optional[Path]:
"""The most specific build target overlay for the sysroot."""
# Choose the longest as a proxy for the most specific. This should only
# ever be choosing between overlay-x and overlay-x-private, but we'll
# need better logic here if we have any cases with more than that.
overlays = self._build_target_overlays
overlay = max(overlays, key=lambda x: len(x.name)) if overlays else None
return overlay
@property
def chipset(self) -> Optional[str]:
"""The chipset for the sysroot's build target."""
overlays = [
x for x in self.get_overlays() if x.name.startswith("chipset-")
]
if not overlays:
return None
# Choose the longest as a proxy for the most specific. This should at
# most be choosing between chipset-x and chipset-x-private, but we'll
# need better logic here if we have any cases with more than that.
overlay = max(overlays, key=lambda x: len(x.name))
chipset = overlay.name
# TODO(python 3.9): string.removeprefix & string.removesuffix instead.
if chipset.startswith("chipset-"):
chipset = chipset[len("chipset-") :]
if chipset.endswith("-private"):
chipset = chipset[: -len("-private")]
return chipset
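# For example (hypothetical overlay name): if the sysroot's overlays
# include "chipset-foo-private", the chipset property above returns "foo".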
@property
def portdir_overlay(self) -> List[str]:
"""The PORTDIR_OVERLAY field as a list.
The PORTDIR_OVERLAY field is set on creation, and stores the list of all
overlays available to the sysroot.
"""
return self.GetStandardField(STANDARD_FIELD_PORTDIR_OVERLAY).split()
@property
def use_flags(self) -> List[str]:
"""Get all USE flags for the sysroot."""
return portage_util.PortageqEnvvar("USE", sysroot=self.path).split()
@property
def features(self) -> List[str]:
"""Get all FEATURES for the sysroot."""
return portage_util.PortageqEnvvar(
"FEATURES", sysroot=self.path
).split()
@property
def portage_logdir(self) -> str:
"""Get the PORTAGE_LOGDIR property for this sysroot."""
return portage_util.PortageqEnvvar("PORTAGE_LOGDIR", sysroot=self.path)
def get_overlays(
self, build_target_only: bool = False, relative: bool = False
) -> List[Path]:
"""Get a list of the overlays available to the sysroot.
Note: The overlay paths are always inside the SDK. If the paths outside
the SDK are needed, we should add an option to transform them here.
Args:
build_target_only: Only fetch the overlays more relevant to the
build target. By default, fetch all overlays available to the
sysroot.
relative: Get the overlay paths relative to the source root rather
than as absolute paths.
"""
overlays = (
self.board_overlay if build_target_only else self.portdir_overlay
)
overlay_paths = [Path(x) for x in overlays]
if relative:
return [
x.relative_to(constants.CHROOT_SOURCE_ROOT)
for x in overlay_paths
]
return overlay_paths
def _WrapperPath(
self, command: str, friendly_name: Optional[str] = None
) -> str:
"""Returns the path to the wrapper for |command|.
Args:
command: command to wrap.
friendly_name: suffix to add to the command name. If None, the
wrapper will be created in the sysroot.
"""
if friendly_name:
return os.path.join(
_wrapper_dir, "%s-%s" % (command, friendly_name)
)
return self.JoinPath("build", "bin", command)
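# Illustrative results (hypothetical board name "foo"):
#   _WrapperPath("equery", "foo") -> "/usr/local/bin/equery-foo"
#   _WrapperPath("equery")        -> "<sysroot>/build/bin/equery"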
def CreateAllWrappers(self, friendly_name: Optional[str] = None) -> None:
"""Creates all the wrappers.
Creates all portage tools wrappers, plus wrappers for gdb, cros_workon
and pkg-config.
Args:
friendly_name: if not None, create friendly wrappers with
|friendly_name| added to the command.
"""
chost = self.GetStandardField(STANDARD_FIELD_CHOST)
portage_chromite_wrapper_base_args = {
"build_target": self.build_target_name,
"chost": chost,
"sysroot": self.path,
"wrapper_cmd": (
constants.CHROMITE_SCRIPTS_DIR / "portage_cmd_wrapper"
),
"args": "",
}
for cmd in (
"ebuild",
"eclean",
"emaint",
"equery",
"portageq",
"qcheck",
"qdepends",
"qfile",
"qlist",
"qmerge",
"qsize",
):
args = {"sysroot": self.path, "chost": chost, "command": cmd}
if friendly_name:
_CreateWrapper(
self._WrapperPath(cmd, friendly_name),
_PORTAGE_WRAPPER_TEMPLATE,
**args,
)
_CreateWrapper(
self._WrapperPath(cmd), _PORTAGE_WRAPPER_TEMPLATE, **args
)
if friendly_name:
args = portage_chromite_wrapper_base_args.copy()
args["command"] = "emerge"
args["args"] = "--root-deps"
_CreateWrapper(
self._WrapperPath("emerge", friendly_name),
_PORTAGE_CHROMITE_WRAPPER_TEMPLATE,
**args,
)
# TODO(crbug.com/1108874): Delete the deprecated wrapper.
_CreateWrapper(
self._WrapperPath("cros_workon", friendly_name),
_BOARD_WRAPPER_DEPRECATED_CMD_TEMPLATE,
board=friendly_name,
command="cros_workon",
deprecated=(
f"cros_workon-{friendly_name} is deprecated, use "
f"`cros workon --board {friendly_name}` instead."
),
)
_CreateWrapper(
self._WrapperPath("cros-workon", friendly_name),
_BOARD_WRAPPER_DEPRECATED_CMD_TEMPLATE,
build_target=friendly_name,
board=friendly_name,
command="cros workon",
deprecated=(
f"cros-workon-{friendly_name} is deprecated, use "
f"`cros workon --board {friendly_name}` instead."
),
)
_CreateWrapper(
self._WrapperPath("gdb", friendly_name),
_BOARD_WRAPPER_TEMPLATE,
board=friendly_name,
command="cros_gdb",
)
_CreateWrapper(
self._WrapperPath("pkg-config", friendly_name),
_PKGCONFIG_WRAPPER_TEMPLATE,
sysroot=self.path,
)
_CreateWrapper(
self._WrapperPath("pkg-config"),
_PKGCONFIG_WRAPPER_TEMPLATE,
sysroot=self.path,
)
args = portage_chromite_wrapper_base_args.copy()
args["command"] = "emerge"
args["args"] = "--root-deps"
_CreateWrapper(
self._WrapperPath("emerge"),
_PORTAGE_CHROMITE_WRAPPER_TEMPLATE,
**args,
)
def InstallMakeConf(
self,
build_target: build_target_lib.BuildTarget,
) -> None:
"""Make sure the make.conf file exists and is up to date.
Args:
build_target: The BuildTarget to use.
"""
config_file = (
_GetMakeConfHostPath()
if build_target.is_host()
else _GetMakeConfGenericPath()
)
osutils.SafeSymlink(config_file, self.JoinPath(_MAKE_CONF), sudo=True)
def InstallMakeConfSdk(
self,
build_target: build_target_lib.BuildTarget,
public_only: bool = True,
) -> None:
"""Make sure the make.conf.host_setup file exists and is up to date.
Args:
build_target: The BuildTarget to use.
public_only: Whether to only search public ChromiumOS repos.
"""
if not build_target.is_host():
return
chroot_make_conf_sdk = Path(self.path) / _MAKE_CONF_HOST_SETUP
contents = ""
if not public_only:
chromeos_overlay = (
constants.SOURCE_ROOT / constants.CHROMEOS_OVERLAY_DIR
)
partner_overlay = (
constants.SOURCE_ROOT / constants.CHROMEOS_PARTNER_OVERLAY_DIR
)
if chromeos_overlay.is_dir():
# Googlers with internal source checkout.
make_conf_sdk = (
constants.CHROOT_SOURCE_ROOT
/ constants.CHROMIUMOS_OVERLAY_DIR
/ "chromeos"
/ "config"
/ "make.conf.sdk-chromeos"
)
contents = f"source {make_conf_sdk}\n"
elif partner_overlay.is_dir():
# Partners with partner overlay access.
partner_overlay = (
constants.CHROOT_SOURCE_ROOT
/ constants.CHROMEOS_PARTNER_OVERLAY_DIR
)
contents = (
f'PORTDIR_OVERLAY="$PORTDIR_OVERLAY {partner_overlay}"\n'
)
osutils.WriteFile(
chroot_make_conf_sdk,
f"# DO NOT EDIT\n\n{contents}",
sudo=True,
)
def InstallMakeConfBoard(
self,
accepted_licenses: Optional[str] = None,
local_only: bool = False,
use_cq_prebuilts: bool = False,
expanded_binhost_inheritance: bool = False,
) -> None:
"""Make sure the make.conf.board file exists and is up to date.
Args:
accepted_licenses: Any additional accepted licenses.
local_only: Whether to use only locally available prebuilts rather
than fetching from remote binhosts.
use_cq_prebuilts: Whether to use the prebuilts generated by CQ.
expanded_binhost_inheritance: Whether to enable expanded binhost
inheritance, which searches for additional binhosts to include
to attempt to improve binhost hit rates.
"""
board_conf = self.GenerateBoardMakeConf(
accepted_licenses=accepted_licenses
)
make_conf_path = self.JoinPath(_MAKE_CONF_BOARD)
osutils.WriteFile(make_conf_path, board_conf, sudo=True)
# Once make.conf.board has been generated, generate the binhost config.
# We need to do this in two steps as the binhost generation step needs
# portageq to be available.
binhost_conf = self.GenerateBinhostConf(
local_only=local_only,
use_cq_prebuilts=use_cq_prebuilts,
expanded_binhost_inheritance=expanded_binhost_inheritance,
)
osutils.WriteFile(
make_conf_path, "%s\n%s\n" % (board_conf, binhost_conf), sudo=True
)
def InstallMakeConfBoardSetup(
self,
build_target: build_target_lib.BuildTarget,
) -> None:
"""Make sure the sysroot has the make.conf.board_setup file.
Args:
build_target: The BuildTarget to use.
"""
self.WriteConfig(self.GenerateBoardSetupConfig(build_target))
def InstallMakeConfUser(self) -> None:
"""Make sure the sysroot has the make.conf.user file.
This method assumes the chroot's make.conf.user file exists.
See chroot_util.CreateMakeConfUser() to create one if needed.
Only works inside the chroot.
"""
make_user = _GetChrootMakeConfUserPath()
link_path = self.JoinPath(_MAKE_CONF_USER)
if not os.path.exists(link_path):
osutils.SafeSymlink(make_user, link_path, sudo=True)
def write_build_target_config(
self, build_target: build_target_lib.BuildTarget
):
"""Write the build target config file."""
path = Path(self.path) / _BUILD_TARGET_CONFIG
osutils.WriteFile(
path, build_target.to_json(), makedirs=True, sudo=True
)
def _GenerateConfig(
self,
toolchains: toolchain_list.ToolchainList,
board_overlays: List[Path],
portdir_overlays: List[Path],
header: str,
use_internal: bool,
**kwargs: Any,
) -> str:
"""Create common config settings for boards and bricks.
Args:
toolchains: ToolchainList object to use.
board_overlays: List of board overlays.
portdir_overlays: List of portage overlays.
header: Header comment string; must start with #.
use_internal: Whether this build configuration should try
USE=internal features.
**kwargs: Additional configuration values to set.
Returns:
Configuration string.
Raises:
ConfigurationError: Could not generate a valid configuration.
"""
config = {}
default_toolchains = toolchain.FilterToolchains(
toolchains, "default", True
)
if not default_toolchains:
raise ConfigurationError("No default toolchain could be found.")
config["CHOST"] = list(default_toolchains)[0]
config["ARCH"] = toolchain.GetArchForTarget(config["CHOST"])
config["BOARD_OVERLAY"] = "\n".join(str(x) for x in board_overlays)
config["PORTDIR_OVERLAY"] = "\n".join(str(x) for x in portdir_overlays)
config["ROOT"] = self.path + "/"
config["PKG_CONFIG"] = self._WrapperPath("pkg-config")
if not use_internal:
config[
"USE"
] = "${USE} -ondevice_speech -ondevice_image_content_annotation"
config.update(kwargs)
return f"{header}\n" + _DictToKeyValue(config)
def GenerateBoardSetupConfig(
self, build_target: build_target_lib.BuildTarget
) -> str:
"""Generates the setup configuration for a given board.
Args:
build_target: BuildTarget to use to generate the configuration.
"""
# Compute the overlay list.
portdir_overlays = list(build_target.find_overlays())
prefix = constants.SOURCE_ROOT / "src" / "third_party"
board_overlays = [
o for o in portdir_overlays if prefix not in o.parents
]
toolchains = toolchain.get_toolchains_for_build_target(build_target)
header = (
"# Created by cros_sysroot_utils from --board=%s."
% build_target.name
)
# NB: Do not touch this w/out build consult.
use_internal = (
os.path.isfile(_CHROMEOS_INTERNAL_BOTO_PATH)
and not build_target.public
)
return self._GenerateConfig(
toolchains,
board_overlays,
portdir_overlays,
header,
use_internal=use_internal,
BOARD_USE=build_target.name,
)
def WriteConfig(self, config: str) -> None:
"""Writes the configuration.
Args:
config: configuration to use.
"""
osutils.WriteFile(
self._config_file_write, config, makedirs=True, sudo=True
)
def GenerateBoardMakeConf(
self, accepted_licenses: Optional[str] = None
) -> str:
"""Generates the board specific make.conf.
Args:
accepted_licenses: Licenses accepted by portage.
Returns:
The make.conf file as a python string.
"""
config = [
"""# AUTO-GENERATED FILE. DO NOT EDIT.
# Source make.conf from each overlay."""
]
overlay_list = self.GetStandardField(STANDARD_FIELD_BOARD_OVERLAY)
boto_config = ""
for overlay in overlay_list.splitlines():
make_conf = os.path.join(overlay, "make.conf")
boto_file = os.path.join(overlay, "googlestorage_account.boto")
if os.path.isfile(make_conf):
config.append("source %s" % make_conf)
if os.path.isfile(boto_file):
boto_config = boto_file
# If there is a boto file in the chromeos internal overlay, use it as it
# will have access to the most stuff.
if os.path.isfile(_CHROMEOS_INTERNAL_BOTO_PATH):
boto_config = _CHROMEOS_INTERNAL_BOTO_PATH
gs_fetch_binpkg = os.path.join(
constants.SOURCE_ROOT, "chromite", "bin", "gs_fetch_binpkg"
)
gsutil_cmd = (
'%s \\"${URI}\\" \\"${DISTDIR}/${FILE}\\"' % gs_fetch_binpkg
)
config.append('BOTO_CONFIG="%s"' % boto_config)
config.append(
"FETCHCOMMAND_GS=\"bash -c 'BOTO_CONFIG=%s %s'\""
% (boto_config, gsutil_cmd)
)
config.append('RESUMECOMMAND_GS="$FETCHCOMMAND_GS"')
if accepted_licenses:
config.append('ACCEPT_LICENSE="%s"' % accepted_licenses)
return "".join(f"{x}\n" for x in config)
def GenerateBinhostConf(
self,
local_only: bool = False,
expanded_binhost_inheritance: bool = False,
use_cq_prebuilts: bool = False,
source_root: Path = constants.SOURCE_ROOT,
) -> str:
"""Returns the binhost configuration.
Args:
local_only: If True, use binary packages from local boards only.
expanded_binhost_inheritance: Look for additional binhosts to
inherit.
use_cq_prebuilts: Whether to use the prebuilts generated by CQ.
source_root: Root directory for the source files.
Returns:
The config contents.
"""
board = self.GetStandardField(STANDARD_FIELD_BOARD_USE)
if local_only:
if not board:
return ""
# TODO(bsimonnet): Refactor cros_generate_local_binhosts into a
# function here and remove the following call.
local_binhosts = cros_build_lib.run(
[
constants.CHROMITE_BIN_DIR / "cros_generate_local_binhosts",
"--board=%s" % board,
],
print_cmd=False,
capture_output=True,
encoding="utf-8",
).stdout
return "\n".join(
[local_binhosts, 'PORTAGE_BINHOST="$LOCAL_BINHOST"']
)
config = []
config.append(
"""
# FULL_BINHOST is populated by the full builders. It is listed first because it
# is the lowest priority binhost. It is better to download packages from the
# postsubmit/cq binhost because they are fresher packages.
PORTAGE_BINHOST="$FULL_BINHOST"
"""
)
config.extend(
self._ContinuousBinhostConfigs(
"POSTSUBMIT",
board,
expanded_binhost_inheritance,
source_root,
)
)
# CQ BINHOSTs in the repository are effective if |package_indexes| is
# not set or |use_cq_prebuilts| is explicitly specified.
# Swap the public and private ordering because --useoldpkg-atoms
# (i.e. --use-any-chrome) doesn't seem to follow the right-to-left
# convention the rest of the portage configs use; instead it appears to
# resolve left to right, so this should hopefully mean more chrome
# binpkg usage for devs. This isn't a real solution to the chrome
# binpkg UX issues, but it should hopefully make things somewhat better
# in practice for now.
if use_cq_prebuilts:
config.extend(
self._ContinuousBinhostConfigs(
"CQ",
board,
expanded_binhost_inheritance,
source_root,
swap_public_private=True,
)
)
return "".join(f"{x}\n" for x in config)
def _ContinuousBinhostConfigs(
self,
builder_type: str,
board: Union[str, None],
expanded_binhost_inheritance: bool,
source_root: Path,
swap_public_private: bool = False,
) -> List[str]:
config = []
(binhost_public, binhost_internal) = self._ContinuousBinhosts(
builder_type, board, expanded_binhost_inheritance, source_root
)
if binhost_public:
config.append(
f"""
# {builder_type}_BINHOST is populated by the public {builder_type} builders.
# The packages here take higher priority than the packages provided by the
# above binhosts.
source {binhost_public}
PORTAGE_BINHOST="$PORTAGE_BINHOST ${builder_type}_BINHOST"
"""
)
if binhost_internal:
config.append(
f"""
# {builder_type}_BINHOST is populated by the internal {builder_type} builders.
# The packages here take higher priority than the packages provided by the
# above binhosts.
source {binhost_internal}
PORTAGE_BINHOST="$PORTAGE_BINHOST ${builder_type}_BINHOST"
"""
)
if swap_public_private:
config.reverse()
return config
def GetBaseArchBoard(self) -> Optional[str]:
"""Return name of base architecture board."""
arch = self.GetStandardField(STANDARD_FIELD_ARCH)
if arch in _ARCH_MAPPING:
return _ARCH_MAPPING[arch]
return None
def _ContinuousBinhosts(
self,
builder_type: str,
board: Union[str, None],
expanded_binhost_inheritance: bool,
source_root: Path,
) -> Tuple[Optional[str], Optional[str]]:
"""Returns the postsubmit or CQ binhost to use."""
boards = []
# Binhost files are picked in the same order as |boards|, so it's
# critical that <board>-<builder_type>_BINHOST.conf is at the top of
# the |boards| list.
if board:
boards = [board]
# Add reference board if applicable.
if "_" in board:
boards.append(board.split("_")[0])
elif expanded_binhost_inheritance:
# Search the public parent overlays for the given board, and
# include the parents' binhosts; e.g. eve for eve-kvm.
overlays = portage_util.FindOverlays(
constants.PUBLIC_OVERLAYS, board=board
)
names = [portage_util.GetOverlayName(x) for x in overlays]
boards.extend(x for x in names if x != board)
# Add base architecture board.
base_board = self.GetBaseArchBoard()
if base_board:
boards.append(base_board)
filenames = [f"{p}-{builder_type}_BINHOST.conf" for p in boards]
external = internal = None
for filename in filenames:
# The binhost file must exist and not be empty, both for internal
# and external binhosts. When a builder is deleted and no longer
# publishes prebuilts, we need developers to pick up the next set of
# prebuilts. Clearing the binhost files triggers this.
candidate = os.path.join(
source_root, _INTERNAL_BINHOST_DIR, filename
)
if not internal and _NotEmpty(candidate):
internal = candidate
candidate = os.path.join(
source_root, _EXTERNAL_BINHOST_DIR, filename
)
if not external and _NotEmpty(candidate):
external = candidate
return external, internal
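# Illustrative lookup (hypothetical amd64 board "foo"): for
# builder_type="POSTSUBMIT", the candidate filenames would be
# ["foo-POSTSUBMIT_BINHOST.conf", "amd64-generic-POSTSUBMIT_BINHOST.conf"],
# checked under both the internal and external binhost conf directories;
# the first non-empty file found in each wins.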
def CreateSkeleton(self) -> None:
"""Creates a sysroot skeleton."""
needed_dirs = [
self.JoinPath("etc", "portage", "hooks"),
self.JoinPath("etc", "portage", "profile"),
"/usr/local/bin",
]
for d in needed_dirs:
osutils.SafeMakedirs(d, sudo=True)
# Create links for portage hooks.
for filename in (constants.CROSUTILS_DIR / "hooks").glob("*"):
linkpath = self.JoinPath(
"etc",
"portage",
"hooks",
filename.name,
)
osutils.SafeSymlink(filename, linkpath, sudo=True)
@osutils.rotate_log_file(portage_util.get_die_hook_status_file())
def UpdateToolchain(self, board: str, local_init: bool = True) -> None:
"""Updates the toolchain packages.
This will install both the toolchains and the packages that are
implicitly needed (gcc-libs, linux-headers).
Args:
board: The name of the board.
local_init: Whether to use local packages to bootstrap the implicit
dependencies.
"""
try:
toolchain.InstallToolchain(self)
except toolchain.ToolchainInstallError as e:
raise ToolchainInstallError(
str(e),
e.result,
exception=e.exception,
tc_info=e.failed_toolchain_info,
) from e
if not self.IsToolchainInstalled():
# Emerge the implicit dependencies.
emerge = self._UpdateToolchainCommand(board, local_init)
try:
cros_build_lib.sudo_run(emerge, preserve_env=True)
except cros_build_lib.RunCommandError as e:
# Include failed packages from the status file in the error.
failed_pkgs = portage_util.ParseDieHookStatusFile()
raise ToolchainInstallError(
str(e), e.result, exception=e, tc_info=failed_pkgs
) from e
# Record we've installed them so we don't call emerge each time.
self.SetCachedField(_IMPLICIT_SYSROOT_DEPS_KEY, "yes")
def _UpdateToolchainCommand(self, board: str, local_init: bool) -> List[str]:
"""Helper function to build the emerge command for UpdateToolchain."""
emerge = [
constants.CHROMITE_BIN_DIR / "parallel_emerge",
"--board=%s" % board,
"--root-deps=rdeps",
"--select",
"--quiet",
]
if local_init:
emerge += ["--getbinpkg", "--usepkg"]
emerge += _IMPLICIT_SYSROOT_DEPS
return emerge
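# A sketch of the command this builds (hypothetical board "foo",
# local_init=True):
#   parallel_emerge --board=foo --root-deps=rdeps --select --quiet \
#       --getbinpkg --usepkg \
#       sys-kernel/linux-headers sys-libs/gcc-libs sys-libs/libcxx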
def IsToolchainInstalled(self) -> bool:
"""Check if the toolchain has been installed."""
return self.GetCachedField(_IMPLICIT_SYSROOT_DEPS_KEY) == "yes"
def Delete(self, background: bool = False) -> None:
"""Delete the sysroot.
Optionally run asynchronously. Async delete moves the sysroot into a
temp directory and then deletes the tempdir with a background task.
Args:
background: Whether to run the delete as a background operation.
"""
rm = ["rm", "-rf", "--one-file-system", "--"]
if background:
# Make the temporary directory in the same folder as the sysroot
# we're deleting to avoid crossing disks, mounts, etc. that'd cause
# us to synchronously copy the entire thing before we delete it.
cwd = os.path.normpath(self.JoinPath(".."))
try:
result = cros_build_lib.sudo_run(
["mktemp", "-d", "-p", cwd],
encoding="utf-8",
stdout=True,
cwd=cwd,
debug_level=logging.DEBUG,
)
except cros_build_lib.RunCommandError:
# Fall back to a synchronous delete just in case.
logging.notice(
"Error deleting sysroot asynchronously. Deleting "
"synchronously instead. This may take a minute."
)
return self.Delete(background=False)
tempdir = result.stdout.strip()
cros_build_lib.sudo_run(
["mv", self.path, tempdir],
capture_output=True,
debug_level=logging.DEBUG,
)
if not os.fork():
# Child process, just delete the sysroot root and _exit.
result = cros_build_lib.sudo_run(
rm + [tempdir],
capture_output=True,
check=False,
debug_level=logging.DEBUG,
)
if result.returncode:
# Log it so it can be handled manually.
logging.warning(
"Unable to delete old sysroot now at %s: %s",
tempdir,
result.stderr,
)
# pylint: disable=protected-access
os._exit(result.returncode)
else:
cros_build_lib.sudo_run(
rm + [self.path], capture_output=True, debug_level=logging.DEBUG
)
def get_sdk_provided_packages(self) -> Iterable[package_info.PackageInfo]:
"""Find all packages provided by the SDK (i.e. package.provided)."""
# Look at packages in package.provided.
sdk_file_path = self.JoinPath(
"etc", "portage", "profile", "package.provided"
)
for line in osutils.ReadFile(sdk_file_path).splitlines():
# Skip comments and empty lines.
line = line.split("#", 1)[0].strip()
if not line:
continue
yield package_info.parse(line)
def get_sdk_provided_packages(
sysroot_path: str,
) -> Iterable[package_info.PackageInfo]:
"""Find all packages provided by the SDK (i.e. package.provided).
Convenience wrapper for the Sysroot method.
Args:
sysroot_path: The sysroot to use when finding SDK packages.
Returns:
The provided packages.
"""
sysroot = Sysroot(sysroot_path)
return sysroot.get_sdk_provided_packages()
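# Illustrative usage of the module-level helper (hypothetical sysroot path):
#   for pkg in get_sdk_provided_packages("/build/foo"):
#       logging.info("SDK-provided: %s", pkg.cpf or pkg.cp)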