| # Copyright 2013 The ChromiumOS Authors |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| |
| """Module containing the generic stages.""" |
| |
| import contextlib |
| import copy |
| import fnmatch |
| import json |
| import logging |
| import os |
| import re |
| import sys |
| import time |
| import traceback |
| |
| from chromite.cbuildbot import cbuildbot_alerts |
| from chromite.cbuildbot import commands |
| from chromite.cbuildbot import repository |
| from chromite.lib import buildbucket_v2 |
| from chromite.lib import builder_status_lib |
| from chromite.lib import constants |
| from chromite.lib import cros_build_lib |
| from chromite.lib import failure_message_lib |
| from chromite.lib import failures_lib |
| from chromite.lib import gs |
| from chromite.lib import metrics |
| from chromite.lib import osutils |
| from chromite.lib import parallel |
| from chromite.lib import portage_util |
| from chromite.lib import results_lib |
| from chromite.lib import timeout_util |
| from chromite.lib.buildstore import BuildIdentifier |
| |
| |
def ReportStageFailure(exception, metrics_fields=None) -> None:
    """Report a stage failure to Monarch, including inner exceptions.

    Args:
        exception: The failure exception to report.
        metrics_fields: Fields for the ts_mon metric.
    """
    categories = [failures_lib.GetExceptionCategory(type(exception))]
    # This assumes that CompoundFailure can't be nested, so one level of
    # expansion is enough.
    if isinstance(exception, failures_lib.CompoundFailure):
        categories.extend(
            failures_lib.GetExceptionCategory(exc_class)
            for exc_class, _, _ in exception.exc_infos
        )
    for category in categories:
        _InsertFailureToMonarch(
            exception_category=category, metrics_fields=metrics_fields
        )
| |
| |
def _InsertFailureToMonarch(
    exception_category=constants.EXCEPTION_CATEGORY_UNKNOWN, metrics_fields=None
) -> None:
    """Report a single stage failure to Monarch if needed.

    Unknown categories are not reported; neither is anything when no
    metric fields were supplied.

    Args:
        exception_category: One of
            constants.EXCEPTION_CATEGORY_ALL_CATEGORIES. Defaults to
            'unknown'.
        metrics_fields: Fields for the ts_mon metric. Left unmodified; a
            copy is taken before the category is added.
    """
    if (
        metrics_fields is not None
        and exception_category != constants.EXCEPTION_CATEGORY_UNKNOWN
    ):
        counter = metrics.Counter(constants.MON_STAGE_FAILURE_COUNT)
        # Copy so the caller's dict is not mutated as a side effect
        # (callers may reuse the same dict for several reports).
        fields = dict(metrics_fields)
        fields["exception_category"] = exception_category
        counter.increment(fields=fields)
| |
| |
class BuilderStage:
    """Parent class for stages to be performed by a builder."""

    # Used to remove 'Stage' suffix of stage class when generating stage name.
    name_stage_re = re.compile(r"(\w+)Stage")
    # Stage category used for failure metrics; subclasses may override.
    category = constants.UNCATEGORIZED_STAGE

    # TODO(sosa): Remove these once we have a SEND/RECEIVE IPC mechanism
    # implemented.
    overlays = None
    push_overlays = None

    # Class should set this if they have a corresponding no<stage> option that
    # skips their stage.
    # TODO(mtennant): Rename this something like skip_option_name.
    option_name = None

    # Class should set this if they have a corresponding setting in
    # the build_config that skips their stage.
    # TODO(mtennant): Rename this something like skip_config_name.
    config_name = None
| |
| @classmethod |
| def StageNamePrefix(cls): |
| """Return cls.__name__ with any 'Stage' suffix removed.""" |
| match = cls.name_stage_re.match(cls.__name__) |
| assert match, "Class name %s does not end with Stage" % cls.__name__ |
| return match.group(1) |
| |
    def __init__(
        self,
        builder_run,
        buildstore,
        suffix=None,
        attempt=None,
        max_retry=None,
        build_root=None,
    ) -> None:
        """Create a builder stage.

        Args:
            builder_run: The BuilderRun object for the run this stage is part
                of.
            buildstore: BuildStore object to make DB calls.
            suffix: The suffix to append to the buildbot name. Defaults to None.
            attempt: If this build is to be retried, the current attempt number
                (starting from 1). Defaults to None. Is only valid if
                |max_retry| is also specified.
            max_retry: The maximum number of retries. Defaults to None. Is only
                valid if |attempt| is also specified.
            build_root: Override the builder_run build_root.
        """
        self._run = builder_run
        self.buildstore = buildstore

        self._attempt = attempt
        self._max_retry = max_retry
        # CIDB row id for this stage; populated by _InsertBuildStageInCIDB.
        self._build_stage_id = None

        # Construct self.name, the name string for this stage instance.
        self.name = self._prefix = self.StageNamePrefix()
        if suffix:
            self.name += suffix

        # TODO(mtennant): Phase this out and use self._run.bot_id directly.
        self._bot_id = self._run.bot_id

        # self._boards holds list of boards involved in this run.
        # TODO(mtennant): Replace self._boards with a self._run.boards?
        self._boards = self._run.config.boards

        self._build_root = os.path.abspath(build_root or self._run.buildroot)

        self.build_config = self._run.config.name
        self.metrics_branch = self._run.options.branch
        self.metrics_tryjob = self._run.options.remote_trybot

        # Prebuilt type only matters when this run uploads prebuilts.
        self._prebuilt_type = None
        if self._run.ShouldUploadPrebuilts():
            self._prebuilt_type = self._run.config.build_type

        # Determine correct chrome_rev: command-line option overrides config.
        self._chrome_rev = self._run.config.chrome_rev
        if self._run.options.chrome_rev:
            self._chrome_rev = self._run.options.chrome_rev

        # USE and environment variable settings.
        self._portage_extra_env = {}
        useflags = self._run.config.useflags[:]

        if self._run.options.chrome_root:
            self._portage_extra_env["CHROME_ORIGIN"] = "LOCAL_SOURCE"

        self._latest_toolchain = (
            self._run.config.latest_toolchain
            or self._run.options.latest_toolchain
        )

        if useflags:
            self._portage_extra_env["USE"] = " ".join(useflags)

        # Note: BuildStartStage is a special case: Since it is created before we
        # have a valid |build_id|, it is not logged in cidb.
        self._InsertBuildStageInCIDB(name=self.name)
| |
| def GetStageNames(self): |
| """Get a list of the places where this stage has recorded results.""" |
| return [self.name] |
| |
| def UpdateSuffix(self, tag, child_suffix): |
| """Update the suffix arg for the init call. |
| |
| Use this function to concatenate the tag for the current class with the |
| suffix passed in by a child class. |
| This function is expected to be called before __init__, and as such |
| should not use any object attributes. |
| |
| Args: |
| tag: The tag for this class. Should not be None. |
| child_suffix: The suffix passed up by the child class. May be None. |
| |
| Returns: |
| Extended suffix that incoroporates the tag, to be passed up to the |
| parent class's __init__. |
| """ |
| if child_suffix is None: |
| child_suffix = "" |
| return " [%s]%s" % (tag, child_suffix) |
| |
| # TODO(akeshet): Eliminate this method and update the callers to use |
| # builder run directly. |
| def ConstructDashboardURL(self, stage=None): |
| """Return the dashboard URL |
| |
| This is the direct link to buildbot logs as seen in build.chromium.org |
| |
| Args: |
| stage: Link to a specific |stage|, otherwise the general buildbot |
| log |
| |
| Returns: |
| The fully formed URL |
| """ |
| return self._run.ConstructDashboardURL(stage=stage) |
| |
| def _InsertBuildStageInCIDB( |
| self, name, board=None, status=constants.BUILDER_STATUS_PLANNED |
| ) -> None: |
| """Insert a build stage in cidb. |
| |
| Expected arguments are the same as cidb.InsertBuildStage, except |
| |build_id|, which is populated here. |
| """ |
| build_identifier, _ = self._run.GetCIDBHandle() |
| build_id = build_identifier.cidb_id |
| if build_id: |
| self._build_stage_id = self.buildstore.InsertBuildStage( |
| build_id, name, board, status |
| ) |
| |
    def _FinishBuildStageInCIDBAndMonarch(
        self, stage_result, status, elapsed_time_seconds=0
    ) -> None:
        """Mark the stage as finished in cidb and emit Monarch metrics.

        Args:
            stage_result: results_lib.Results.* object of this stage.
            status: The finish status of the build. Enum type
                constants.BUILDER_COMPLETED_STATUSES
            elapsed_time_seconds: Elapsed time in stage, in seconds.
        """
        # Only touch CIDB when the stage was actually registered there.
        if (
            self._build_stage_id is not None
            and self.buildstore.AreClientsReady()
        ):
            self.buildstore.FinishBuildStage(self._build_stage_id, status)

        fields = {
            "status": status,
            "name": self.name,
            "build_config": self._run.config.name,
            "important": self._run.config.important,
        }

        # Aggregate duration and completion-count metrics.
        metrics.CumulativeSecondsDistribution(constants.MON_STAGE_DURATION).add(
            elapsed_time_seconds, fields=fields
        )
        metrics.Counter(constants.MON_STAGE_COMP_COUNT).increment(fields=fields)
        common_metrics_fields = {
            "branch_name": self.metrics_branch,
            "build_config": self.build_config,
            "tryjob": self.metrics_tryjob,
        }
        # Deep-copy so per-metric additions don't leak between field dicts.
        duration_metrics_fields = copy.deepcopy(common_metrics_fields)
        duration_metrics_fields.update({"stage": self.name, "status": status})
        if self.metrics_branch is not None:
            metrics.FloatMetric(constants.MON_STAGE_INSTANCE_DURATION).set(
                elapsed_time_seconds, fields=duration_metrics_fields
            )
        # Report a failure only when the result is an exception and the
        # stage exists in CIDB.
        if (
            isinstance(stage_result, BaseException)
            and self._build_stage_id is not None
        ):
            failed_metrics_fields = copy.deepcopy(common_metrics_fields)
            failed_metrics_fields.update(
                {"failed_stage": self.name, "category": self.category}
            )
            if self.buildstore.AreClientsReady():
                ReportStageFailure(
                    stage_result, metrics_fields=failed_metrics_fields
                )
| |
| def _StartBuildStageInCIDB(self) -> None: |
| """Mark the stage as inflight in cidb.""" |
| if ( |
| self._build_stage_id is not None |
| and self.buildstore.AreClientsReady() |
| ): |
| self.buildstore.StartBuildStage(self._build_stage_id) |
| |
| def _WaitBuildStageInCIDB(self) -> None: |
| """Mark the stage as waiting in cidb.""" |
| if ( |
| self._build_stage_id is not None |
| and self.buildstore.AreClientsReady() |
| ): |
| self.buildstore.WaitBuildStage(self._build_stage_id) |
| |
| def _TranslateResultToCIDBStatus(self, result): |
| """Translates different result_lib.Result results to builder statuses. |
| |
| Args: |
| result: Same as the result passed to results_lib.Result.Record() |
| |
| Returns: |
| A value in the enum constants.BUILDER_ALL_STATUSES. |
| """ |
| if result == results_lib.Results.SUCCESS: |
| return constants.BUILDER_STATUS_PASSED |
| elif result == results_lib.Results.FORGIVEN: |
| return constants.BUILDER_STATUS_FORGIVEN |
| elif result == results_lib.Results.SKIPPED: |
| return constants.BUILDER_STATUS_SKIPPED |
| else: |
| logging.info("Translating result %s to fail.", result) |
| return constants.BUILDER_STATUS_FAILED |
| |
| def GetRepoRepository(self, **kwargs): |
| """Create a new repo repository object.""" |
| manifest_url = self._run.options.manifest_repo_url |
| if manifest_url is None: |
| manifest_url = self._run.config.manifest_repo_url |
| |
| manifest_branch = self._run.config.manifest_branch |
| if manifest_branch is None: |
| manifest_branch = self._run.manifest_branch |
| |
| kwargs.setdefault("manifest_repo_url", manifest_url) |
| kwargs.setdefault("directory", self._build_root) |
| kwargs.setdefault("referenced_repo", self._run.options.reference_repo) |
| kwargs.setdefault("branch", manifest_branch) |
| kwargs.setdefault("manifest", self._run.config.manifest) |
| kwargs.setdefault("git_cache_dir", self._run.options.git_cache_dir) |
| kwargs.setdefault("groups", "all") |
| |
| # pass in preserve_paths so that repository.RepoRepository |
| # knows what paths to preserve when executing clean_up_repo |
| if hasattr(self._run.options, "preserve_paths"): |
| kwargs.setdefault( |
| "preserve_paths", self._run.options.preserve_paths |
| ) |
| |
| return repository.RepoRepository(**kwargs) |
| |
| def GetScheduledSlaveBuildbucketIds(self): |
| """Get buildbucket_ids list of the scheduled slave builds. |
| |
| Returns: |
| A list of buildbucket_ids (strings) of the slave builds. The list |
| doesn't contain the old builds which were retried in Buildbucket. |
| """ |
| buildbucket_ids = None |
| if self._run.config.slave_configs: |
| buildbucket_ids = buildbucket_v2.GetBuildbucketIds( |
| self._run.attrs.metadata |
| ) |
| |
| return buildbucket_ids |
| |
    def GetBuildFailureMessageFromBuildStore(
        self, buildstore, build_identifier
    ):
        """Get message summarizing failures of this build from BuildStore.

        Args:
            buildstore: An instance of BuildStore to make DB calls.
            build_identifier: The instance of BuildIdentifier of the current
                build.

        Returns:
            An instance of build_failure_message.BuildFailureMessage.
        """
        # Pull the raw failure rows for this build, then turn them into
        # structured stage failure messages.
        stage_failures = buildstore.GetBuildsFailures(
            [build_identifier.buildbucket_id]
        )
        failure_msg_manager = failure_message_lib.FailureMessageManager()
        failure_messages = failure_msg_manager.ConstructStageFailureMessages(
            stage_failures
        )
        master_build_identifier = BuildIdentifier(
            self._run.options.master_build_id,
            self._run.options.master_buildbucket_id,
        )
        # Whether the master aborted this slave via self-destruction.
        aborted = (
            builder_status_lib.BuilderStatusManager.AbortedBySelfDestruction(
                buildstore,
                build_identifier.buildbucket_id,
                master_build_identifier,
            )
        )

        return (
            builder_status_lib.BuilderStatusManager.CreateBuildFailureMessage(
                self._run.config.name,
                self._run.config.overlays,
                self._run.ConstructDashboardURL(),
                failure_messages,
                aborted_by_self_destruction=aborted,
            )
        )
| |
| def GetBuildFailureMessageFromResults(self): |
| """Get message summarizing build failures from result_lib.Results. |
| |
| Returns: |
| An instance of build_failure_message.BuildFailureMessage. |
| """ |
| failure_messages = results_lib.Results.GetStageFailureMessage() |
| return ( |
| builder_status_lib.BuilderStatusManager.CreateBuildFailureMessage( |
| self._run.config.name, |
| self._run.config.overlays, |
| self._run.ConstructDashboardURL(), |
| failure_messages, |
| ) |
| ) |
| |
| def GetBuildFailureMessage(self): |
| """Get message summarizing failure of this build.""" |
| build_identifier, _ = self._run.GetCIDBHandle() |
| if self.buildstore.AreClientsReady(): |
| return self.GetBuildFailureMessageFromBuildStore( |
| self.buildstore, build_identifier |
| ) |
| else: |
| return self.GetBuildFailureMessageFromResults() |
| |
| def _Print(self, msg) -> None: |
| """Prints a msg to stderr.""" |
| sys.stdout.flush() |
| print(msg, file=sys.stderr) |
| sys.stderr.flush() |
| |
| def _PrintLoudly(self, msg) -> None: |
| """Prints a msg with loudly.""" |
| |
| border_line = "*" * 60 |
| edge = "*" * 2 |
| |
| sys.stdout.flush() |
| print(border_line, file=sys.stderr) |
| |
| msg_lines = msg.split("\n") |
| |
| # If the last line is whitespace only drop it. |
| if not msg_lines[-1].rstrip(): |
| del msg_lines[-1] |
| |
| for msg_line in msg_lines: |
| print("%s %s" % (edge, msg_line), file=sys.stderr) |
| |
| print(border_line, file=sys.stderr) |
| sys.stderr.flush() |
| |
| def _GetSlaveConfigs(self): |
| """Get the slave configs for the current build config. |
| |
| This assumes self._run.config is a master config. |
| |
| Returns: |
| A list of build configs corresponding to the slaves for the master |
| build config at self._run.config. |
| |
| Raises: |
| See config_lib.Config.GetSlavesForMaster for details. |
| """ |
| experimental_builders = self._run.attrs.metadata.GetValueWithDefault( |
| constants.METADATA_EXPERIMENTAL_BUILDERS, [] |
| ) |
| slave_configs = self._run.site_config.GetSlavesForMaster( |
| self._run.config, self._run.options |
| ) |
| slave_configs = [ |
| config |
| for config in slave_configs |
| if config["name"] not in experimental_builders |
| ] |
| return slave_configs |
| |
| def _GetSlaveConfigMap(self, important_only=True): |
| """Get slave config map for the current build config. |
| |
| This assumes self._run.config is a master config. |
| |
| Args: |
| important_only: If True, only get important slaves. |
| |
| Returns: |
| A map of slave_name to slave_config for the current master. |
| |
| Raises: |
| See config_lib.Config.GetSlaveConfigMapForMaster for details. |
| """ |
| |
| slave_config_map = self._run.site_config.GetSlaveConfigMapForMaster( |
| self._run.config, self._run.options, important_only=important_only |
| ) |
| if important_only: |
| experimental_builders = ( |
| self._run.attrs.metadata.GetValueWithDefault( |
| constants.METADATA_EXPERIMENTAL_BUILDERS, [] |
| ) |
| ) |
| slave_config_map = { |
| k: v |
| for k, v in slave_config_map.items() |
| if k not in experimental_builders |
| } |
| return slave_config_map |
| |
| def _BeginStepForBuildbot(self, tag=None) -> None: |
| """Called before a stage is performed. |
| |
| Args: |
| tag: Extra tag to add to the stage name on the waterfall. |
| """ |
| waterfall_name = self.name |
| if tag is not None: |
| waterfall_name += tag |
| cbuildbot_alerts.PrintBuildbotStepName(waterfall_name) |
| |
| self._PrintLoudly( |
| "Start Stage %s - %s\n\n%s" |
| % (self.name, cros_build_lib.UserDateTimeFormat(), self.__doc__) |
| ) |
| |
    def Finish(self) -> None:
        """Called after a stage has already completed.

        Will be called on both success or failure. EXCEPTIONS WILL BE
        LOGGED AND IGNORED, and will not fail the stage.

        This is an appropriate place for non-essential cleanup/reporting
        work.
        """
| |
| def WaitUntilReady(self): |
| """Wait until all the preconditions for the stage are satisfied. |
| |
| Can be overridden by stages. If it returns True, trigger the run |
| of PerformStage; else, skip this stage. |
| |
| Returns: |
| By default it just returns True. Subclass can override it |
| to return the boolean indicating if Wait succeeds and |
| if PerformStage should be run |
| """ |
| return True |
| |
    def PerformStage(self) -> None:
        """Run the actual commands needed for this stage.

        Subclassed stages must override this function.
        """
        # Intentionally a no-op in the base class.
| |
| @staticmethod |
| def _StringifyException(exc_info): |
| """Convert an exception into a string. |
| |
| Args: |
| exc_info: A (type, value, traceback) tuple as returned by |
| sys.exc_info(). |
| |
| Returns: |
| A string description of the exception. |
| """ |
| exc_type, exc_value = exc_info[:2] |
| if issubclass(exc_type, failures_lib.StepFailure): |
| return str(exc_value) |
| else: |
| return "".join(traceback.format_exception(*exc_info)) |
| |
| @classmethod |
| def _HandleExceptionAsWarning(cls, exc_info, retrying=False): |
| """Use over HandleStageException to treat an exception as a warning. |
| |
| This is used by the ForgivingBuilderStage's to treat any exceptions as |
| warnings instead of stage failures. |
| """ |
| description = cls._StringifyException(exc_info) |
| cbuildbot_alerts.PrintBuildbotStepWarnings() |
| logging.warning(description) |
| return (results_lib.Results.FORGIVEN, description, retrying) |
| |
| @classmethod |
| def _HandleExceptionAsError(cls, exc_info): |
| """Handle an exception as an error, but ignore stage retry settings. |
| |
| Meant as a helper for _HandleStageException code only. |
| |
| Args: |
| exc_info: A (type, value, traceback) tuple as returned by |
| sys.exc_info(). |
| |
| Returns: |
| Result tuple of (exception, description, retrying). |
| """ |
| # Tell the user about the exception, and record it. |
| retrying = False |
| description = cls._StringifyException(exc_info) |
| cbuildbot_alerts.PrintBuildbotStepFailure() |
| logging.error(description) |
| return (exc_info[1], description, retrying) |
| |
| def _HandleStageException(self, exc_info): |
| """Called when PerformStage throws an exception. |
| |
| Can be overridden. |
| |
| Args: |
| exc_info: A (type, value, traceback) tuple as returned by |
| sys.exc_info(). |
| |
| Returns: |
| Result tuple of (exception, description, retrying). If it isn't an |
| exception, then description will be None. |
| """ |
| if ( |
| self._attempt |
| and self._max_retry |
| and self._attempt <= self._max_retry |
| ): |
| return self._HandleExceptionAsWarning(exc_info, retrying=True) |
| else: |
| return self._HandleExceptionAsError(exc_info) |
| |
    def _TopHandleStageException(self):
        """Called when PerformStage throws an unhandled exception.

        Should only be called by the Run function. Provides a wrapper around
        _HandleStageException to handle buggy handlers. We must go deeper...
        """
        exc_info = sys.exc_info()
        try:
            return self._HandleStageException(exc_info)
        except Exception:
            # The handler itself blew up; log both exceptions and fall back
            # to the plain error path so the original failure is preserved.
            logging.error(
                "An exception was thrown while running _HandleStageException"
            )
            logging.error("The original exception was:", exc_info=exc_info)
            logging.error("The new exception is:", exc_info=True)
            return self._HandleExceptionAsError(exc_info)
| |
    def HandleSkip(self) -> None:
        """Run if the stage is skipped."""
        # This is a hook used by some subclasses; intentionally a no-op here.
| |
    def _RecordResult(self, *args, **kwargs) -> None:
        """Record a successful or failed result."""
        # Thin pass-through; subclasses (e.g. board stages) extend kwargs.
        results_lib.Results.Record(*args, **kwargs)
| |
| def _ShouldSkipStage(self): |
| """Decide if we were requested to skip this stage.""" |
| return ( |
| self.option_name |
| and not getattr(self._run.options, self.option_name) |
| or self.config_name |
| and not getattr(self._run.config, self.config_name) |
| ) |
| |
    def Run(self) -> None:
        """Have the builder execute the stage."""
        skip_stage = self._ShouldSkipStage()
        previous_record = results_lib.Results.PreviouslyCompletedRecord(
            self.name
        )
        if skip_stage:
            # We do not log the beginning of a skipped stage.
            pass
        elif previous_record is not None:
            self._BeginStepForBuildbot(" : [PREVIOUSLY PROCESSED]")
        else:
            self._BeginStepForBuildbot()

        # Set default values
        result = None
        cidb_result = None
        description = None
        board = ""
        elapsed_time = None
        start_time = time.time()
        try:
            if skip_stage:
                self._StartBuildStageInCIDB()
                self.HandleSkip()
                result = results_lib.Results.SKIPPED
                return

            if previous_record:
                self._StartBuildStageInCIDB()
                self._PrintLoudly("Stage %s processed previously" % self.name)
                self.HandleSkip()
                # Success is stored in the results log for a stage that
                # completed successfully in a previous run. But, we report the
                # truth to CIDB.
                result = results_lib.Results.SUCCESS
                cidb_result = constants.BUILDER_STATUS_SKIPPED
                # Copy over metadata from the previous record. instead of
                # returning metadata about the current run.
                board = previous_record.board
                elapsed_time = float(previous_record.time)
                return

            self._WaitBuildStageInCIDB()
            ready = self.WaitUntilReady()
            if not ready:
                self._PrintLoudly(
                    "Stage %s precondition failed while waiting to start."
                    % self.name
                )
                # If WaitUntilReady is false, mark stage as skipped in Results
                # and CIDB
                result = results_lib.Results.SKIPPED
                return

            # Ready to start, mark buildStage as inflight in CIDB
            self._Print(
                "Preconditions for the stage successfully met. "
                "Beginning to execute stage..."
            )
            self._StartBuildStageInCIDB()

            start_time = time.time()
            sys.stdout.flush()
            sys.stderr.flush()
            # TODO(davidjames): Verify that PerformStage always returns None.
            # See crbug.com/264781
            self.PerformStage()
            result = results_lib.Results.SUCCESS
        except SystemExit as e:
            # NOTE(review): sys.exit() with no argument yields e.code of
            # None, and None != 0 is True, so that path is treated as a
            # failure here — confirm intentional.
            if e.code != 0:
                result, description, _ = self._TopHandleStageException()

            raise
        except Exception as e:
            if isinstance(e, failures_lib.ExitEarlyException):
                # One stage finished and exited early, not a failure.
                result = results_lib.Results.SUCCESS
                raise

            # Tell the build bot this step failed for the waterfall.
            result, description, retrying = self._TopHandleStageException()
            if result not in (
                results_lib.Results.FORGIVEN,
                results_lib.Results.SUCCESS,
            ):
                if isinstance(e, failures_lib.StepFailure):
                    raise
                else:
                    raise failures_lib.StepFailure()
            elif retrying:
                raise failures_lib.RetriableStepFailure()
        except BaseException:
            result, description, _ = self._TopHandleStageException()
            raise
        finally:
            # Some cases explicitly set a cidb status. For others, infer.
            if cidb_result is None:
                cidb_result = self._TranslateResultToCIDBStatus(result)
            if elapsed_time is None:
                elapsed_time = time.time() - start_time

            # NOTE(review): build_stage_id is passed as None rather than
            # self._build_stage_id — confirm intentional.
            self._RecordResult(
                self.name,
                result,
                description,
                prefix=self._prefix,
                board=board,
                time=elapsed_time,
                build_stage_id=None,
            )
            self._FinishBuildStageInCIDBAndMonarch(
                result, cidb_result, elapsed_time
            )

            try:
                self.Finish()
            except Exception as e:
                # Failures here are OUTSIDE of the stage and not handled well.
                # Log and continue with the assumption that the ReportStage will
                # re-upload this data or report a failure correctly.
                logging.warning("IGNORED: Finish failure: %s", e)

            self._PrintLoudly(
                "Finished Stage %s - %s"
                % (self.name, cros_build_lib.UserDateTimeFormat())
            )

            sys.stdout.flush()
            sys.stderr.flush()
| |
| |
class ForgivingBuilderStage(BuilderStage):
    """Build stage that turns a build step red but not a build."""

    def _HandleStageException(self, exc_info):
        """Override and don't set status to FAIL but FORGIVEN instead."""
        # All exceptions become warnings, so this stage never fails a build.
        return self._HandleExceptionAsWarning(exc_info)
| |
| |
class BoardSpecificBuilderStage(BuilderStage):
    """Builder stage that is specific to a board.

    The following attributes are provided on self:
        _current_board: The active board for this stage.
        board_runattrs: BoardRunAttributes object for this stage.
    """
| |
| def __init__( |
| self, builder_run, buildstore, board, suffix=None, **kwargs |
| ) -> None: |
| if not isinstance(board, str): |
| raise TypeError("Expected string, got %r" % (board,)) |
| |
| self._current_board = board |
| |
| self.board_runattrs = builder_run.GetBoardRunAttrs(board) |
| |
| # Add a board name suffix to differentiate between various boards (in |
| # case more than one board is built on a single builder.) |
| if len(builder_run.config.boards) > 1 or builder_run.config.grouped: |
| suffix = self.UpdateSuffix(board, suffix) |
| |
| super().__init__(builder_run, buildstore, suffix=suffix, **kwargs) |
| |
    def _RecordResult(self, *args, **kwargs) -> None:
        """Record a successful or failed result."""
        # Tag the result with this stage's board unless the caller set one.
        kwargs.setdefault("board", self._current_board)
        super()._RecordResult(*args, **kwargs)
| |
| def _InsertBuildStageInCIDB( |
| self, name, board=None, status=constants.BUILDER_STATUS_PLANNED |
| ) -> None: |
| """Insert a build stage in cidb.""" |
| if not board: |
| board = self._current_board |
| super()._InsertBuildStageInCIDB(name, board, status) |
| |
| def GetListOfPackagesToBuild(self): |
| """Returns a list of packages to build.""" |
| |
| # If we have defined explicit packages to build for ChromeOS Findit |
| # integration, add those to the list of _run.config.packages to build. |
| packages = [] |
| if self._run.options.cbb_build_packages: |
| logging.info( |
| "Adding list of packages to build for ChromeOS Findit: %s", |
| self._run.options.cbb_build_packages, |
| ) |
| packages += self._run.options.cbb_build_packages |
| if self._run.config.packages: |
| packages += self._run.config.packages |
| |
| # Short circuit if there are any Findit or explicit builds specified. |
| if packages: |
| return packages |
| |
| # TODO: the logic below is duplicated from the build_packages |
| # script. Once we switch to `cros build`, we should consolidate |
| # the logic in a shared location. |
| packages += [constants.TARGET_OS_PKG] |
| # Build Dev packages by default. |
| packages += [constants.TARGET_OS_DEV_PKG] |
| # Build test packages by default. |
| packages += [constants.TARGET_OS_TEST_PKG] |
| # Build factory packages if requested by config. |
| if self._run.config.factory: |
| packages += [ |
| "virtual/target-os-factory", |
| "virtual/target-os-factory-shim", |
| ] |
| |
| if self._run.ShouldBuildAutotest(): |
| packages += ["chromeos-base/autotest-all"] |
| |
| return packages |
| |
| def GetParallel(self, board_attr, timeout=None, pretty_name=None): |
| """Wait for given |board_attr| to show up. |
| |
| Args: |
| board_attr: A valid board runattribute name. |
| timeout: Timeout in seconds. None value means wait forever. |
| pretty_name: Optional name to use instead of raw board_attr in log |
| messages. |
| |
| Returns: |
| Value of board_attr found. |
| |
| Raises: |
| AttrTimeoutError if timeout occurs. |
| """ |
| timeout_str = "forever" |
| if timeout is not None: |
| timeout_str = "%d minutes" % int((timeout / 60.0) + 0.5) |
| |
| if pretty_name is None: |
| pretty_name = board_attr |
| |
| logging.info("Waiting up to %s for %s ...", timeout_str, pretty_name) |
| return self.board_runattrs.GetParallel(board_attr, timeout=timeout) |
| |
| def GetImageDirSymlink(self, pointer="latest-cbuildbot", buildroot=None): |
| """Get the location of the current image.""" |
| |
| if not buildroot: |
| buildroot = self._run.buildroot |
| |
| return os.path.join( |
| buildroot, "src", "build", "images", self._current_board, pointer |
| ) |
| |
| |
class ArchivingStageMixin:
    """Stage with utilities for uploading artifacts.

    This provides functionality for doing archiving. All it needs is access
    to the BuilderRun object at self._run. No __init__ needed.

    Attributes:
        acl: GS ACL to use for uploads.
        archive: Archive object.
        archive_path: Local path where archives are kept for this run. Also
            copy of self.archive.archive_path.
        download_url: The URL where artifacts for this run can be downloaded.
            Also copy of self.archive.download_url.
        upload_url: The Google Storage location where artifacts for this run
            should be uploaded. Also copy of self.archive.upload_url.
        version: Copy of self.archive.version.
    """

    # Number of background worker processes used by ArtifactUploader.
    PROCESSES = 10
| |
| @property |
| def archive(self): |
| """Retrieve the Archive object to use.""" |
| # pylint: disable=attribute-defined-outside-init |
| if not hasattr(self, "_archive"): |
| self._archive = self._run.GetArchive() |
| |
| return self._archive |
| |
    @property
    def acl(self):
        """Retrieve the GS ACL to use for uploads (from the Archive)."""
        return self.archive.upload_acl
| |
    # TODO(mtennant): Get rid of this property.
    @property
    def version(self):
        """Retrieve the ChromeOS version for the archiving."""
        return self.archive.version
| |
    @property
    def archive_path(self):
        """Local path where archives are kept for this run."""
        return self.archive.archive_path
| |
    @property
    def upload_url(self):
        """The GS location where artifacts should be uploaded for this run."""
        return self.archive.upload_url
| |
    @property
    def download_url(self):
        """The URL where artifacts for this run can be downloaded."""
        return self.archive.download_url
| |
| @contextlib.contextmanager |
| def ArtifactUploader(self, queue=None, archive=True, strict=True): |
| """Upload each queued input in the background. |
| |
| This context manager starts a set of workers in the background, who each |
| wait for input on the specified queue. These workers run |
| self.UploadArtifact(*args, archive=archive) for each input in the queue. |
| |
| Args: |
| queue: Queue to use. Add artifacts to this queue, and they will be |
| uploaded in the background. If None, one will be created on the |
| fly. |
| archive: Whether to automatically copy files to the archive dir. |
| strict: Whether to treat upload errors as fatal. |
| |
| Returns: |
| The queue to use. This is only useful if you did not supply a queue. |
| """ |
| upload = lambda path: self.UploadArtifact(path, archive, strict) |
| with parallel.BackgroundTaskRunner( |
| upload, queue=queue, processes=self.PROCESSES |
| ) as bg_queue: |
| yield bg_queue |
| |
| def PrintDownloadLink(self, filename, prefix="", text_to_display=None): |
| """Log a link to an artifact in Google Storage and return the URL. |
| |
| Args: |
| filename: The filename of the uploaded file. |
| prefix: The prefix to put in front of the filename. |
| text_to_display: Text to display. If None, use |prefix| + |
| |filename|. |
| |
| Returns: |
| The download URL. |
| """ |
| url = "%s/%s" % (self.download_url.rstrip("/"), filename) |
| if not text_to_display: |
| text_to_display = "%s%s" % (prefix, filename) |
| cbuildbot_alerts.PrintBuildbotLink(text_to_display, url) |
| return url |
| |
| def _MayBeUploaded(self, filename): |
| """Check if this file is allowed to go into a board's extra buckets. |
| |
| Args: |
| filename: The filename of the file we want to check. |
| |
| Returns: |
| True if the file may be uploaded, False otherwise. |
| """ |
| return not any( |
| fnmatch.fnmatch(filename, x) |
| for x in constants.EXTRA_BUCKETS_FILES_BLOCKLIST |
| ) |
| |
| def _FilterBuildFromMoblab(self, url, bot_id): |
| """Deteminine if this is a build that should not be copied to moblab. |
| |
| Args: |
| url: The gs url of the target bucket. |
| bot_id: The name of the bot |
| |
| Returns: |
| True is the build should not be copied to this moblab url |
| """ |
| bot_filter_list = [ |
| "paladin", |
| "trybot", |
| "pfq", |
| "pre-cq", |
| "tryjob", |
| "postsubmit", |
| ] |
| # Jetstream are using the moblab buckets as a general |
| # build distribution system, not related to moblab. |
| # This code ensutes that jetstream is left in the |
| # prior behavior, where moblab users now use an on demand |
| # copy via an API vs the "copy everything" approach. |
| if "chromeos-moblab-jetstream" in url: |
| return any(bot_filter in bot_id for bot_filter in bot_filter_list) |
| return True |
| |
    def _GetUploadUrls(self, filename, builder_run=None, prefix=None):
        """Returns a list of all urls for which to upload filename to.

        Args:
            filename: The filename of the file we want to upload.
            builder_run: builder_run object from which to get the board, base
                upload url, and bot_id. If none, this stage's values.
            prefix: When not None, add an additional directory prefix by this
                value.

        Returns:
            List of GS directory URLs that |filename| should be uploaded to.
        """
        board = None
        urls = [self.upload_url]
        bot_id = self._bot_id
        if builder_run:
            # Use the given builder_run's archive location/bot instead of
            # this stage's defaults.
            urls = [builder_run.GetArchive().upload_url]
            bot_id = builder_run.GetArchive().bot_id
            if (
                builder_run.config["boards"]
                and len(builder_run.config["boards"]) == 1
            ):
                # Only a single-board config gives an unambiguous board.
                board = builder_run.config["boards"][0]
        # Extra per-board buckets only apply to non-blocklisted files, and
        # only when a board can be determined (either from builder_run above
        # or from this stage's _current_board attribute).
        if self._MayBeUploaded(filename) and (
            hasattr(self, "_current_board") or board
        ):
            board = board or self._current_board
            custom_artifacts_file = portage_util.ReadOverlayFile(
                "scripts/artifacts.json",
                board=board,
                buildroot=self._build_root,
            )
            if custom_artifacts_file is not None:
                json_file = json.loads(custom_artifacts_file)
                for url in json_file.get("extra_upload_urls", []):
                    # Moblab users do not need the paladin, pfq or trybot
                    # builds, filter those bots from extra uploads.
                    if self._FilterBuildFromMoblab(url, bot_id):
                        continue
                    urls.append("/".join([url, bot_id, self.version]))
        if prefix:
            # Append the extra directory component to every target URL.
            urls = [u + "/" + prefix for u in urls]
        return urls
| |
    @failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
    def UploadArtifact(
        self, path, archive=True, strict=True, prefix=None
    ) -> None:
        """Upload generated artifact to Google Storage.

        Args:
            path: Path of local file to upload to Google Storage if |archive| is
                True. Otherwise, this is the name of the file in
                self.archive_path.
            archive: Whether to automatically copy files to the archive dir.
            strict: Whether to treat upload errors as fatal.
            prefix: When not None, add an additional directory prefix by this
                value.
        """
        filename = path
        if archive:
            # Copy |path| into the archive dir; upload by its basename.
            filename = commands.ArchiveFile(path, self.archive_path)
        upload_urls = self._GetUploadUrls(filename, prefix=prefix)
        try:
            commands.UploadArchivedFile(
                self.archive_path,
                upload_urls,
                filename,
                self._run.options.debug_forced,
                update_list=True,
                acl=self.acl,
            )
        except failures_lib.GSUploadFailure as e:
            cbuildbot_alerts.PrintBuildbotStepText("Upload failed")
            # Disposition order matters: a failure that is fatal (i.e. not
            # solely gsutil/timeout flake) always re-raises; otherwise
            # |strict| decides between re-raising and downgrading.
            if e.HasFatalFailure(
                exempt_exception_list=[
                    gs.GSContextException,
                    timeout_util.TimeoutError,
                ]
            ):
                raise
            elif strict:
                raise
            else:
                # Treat gsutil flake as a warning if it's the only problem.
                self._HandleExceptionAsWarning(sys.exc_info())
    @failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
    def UploadMetadata(
        self, upload_queue=None, filename=constants.METADATA_JSON
    ) -> bool:
        """Create & upload JSON file of the builder run's metadata, and to cidb.

        This uses the existing metadata stored in the builder run. The default
        metadata.json file should only be uploaded once, at the end of the run,
        and considered immutable. During the build, intermediate metadata
        snapshots can be uploaded to other files, such as partial-metadata.json.

        This method also updates the metadata in the cidb database, if there is
        a valid cidb connection set up.

        Args:
            upload_queue: If specified then put the artifact file to upload on
                this queue. If None then upload it directly now.
            filename: Name of file to dump metadata to. Defaults to
                constants.METADATA_JSON

        Returns:
            True if the metadata was also written to the database; False if
            no database connection was available. (The file write/upload
            itself is attempted unconditionally either way.)
        """
        metadata_json = os.path.join(self.archive_path, filename)

        # Stages may run in parallel, so we have to do atomic updates on this.
        logging.info("Writing metadata to %s.", metadata_json)
        osutils.WriteFile(
            metadata_json,
            self._run.attrs.metadata.GetJSON(),
            atomic=True,
            makedirs=True,
        )

        if upload_queue is not None:
            logging.info(
                "Adding metadata file %s to upload queue.", metadata_json
            )
            upload_queue.put([filename])
        else:
            logging.info("Uploading metadata file %s now.", metadata_json)
            # File already lives in archive_path, so skip the archive copy.
            self.UploadArtifact(filename, archive=False)

        build_identifier, _ = self._run.GetCIDBHandle()
        build_id = build_identifier.cidb_id
        if self.buildstore.AreClientsReady():
            logging.info(
                "Writing updated metadata to database for build_id %s.",
                build_id,
            )
            self.buildstore.UpdateMetadata(build_id, self._run.attrs.metadata)
        else:
            logging.info("Skipping database update, no database or build_id.")
            return False
        return True