# This file defines a workflow that runs the libc++ benchmarks when a comment is added to the PR.
#
# The comment is of the form:
#
# /libcxx-bot benchmark <path-to-benchmarks-to-run>
#
# That will cause the specified benchmarks to be run on the PR and on the pull-request target, and
# their results to be compared.
---
name: Benchmark libc++

# Default to read-only; the job below requests the extra scope it needs.
permissions:
  contents: read

on:
  issue_comment:
    types:
      - created
      - edited

env:
  CC: clang-22
  CXX: clang++-22

jobs:
  run-benchmarks:
    # Needed to edit the triggering comment with a job link and the results.
    permissions:
      pull-requests: write

    # Only react to PR comments (issue_comment also fires on plain issues) that
    # contain the bot trigger phrase.
    # NOTE(review): any user who can comment on the PR can trigger this job;
    # consider also gating on github.event.comment.author_association
    # (OWNER/MEMBER/COLLABORATOR) — TODO confirm this is intended.
    if: >-
      github.event.issue.pull_request &&
      contains(github.event.comment.body, '/libcxx-bot benchmark')

    runs-on: llvm-premerge-libcxx-next-runners # TODO: This should run on a dedicated set of machines
    steps:
      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: '3.14'

      - name: Setup virtual environment
        run: |
          python3 -m venv .venv
          source .venv/bin/activate
          python -m pip install pygithub==2.8.1

      # Resolve the PR's base/head SHAs via the GitHub API and extract the
      # benchmark selection from the comment. The comment body is passed
      # through an env var (not interpolated into the script) to avoid shell
      # injection; issue number and comment id are numeric expressions.
      - name: Extract information from the PR
        id: vars
        env:
          COMMENT_BODY: ${{ github.event.comment.body }}
        run: |
          source .venv/bin/activate
          # ${{ ... }} expressions are expanded by GitHub before the shell
          # runs, so the unquoted heredoc below only sees literal text.
          # NOTE(review): this inlines ${{ github.token }} into the script;
          # passing it via an env var would keep it out of the process text.
          cat <<EOF | python >> "$GITHUB_OUTPUT"
          import github
          repo = github.Github(auth=github.Auth.Token("${{ github.token }}")).get_repo("${{ github.repository }}")
          pr = repo.get_pull(${{ github.event.issue.number }})
          print(f"pr_base={pr.base.sha}")
          print(f"pr_head={pr.head.sha}")
          EOF
          BENCHMARKS=$(echo "$COMMENT_BODY" | sed -nE 's/\/libcxx-bot benchmark (.+)/\1/p')
          echo "benchmarks=${BENCHMARKS}" >> "$GITHUB_OUTPUT"

      - name: Update comment with link to the job
        run: |
          source .venv/bin/activate
          cat <<EOF | python
          import github
          repo = github.Github(auth=github.Auth.Token("${{ github.token }}")).get_repo("${{ github.repository }}")
          pr = repo.get_pull(${{ github.event.issue.number }})
          comment = pr.get_issue_comment(${{ github.event.comment.id }})
          add_text = "> _Running benchmarks in ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/job/${{ job.check_run_id }}_"
          comment.edit('\n\n'.join([comment.body, add_text]))
          EOF

      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          persist-credentials: false
          ref: ${{ steps.vars.outputs.pr_head }}
          fetch-depth: 0
          fetch-tags: true # This job requires access to all the Git branches so it can diff against (usually) main
          path: repo # Avoid nuking the workspace, where we have the Python virtualenv

      # Baseline = merge-base of the PR's base and head, so we compare against
      # the commit the PR actually diverged from.
      - name: Build and run baseline
        env:
          BENCHMARKS: ${{ steps.vars.outputs.benchmarks }}
        run: |
          source .venv/bin/activate && cd repo
          python -m pip install -r libcxx/utils/requirements.txt
          baseline_commit=$(git merge-base ${{ steps.vars.outputs.pr_base }} ${{ steps.vars.outputs.pr_head }})
          ./libcxx/utils/build-at-commit --commit ${baseline_commit} --install-dir install/baseline -- -DCMAKE_BUILD_TYPE=RelWithDebInfo
          ./libcxx/utils/test-at-commit --libcxx-installation install/baseline -B benchmarks/baseline -- -sv -j1 --param optimization=speed "$BENCHMARKS"
          ./libcxx/utils/consolidate-benchmarks benchmarks/baseline | tee baseline.lnt

      - name: Build and run candidate
        env:
          BENCHMARKS: ${{ steps.vars.outputs.benchmarks }}
        run: |
          source .venv/bin/activate && cd repo
          ./libcxx/utils/build-at-commit --commit ${{ steps.vars.outputs.pr_head }} --install-dir install/candidate -- -DCMAKE_BUILD_TYPE=RelWithDebInfo
          ./libcxx/utils/test-at-commit --libcxx-installation install/candidate -B benchmarks/candidate -- -sv -j1 --param optimization=speed "$BENCHMARKS"
          ./libcxx/utils/consolidate-benchmarks benchmarks/candidate | tee candidate.lnt

      - name: Compare baseline and candidate runs
        run: |
          source .venv/bin/activate && cd repo
          ./libcxx/utils/compare-benchmarks baseline.lnt candidate.lnt | tee results.txt

      # Append the comparison to the original comment inside a collapsible
      # <details> section.
      - name: Update comment with results
        run: |
          source .venv/bin/activate && cd repo
          cat <<EOF | python
          import github
          repo = github.Github(auth=github.Auth.Token("${{ github.token }}")).get_repo("${{ github.repository }}")
          pr = repo.get_pull(${{ github.event.issue.number }})
          comment = pr.get_issue_comment(${{ github.event.comment.id }})
          with open('results.txt', 'r') as f:
              benchmark_results = f.read()

          new_comment_text = f"""
          {comment.body}

          <details>
          <summary>
          Benchmark results:
          </summary>

          \`\`\`
          {benchmark_results}
          \`\`\`

          </details>
          """

          comment.edit(new_comment_text)
          EOF