From 50240fc74b26e8cc75ed09876dfb8b3835f462ba Mon Sep 17 00:00:00 2001
From: Saroj <43822041+sarojsarab@users.noreply.github.com>
Date: Fri, 7 Jul 2023 12:26:55 +0530
Subject: [PATCH] ci: Add command to run ci-test-limited (#25180)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
## Description
- Add command to run ci-test-limited
#### PR fixes following issue(s)
Fixes # (issue number)
> if no issue exists, please create an issue and ask the maintainers
about this first
>
>
#### Media
> A video or a GIF is preferred. when using Loom, don't embed because it
looks like it's a GIF. instead, just link to the video
>
>
#### Type of change
> Please delete options that are not relevant.
- Bug fix (non-breaking change which fixes an issue)
- New feature (non-breaking change which adds functionality)
- Breaking change (fix or feature that would cause existing
functionality to not work as expected)
- Chore (housekeeping or task changes that don't impact user perception)
- This change requires a documentation update
>
>
>
## Testing
>
#### How Has This Been Tested?
> Please describe the tests that you ran to verify your changes. Also
list any relevant details for your test configuration.
> Delete anything that is not relevant
- [ ] Manual
- [ ] Jest
- [ ] Cypress
>
>
#### Test Plan
> Add Testsmith test cases links that relate to this PR
>
>
#### Issues raised during DP testing
> Link issues raised during DP testing for better visibility and tracking
(copy link from comments dropped on this PR)
>
>
>
## Checklist:
#### Dev activity
- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my
feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] PR is being merged under a feature flag
#### QA activity:
- [ ] [Speedbreak
features](https://github.com/appsmithorg/TestSmith/wiki/Guidelines-for-test-plans#speedbreakers-)
have been covered
- [ ] Test plan covers all impacted features and [areas of
interest](https://github.com/appsmithorg/TestSmith/wiki/Guidelines-for-test-plans#areas-of-interest-)
- [ ] Test plan has been peer reviewed by project stakeholders and other
QA members
- [ ] Manually tested functionality on DP
- [ ] We had an implementation alignment call with stakeholders post QA
Round 2
- [ ] Cypress test cases have been added and approved by SDET/manual QA
- [ ] Added `Test Plan Approved` label after Cypress tests were reviewed
- [ ] Added `Test Plan Approved` label after JUnit tests were reviewed
---
.github/workflows/build-client-server.yml | 288 ++++++++++++++++--
.github/workflows/ci-test-limited-command.yml | 22 ++
.github/workflows/ci-test-limited.yml | 45 ++-
3 files changed, 329 insertions(+), 26 deletions(-)
create mode 100644 .github/workflows/ci-test-limited-command.yml
diff --git a/.github/workflows/build-client-server.yml b/.github/workflows/build-client-server.yml
index 6ba3c6020d..6bc5d71b9b 100644
--- a/.github/workflows/build-client-server.yml
+++ b/.github/workflows/build-client-server.yml
@@ -1,6 +1,8 @@
name: Build Client, Server & Run only Cypress
on:
+ repository_dispatch:
+ types: [ ci-test-limit-command ]
# This workflow can be triggered manually from the GitHub Actions page
workflow_dispatch:
inputs:
@@ -17,11 +19,42 @@ jobs:
outputs:
non_ts_files: ${{ steps.check_files.outputs.non_ts_files }}
non_ts_files_count: ${{ steps.check_files.outputs.non_ts_files_count }}
+ pr: ${{steps.args.outputs.pr}}
+ runId: ${{steps.args.outputs.runId}}
steps:
- name: Checkout the head commit of the branch
uses: actions/checkout@v3
with:
fetch-depth: 0
+
+ - name: Get the PR number if workflow is triggered manually
+ id: fetch_pr
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ res=`curl -s -H "Authorization: Bearer ${{ secrets.APPSMITH_CI_TEST_PAT}}" https://api.github.com/repos/${{ github.repository }}/pulls?head=appsmithorg:${{ github.ref_name }}`
+ response_length=`echo "$res" | jq -r 'length'`
+ if [[ $response_length -ne 0 ]]; then
+ pr_number=`echo $res | jq -r '.[0] | .number'`
+ echo "pr=$pr_number" >> $GITHUB_OUTPUT
+ else
+ echo "pr=0" >> $GITHUB_OUTPUT
+ fi
+
+ - name: Set args
+ id: args
+ run: |
+ if [[ ${{github.event_name}} == 'repository_dispatch' ]]; then
+ echo "pr=${{ github.event.client_payload.pull_request.number }}" >> $GITHUB_OUTPUT
+ checkArg=`echo '${{toJSON(github.event.client_payload.slash_command.args.named)}}' | jq 'has("runId")'`
+ if [[ $checkArg == 'true' ]]; then
+ echo "runId=${{ github.event.client_payload.slash_command.args.named.runId }}" >> $GITHUB_OUTPUT
+ else
+ echo "runId=0" >> $GITHUB_OUTPUT
+ fi
+ else
+ echo "runId=${{ inputs.previous_run_id }}" >> $GITHUB_OUTPUT
+ echo "pr=${{ steps.fetch_pr.outputs.pr }}" >> $GITHUB_OUTPUT
+ fi
- name: Get the diff from base branch
continue-on-error: true
@@ -39,74 +72,293 @@ jobs:
non_ts_files=()
for file in "${files[@]}"; do
if [[ $file != *.ts ]]; then
- non_ts_files+=("$file")
+ non_ts_files+=("
$file")
fi
done
echo "non_ts_files=${non_ts_files[@]}" >> $GITHUB_OUTPUT
echo "non_ts_files_count=${#non_ts_files[@]}" >> $GITHUB_OUTPUT
- name: Print the files
- if: steps.check_files.outputs.non_ts_files_count != 0
+ if: steps.check_files.outputs.non_ts_files_count != 0 && steps.args.outputs.pr == '0'
run: |
echo "${{ steps.check_files.outputs.non_ts_files }}"
- exit 1
+
+ - name: Comment the filenames if PR is there
+ if: steps.check_files.outputs.non_ts_files_count != 0 && steps.args.outputs.pr != '0'
+ uses: peter-evans/create-or-update-comment@v1
+ with:
+ issue-number: ${{ fromJson(steps.args.outputs.pr) }}
+ body: |
+            Below new test files are written in js 🔴
+ Expected format ts. Please fix and retrigger ci-test-limit:
+ ${{ steps.check_files.outputs.non_ts_files }}
+ - if: steps.check_files.outputs.non_ts_files_count != 0
+ run: exit 1
+
+ - name: Add a comment on the PR with link to workflow run
+ if: success() && steps.args.outputs.pr != '0'
+ uses: peter-evans/create-or-update-comment@v2
+ with:
+ issue-number: ${{ fromJson(steps.args.outputs.pr) }}
+ body: |
+ Tests running at: <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}>.
+ Workflow: `ci-test-limited`.
+ PR: ${{ fromJson(steps.args.outputs.pr) }}.
server-build:
name: server-build
needs: [file-check]
- if: success() && inputs.previous_run_id == '0'
+ if: success() && needs.file-check.outputs.runId == '0'
uses: ./.github/workflows/server-build.yml
secrets: inherit
with:
- pr: 0
+ pr: ${{fromJson(needs.file-check.outputs.pr)}}
skip-tests: "true"
client-build:
name: client-build
needs: [file-check]
- if: success() && inputs.previous_run_id == '0'
+ if: success() && needs.file-check.outputs.runId == '0'
uses: ./.github/workflows/client-build.yml
secrets: inherit
with:
- pr: 0
+ pr: ${{fromJson(needs.file-check.outputs.pr)}}
skip-tests: "true"
rts-build:
name: rts-build
needs: [file-check]
- if: success() && inputs.previous_run_id == '0'
+ if: success() && needs.file-check.outputs.runId == '0'
uses: ./.github/workflows/rts-build.yml
secrets: inherit
with:
- pr: 0
+ pr: ${{fromJson(needs.file-check.outputs.pr)}}
build-docker-image:
- needs: [ client-build, server-build, rts-build ]
+ needs: [ file-check, client-build, server-build, rts-build ]
# Only run if the build step is successful
- if: success() && inputs.previous_run_id == '0'
+ if: success() && needs.file-check.outputs.runId == '0'
name: build-docker-image
uses: ./.github/workflows/build-docker-image.yml
secrets: inherit
with:
- pr: 0
+ pr: ${{fromJson(needs.file-check.outputs.pr)}}
ci-test-limited:
- needs: [ build-docker-image ]
+ needs: [ file-check, build-docker-image ]
# Only run if the build step is successful
- if: success() && inputs.previous_run_id == '0'
+ if: success() && needs.file-check.outputs.runId == '0'
name: ci-test-limited
uses: ./.github/workflows/ci-test-limited.yml
secrets: inherit
with:
- pr: 0
+ pr: ${{fromJson(needs.file-check.outputs.pr)}}
ci-test-limited-existing-docker-image:
needs: [file-check]
# Only run if the previous run-id is provided
- if: success() && inputs.previous_run_id != '0'
+ if: success() && needs.file-check.outputs.runId != '0'
name: ci-test-limited-existing-image
uses: ./.github/workflows/ci-test-limited.yml
secrets: inherit
with:
- pr: 0
- previous-workflow-run-id: ${{ fromJson(inputs.previous_run_id) }}
+ pr: ${{fromJson(needs.file-check.outputs.pr)}}
+ previous-workflow-run-id: ${{ fromJson(needs.file-check.outputs.runId) }}
+
+ ci-test-result:
+ needs: [ file-check, ci-test-limited ]
+ # Only run if the ci-test-limited with matrices step is successful
+ if: always()
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ shell: bash
+ steps:
+ # Deleting the existing dir's if any
+ - name: Delete existing directories
+ if: needs.ci-test-limited.result != 'success'
+ run: |
+ rm -f ~/failed_spec_ci
+ rm -f ~/combined_failed_spec_ci
+
+ # Force store previous cypress dashboard url from cache
+ - name: Store the previous cypress dashboard url
+ if: success()
+ uses: actions/cache@v3
+ with:
+ path: |
+ ~/cypress_url
+ key: ${{ github.run_id }}-dashboard-url-${{ github.run_attempt }}
+ restore-keys: |
+ ${{ github.run_id }}-dashboard-url
+
+ - name: Print cypress dashboard url
+ id: dashboard_url
+ run: |
+ cypress_url=$(cat ~/cypress_url)
+ echo "dashboard_url=$cypress_url" >> $GITHUB_OUTPUT
+
+ # Download failed_spec list for all jobs
+ - uses: actions/download-artifact@v3
+ if: needs.ci-test-limited.result != 'success'
+ id: download_ci
+ with:
+ name: failed-spec-ci
+ path: ~/failed_spec_ci
+
+ # In case for any ci job failure, create combined failed spec
+ - name: "combine all specs for CI"
+ if: needs.ci-test-limited.result != 'success'
+ run: |
+ echo "Debugging: failed specs in ~/failed_spec_ci/failed_spec_ci*"
+ cat ~/failed_spec_ci/failed_spec_ci*
+ cat ~/failed_spec_ci/failed_spec_ci* | sort -u >> ~/combined_failed_spec_ci
+
+ # Upload combined failed CI spec list to a file
+ # This is done for debugging.
+ - name: upload combined failed spec
+ if: needs.ci-test-limited.result != 'success'
+ uses: actions/upload-artifact@v3
+ with:
+ name: combined_failed_spec_ci
+ path: ~/combined_failed_spec_ci
+
+ - name: Get Latest flaky Tests
+ shell: bash
+ run: |
+ curl --request POST --url https://yatin-s-workspace-jk8ru5.us-east-1.xata.sh/db/CypressKnownFailures:main/tables/CypressKnownFailuires/query --header 'Authorization: Bearer ${{ secrets.XATA_TOKEN }}' --header 'Content-Type: application/json'|jq -r |grep Spec|cut -d ':' -f 2 2> /dev/null|sed 's/"//g'|sed 's/,//g' > ~/knownfailures
+
+ # Verify CI test failures against known failures
+ - name: Verify CI test failures against known failures
+ if: needs.ci-test-limited.result != 'success'
+ shell: bash
+ run: |
+          new_failed_spec_env="$(comm -1 -3 <(sort ~/knownfailures) <(sort -u ~/combined_failed_spec_ci) | sed 's/|cypress|cypress/\n/g' | sed 's/^/- /')"
+ echo "$new_failed_spec_env"
+          echo "new_failed_spec_env<<EOF" >> $GITHUB_ENV
+ echo "$new_failed_spec_env" >> $GITHUB_ENV
+ echo "EOF" >> $GITHUB_ENV
+
+ - name: Add a comment on the PR with new CI failures
+ if: needs.ci-test-limited.result != 'success'
+ uses: peter-evans/create-or-update-comment@v1
+ with:
+ issue-number: ${{fromJson(needs.file-check.outputs.pr)}}
+ body: |
+ Workflow run: <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}>.
+ Cypress dashboard: Click here!
+ The following are new failures, please fix them before merging the PR: ${{env.new_failed_spec_env}}
+ To know the list of identified flaky tests - Refer here
+
+ - name: Add a comment on the PR when ci-test-limited is success
+ if: needs.ci-test-limited.result == 'success'
+ uses: peter-evans/create-or-update-comment@v1
+ with:
+ issue-number: ${{fromJson(needs.file-check.outputs.pr)}}
+ body: |
+ Workflow run: <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}>.
+ Cypress dashboard url: Click here!
+            All cypress tests have passed 🎉🎉🎉
+
+ - name: Check ci-test-limited set status
+ if: needs.ci-test-limited.result != 'success'
+ run: exit 1
+
+ ci-test-result-existing:
+ needs: [ file-check, ci-test-limited-existing-docker-image ]
+ # Only run if the ci-test-limited with matrices step is successful
+ if: needs.file-check.outputs.runId != '0'
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ shell: bash
+ steps:
+ # Deleting the existing dir's if any
+ - name: Delete existing directories
+ if: needs.ci-test-limited-existing-docker-image.result != 'success'
+ run: |
+ rm -f ~/failed_spec_ci
+ rm -f ~/combined_failed_spec_ci
+
+ # Force store previous cypress dashboard url from cache
+ - name: Store the previous cypress dashboard url
+ if: success()
+ uses: actions/cache@v3
+ with:
+ path: |
+ ~/cypress_url
+ key: ${{ github.run_id }}-dashboard-url-${{ github.run_attempt }}
+ restore-keys: |
+ ${{ github.run_id }}-dashboard-url
+
+ - name: Print cypress dashboard url
+ id: dashboard_url
+ run: |
+ cypress_url=$(cat ~/cypress_url)
+ echo "dashboard_url=$cypress_url" >> $GITHUB_OUTPUT
+
+ # Download failed_spec list for all jobs
+ - uses: actions/download-artifact@v3
+ if: needs.ci-test-limited-existing-docker-image.result != 'success'
+ id: download_ci
+ with:
+ name: failed-spec-ci
+ path: ~/failed_spec_ci
+
+ # In case for any ci job failure, create combined failed spec
+ - name: "combine all specs for CI"
+ if: needs.ci-test-limited-existing-docker-image.result != 'success'
+ run: |
+ echo "Debugging: failed specs in ~/failed_spec_ci/failed_spec_ci*"
+ cat ~/failed_spec_ci/failed_spec_ci*
+ cat ~/failed_spec_ci/failed_spec_ci* | sort -u >> ~/combined_failed_spec_ci
+
+ # Upload combined failed CI spec list to a file
+ # This is done for debugging.
+ - name: upload combined failed spec
+ if: needs.ci-test-limited-existing-docker-image.result != 'success'
+ uses: actions/upload-artifact@v3
+ with:
+ name: combined_failed_spec_ci
+ path: ~/combined_failed_spec_ci
+
+ - name: Get Latest flaky Tests
+ shell: bash
+ run: |
+ curl --request POST --url https://yatin-s-workspace-jk8ru5.us-east-1.xata.sh/db/CypressKnownFailures:main/tables/CypressKnownFailuires/query --header 'Authorization: Bearer ${{ secrets.XATA_TOKEN }}' --header 'Content-Type: application/json'|jq -r |grep Spec|cut -d ':' -f 2 2> /dev/null|sed 's/"//g'|sed 's/,//g' > ~/knownfailures
+
+ # Verify CI test failures against known failures
+ - name: Verify CI test failures against known failures
+ if: needs.ci-test-limited-existing-docker-image.result != 'success'
+ shell: bash
+ run: |
+          new_failed_spec_env="$(comm -1 -3 <(sort ~/knownfailures) <(sort -u ~/combined_failed_spec_ci) | sed 's/|cypress|cypress/\n/g' | sed 's/^/- /')"
+ echo "$new_failed_spec_env"
+          echo "new_failed_spec_env<<EOF" >> $GITHUB_ENV
+ echo "$new_failed_spec_env" >> $GITHUB_ENV
+ echo "EOF" >> $GITHUB_ENV
+
+ - name: Add a comment on the PR with new CI failures
+ if: needs.ci-test-limited-existing-docker-image.result != 'success' && needs.file-check.outputs.pr != '0'
+ uses: peter-evans/create-or-update-comment@v1
+ with:
+ issue-number: ${{fromJson(needs.file-check.outputs.pr)}}
+ body: |
+ Workflow run: <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}>.
+ Cypress dashboard: Click here!
+ The following are new failures, please fix them before merging the PR: ${{env.new_failed_spec_env}}
+ To know the list of identified flaky tests - Refer here
+
+ - name: Add a comment on the PR when ci-test-limited-existing-docker-image is success
+ if: needs.ci-test-limited-existing-docker-image.result == 'success' && needs.file-check.outputs.pr != '0'
+ uses: peter-evans/create-or-update-comment@v1
+ with:
+ issue-number: ${{fromJson(needs.file-check.outputs.pr)}}
+ body: |
+ Workflow run: <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}>.
+ Cypress dashboard url: Click here!
+            All cypress tests have passed 🎉🎉🎉
+
+ - name: Check ci-test-limited-existing-docker-image set status
+ if: needs.ci-test-limited-existing-docker-image.result != 'success'
+ run: exit 1
diff --git a/.github/workflows/ci-test-limited-command.yml b/.github/workflows/ci-test-limited-command.yml
new file mode 100644
index 0000000000..b4e360842e
--- /dev/null
+++ b/.github/workflows/ci-test-limited-command.yml
@@ -0,0 +1,22 @@
+# If someone with write access comments "/ci-test-limit" on a pull request, emit a repository_dispatch event
+name: CI Test Limited Command
+
+on:
+ issue_comment:
+ types: [created]
+
+jobs:
+ ci-test-limit:
+ runs-on: ubuntu-latest
+ # Only run for PRs, not issue comments
+ if: |
+ github.event.issue.pull_request
+ steps:
+
+ - name: Slash Command Dispatch
+ uses: peter-evans/slash-command-dispatch@v3
+ with:
+ issue-type: pull-request
+ token: ${{ secrets.APPSMITH_CI_TEST_PAT }}
+ commands: |
+ ci-test-limit
\ No newline at end of file
diff --git a/.github/workflows/ci-test-limited.yml b/.github/workflows/ci-test-limited.yml
index 5fad2376ee..e4311793c7 100644
--- a/.github/workflows/ci-test-limited.yml
+++ b/.github/workflows/ci-test-limited.yml
@@ -87,20 +87,28 @@ jobs:
run_result_env=$(cat ~/run_result)
echo "::set-output name=run_result::$run_result_env"
- # Download failed_spec list for all jobs in case of rerun
- - uses: actions/download-artifact@v3
+ # In case this is second attempt try restoring failed tests
+ - name: Restore the previous failed combine result
if: steps.run_result.outputs.run_result == 'failedtest'
+ uses: actions/download-artifact@v3
with:
- name: failed-spec-ci
- path: ~/failed_spec_ci
+ name: combined_failed_spec_ci
+ path: ~/combined_failed_spec_ci
- # In case of rerun combine all the failed specs and set it in env
- - name: combine all specs for CI in case of rerun
+ # failed_spec_env will contain list of all failed specs
+ # We are using environment variable instead of regular to support multiline
+ - name: Get failed_spec
+ id: failed_spec
if: steps.run_result.outputs.run_result == 'failedtest'
+ working-directory: app/client
run: |
- failed_spec_env=$(cat ~/failed_spec_ci/failed_spec_ci* | sort -u)
          echo "failed_spec_env<<EOF" >> $GITHUB_ENV
- echo "$failed_spec_env" >> $GITHUB_ENV
+ while IFS= read -r line
+ do
+ spec_name=$(echo $line | awk -F'/' '{print $NF}')
+ failed_spec_env=$(find . -name $spec_name | sed 's|./||')
+ echo "$failed_spec_env" >> $GITHUB_ENV
+ done < ~/combined_failed_spec_ci/combined_failed_spec_ci
echo "EOF" >> $GITHUB_ENV
# Get specs to run
@@ -270,6 +278,7 @@ jobs:
echo Remote $COMMIT_INFO_REMOTE
- name: Run the cypress test
+ id: cypress_test
if: steps.run_result.outputs.run_result != 'success' && steps.run_result.outputs.run_result != 'failedtest'
uses: cypress-io/github-action@v5
env:
@@ -331,6 +340,7 @@ jobs:
# In case of second attempt only run failed specs
- name: Run the cypress test with failed tests
+ id: cypress_test_failedtest
if: steps.run_result.outputs.run_result == 'failedtest'
uses: cypress-io/github-action@v5
env:
@@ -460,6 +470,25 @@ jobs:
name: server-logs-${{ matrix.job }}
path: app/server/server-logs.log
+ - name: get cypress url dashboard url
+ id: dashboard_url
+ if: always()
+ run: |
+ if [[ "${{steps.run_result.outputs.run_result }}" != "success" && "${{steps.run_result.outputs.run_result }}" != "failedtest" ]]; then
+ echo ${{ steps.cypress_test.outputs.resultsUrl }} >> ~/cypress_url
+ elif [[ "${{steps.run_result.outputs.run_result }}" == "failedtest" ]]; then
+ echo ${{ steps.cypress_test_failedtest.outputs.resultsUrl }} >> ~/cypress_url
+ fi
+
+ # Force store previous run result to cache
+ - name: Store the previous run result
+ if: success()
+ uses: actions/cache/save@v3
+ with:
+ path: |
+ ~/cypress_url
+ key: ${{ github.run_id }}-dashboard-url-${{ github.run_attempt }}
+
# Set status = success
- name: Save the status of the run
run: |