
Reduce Test Workflow Time by 45% and Enable Qlty Coverage on Main (#6374)

* improving test workflow

* testing pipeline improvement

* testing new run

* adding missing protos

* adding previous cache + restoring dev dep version

* restoring webview package lock

* scripts update

* fixing old flaky test

* fixing old flaky test

* changeset update

* adding test-platform-integration again

* adding quality check for integration platform
Jose Castelli · 3 months ago · commit d1fc59758e

+ 5 - 0
.changeset/chatty-turkeys-return.md

@@ -0,0 +1,5 @@
+---
+"claude-dev": patch
+---
+
+Enhance Test Workflow and Report Coverage to Qlty on Main

+ 79 - 148
.github/workflows/test.yml

@@ -1,6 +1,9 @@
 name: Tests
 
 on:
+    push:
+        branches:
+            - main
     workflow_dispatch:
     pull_request:
         branches:
@@ -14,7 +17,45 @@ permissions:
     pull-requests: write # Needed to add comments/annotations to PRs
 
 jobs:
+    quality-checks:
+        runs-on: ubuntu-latest
+        name: Quality Checks
+        steps:
+            - name: Checkout code
+              uses: actions/checkout@v4
+
+            - name: Setup Node.js environment
+              uses: actions/setup-node@v4
+              with:
+                  node-version: 22
+
+            - name: Cache root dependencies
+              uses: actions/cache@v4
+              id: root-cache
+              with:
+                  path: node_modules
+                  key: ${{ runner.os }}-npm-${{ hashFiles('package-lock.json') }}
+
+            - name: Cache webview-ui dependencies
+              uses: actions/cache@v4
+              id: webview-cache
+              with:
+                  path: webview-ui/node_modules
+                  key: ${{ runner.os }}-npm-webview-${{ hashFiles('webview-ui/package-lock.json') }}
+
+            - name: Install root dependencies
+              if: steps.root-cache.outputs.cache-hit != 'true'
+              run: npm ci
+
+            - name: Install webview-ui dependencies
+              if: steps.webview-cache.outputs.cache-hit != 'true'
+              run: cd webview-ui && npm ci
+
+            - name: Run Quality Checks (Parallel)
+              run: npm run ci:check-all
+
     test:
+        needs: quality-checks
         strategy:
             fail-fast: false
             matrix:
@@ -33,18 +74,6 @@ jobs:
               with:
                   node-version: 22
 
-            # Setup Python for coverage script
-            - name: Setup Python
-              uses: actions/setup-python@v4
-              with:
-                  python-version: "3.10"
-
-            - name: Install Python dependencies
-              run: |
-                  python -m pip install --upgrade pip
-                  pip install requests
-
-            # Cache root dependencies - only reuse if package-lock.json exactly matches
             - name: Cache root dependencies
               uses: actions/cache@v4
               id: root-cache
@@ -52,7 +81,6 @@ jobs:
                   path: node_modules
                   key: ${{ runner.os }}-npm-${{ hashFiles('package-lock.json') }}
 
-            # Cache webview-ui dependencies - only reuse if package-lock.json exactly matches
             - name: Cache webview-ui dependencies
               uses: actions/cache@v4
               id: webview-cache
@@ -68,60 +96,60 @@ jobs:
               if: steps.webview-cache.outputs.cache-hit != 'true'
               run: cd webview-ui && npm ci
 
-            - name: Install xvfb on Linux
-              if: runner.os == 'Linux'
-              run: sudo apt-get update && sudo apt-get install -y xvfb
-              
             - name: Set up NPM on Windows
               if: runner.os == 'Windows'
               run: |
                   npm config set script-shell "C:\\Program Files\\Git\\bin\\bash.exe"
 
-            - name: Type Check
-              run: npm run check-types
-
-            - name: Lint Check
-              run: npm run lint
-
-            - name: Format Check
-              run: npm run format
-
-            # Build the extension before running tests
+            # Build the extension and tests (without redundant checks)
             - name: Build Tests and Extension
-              run: npm run pretest
+              run: npm run ci:build
 
-            - name: Unit Tests (with coverage on Linux)
+            - name: Unit Tests with coverage - Linux
+              id: unit_tests_linux
+              continue-on-error: true
+              if: runner.os == 'Linux'
               run: |
-                if [ "${{ runner.os }}" = "Linux" ]; then
-                  npm install --no-save nyc
                   npx nyc --nycrc-path .nycrc.unit.json --reporter=lcov npm run test:unit
-                else
-                  npm run test:unit
-                fi
 
-            # Run extension tests with coverage
-            - name: Extension Integration Tests with Coverage
-              id: extension_coverage
+            - name: Unit Tests - Non-Linux
+              id: unit_tests_non_linux
               continue-on-error: true
+              if: runner.os != 'Linux'
               run: |
-                  node ./scripts/test-ci.js 2>&1 | tee extension_coverage.txt
-                  # Default the encoding to UTF-8 - It's not the default on Windows
-                  PYTHONUTF8=1 PYTHONPATH=.github/scripts python -m coverage_check extract-coverage extension_coverage.txt --type=extension --github-output --verbose
+                  npm run test:unit
+
+            - name: Extension Integration Tests - Linux
+              id: integration_tests_linux
+              continue-on-error: true
+              if: runner.os == 'Linux'
+              run: xvfb-run -a npm run test:coverage
+
+            - name: Extension Integration Tests - Non-Linux
+              id: integration_tests_non_linux
+              continue-on-error: true
+              if: runner.os != 'Linux'
+              run: npm run test:integration
 
-            # Run webview tests with coverage
             - name: Webview Tests with Coverage
-              id: webview_coverage
+              id: webview_tests
               continue-on-error: true
               run: |
                   cd webview-ui
-                  # Ensure coverage dependency is installed
-                  npm install --no-save @vitest/coverage-v8
-                  npm run test:coverage 2>&1 | tee webview_coverage.txt
-                  cd ..
-                  # Default the encoding to UTF-8 - It's not the default on Windows
-                  PYTHONUTF8=1 PYTHONPATH=.github/scripts python -m coverage_check extract-coverage webview-ui/webview_coverage.txt --type=webview --github-output --verbose
-
-            # Save coverage reports as artifacts (workflow-scoped)
+                  npm run test:coverage
+
+            - name: Check Test Results
+              if: always()
+              run: |
+                  failed=""
+                  [[ "${{ steps.unit_tests_linux.outcome }}" == "failure" && "${{ runner.os }}" == "Linux" ]] && failed="$failed unit_tests_linux"
+                  [[ "${{ steps.unit_tests_non_linux.outcome }}" == "failure" && "${{ runner.os }}" != "Linux" ]] && failed="$failed unit_tests_non_linux"
+                  [[ "${{ steps.integration_tests_linux.outcome }}" == "failure" && "${{ runner.os }}" == "Linux" ]] && failed="$failed integration_tests_linux"
+                  [[ "${{ steps.integration_tests_non_linux.outcome }}" == "failure" && "${{ runner.os }}" != "Linux" ]] && failed="$failed integration_tests_non_linux"
+                  [[ "${{ steps.webview_tests.outcome }}" == "failure" ]] && failed="$failed webview_tests"
+                  [[ -n "$failed" ]] && { echo "❌ The following test suites failed:$failed"; exit 1; }
+                  echo "✅ All tests passed"
+
             - name: Save Coverage Reports
               uses: actions/upload-artifact@v4
               # Only upload artifacts on Linux - We only need coverage from one OS
@@ -129,27 +157,11 @@ jobs:
               with:
                   name: pr-coverage-reports
                   path: |
-                      extension_coverage.txt
-                      webview-ui/webview_coverage.txt
                       coverage-unit/lcov.info
                       webview-ui/coverage/lcov.info
 
-            # Set the check as failed if any of the tests failed
-            - name: Check for test failures
-              run: |
-                  # Check if any of the test steps failed
-                  # https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/accessing-contextual-information-about-workflow-runs#steps-context
-                  if [ "${{ steps.extension_coverage.outcome }}" != "success" ]; then
-                      echo "Extension Integration Tests failed, see previous step for test output."
-                  fi
-                  if [ "${{ steps.webview_coverage.outcome }}" != "success" ]; then
-                      echo "Webview Tests failed, see previous step for test output."
-                  fi
-                  if [ "${{ steps.extension_coverage.outcome }}" != "success" ] || [ "${{ steps.webview_coverage.outcome }}" != "success" ]; then
-                      exit 1
-                  fi
-
     test-platform-integration:
+        needs: quality-checks
         runs-on: ubuntu-latest
         steps:
             - name: Checkout code
@@ -212,85 +224,6 @@ jobs:
                 name: test-platform-integration-core-coverage
                 path: coverage/**/lcov.info
 
-    coverage:
-        needs: test
-        runs-on: ubuntu-latest
-        # Only run on PRs to main branch
-        if: github.event_name == 'pull_request' && github.base_ref == 'main'
-        steps:
-            - name: Checkout code
-              uses: actions/checkout@v4
-              with:
-                  fetch-depth: 0 # Fetch all history for accurate comparison
-
-            # Setup Python for coverage script
-            - name: Setup Python
-              uses: actions/setup-python@v4
-              with:
-                  python-version: "3.10"
-
-            - name: Install Python dependencies
-              run: |
-                  python -m pip install --upgrade pip
-                  pip install requests
-
-            - name: Setup Node.js environment
-              uses: actions/setup-node@v4
-              with:
-                  node-version: 22
-
-            # Cache root dependencies - only reuse if package-lock.json exactly matches
-            - name: Cache root dependencies
-              uses: actions/cache@v4
-              id: root-cache
-              with:
-                  path: node_modules
-                  key: ${{ runner.os }}-npm-${{ hashFiles('package-lock.json') }}
-
-            # Cache webview-ui dependencies - only reuse if package-lock.json exactly matches
-            - name: Cache webview-ui dependencies
-              uses: actions/cache@v4
-              id: webview-cache
-              with:
-                  path: webview-ui/node_modules
-                  key: ${{ runner.os }}-npm-webview-${{ hashFiles('webview-ui/package-lock.json') }}
-
-            - name: Install root dependencies
-              if: steps.root-cache.outputs.cache-hit != 'true'
-              run: npm ci
-
-            - name: Install webview-ui dependencies
-              if: steps.webview-cache.outputs.cache-hit != 'true'
-              run: cd webview-ui && npm ci
-
-            # Build the extension before running tests
-            - name: Build Extension
-              run: npm run compile
-
-            # Download coverage artifacts from test job
-            - name: Download Coverage Reports
-              uses: actions/download-artifact@v4
-              with:
-                  name: pr-coverage-reports
-                  path: . # Download to root directory to match expected paths
-
-            # Process coverage workflow
-            - name: Process coverage workflow
-              id: coverage
-              run: |
-                  # Extract PR number from GITHUB_REF
-                  PR_NUMBER=$(echo "$GITHUB_REF" | sed -e 's/refs\/pull\///' -e 's/\/merge//')
-
-                  # Run the coverage workflow from root directory
-                  PYTHONPATH=.github/scripts python -m coverage_check process-workflow \
-                    --base-branch ${{ github.base_ref }} \
-                    --pr-number $PR_NUMBER \
-                    --repo $GITHUB_REPOSITORY \
-                    --token ${{ secrets.GITHUB_TOKEN }} \
-                    --verbose
-              env:
-                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
     qlty:
         needs: [test, test-platform-integration]
         runs-on: ubuntu-latest
@@ -298,8 +231,6 @@ jobs:
         steps:
             - name: Checkout code
               uses: actions/checkout@v4
-              with:
-                  fetch-depth: 0 # Fetch all history for accurate comparison
 
             - name: Download unit tests coverage reports
               uses: actions/download-artifact@v4
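
The headline changes in `.github/workflows/test.yml`: a `push` trigger on `main` (so coverage reaches Qlty from main, not just PRs), a single `quality-checks` job that runs type, lint, and format checks once instead of per matrix OS, removal of the Python-based `coverage` job, and OS-specific test steps that call `xvfb-run` directly (replacing the `scripts/test-ci.js` wrapper removed further down). Each test step runs with `continue-on-error: true`, and the final `Check Test Results` step fails the job if any step that applied to the current OS failed. A minimal TypeScript sketch of that aggregation logic — the types and function are illustrative, not part of the repo:

```typescript
type Outcome = "success" | "failure" | "cancelled" | "skipped"

interface TestStep {
	id: string // e.g. "unit_tests_linux"
	outcome: Outcome // the steps.<id>.outcome value GitHub Actions exposes
	relevant: boolean // did this step apply to the current runner OS?
}

// Mirrors the bash in "Check Test Results": collect every relevant
// failure, then fail the job naming the offending suites.
function checkTestResults(steps: TestStep[]): void {
	const failed = steps.filter((s) => s.relevant && s.outcome === "failure").map((s) => s.id)
	if (failed.length > 0) {
		throw new Error(`❌ The following test suites failed: ${failed.join(" ")}`)
	}
	console.log("✅ All tests passed")
}
```

This pattern lets all test suites run to completion (and upload coverage) before the job is marked failed, rather than aborting on the first failing step.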

+ 0 - 1
CONTRIBUTING.md

@@ -74,7 +74,6 @@ We also welcome contributions to our [documentation](https://github.com/cline/cl
 4. Testing
     - Run `npm run test` to run tests locally. 
     - Before submitting PR, run `npm run format:fix` to format your code
-    - Run `npm run test:ci` to run tests locally
 
 ### Extension
 

File diff suppressed because it is too large
+ 845 - 8
package-lock.json


+ 3 - 1
package.json

@@ -356,9 +356,10 @@
 		"format": "biome format --changed --no-errors-on-unmatched --files-ignore-unknown=true --diagnostic-level=error",
 		"format:fix": "biome check --changed --no-errors-on-unmatched --files-ignore-unknown=true --write",
 		"fix:all": "biome check --no-errors-on-unmatched --files-ignore-unknown=true --write --diagnostic-level=error --unsafe",
+		"ci:check-all": "npm-run-all -p check-types lint format",
+		"ci:build": "npm run protos && npm run build:webview && node esbuild.mjs && npm run compile-tests",
 		"pretest": "npm run compile && npm run compile-tests && npm run compile-standalone && npm run lint",
 		"test": "npm-run-all test:unit test:integration",
-		"test:ci": "node scripts/test-ci.js",
 		"test:integration": "vscode-test",
 		"test:unit": "TS_NODE_PROJECT='./tsconfig.unit-test.json' mocha",
 		"test:coverage": "vscode-test --coverage",
@@ -418,6 +419,7 @@
 		"lint-staged": "^16.1.0",
 		"minimatch": "^3.0.3",
 		"npm-run-all": "^4.1.5",
+		"nyc": "^17.1.0",
 		"prebuild-install": "^7.1.3",
 		"protoc-gen-ts": "^0.8.7",
 		"proxyquire": "^2.1.3",

+ 0 - 30
scripts/test-ci.js

@@ -1,30 +0,0 @@
-#!/usr/bin/env node
-const { execSync } = require("child_process")
-const process = require("process")
-
-try {
-	if (process.platform === "linux") {
-		console.log("Detected Linux environment.")
-
-		execSync("which xvfb-run", { stdio: "ignore" })
-
-		console.log("xvfb-run is installed. Running tests with xvfb-run...")
-		execSync("xvfb-run -a npm run test:coverage", { stdio: "inherit" })
-	} else {
-		console.log("Non-Linux environment detected. Running tests normally.")
-		execSync("npm run test:integration", { stdio: "inherit" })
-	}
-} catch (error) {
-	if (process.platform === "linux") {
-		console.error(
-			`Error: xvfb-run is not installed.\n` +
-				`Please install it using the following command:\n` +
-				`  Debian/Ubuntu: sudo apt install xvfb\n` +
-				`  RHEL/CentOS: sudo yum install xvfb\n` +
-				`  Arch Linux: sudo pacman -S xvfb`,
-		)
-	} else {
-		console.error("Error running tests:", error.message)
-	}
-	process.exit(1)
-}

+ 40 - 19
src/core/api/retry.test.ts

@@ -1,8 +1,13 @@
 import { describe, it } from "mocha"
 import "should"
+import sinon from "sinon"
 import { withRetry } from "./retry"
 
 describe("Retry Decorator", () => {
+	afterEach(() => {
+		sinon.restore()
+	})
+
 	describe("withRetry", () => {
 		it("should not retry on success", async () => {
 			let callCount = 0
@@ -73,9 +78,11 @@ describe("Retry Decorator", () => {
 
 		it("should respect retry-after header with delta seconds", async () => {
 			let callCount = 0
-			const startTime = Date.now()
+			const setTimeoutSpy = sinon.spy(global, "setTimeout")
+			const baseDelay = 1000
+
 			class TestClass {
-				@withRetry({ maxRetries: 2, baseDelay: 1000 }) // Use large baseDelay to ensure header takes precedence
+				@withRetry({ maxRetries: 2, baseDelay }) // Use large baseDelay to ensure header takes precedence
 				async *failMethod() {
 					callCount++
 					if (callCount === 1) {
@@ -94,19 +101,23 @@ describe("Retry Decorator", () => {
 				result.push(value)
 			}
 
-			const duration = Date.now() - startTime
-			duration.should.be.approximately(10, 10) // Allow 10ms variance
 			callCount.should.equal(2)
+			setTimeoutSpy.calledOnce.should.be.true
+			const [_, delay] = setTimeoutSpy.getCall(0).args
+			delay?.should.equal(0)
+
 			result.should.deepEqual(["success after retry"])
 		})
 
 		it("should respect retry-after header with Unix timestamp", async () => {
+			const setTimeoutSpy = sinon.spy(global, "setTimeout")
 			let callCount = 0
-			const startTime = Date.now()
-			const retryTimestamp = Math.floor(Date.now() / 1000) + 0.01 // 10ms in the future
+			const fixedDate = new Date("2010-01-01T00:00:00.000Z")
+			const retryTimestamp = Math.floor(fixedDate.getTime() / 1000) + 0.01 // 10ms in the future
+			const baseDelay = 1000
 
 			class TestClass {
-				@withRetry({ maxRetries: 2, baseDelay: 1000 }) // Use large baseDelay to ensure header takes precedence
+				@withRetry({ maxRetries: 2, baseDelay }) // Use large baseDelay to ensure header takes precedence
 				async *failMethod() {
 					callCount++
 					if (callCount === 1) {
@@ -125,17 +136,22 @@ describe("Retry Decorator", () => {
 				result.push(value)
 			}
 
-			const duration = Date.now() - startTime
-			duration.should.be.approximately(10, 10) // Allow 10ms variance
 			callCount.should.equal(2)
+
+			setTimeoutSpy.calledOnce.should.be.true
+			const [_, delay] = setTimeoutSpy.getCall(0).args
+			delay?.should.equal(fixedDate.getTime())
+
 			result.should.deepEqual(["success after retry"])
 		})
 
 		it("should use exponential backoff when no retry-after header", async () => {
+			const setTimeoutSpy = sinon.spy(global, "setTimeout")
 			let callCount = 0
-			const startTime = Date.now()
+			const baseDelay = 10
+
 			class TestClass {
-				@withRetry({ maxRetries: 2, baseDelay: 10, maxDelay: 100 })
+				@withRetry({ maxRetries: 2, baseDelay, maxDelay: 100 })
 				async *failMethod() {
 					callCount++
 					if (callCount === 1) {
@@ -153,18 +169,22 @@ describe("Retry Decorator", () => {
 				result.push(value)
 			}
 
-			const duration = Date.now() - startTime
-			// First retry should be after baseDelay (10ms)
-			duration.should.be.approximately(10, 10)
 			callCount.should.equal(2)
+			setTimeoutSpy.calledOnce.should.be.true
+			const [_, delay] = setTimeoutSpy.getCall(0).args
+			delay?.should.equal(baseDelay)
+
 			result.should.deepEqual(["success after retry"])
 		})
 
 		it("should respect maxDelay", async () => {
+			const setTimeoutSpy = sinon.spy(global, "setTimeout")
 			let callCount = 0
-			const startTime = Date.now()
+			const baseDelay = 50
+			const maxDelay = 10
+
 			class TestClass {
-				@withRetry({ maxRetries: 3, baseDelay: 50, maxDelay: 10 })
+				@withRetry({ maxRetries: 3, baseDelay, maxDelay })
 				async *failMethod() {
 					callCount++
 					if (callCount < 3) {
@@ -182,10 +202,11 @@ describe("Retry Decorator", () => {
 				result.push(value)
 			}
 
-			const duration = Date.now() - startTime
-			// Both retries should be capped at maxDelay (10ms each)
-			duration.should.be.approximately(20, 20)
 			callCount.should.equal(3)
+			setTimeoutSpy.calledOnce.should.be.true
+			const [_, delay] = setTimeoutSpy.getCall(0).args
+			delay?.should.equal(maxDelay)
+
 			result.should.deepEqual(["success after retries"])
 		})
 

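The retry tests are the "old flaky test" from the commit log: they asserted wall-clock durations (`duration.should.be.approximately(10, 10)`), which breaks on loaded CI runners. The rewrite spies on `setTimeout` and asserts the scheduled delay instead, which is deterministic. A self-contained sketch of the pattern, with a hypothetical `delayed` helper standing in for `withRetry`'s internal backoff wait:

```typescript
import { it } from "mocha"
import sinon from "sinon"
import "should"

// Hypothetical stand-in for the backoff wait inside withRetry.
async function delayed(ms: number): Promise<void> {
	await new Promise((resolve) => setTimeout(resolve, ms))
}

it("asserts the scheduled delay, not elapsed time", async () => {
	const setTimeoutSpy = sinon.spy(global, "setTimeout")
	try {
		await delayed(10)
		// Deterministic: check what was scheduled, not how long it actually took.
		setTimeoutSpy.calledOnce.should.be.true()
		const [, delay] = setTimeoutSpy.getCall(0).args
		delay?.should.equal(10)
	} finally {
		sinon.restore() // mirrors the suite's new afterEach cleanup
	}
})
```
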
Some files were not shown because too many files changed in this diff