diff --git a/.github/workflows/rhai-tests.yml b/.github/workflows/rhai-tests.yml
new file mode 100644
index 0000000..5e34eb1
--- /dev/null
+++ b/.github/workflows/rhai-tests.yml
@@ -0,0 +1,73 @@
+name: Rhai Tests
+
+on:
+  push:
+    branches: [ '*' ]
+    paths:
+      - 'src/rhai_tests/**'
+      - 'src/rhai/**'
+      - 'src/git/**'
+      - 'src/os/**'
+      - 'run_rhai_tests.sh'
+      - '.github/workflows/rhai-tests.yml'
+  pull_request:
+    branches: [ '*' ]
+    paths:
+      - 'src/rhai_tests/**'
+      - 'src/rhai/**'
+      - 'src/git/**'
+      - 'src/os/**'
+      - 'run_rhai_tests.sh'
+      - '.github/workflows/rhai-tests.yml'
+  workflow_dispatch: # Allow manual triggering
+
+jobs:
+  rhai-tests:
+    name: Run Rhai Tests
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+
+      - name: Cache Rust dependencies
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+            target
+          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-cargo-
+
+      - name: Build herodo
+        run: |
+          cargo build --bin herodo
+          echo "${{ github.workspace }}/target/debug" >> $GITHUB_PATH
+
+      - name: Install dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y git curl
+
+      - name: Run Rhai tests
+        run: |
+          chmod +x run_rhai_tests.sh
+          ./run_rhai_tests.sh
+
+      - name: Check for test failures
+        if: always()
+        run: |
+          if grep -q "Some tests failed" run_rhai_tests.log; then
+            echo "::error::Some Rhai tests failed. Check the logs for details."
+            exit 1
+          else
+            echo "All Rhai tests passed!"
+          fi
diff --git a/docs/rhai/ci_workflow.md b/docs/rhai/ci_workflow.md
new file mode 100644
index 0000000..65a38fe
--- /dev/null
+++ b/docs/rhai/ci_workflow.md
@@ -0,0 +1,71 @@
+# Continuous Integration for Rhai Tests
+
+This document describes the continuous integration (CI) workflow for running Rhai tests in the SAL library.
+
+## GitHub Actions Workflow
+
+The SAL project includes a GitHub Actions workflow that automatically runs all Rhai tests whenever changes are made to relevant files. This ensures that the Rhai integration continues to work correctly as the codebase evolves.
+
+### Workflow File
+
+The workflow is defined in `.github/workflows/rhai-tests.yml`.
+
+### Trigger Events
+
+The workflow runs automatically when:
+
+1. Changes are pushed to any branch and touch any of the following:
+   - Rhai test scripts (`src/rhai_tests/**`)
+   - Rhai module code (`src/rhai/**`)
+   - Git module code (`src/git/**`)
+   - OS module code (`src/os/**`)
+   - The test runner script (`run_rhai_tests.sh`)
+   - The workflow file itself (`.github/workflows/rhai-tests.yml`)
+
+2. A pull request is opened or updated that affects the same files.
+
+3. The workflow is manually triggered from the GitHub Actions interface (`workflow_dispatch`).
+
+### Workflow Steps
+
+The workflow performs the following steps:
+
+1. **Checkout Code**: Checks out the repository code.
+2. **Set up Rust**: Installs the stable Rust toolchain.
+3. **Cache Dependencies**: Caches Rust dependencies to speed up builds.
+4. **Build herodo**: Builds the `herodo` binary used to run Rhai scripts and adds it to the job's `PATH`.
+5. **Install Dependencies**: Installs system dependencies such as Git and curl.
+6. **Run Rhai Tests**: Runs the `run_rhai_tests.sh` script to execute all Rhai tests.
+7. **Check for Failures**: Verifies that all tests passed.
+
+### Test Results
+
+The workflow fails if any Rhai test fails. This prevents changes that break the Rhai integration from being merged.
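+
+The failure check is just a `grep` over the log produced by the test runner; a minimal, locally runnable sketch of the same check is:
+
+```bash
+# Mirror the CI step: look for the summary line the runner writes on failure
+if grep -q "Some tests failed" run_rhai_tests.log; then
+    echo "Some Rhai tests failed. Check run_rhai_tests.log for details."
+    exit 1
+fi
+```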
+
+## Local Testing
+
+Before pushing changes, you can run the same tests locally using the `run_rhai_tests.sh` script:
+
+```bash
+./run_rhai_tests.sh
+```
+
+This will produce the same test results as the CI workflow, allowing you to catch and fix issues before pushing your changes.
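+
+The script runs each module's tests through the `herodo` binary, so `herodo` must be on your `PATH`. A minimal way to set that up locally (mirroring the CI build step), and to target a single module directly (the module name below is a placeholder), is:
+
+```bash
+# Build herodo and put the debug binary on PATH, as the CI job does
+cargo build --bin herodo
+export PATH="$PWD/target/debug:$PATH"
+
+# Optionally run one module's tests instead of the whole suite (replace <module>)
+herodo --path src/rhai_tests/<module>/run_all_tests.rhai
+```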
+
+## Logs
+
+The test runner script creates a log file (`run_rhai_tests.log`) that contains the output of all tests. This log is used by the CI workflow to check for test failures.
+
+## Adding New Tests
+
+When adding new tests, make sure they are included in the appropriate module's test runner script (`run_all_tests.rhai`). The CI workflow will automatically run the new tests.
+
+## Troubleshooting
+
+If the CI workflow fails, check the GitHub Actions logs for details. Common issues include:
+
+1. **Missing Dependencies**: Ensure all required dependencies are installed.
+2. **Test Failures**: Fix any failing tests.
+3. **Build Errors**: Fix any errors in the Rust code.
+
+If you need to modify the workflow, edit the `.github/workflows/rhai-tests.yml` file.
diff --git a/docs/rhai/index.md b/docs/rhai/index.md
index 78c1a29..b9fc5ed 100644
--- a/docs/rhai/index.md
+++ b/docs/rhai/index.md
@@ -33,6 +33,7 @@ SAL includes test scripts for verifying the functionality of its Rhai integratio
 - [OS Module Tests](os_module_tests.md): Tests for file system, download, and package management operations
 - [Git Module Tests](git_module_tests.md): Tests for Git repository management and operations
 - [Running Tests](running_tests.md): Instructions for running all Rhai tests
+- [CI Workflow](ci_workflow.md): Continuous integration workflow for Rhai tests
 
 ## Examples
 
diff --git a/run_rhai_tests.sh b/run_rhai_tests.sh
index 58419d6..2182cb5 100755
--- a/run_rhai_tests.sh
+++ b/run_rhai_tests.sh
@@ -9,10 +9,19 @@ YELLOW='\033[0;33m'
 BLUE='\033[0;34m'
 NC='\033[0m' # No Color
 
+# Create log file
+LOG_FILE="run_rhai_tests.log"
+> $LOG_FILE # Clear log file if it exists
+
+# Function to log messages to both console and log file
+log() {
+    echo -e "$1" | tee -a $LOG_FILE
+}
+
 # Print header
-echo -e "${BLUE}=======================================${NC}"
-echo -e "${BLUE} Running All Rhai Tests ${NC}"
-echo -e "${BLUE}=======================================${NC}"
+log "${BLUE}=======================================${NC}"
+log "${BLUE} Running All Rhai Tests ${NC}"
+log "${BLUE}=======================================${NC}"
 
 # Find all test runner scripts
 RUNNERS=$(find src/rhai_tests -name "run_all_tests.rhai")
@@ -26,38 +35,39 @@ FAILED_MODULES=0
 for runner in $RUNNERS; do
     # Extract module name from path
     module=$(echo $runner | cut -d'/' -f3)
-    
-    echo -e "\n${YELLOW}Running tests for module: ${module}${NC}"
-    echo -e "${YELLOW}-------------------------------------${NC}"
-    
+
+    log "\n${YELLOW}Running tests for module: ${module}${NC}"
+    log "${YELLOW}-------------------------------------${NC}"
+
     # Run the test runner
-    herodo --path $runner
-    
+    herodo --path $runner | tee -a $LOG_FILE
+    TEST_RESULT=${PIPESTATUS[0]}
+
     # Check if the test passed
-    if [ $? -eq 0 ]; then
-        echo -e "${GREEN}✓ Module ${module} tests passed${NC}"
+    if [ $TEST_RESULT -eq 0 ]; then
+        log "${GREEN}✓ Module ${module} tests passed${NC}"
         PASSED_MODULES=$((PASSED_MODULES + 1))
     else
-        echo -e "${RED}✗ Module ${module} tests failed${NC}"
+        log "${RED}✗ Module ${module} tests failed${NC}"
        FAILED_MODULES=$((FAILED_MODULES + 1))
     fi
-    
+
     TOTAL_MODULES=$((TOTAL_MODULES + 1))
 done
 
 # Print summary
-echo -e "\n${BLUE}=======================================${NC}"
-echo -e "${BLUE} Test Summary ${NC}"
-echo -e "${BLUE}=======================================${NC}"
-echo -e "Total modules tested: ${TOTAL_MODULES}"
-echo -e "Passed: ${GREEN}${PASSED_MODULES}${NC}"
-echo -e "Failed: ${RED}${FAILED_MODULES}${NC}"
+log "\n${BLUE}=======================================${NC}"
+log "${BLUE} Test Summary ${NC}"
+log "${BLUE}=======================================${NC}"
+log "Total modules tested: ${TOTAL_MODULES}"
+log "Passed: ${GREEN}${PASSED_MODULES}${NC}"
+log "Failed: ${RED}${FAILED_MODULES}${NC}"
 
 # Set exit code based on test results
 if [ $FAILED_MODULES -eq 0 ]; then
-    echo -e "\n${GREEN}All tests passed!${NC}"
+    log "\n${GREEN}All tests passed!${NC}"
     exit 0
 else
-    echo -e "\n${RED}Some tests failed!${NC}"
+    log "\n${RED}Some tests failed!${NC}"
     exit 1
 fi