| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293 |
#!/usr/bin/env python3
"""
Test runner script that mimics CI/CD pipeline behavior.

This script runs tests with the same configuration as the GitHub Actions
workflow, making it easier to reproduce CI issues locally.
"""
import argparse
import os
import subprocess
import sys
from pathlib import Path
def run_command(cmd: list[str], description: str, continue_on_error: bool = False) -> bool:
    """
    Run a command, print a banner around it, and report pass/fail.

    Args:
        cmd: Command to run as a list of strings (no shell is involved).
        description: Human-readable description used in the banner output.
        continue_on_error: If True, return False on failure instead of
            exiting the whole process.

    Returns:
        True if the command succeeded, False otherwise.

    Note:
        When continue_on_error is False, exits the process with the
        command's return code (or 127 if the executable is missing).
    """
    print(f"\n{'='*80}")
    print(f"Running: {description}")
    print(f"Command: {' '.join(cmd)}")
    print(f"{'='*80}\n")

    try:
        subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as e:
        print(f"\n❌ {description} - FAILED (exit code: {e.returncode})\n")
        if not continue_on_error:
            sys.exit(e.returncode)
        return False
    except FileNotFoundError:
        # The tool isn't installed (e.g. flake8/bandit/safety missing).
        # Report a clean failure instead of crashing with a traceback.
        # 127 is the conventional shell exit code for "command not found".
        print(f"\n❌ {description} - FAILED ({cmd[0]}: command not found)\n")
        if not continue_on_error:
            sys.exit(127)
        return False

    print(f"\n✅ {description} - PASSED\n")
    return True
- def main():
- """Main entry point for test runner."""
- parser = argparse.ArgumentParser(
- description="Run tests with CI/CD configuration",
- formatter_class=argparse.RawDescriptionHelpFormatter,
- epilog="""
- Examples:
- # Run all tests
- python scripts/run_tests.py
-
- # Run only unit tests
- python scripts/run_tests.py --unit
-
- # Run with coverage report
- python scripts/run_tests.py --coverage
-
- # Run linting only
- python scripts/run_tests.py --lint-only
-
- # Run everything (tests + lint + security)
- python scripts/run_tests.py --all
- """
- )
-
- parser.add_argument(
- "--unit",
- action="store_true",
- help="Run only unit tests"
- )
- parser.add_argument(
- "--integration",
- action="store_true",
- help="Run only integration tests"
- )
- parser.add_argument(
- "--e2e",
- action="store_true",
- help="Run only end-to-end tests"
- )
- parser.add_argument(
- "--coverage",
- action="store_true",
- help="Generate coverage report"
- )
- parser.add_argument(
- "--html",
- action="store_true",
- help="Generate HTML coverage report"
- )
- parser.add_argument(
- "--lint-only",
- action="store_true",
- help="Run only linting checks"
- )
- parser.add_argument(
- "--security-only",
- action="store_true",
- help="Run only security checks"
- )
- parser.add_argument(
- "--all",
- action="store_true",
- help="Run all checks (tests, lint, security)"
- )
- parser.add_argument(
- "--fast",
- action="store_true",
- help="Skip slow tests"
- )
- parser.add_argument(
- "--parallel",
- action="store_true",
- help="Run tests in parallel (requires pytest-xdist)"
- )
- parser.add_argument(
- "--verbose",
- "-v",
- action="store_true",
- help="Verbose output"
- )
-
- args = parser.parse_args()
-
- # Ensure we're in the project root
- project_root = Path(__file__).parent.parent
-
- # Create necessary directories
- (project_root / "tests" / "logs").mkdir(parents=True, exist_ok=True)
- (project_root / "logs").mkdir(parents=True, exist_ok=True)
-
- results = {
- "tests": None,
- "lint": None,
- "security": None
- }
-
- # Determine what to run
- run_tests = not (args.lint_only or args.security_only)
- run_lint = args.lint_only or args.all
- run_security = args.security_only or args.all
-
- # Build test command
- if run_tests:
- test_cmd = ["pytest"]
-
- # Add verbosity
- if args.verbose:
- test_cmd.append("-v")
- else:
- test_cmd.append("-v") # Always verbose in CI mode
-
- # Add parallel execution
- if args.parallel:
- test_cmd.extend(["-n", "auto"])
-
- # Add coverage
- if args.coverage or args.html:
- test_cmd.extend(["--cov=src"])
- if args.html:
- test_cmd.append("--cov-report=html")
- test_cmd.append("--cov-report=term")
- test_cmd.append("--cov-report=xml")
-
- # Add test selection
- if args.unit:
- test_cmd.extend(["tests/unit", "-m", "unit"])
- elif args.integration:
- test_cmd.extend(["tests/integration", "-m", "integration"])
- elif args.e2e:
- test_cmd.extend(["tests/e2e", "-m", "e2e"])
-
- # Add fast mode
- if args.fast:
- test_cmd.extend(["-m", "not slow"])
-
- # Run tests
- results["tests"] = run_command(
- test_cmd,
- "Test Suite",
- continue_on_error=args.all
- )
-
- # Run linting
- if run_lint:
- print("\n" + "="*80)
- print("LINTING CHECKS")
- print("="*80 + "\n")
-
- # flake8
- lint_results = []
- lint_results.append(run_command(
- ["flake8", "src", "tests", "--count", "--select=E9,F63,F7,F82",
- "--show-source", "--statistics"],
- "flake8 - Critical Errors",
- continue_on_error=True
- ))
-
- lint_results.append(run_command(
- ["flake8", "src", "tests", "--count", "--exit-zero",
- "--max-complexity=10", "--max-line-length=127", "--statistics"],
- "flake8 - Style Warnings",
- continue_on_error=True
- ))
-
- # black
- lint_results.append(run_command(
- ["black", "--check", "src", "tests"],
- "black - Code Formatting",
- continue_on_error=True
- ))
-
- # isort
- lint_results.append(run_command(
- ["isort", "--check-only", "src", "tests"],
- "isort - Import Sorting",
- continue_on_error=True
- ))
-
- # mypy
- lint_results.append(run_command(
- ["mypy", "src", "--ignore-missing-imports"],
- "mypy - Type Checking",
- continue_on_error=True
- ))
-
- results["lint"] = all(lint_results)
-
- # Run security checks
- if run_security:
- print("\n" + "="*80)
- print("SECURITY CHECKS")
- print("="*80 + "\n")
-
- security_results = []
-
- # safety
- security_results.append(run_command(
- ["safety", "check", "--json"],
- "safety - Dependency Vulnerabilities",
- continue_on_error=True
- ))
-
- # bandit
- security_results.append(run_command(
- ["bandit", "-r", "src", "-f", "json", "-o", "bandit-report.json"],
- "bandit - Security Issues",
- continue_on_error=True
- ))
-
- results["security"] = all(security_results)
-
- # Print summary
- print("\n" + "="*80)
- print("SUMMARY")
- print("="*80 + "\n")
-
- all_passed = True
-
- if results["tests"] is not None:
- status = "✅ PASSED" if results["tests"] else "❌ FAILED"
- print(f"Tests: {status}")
- all_passed = all_passed and results["tests"]
-
- if results["lint"] is not None:
- status = "✅ PASSED" if results["lint"] else "❌ FAILED"
- print(f"Lint: {status}")
- all_passed = all_passed and results["lint"]
-
- if results["security"] is not None:
- status = "✅ PASSED" if results["security"] else "❌ FAILED"
- print(f"Security: {status}")
- all_passed = all_passed and results["security"]
-
- print("\n" + "="*80)
-
- if args.coverage or args.html:
- print("\n📊 Coverage report generated:")
- if args.html:
- print(f" HTML: {project_root}/htmlcov/index.html")
- print(f" XML: {project_root}/coverage.xml")
-
- if all_passed:
- print("\n🎉 All checks passed!")
- sys.exit(0)
- else:
- print("\n💥 Some checks failed!")
- sys.exit(1)
- if __name__ == "__main__":
- main()
|