diff --git a/DEV_SETUP.md b/DEV_SETUP.md new file mode 100644 index 0000000..d65ed56 --- /dev/null +++ b/DEV_SETUP.md @@ -0,0 +1,144 @@ +# Development Environment Setup + +This guide helps you set up a development environment that matches your CI exactly. + +## Quick Setup (Recommended) + +Run the automated setup script: + +```bash +./scripts/setup-dev-env.sh +``` + +This will: +- Install pyenv for Python version management +- Install Python 3.11, 3.12, and 3.13 +- Set up your development environment +- Install all dependencies +- Set up pre-commit hooks +- Run initial tests + +## Manual Setup + +If you prefer manual setup or the script doesn't work: + +### 1. Install pyenv (for Python version management) + +```bash +# On macOS with Homebrew +brew install pyenv + +# Add to your shell profile (.zshrc, .bashrc, etc.) +export PATH="$HOME/.pyenv/bin:$PATH" +eval "$(pyenv init --path)" +eval "$(pyenv init -)" +``` + +### 2. Install Python versions + +```bash +# Install the same Python versions as CI +pyenv install 3.11.9 # or latest 3.11.x +pyenv install 3.12.4 # or latest 3.12.x +pyenv install 3.13.0 # or latest 3.13.x + +# Set local versions for this project +pyenv local 3.11.9 3.12.4 3.13.0 +``` + +### 3. 
Set up development environment + +```bash +make setup +make dev +``` + +## Available Commands + +After setup, you can use these commands: + +### Basic Development +- `make help` - Show all available commands +- `make setup` - Create virtual environment +- `make dev` - Install in development mode +- `make test` - Run tests with default Python version +- `make lint` - Run linting +- `make format` - Format code with black +- `make clean` - Clean build artifacts + +### Multi-Version Testing (Matches CI) +- `make test-all-versions` - Test against Python 3.11, 3.12, 3.13 +- `make ci-test` - Run exact CI test workflow locally +- `make ci-lint` - Run exact CI lint workflow locally + +### Python Version Management +- `make install-python-versions` - Install all required Python versions +- `make check-python-versions` - Check which Python versions are available + +## Testing Like CI + +To run tests exactly like your CI: + +```bash +# Run all tests across all Python versions (like CI matrix) +make ci-test + +# Run linting exactly like CI +make ci-lint + +# Or use tox for multi-version testing +make test-all-versions +``` + +## Troubleshooting + +### Python Version Issues + +If you get Python version errors: + +1. Check your current Python version: `python3 --version` +2. Install required versions: `make install-python-versions` +3. Verify installation: `make check-python-versions` + +### pyenv Not Working + +1. Restart your terminal after installation +2. Check your shell profile has pyenv configuration +3. Run: `source ~/.zshrc` (or ~/.bashrc) + +### CI Test Failures + +If local tests pass but CI fails: + +1. Run `make ci-test` to replicate CI exactly +2. Check Python version requirements in `setup.py` +3. 
Compare local and CI dependency versions + +## Integration with IDEs + +### VS Code + +Add this to your `.vscode/settings.json`: + +```json +{ + "python.defaultInterpreterPath": "./env/bin/python", + "python.testing.pytestEnabled": true, + "python.testing.pytestArgs": ["tests"], + "python.linting.flake8Enabled": true, + "python.formatting.provider": "black" +} +``` + +### PyCharm + +1. Set Project Interpreter to `./env/bin/python` +2. Set Test Runner to pytest +3. Configure Code Style to use Black + +## Notes + +- Your CI uses Python 3.11, 3.12, 3.13 +- Your `setup.py` requires Python >=3.11 +- Local testing with Python 3.9 will not match CI behavior +- Use `make ci-test` for the most accurate local testing diff --git a/Makefile b/Makefile index 1ae6fc7..cc1e9dc 100755 --- a/Makefile +++ b/Makefile @@ -1,36 +1,82 @@ +# Detect OS for cross-platform compatibility OS := $(shell uname -s 2>/dev/null || echo Windows) +# Python versions to test (matching CI) +PYTHON_VERSIONS := 3.11 3.12 3.13 +DEFAULT_PYTHON_VERSION := 3.11 + +# Directory for virtual environments +VENV_DIR := .venvs + ifeq ($(OS),Windows) PYTHON := $(shell where python3 2>NUL || echo "") PYTHON_EXE := env\Scripts\python.exe PIP := env\Scripts\pip.exe PYTEST := env\Scripts\pytest.exe + FLAKE8 := env\Scripts\flake8.exe + BLACK := env\Scripts\black.exe + TOX := env\Scripts\tox.exe else PYTHON := $(shell which python3 2>/dev/null || echo "") PYTHON_EXE := ./env/bin/python3 PIP := ./env/bin/pip PYTEST := ./env/bin/pytest + FLAKE8 := ./env/bin/flake8 + BLACK := ./env/bin/black + TOX := ./env/bin/tox endif ifeq ($(PYTHON),) - $(error "Python is not installed. Please install Python 3.") + $(error "Python is not installed. 
Please install Python 3.11+") endif -.PHONY: setup install dev test test-unit test-commands test-integration lint format clean all +.PHONY: help setup install dev test test-unit test-commands test-integration lint format clean all +.PHONY: test-all-versions setup-pyenv install-python-versions check-python-versions +.PHONY: ci-test ci-lint test-matrix clean-all + +# Default target +help: + @echo "Available commands:" + @echo " make setup - Create virtual environment with default Python" + @echo " make install - Install package in virtual environment" + @echo " make dev - Install package in development mode" + @echo " make test - Run tests with single Python version" + @echo " make test-unit - Run unit tests only" + @echo " make test-commands - Run command tests only" + @echo " make test-integration - Run integration tests only" + @echo " make lint - Run linting (flake8)" + @echo " make format - Format code with black" + @echo " make clean - Clean build artifacts and venv" + @echo "" + @echo "Multi-version testing (matches CI):" + @echo " make setup-pyenv - Install pyenv (macOS/Linux)" + @echo " make install-python-versions - Install Python 3.11, 3.12, 3.13" + @echo " make test-all-versions - Test against all Python versions (like CI)" + @echo " make ci-test - Run exact CI test workflow locally" + @echo " make ci-lint - Run exact CI lint workflow locally" + @echo "" + @echo " make all - Setup + dev + test" + @echo " make clean-all - Clean everything including multi-version setups" -setup: +# Check if required Python version is available +check-python-version: + @python3 -c "import sys; exit(0 if sys.version_info >= (3, 11) else 1)" || \ + (echo "Error: Python 3.11+ required. You have: $$(python3 --version)" && \ + echo "Please install Python 3.11+ or run 'make install-python-versions'" && exit 1) + +setup: check-python-version ifeq ($(OS),Windows) "$(PYTHON)" -m venv env @if not exist env\Scripts\activate ( \ echo "Virtual environment creation failed. 
Check Python installation." && exit 1 \ ) - $(PYTHON_EXE) -m $(PIP) install --upgrade pip + $(PYTHON_EXE) -m pip install --upgrade pip else "$(PYTHON)" -m venv env @if [ ! -f "./env/bin/activate" ]; then \ echo "Virtual environment creation failed. Check Python installation." && exit 1; \ fi - $(PYTHON_EXE) -m $(PIP) install --upgrade pip + $(PYTHON_EXE) -m pip install --upgrade pip endif install: @@ -52,63 +98,196 @@ ifeq ($(OS),Windows) echo "Virtual environment not found. Run 'make setup' first." && exit 1 \ ) $(PIP) install -e ".[dev]" + $(PIP) install tox flake8 black else @if [ ! -f "./env/bin/pip" ]; then \ echo "Virtual environment not found. Run 'make setup' first." && exit 1; \ fi $(PIP) install -e ".[dev]" + $(PIP) install tox flake8 black endif test: ifeq ($(OS),Windows) - @if not exist env\Scripts\pip ( \ - echo "Virtual environment not found. Run 'make setup' first." && exit 1 \ + @if not exist env\Scripts\pytest.exe ( \ + echo "Virtual environment not found. Run 'make dev' first." && exit 1 \ ) $(PYTEST) tests else - @if [ ! -f "./env/bin/pip" ]; then \ - echo "Virtual environment not found. Run 'make setup' first." && exit 1; \ + @if [ ! -f "./env/bin/pytest" ]; then \ + echo "Virtual environment not found. Run 'make dev' first." && exit 1; \ fi $(PYTEST) tests endif test-unit: -ifeq ($(OS),Windows) - $(PYTEST) tests/test_*.py -else $(PYTEST) tests/test_*.py -endif test-commands: -ifeq ($(OS),Windows) $(PYTEST) tests/commands/ -else - $(PYTEST) tests/commands/ -endif test-integration: -ifeq ($(OS),Windows) - $(PYTEST) tests/integration/ -else $(PYTEST) tests/integration/ -endif lint: ifeq ($(OS),Windows) - $(PIP) install flake8 && \ - env\Scripts\flake8 ftf_cli tests + @if not exist env\Scripts\flake8.exe ( \ + echo "Virtual environment not found. Run 'make dev' first." 
&& exit 1 \ + ) + $(FLAKE8) ftf_cli tests --count --select=E9,F63,F7,F82 --show-source --statistics + $(FLAKE8) ftf_cli tests --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics else - (PIP) install flake8 && flake8 ftf_cli tests + @if [ ! -f "./env/bin/flake8" ]; then \ + echo "Virtual environment not found. Run 'make dev' first." && exit 1; \ + fi + $(FLAKE8) ftf_cli tests --count --select=E9,F63,F7,F82 --show-source --statistics + $(FLAKE8) ftf_cli tests --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics endif format: ifeq ($(OS),Windows) - $(PIP) install black && \ - env\Scripts\black ftf_cli tests + @if not exist env\Scripts\black.exe ( \ + echo "Virtual environment not found. Run 'make dev' first." && exit 1 \ + ) + $(BLACK) ftf_cli tests +else + @if [ ! -f "./env/bin/black" ]; then \ + echo "Virtual environment not found. Run 'make dev' first." && exit 1; \ + fi + $(BLACK) ftf_cli tests +endif + +# Multi-version testing setup (macOS/Linux) +setup-pyenv: +ifneq ($(OS),Windows) + @echo "Setting up pyenv for multi-version Python testing..." + @if ! 
command -v pyenv >/dev/null 2>&1; then \ + echo "Installing pyenv..."; \ + if command -v brew >/dev/null 2>&1; then \ + brew install pyenv; \ + else \ + curl https://pyenv.run | bash; \ + fi; \ + echo ""; \ + echo "⚠️ Please add the following to your shell profile (~/.bashrc, ~/.zshrc, etc.):"; \ + echo " export PATH=\"\$$HOME/.pyenv/bin:\$$PATH\""; \ + echo " eval \"\$$(pyenv init --path)\""; \ + echo " eval \"\$$(pyenv init -)\""; \ + echo ""; \ + echo "Then restart your shell or run: source ~/.bashrc"; \ + echo "After that, run 'make install-python-versions'"; \ + else \ + echo "pyenv is already installed"; \ + fi +else + @echo "Multi-version testing on Windows requires manual Python installation" + @echo "Please install Python 3.11, 3.12, and 3.13 from python.org" +endif + +install-python-versions: +ifneq ($(OS),Windows) + @echo "Installing Python versions for testing..." + @for version in $(PYTHON_VERSIONS); do \ + echo "Installing Python $$version..."; \ + pyenv install -s $$version; \ + done + @echo "Setting local Python versions..." + pyenv local $(PYTHON_VERSIONS) + @echo "Available Python versions:" + pyenv versions else - $(PIP) install black && black ftf_cli tests + @echo "Please manually install Python 3.11, 3.12, and 3.13 on Windows" endif +check-python-versions: + @echo "Checking available Python versions..." + @for version in $(PYTHON_VERSIONS); do \ + if command -v python$$version >/dev/null 2>&1; then \ + echo "✓ Python $$version: $$(python$$version --version)"; \ + else \ + echo "✗ Python $$version: Not found"; \ + fi; \ + done + +# Create tox.ini for multi-version testing +create-tox-config: + @echo "Creating tox configuration..." 
+ @echo "[tox]" > tox.ini + @echo "envlist = py311,py312,py313" >> tox.ini + @echo "isolated_build = true" >> tox.ini + @echo "" >> tox.ini + @echo "[testenv]" >> tox.ini + @echo "deps =" >> tox.ini + @echo " pytest>=8.3.5" >> tox.ini + @echo " pytest-mock" >> tox.ini + @echo " pyhcl>=0.4.5" >> tox.ini + @echo "commands = python -m pytest tests" >> tox.ini + @echo "install_command = pip install {opts} {packages}" >> tox.ini + @echo "" >> tox.ini + @echo "[testenv:lint]" >> tox.ini + @echo "deps = flake8" >> tox.ini + @echo "commands =" >> tox.ini + @echo " flake8 ftf_cli tests --count --select=E9,F63,F7,F82 --show-source --statistics" >> tox.ini + @echo " flake8 ftf_cli tests --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics" >> tox.ini + @echo "" >> tox.ini + @echo "[testenv:format]" >> tox.ini + @echo "deps = black" >> tox.ini + @echo "commands = black --check ftf_cli tests" >> tox.ini + +# Test against all Python versions (like CI) +test-all-versions: create-tox-config +ifneq ($(OS),Windows) + @if [ ! -f "./env/bin/tox" ]; then \ + echo "Installing tox in virtual environment..."; \ + $(PIP) install tox; \ + fi + @echo "Running tests against all Python versions (matching CI)..." + $(TOX) +else + @echo "Multi-version testing on Windows requires tox to be set up manually" +endif + +# Run exact CI lint workflow locally +ci-lint: + @echo "Running CI lint workflow locally..." + @echo "Setting up Python 3.13 environment..." 
+ @if command -v python3.13 >/dev/null 2>&1; then \ + python3.13 -m venv .ci-lint-env; \ + .ci-lint-env/bin/pip install --upgrade pip; \ + .ci-lint-env/bin/pip install flake8; \ + .ci-lint-env/bin/pip install -e ".[dev]"; \ + echo "Running flake8 (strict)..."; \ + .ci-lint-env/bin/flake8 ftf_cli tests --count --select=E9,F63,F7,F82 --show-source --statistics; \ + echo "Running flake8 (warnings)..."; \ + .ci-lint-env/bin/flake8 ftf_cli tests --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics; \ + rm -rf .ci-lint-env; \ + echo "✓ CI lint workflow completed successfully"; \ + else \ + echo "Python 3.13 not found. Install it first with 'make install-python-versions'"; \ + exit 1; \ + fi + +# Run exact CI test workflow locally +ci-test: create-tox-config + @echo "Running CI test workflow locally..." + @for version in $(PYTHON_VERSIONS); do \ + echo ""; \ + echo "=== Testing with Python $$version ==="; \ + if command -v python$$version >/dev/null 2>&1; then \ + python$$version -m venv .ci-test-env-$$version; \ + .ci-test-env-$$version/bin/pip install --upgrade pip; \ + .ci-test-env-$$version/bin/pip install pytest pytest-mock; \ + .ci-test-env-$$version/bin/pip install -e ".[dev]"; \ + .ci-test-env-$$version/bin/python -m pytest; \ + rm -rf .ci-test-env-$$version; \ + echo "✓ Python $$version tests passed"; \ + else \ + echo "✗ Python $$version not found"; \ + fi; \ + done + @echo "" + @echo "✓ All CI test workflows completed" + clean: ifeq ($(OS),Windows) @if exist env (rmdir /S /Q env) @@ -116,10 +295,15 @@ ifeq ($(OS),Windows) @if exist build (rmdir /S /Q build) @if exist dist (rmdir /S /Q dist) @if exist .pytest_cache (rmdir /S /Q .pytest_cache) + @if exist .tox (rmdir /S /Q .tox) @for /r %%i in (__pycache__) do @if exist "%%i" (rmdir /S /Q "%%i") else - rm -rf env ftf_cli.egg-info build dist .pytest_cache - find . -type d -name "__pycache__" -exec rm -rf {} + + rm -rf env ftf_cli.egg-info build dist .pytest_cache .tox + find . 
-type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true endif +clean-all: clean + rm -rf $(VENV_DIR) .ci-*-env-* tox.ini .python-version + @echo "Cleaned all environments and configurations" + all: setup dev test diff --git a/TESTING.md b/TESTING.md deleted file mode 100644 index 9257f66..0000000 --- a/TESTING.md +++ /dev/null @@ -1,144 +0,0 @@ -# Testing Guide for FTF CLI - -This document explains the testing structure and best practices for contributing to the FTF CLI project. - -## Overview - -The project uses `pytest` for testing and has a structured approach to organizing tests: - -- `tests/`: Main test directory - - `commands/`: Tests for CLI commands - - `integration/`: Integration tests with real CLI execution - - Unit tests for utility functions and other components - -## Running Tests - -You can run tests using the following commands: - -```bash -# Run all tests -make test - -# Run only unit tests -make test-unit - -# Run only command tests -make test-commands - -# Run only integration tests -make test-integration -``` - -Or directly with pytest: - -```bash -# Run all tests -pytest - -# Run specific tests -pytest tests/commands/test_add_variable.py -pytest tests/test_utils.py::test_dict_input -``` - -## Test Structure and Best Practices - -### 1. Unit Tests - -Place these directly in the `tests/` directory. They should test individual utility functions and other components. - -```python -# Example: tests/test_utils.py -import pytest -from ftf_cli.utils import some_function - -def test_some_function(): - result = some_function(input_value) - assert result == expected_value -``` - -### 2. Command Tests - -Place these in the `tests/commands/` directory. They should test the CLI commands using mocking. 
- -```python -# Example: tests/commands/test_add_variable.py -import pytest -from click.testing import CliRunner -from unittest.mock import patch, mock_open -from ftf_cli.commands.add_variable import add_variable - -def test_add_variable(runner): # runner fixture from conftest.py - with patch('builtins.open', mock_open(read_data="mock_data")): - result = runner.invoke(add_variable, ['--arg', 'value']) - assert result.exit_code == 0 - assert "Success message" in result.output -``` - -### 3. Integration Tests - -Place these in the `tests/integration/` directory. They should test the CLI from end to end, calling actual CLI commands. - -```python -# Example: tests/integration/test_cli_commands.py -import subprocess -import os -import pytest - -def test_generate_module(temp_module_dir): # temp_module_dir fixture from conftest.py - result = subprocess.run( - ['ftf', 'generate-module', '--arg', 'value'], - capture_output=True, - text=True - ) - assert result.returncode == 0 - assert os.path.exists(os.path.join(temp_module_dir, 'expected_file')) -``` - -## Fixtures - -Common test fixtures are provided in `tests/conftest.py`: - -- `runner`: A Click test runner for command tests -- `temp_module_dir`: A temporary directory with a basic module structure -- `mock_yaml_validator`: Mocks the YAML validator - -Use these fixtures in your tests where appropriate. - -## Mocking - -When testing commands that interact with files or external services, use mocking: - -```python -# Mock file operations -with patch('builtins.open', mock_open(read_data='file_content')): - # Test code that reads files - -# Mock file existence checks -with patch('os.path.exists', return_value=True): - # Test code that checks if files exist - -# Mock YAML operations -with patch('yaml.safe_load', return_value={'key': 'value'}): - # Test code that loads YAML files -``` - -## Adding New Tests - -When adding new tests: - -1. Follow the existing pattern and structure -2. 
Use descriptive test names that indicate what's being tested -3. Isolate tests from the file system and external services using mocks -4. Group related tests in the same file -5. Add appropriate docstrings to test functions - -## Dependencies - -The project uses these testing dependencies: - -- `pytest`: Main testing framework -- `pytest-mock`: For easy mocking -- `flake8`: For linting (run with `make lint`) -- `black`: For code formatting (run with `make format`) - -These are installed when you run `make dev` or `pip install -e ".[dev]"`. diff --git a/ftf_cli/commands/add_input.py b/ftf_cli/commands/add_input.py index 17db061..63a669f 100644 --- a/ftf_cli/commands/add_input.py +++ b/ftf_cli/commands/add_input.py @@ -140,28 +140,38 @@ def add_input(path, profile, name, display_name, description, output_type): if properties: try: # Assume properties has the expected structure with attributes and interfaces - if (properties.get("type") == "object" and - "properties" in properties and - "attributes" in properties["properties"] and - "interfaces" in properties["properties"]): + if ( + properties.get("type") == "object" + and "properties" in properties + and "attributes" in properties["properties"] + and "interfaces" in properties["properties"] + ): attributes_schema = properties["properties"]["attributes"] interfaces_schema = properties["properties"]["interfaces"] output_schemas[output_name] = { "attributes": attributes_schema, - "interfaces": interfaces_schema + "interfaces": interfaces_schema, } else: click.echo( - f"⚠️ Output {output} does not have expected structure (attributes/interfaces). Using default empty structure.") - output_schemas[output_name] = {"attributes": {}, "interfaces": {}} + f"⚠️ Output {output} does not have expected structure (attributes/interfaces). Using default empty structure." 
+ ) + output_schemas[output_name] = { + "attributes": {}, + "interfaces": {}, + } except Exception as e: - click.echo(f"⚠️ Error parsing properties for output {output}: {e}. Using default empty structure.") + click.echo( + f"⚠️ Error parsing properties for output {output}: {e}. Using default empty structure." + ) output_schemas[output_name] = {"attributes": {}, "interfaces": {}} else: - click.echo(f"⚠️ Output {output} has no properties defined. Using default empty structure.") + click.echo( + f"⚠️ Output {output} has no properties defined. Using default empty structure." + ) output_schemas[output_name] = {"attributes": {}, "interfaces": {}} inputs_var = generate_inputs_variable(output_schemas) diff --git a/ftf_cli/commands/delete_module.py b/ftf_cli/commands/delete_module.py index c9ed69e..59fbbc5 100644 --- a/ftf_cli/commands/delete_module.py +++ b/ftf_cli/commands/delete_module.py @@ -105,4 +105,4 @@ def delete_module(intent, flavor, version, profile, stage): traceback.print_exc() raise click.UsageError( f"❌ Error encountered while deleting module with intent {intent} flavor {flavor} version {version}: {e}" - ) \ No newline at end of file + ) diff --git a/ftf_cli/commands/generate_module.py b/ftf_cli/commands/generate_module.py index 4a85a1b..f948435 100644 --- a/ftf_cli/commands/generate_module.py +++ b/ftf_cli/commands/generate_module.py @@ -75,6 +75,6 @@ def generate_module(path, intent, flavor, cloud, title, description, version): file_name = template_name.replace( ".j2", "" ) # Remove .j2 to get the real file name - with open(os.path.join(module_path, file_name), "w", encoding='utf-8') as f: + with open(os.path.join(module_path, file_name), "w", encoding="utf-8") as f: f.write(rendered_content) click.echo(f"✅ Module generated at: {module_path}") diff --git a/ftf_cli/commands/login.py b/ftf_cli/commands/login.py index c42fd7f..afd7b1d 100644 --- a/ftf_cli/commands/login.py +++ b/ftf_cli/commands/login.py @@ -1,4 +1,5 @@ """Login command for the FTF CLI.""" + 
import configparser import os from urllib.parse import urlparse @@ -30,7 +31,7 @@ def login(profile, username, token, control_plane_url): # Profile doesn't exist, ask user if they want to create it if not click.confirm( f"Profile '{profile}' doesn't exist. Do you want to create it?", - default=True + default=True, ): click.echo("Login cancelled.") return @@ -38,8 +39,10 @@ def login(profile, username, token, control_plane_url): skip_existing_profile_selection = True # Skip existing profile selection if all args provided OR creating new profile - if (not (username and token and control_plane_url and profile) and - not skip_existing_profile_selection): + if ( + not (username and token and control_plane_url and profile) + and not skip_existing_profile_selection + ): # Try to use existing profile if not all arguments are provided if use_existing_profile(): return @@ -121,7 +124,7 @@ def use_existing_profile(): if not click.confirm( "Do you want to use an existing profile or login with a new profile?", - default=False + default=False, ): return False diff --git a/ftf_cli/commands/register_output_type.py b/ftf_cli/commands/register_output_type.py index 24f2043..65865cf 100644 --- a/ftf_cli/commands/register_output_type.py +++ b/ftf_cli/commands/register_output_type.py @@ -4,7 +4,11 @@ import yaml import json from requests import JSONDecodeError -from ftf_cli.utils import is_logged_in, get_profile_with_priority, properties_to_lookup_tree +from ftf_cli.utils import ( + is_logged_in, + get_profile_with_priority, + properties_to_lookup_tree, +) @click.command() @@ -82,7 +86,9 @@ def register_output_type(yaml_path, profile, inferred_from_module): lookup_tree_json = json.dumps(lookup_tree) click.echo(f"✅ Generated lookup tree from properties") except ValueError as e: - raise click.UsageError(f"❌ Error generating lookup tree from properties: {e}") + raise click.UsageError( + f"❌ Error generating lookup tree from properties: {e}" + ) except Exception as e: raise 
click.UsageError(f"❌ Unexpected error generating lookup tree: {e}") diff --git a/ftf_cli/operations.py b/ftf_cli/operations.py index c6b3c27..1b02f7b 100644 --- a/ftf_cli/operations.py +++ b/ftf_cli/operations.py @@ -58,14 +58,14 @@ def create_module_zip(path: str) -> str: def register_module( - control_plane_url: str, - username: str, - token: str, - path: str, - git_url: Optional[str] = None, - git_ref: Optional[str] = None, - is_feature_branch: bool = False, - auto_create: bool = False, + control_plane_url: str, + username: str, + token: str, + path: str, + git_url: Optional[str] = None, + git_ref: Optional[str] = None, + is_feature_branch: bool = False, + auto_create: bool = False, ) -> None: """Register a module with the control plane""" @@ -154,12 +154,12 @@ def register_module( def publish_module( - control_plane_url: str, - username: str, - token: str, - intent: str, - flavor: str, - version: str, + control_plane_url: str, + username: str, + token: str, + intent: str, + flavor: str, + version: str, ) -> None: """Publish a module to make it available for production use""" diff --git a/ftf_cli/schema.py b/ftf_cli/schema.py index 889f230..5755805 100644 --- a/ftf_cli/schema.py +++ b/ftf_cli/schema.py @@ -108,7 +108,7 @@ "x-ui-secret-ref": {"type": "boolean"}, "x-ui-dynamic-enum": { "type": "string", - "pattern": r"^spec\.([a-zA-Z0-9_-]+|\*)(\.[a-zA-Z0-9_-]+|\.\*)*$" + "pattern": r"^spec\.([a-zA-Z0-9_-]+|\*)(\.[a-zA-Z0-9_-]+|\.\*)*$", }, "x-ui-overrides-only": {"type": "boolean"}, "x-ui-override-disable": {"type": "boolean"}, diff --git a/ftf_cli/utils.py b/ftf_cli/utils.py index 552f74c..6861b10 100644 --- a/ftf_cli/utils.py +++ b/ftf_cli/utils.py @@ -58,14 +58,14 @@ def validate_facets_tf_vars(path, filename="variables.tf"): for child in child_nodes: if ( - child.data == "block" - and len(child.children) > 2 - and isinstance(child.children[0], Tree) - and child.children[0].data == "identifier" - and isinstance(child.children[0].children[0], Token) - and 
child.children[0].children[0].type == "NAME" - and child.children[0].children[0].value == "variable" - and child.children[1].type == "STRING_LIT" + child.data == "block" + and len(child.children) > 2 + and isinstance(child.children[0], Tree) + and child.children[0].data == "identifier" + and isinstance(child.children[0].children[0], Token) + and child.children[0].children[0].type == "NAME" + and child.children[0].children[0].value == "variable" + and child.children[1].type == "STRING_LIT" ): var_name = child.children[1].value var_name = var_name.replace('"', "") @@ -114,7 +114,7 @@ def generate_output_tree(obj): def generate_output_lookup_tree(obj): - """Generate a lookup tree to support $ referencing in the control-plane. """ + """Generate a lookup tree to support $ referencing in the control-plane.""" if isinstance(obj, dict): transformed = {} for key, value in obj.items(): @@ -326,7 +326,11 @@ def check_no_array_or_invalid_pattern_in_spec(spec_obj, path="spec"): field_type = value.get("type") override_disable_flag = value.get("x-ui-override-disable", False) overrides_only_flag = value.get("x-ui-overrides-only", False) - if field_type == "array" and not override_disable_flag and not overrides_only_flag: + if ( + field_type == "array" + and not override_disable_flag + and not overrides_only_flag + ): raise click.UsageError( f"Invalid array type found at {path}.{key}. " f"Arrays without x-ui-override-disable or x-ui-overrides-only field are not allowed in spec. Use patternProperties for array-like structures instead or set either x-ui-override-disable or x-ui-overrides-only field to true." 
@@ -336,7 +340,9 @@ def check_no_array_or_invalid_pattern_in_spec(spec_obj, path="spec"): parent_has_yaml_editor = value.get("x-ui-yaml-editor", False) for pattern_key, pp_val in pp.items(): pattern_type = pp_val.get("type") - if not isinstance(pattern_type, str) or (pattern_type != "object" and pattern_type != "string"): + if not isinstance(pattern_type, str) or ( + pattern_type != "object" and pattern_type != "string" + ): raise click.UsageError( f'patternProperties at {path}.{key} with pattern "{pattern_key}" must be of type object or string.' ) @@ -457,10 +463,10 @@ def set_default_profile(profile): if os.path.exists(config_path): config.read(config_path) - if 'default' not in config: - config['default'] = {} + if "default" not in config: + config["default"] = {} - config['default']['profile'] = profile + config["default"]["profile"] = profile with open(config_path, "w") as configfile: config.write(configfile) @@ -477,8 +483,8 @@ def get_default_profile(): if os.path.exists(config_path): config.read(config_path) - if 'default' in config and 'profile' in config['default']: - return config['default']['profile'] + if "default" in config and "profile" in config["default"]: + return config["default"]["profile"] return "default" @@ -584,7 +590,7 @@ def ensure_formatting_for_object(file_path): file.writelines(updated_lines) with open(os.devnull, "w") as devnull: - run(["terraform", "fmt", file_path], stdout=devnull, stderr=devnull) + run(["terraform", "fmt", file_path], stdout=devnull, stderr=devnull) def generate_instance_block(type_tree: dict, description: str) -> str: @@ -769,12 +775,12 @@ def discover_resources(path: str) -> list[dict]: for resource_block in content["resource"]: for resource_type, resources_of_type in resource_block.items(): if resource_type.startswith("__") and resource_type.endswith( - "__" + "__" ): continue for resource_name, resource_config in resources_of_type.items(): if resource_name.startswith( - "__" ) and 
resource_name.endswith("__"): continue resource_address = f"{resource_type}.{resource_name}" @@ -838,6 +844,7 @@ def discover_resources(path: str) -> list[dict]: sys.exit(1) return sorted(resources, key=lambda r: r["address"]) + def transform_properties_to_terraform(properties_obj, level=1): """ Transform JSON Schema properties directly to Terraform-compatible schema. @@ -886,4 +893,4 @@ def transform_properties_to_terraform(properties_obj, level=1): return "bool" else: # Fallback for unknown types - return "any" \ No newline at end of file + return "any" diff --git a/scripts/setup-dev-env.sh b/scripts/setup-dev-env.sh new file mode 100755 index 0000000..596b141 --- /dev/null +++ b/scripts/setup-dev-env.sh @@ -0,0 +1,172 @@ +#!/bin/bash + +# Development Environment Setup Script +# This script sets up a complete development environment matching CI + +set -e + +echo "🚀 Setting up development environment..." + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +print_step() { + echo -e "${BLUE}==>${NC} $1" +} + +print_success() { + echo -e "${GREEN}✓${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}⚠️${NC} $1" +} + +print_error() { + echo -e "${RED}✗${NC} $1" +} + +# Check if we're on macOS +if [[ "$OSTYPE" != "darwin"* ]]; then + print_error "This script is designed for macOS. For other platforms, follow manual setup." + exit 1 +fi + +# Check if Homebrew is installed +print_step "Checking Homebrew installation..." +if ! command -v brew &> /dev/null; then + print_warning "Homebrew not found. Installing Homebrew..." + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +else + print_success "Homebrew is installed" +fi + +# Install pyenv if not present +print_step "Setting up pyenv for Python version management..." +if ! command -v pyenv &> /dev/null; then + print_warning "pyenv not found. Installing pyenv..." 
+ brew install pyenv + + # Add pyenv to shell profile + SHELL_PROFILE="" + if [[ -f ~/.zshrc ]]; then + SHELL_PROFILE=~/.zshrc + elif [[ -f ~/.bashrc ]]; then + SHELL_PROFILE=~/.bashrc + elif [[ -f ~/.bash_profile ]]; then + SHELL_PROFILE=~/.bash_profile + fi + + if [[ -n "$SHELL_PROFILE" ]]; then + echo "" >> "$SHELL_PROFILE" + echo "# pyenv configuration" >> "$SHELL_PROFILE" + echo 'export PATH="$HOME/.pyenv/bin:$PATH"' >> "$SHELL_PROFILE" + echo 'eval "$(pyenv init --path)"' >> "$SHELL_PROFILE" + echo 'eval "$(pyenv init -)"' >> "$SHELL_PROFILE" + print_success "Added pyenv to $SHELL_PROFILE" + fi + + # Load pyenv for current session + export PATH="$HOME/.pyenv/bin:$PATH" + eval "$(pyenv init --path)" + eval "$(pyenv init -)" +else + print_success "pyenv is already installed" +fi + +# Install required Python versions +print_step "Installing Python versions (3.11, 3.12, 3.13)..." +PYTHON_VERSIONS=("3.11" "3.12" "3.13") +for version in "${PYTHON_VERSIONS[@]}"; do + # Get latest patch version + latest_version=$(pyenv install --list | grep -E "^\s*${version}\.[0-9]+$" | tail -1 | tr -d ' ') + if [[ -n "$latest_version" ]]; then + print_step "Installing Python $latest_version..." + pyenv install -s "$latest_version" + print_success "Python $latest_version installed" + else + print_warning "Could not find Python $version in pyenv" + fi +done + +# Set local Python versions for the project +print_step "Setting up local Python versions for project..." +cd "$(dirname "$0")/.." +pyenv local 3.11 3.12 3.13 +print_success "Local Python versions set" + +# Verify installations +print_step "Verifying Python installations..." +for version in "${PYTHON_VERSIONS[@]}"; do + if command -v "python$version" &> /dev/null; then + py_version=$(python$version --version) + print_success "Python $version: $py_version" + else + print_error "Python $version not found in PATH" + fi +done + +# Setup development environment +print_step "Setting up development environment..." 
+make setup +make dev + +print_step "Installing additional development tools..." +./env/bin/pip install tox pre-commit + +# Setup pre-commit hooks +print_step "Setting up pre-commit hooks..." +cat > .pre-commit-config.yaml << 'EOF' +repos: + - repo: https://github.com/psf/black + rev: 23.12.1 + hooks: + - id: black + language_version: python3 + + - repo: https://github.com/pycqa/flake8 + rev: 7.0.0 + hooks: + - id: flake8 + args: [--max-line-length=127, --max-complexity=10] + + - repo: local + hooks: + - id: pytest + name: pytest + entry: ./env/bin/pytest + language: system + types: [python] + pass_filenames: false + always_run: true +EOF + +./env/bin/pre-commit install +print_success "Pre-commit hooks installed" + +# Run initial tests +print_step "Running initial test suite..." +make test + +echo "" +print_success "Development environment setup complete!" +echo "" +echo -e "${BLUE}Available commands:${NC}" +echo " make help - Show all available commands" +echo " make test - Run tests with single Python version" +echo " make test-all-versions - Test against all Python versions (like CI)" +echo " make ci-test - Run exact CI workflow locally" +echo " make ci-lint - Run exact CI lint workflow locally" +echo " make lint - Run linting" +echo " make format - Format code" +echo "" +echo -e "${YELLOW}Next steps:${NC}" +echo "1. Restart your terminal or run: source ~/.zshrc (or ~/.bashrc)" +echo "2. Run 'make test-all-versions' to test against all Python versions" +echo "3. Run 'make ci-test' to run the exact same tests as CI" +echo "" +print_success "Happy coding! 
🎉" diff --git a/tests/commands/test_add_input.py b/tests/commands/test_add_input.py index 81fe220..dc507ae 100644 --- a/tests/commands/test_add_input.py +++ b/tests/commands/test_add_input.py @@ -212,9 +212,7 @@ def test_missing_files_error(self, runner): def test_not_logged_in_error(self, runner, temp_dir): """Test error when user is not logged in.""" - with patch( - "ftf_cli.commands.add_input.is_logged_in", return_value=False - ): + with patch("ftf_cli.commands.add_input.is_logged_in", return_value=False): result = runner.invoke( add_input, [ diff --git a/tests/commands/test_login.py b/tests/commands/test_login.py index 70d6d36..4b80e75 100644 --- a/tests/commands/test_login.py +++ b/tests/commands/test_login.py @@ -1,4 +1,5 @@ """Tests for the login command.""" + import pytest from click.testing import CliRunner from unittest.mock import patch, MagicMock @@ -35,11 +36,12 @@ def test_profile_exists_with_existing_profile(mock_credentials_file): mock_config = configparser.ConfigParser() mock_config.read_string(mock_credentials_file) - with patch('os.path.exists', return_value=True), \ - patch('configparser.ConfigParser') as mock_config_class: + with patch("os.path.exists", return_value=True), patch( + "configparser.ConfigParser" + ) as mock_config_class: mock_config_class.return_value = mock_config - assert profile_exists('test_profile') is True - assert profile_exists('default') is True + assert profile_exists("test_profile") is True + assert profile_exists("default") is True def test_profile_exists_with_non_existing_profile(mock_credentials_file): @@ -47,21 +49,24 @@ def test_profile_exists_with_non_existing_profile(mock_credentials_file): mock_config = configparser.ConfigParser() mock_config.read_string(mock_credentials_file) - with patch('os.path.exists', return_value=True), \ - patch('configparser.ConfigParser') as mock_config_class: + with patch("os.path.exists", return_value=True), patch( + "configparser.ConfigParser" + ) as mock_config_class: 
mock_config_class.return_value = mock_config - assert profile_exists('non_existing') is False + assert profile_exists("non_existing") is False def test_profile_exists_no_credentials_file(): """Test profile_exists returns False when credentials file doesn't exist.""" - with patch('os.path.exists', return_value=False): - assert profile_exists('any_profile') is False + with patch("os.path.exists", return_value=False): + assert profile_exists("any_profile") is False -@patch.object(sys.modules['ftf_cli.commands.login'], 'set_default_profile') -@patch.object(sys.modules['ftf_cli.commands.login'], 'fetch_user_details') -def test_login_with_existing_profile_success(mock_fetch, mock_set_default, mock_credentials_file): +@patch.object(sys.modules["ftf_cli.commands.login"], "set_default_profile") +@patch.object(sys.modules["ftf_cli.commands.login"], "fetch_user_details") +def test_login_with_existing_profile_success( + mock_fetch, mock_set_default, mock_credentials_file +): """Test successful login with existing profile.""" # Mock successful API response mock_response = MagicMock() @@ -71,21 +76,20 @@ def test_login_with_existing_profile_success(mock_fetch, mock_set_default, mock_ mock_config = configparser.ConfigParser() mock_config.read_string(mock_credentials_file) - with patch('os.path.exists', return_value=True), \ - patch('configparser.ConfigParser') as mock_config_class: + with patch("os.path.exists", return_value=True), patch( + "configparser.ConfigParser" + ) as mock_config_class: mock_config_class.return_value = mock_config - result = login_with_existing_profile('test_profile') + result = login_with_existing_profile("test_profile") assert result is True mock_fetch.assert_called_once_with( - 'https://test.example.com', - 'test_user', - 'test_token' + "https://test.example.com", "test_user", "test_token" ) - mock_set_default.assert_called_once_with('test_profile') + mock_set_default.assert_called_once_with("test_profile") 
-@patch.object(sys.modules['ftf_cli.commands.login'], 'fetch_user_details') +@patch.object(sys.modules["ftf_cli.commands.login"], "fetch_user_details") def test_login_with_existing_profile_http_error(mock_fetch, mock_credentials_file): """Test login with existing profile when API returns HTTP error.""" mock_fetch.side_effect = HTTPError("401 Unauthorized") @@ -93,17 +97,20 @@ def test_login_with_existing_profile_http_error(mock_fetch, mock_credentials_fil mock_config = configparser.ConfigParser() mock_config.read_string(mock_credentials_file) - with patch('os.path.exists', return_value=True), \ - patch('configparser.ConfigParser') as mock_config_class: + with patch("os.path.exists", return_value=True), patch( + "configparser.ConfigParser" + ) as mock_config_class: mock_config_class.return_value = mock_config - result = login_with_existing_profile('test_profile') + result = login_with_existing_profile("test_profile") assert result is False -@patch.object(sys.modules['ftf_cli.commands.login'], 'set_default_profile') -@patch.object(sys.modules['ftf_cli.commands.login'], 'fetch_user_details') -def test_login_command_with_existing_profile(mock_fetch, mock_set_default, runner, mock_credentials_file): +@patch.object(sys.modules["ftf_cli.commands.login"], "set_default_profile") +@patch.object(sys.modules["ftf_cli.commands.login"], "fetch_user_details") +def test_login_command_with_existing_profile( + mock_fetch, mock_set_default, runner, mock_credentials_file +): """Test login command with existing profile automatically logs in.""" mock_response = MagicMock() mock_response.raise_for_status.return_value = None @@ -113,12 +120,13 @@ def test_login_command_with_existing_profile(mock_fetch, mock_set_default, runne mock_config = configparser.ConfigParser() mock_config.read_string(mock_credentials_file) - with patch('os.path.exists', return_value=True), \ - patch('configparser.ConfigParser') as mock_config_class: + with patch("os.path.exists", return_value=True), patch( + 
"configparser.ConfigParser" + ) as mock_config_class: mock_config_class.return_value = mock_config - result = runner.invoke(login, ['-p', 'test_profile']) + result = runner.invoke(login, ["-p", "test_profile"]) assert result.exit_code == 0 assert "Using existing profile 'test_profile'" in result.output @@ -127,68 +135,89 @@ def test_login_command_with_existing_profile(mock_fetch, mock_set_default, runne def test_login_command_with_non_existing_profile_user_cancels(runner): """Test login command with non-existing profile when user cancels creation.""" - with patch('os.path.exists', return_value=False): + with patch("os.path.exists", return_value=False): # Simulate user clicking "No" to profile creation - result = runner.invoke(login, ['-p', 'new_profile'], input='n\n') + result = runner.invoke(login, ["-p", "new_profile"], input="n\n") assert result.exit_code == 0 - assert "Profile 'new_profile' doesn't exist. Do you want to create it?" in result.output + assert ( + "Profile 'new_profile' doesn't exist. Do you want to create it?" + in result.output + ) assert "Login cancelled." 
in result.output -@patch.object(sys.modules['ftf_cli.commands.login'], 'set_default_profile') -@patch.object(sys.modules['ftf_cli.commands.login'], 'store_credentials') -@patch.object(sys.modules['ftf_cli.commands.login'], 'fetch_user_details') -def test_login_command_with_non_existing_profile_user_creates(mock_fetch, mock_store, mock_set_default, runner): +@patch.object(sys.modules["ftf_cli.commands.login"], "set_default_profile") +@patch.object(sys.modules["ftf_cli.commands.login"], "store_credentials") +@patch.object(sys.modules["ftf_cli.commands.login"], "fetch_user_details") +def test_login_command_with_non_existing_profile_user_creates( + mock_fetch, mock_store, mock_set_default, runner +): """Test login command with non-existing profile when user creates it.""" mock_response = MagicMock() mock_response.raise_for_status.return_value = None mock_fetch.return_value = mock_response - with patch('os.path.exists', return_value=False): + with patch("os.path.exists", return_value=False): # Simulate user input: Yes to create profile, then credentials - user_input = 'y\nhttps://new.example.com\nnew_user\nnew_token\n' - result = runner.invoke(login, ['-p', 'new_profile'], input=user_input) + user_input = "y\nhttps://new.example.com\nnew_user\nnew_token\n" + result = runner.invoke(login, ["-p", "new_profile"], input=user_input) assert result.exit_code == 0 - assert "Profile 'new_profile' doesn't exist. Do you want to create it?" in result.output + assert ( + "Profile 'new_profile' doesn't exist. Do you want to create it?" + in result.output + ) # Should NOT show existing profiles list assert "Existing profiles found:" not in result.output - assert ("Do you want to use an existing profile or login with a new profile?" - not in result.output) + assert ( + "Do you want to use an existing profile or login with a new profile?" 
+ not in result.output + ) # Should directly prompt for credentials assert "Control Plane URL:" in result.output assert "Username:" in result.output assert "Token:" in result.output assert "✔ Successfully logged in." in result.output mock_store.assert_called_once() - mock_set_default.assert_called_once_with('new_profile') + mock_set_default.assert_called_once_with("new_profile") -@patch.object(sys.modules['ftf_cli.commands.login'], 'set_default_profile') -@patch.object(sys.modules['ftf_cli.commands.login'], 'store_credentials') -@patch.object(sys.modules['ftf_cli.commands.login'], 'fetch_user_details') -def test_login_command_with_all_credentials_provided(mock_fetch, mock_store, mock_set_default, runner): +@patch.object(sys.modules["ftf_cli.commands.login"], "set_default_profile") +@patch.object(sys.modules["ftf_cli.commands.login"], "store_credentials") +@patch.object(sys.modules["ftf_cli.commands.login"], "fetch_user_details") +def test_login_command_with_all_credentials_provided( + mock_fetch, mock_store, mock_set_default, runner +): """Test login command when all credentials are provided via command line.""" mock_response = MagicMock() mock_response.raise_for_status.return_value = None mock_fetch.return_value = mock_response - result = runner.invoke(login, [ - '-p', 'cli_profile', - '-c', 'https://cli.example.com', - '-u', 'cli_user', - '-t', 'cli_token' - ]) + result = runner.invoke( + login, + [ + "-p", + "cli_profile", + "-c", + "https://cli.example.com", + "-u", + "cli_user", + "-t", + "cli_token", + ], + ) assert result.exit_code == 0 assert "✔ Successfully logged in." 
in result.output -@patch.object(sys.modules['ftf_cli.commands.login'], 'set_default_profile') -@patch.object(sys.modules['ftf_cli.commands.login'], 'fetch_user_details') -def test_login_command_no_profile_with_existing_profiles(mock_fetch, mock_set_default, runner, mock_credentials_file): +@patch.object(sys.modules["ftf_cli.commands.login"], "set_default_profile") +@patch.object(sys.modules["ftf_cli.commands.login"], "fetch_user_details") +def test_login_command_no_profile_with_existing_profiles( + mock_fetch, mock_set_default, runner, mock_credentials_file +): """Test login command with no profile specified when profiles exist.""" mock_response = MagicMock() mock_response.raise_for_status.return_value = None @@ -197,29 +226,34 @@ def test_login_command_no_profile_with_existing_profiles(mock_fetch, mock_set_de mock_config = configparser.ConfigParser() mock_config.read_string(mock_credentials_file) - with patch('os.path.exists', return_value=True), \ - patch('configparser.ConfigParser') as mock_config_class: + with patch("os.path.exists", return_value=True), patch( + "configparser.ConfigParser" + ) as mock_config_class: mock_config_class.return_value = mock_config # Simulate user choosing to use existing profile and selecting profile 2 - user_input = 'y\n2\n' + user_input = "y\n2\n" result = runner.invoke(login, [], input=user_input) assert result.exit_code == 0 assert "Existing profiles found:" in result.output assert "1. default" in result.output assert "2. test_profile" in result.output - assert ("Do you want to use an existing profile or login with a new profile?" - in result.output) + assert ( + "Do you want to use an existing profile or login with a new profile?" + in result.output + ) assert "Using profile 'test_profile'" in result.output assert "✔ Successfully logged in." 
in result.output -@patch.object(sys.modules['ftf_cli.commands.login'], 'set_default_profile') -@patch.object(sys.modules['ftf_cli.commands.login'], 'store_credentials') -@patch.object(sys.modules['ftf_cli.commands.login'], 'fetch_user_details') -def test_login_command_no_profile_user_chooses_new_profile(mock_fetch, mock_store, mock_set_default, runner, mock_credentials_file): +@patch.object(sys.modules["ftf_cli.commands.login"], "set_default_profile") +@patch.object(sys.modules["ftf_cli.commands.login"], "store_credentials") +@patch.object(sys.modules["ftf_cli.commands.login"], "fetch_user_details") +def test_login_command_no_profile_user_chooses_new_profile( + mock_fetch, mock_store, mock_set_default, runner, mock_credentials_file +): """Test login command with no profile specified when user chooses to create new profile.""" mock_response = MagicMock() mock_response.raise_for_status.return_value = None @@ -228,19 +262,22 @@ def test_login_command_no_profile_user_chooses_new_profile(mock_fetch, mock_stor mock_config = configparser.ConfigParser() mock_config.read_string(mock_credentials_file) - with patch('os.path.exists', return_value=True), \ - patch('configparser.ConfigParser') as mock_config_class: + with patch("os.path.exists", return_value=True), patch( + "configparser.ConfigParser" + ) as mock_config_class: mock_config_class.return_value = mock_config # Simulate user choosing NOT to use existing profile, then providing credentials - user_input = 'n\nhttps://new.example.com\nnew_user\nnew_token\nnew_profile\n' + user_input = "n\nhttps://new.example.com\nnew_user\nnew_token\nnew_profile\n" result = runner.invoke(login, [], input=user_input) assert result.exit_code == 0 assert "Existing profiles found:" in result.output - assert ("Do you want to use an existing profile or login with a new profile?" - in result.output) + assert ( + "Do you want to use an existing profile or login with a new profile?" 
+ in result.output + ) assert "Control Plane URL:" in result.output assert "Username:" in result.output assert "Token:" in result.output @@ -248,26 +285,30 @@ def test_login_command_no_profile_user_chooses_new_profile(mock_fetch, mock_stor assert "✔ Successfully logged in." in result.output -@patch.object(sys.modules['ftf_cli.commands.login'], 'set_default_profile') -@patch.object(sys.modules['ftf_cli.commands.login'], 'store_credentials') -@patch.object(sys.modules['ftf_cli.commands.login'], 'fetch_user_details') -def test_login_command_no_profile_no_existing_profiles(mock_fetch, mock_store, mock_set_default, runner): +@patch.object(sys.modules["ftf_cli.commands.login"], "set_default_profile") +@patch.object(sys.modules["ftf_cli.commands.login"], "store_credentials") +@patch.object(sys.modules["ftf_cli.commands.login"], "fetch_user_details") +def test_login_command_no_profile_no_existing_profiles( + mock_fetch, mock_store, mock_set_default, runner +): """Test login command with no profile specified when no profiles exist.""" mock_response = MagicMock() mock_response.raise_for_status.return_value = None mock_fetch.return_value = mock_response - with patch('os.path.exists', return_value=False): + with patch("os.path.exists", return_value=False): # Simulate user providing credentials - user_input = 'https://new.example.com\nnew_user\nnew_token\nnew_profile\n' + user_input = "https://new.example.com\nnew_user\nnew_token\nnew_profile\n" result = runner.invoke(login, [], input=user_input) assert result.exit_code == 0 # Should NOT show existing profiles list since none exist assert "Existing profiles found:" not in result.output - assert ("Do you want to use an existing profile or login with a new profile?" - not in result.output) + assert ( + "Do you want to use an existing profile or login with a new profile?" 
+ not in result.output + ) # Should directly prompt for credentials assert "Control Plane URL:" in result.output assert "Username:" in result.output diff --git a/tests/test_utils.py b/tests/test_utils.py index 9d061db..e0e8e41 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -64,6 +64,7 @@ def test_unexpected_type(): # Tests for generate_output_lookup_tree + def test_lookup_tree_dict_input(): """Test generate_output_lookup_tree with a dictionary input.""" input_data = { @@ -202,16 +203,10 @@ def test_simple_object_properties(self): "properties": { "name": {"type": "string"}, "age": {"type": "number"}, - "active": {"type": "boolean"} - } - } - expected = { - "out": { - "name": {}, - "age": {}, - "active": {} - } + "active": {"type": "boolean"}, + }, } + expected = {"out": {"name": {}, "age": {}, "active": {}}} assert properties_to_lookup_tree(properties) == expected def test_nested_object_properties(self): @@ -226,32 +221,25 @@ def test_nested_object_properties(self): "type": "object", "properties": { "name": {"type": "string"}, - "email": {"type": "string"} - } + "email": {"type": "string"}, + }, }, "settings": { "type": "object", - "properties": { - "theme": {"type": "string"} - } - } - } + "properties": {"theme": {"type": "string"}}, + }, + }, }, - "metadata": {"type": "string"} - } + "metadata": {"type": "string"}, + }, } expected = { "out": { "user": { - "profile": { - "name": {}, - "email": {} - }, - "settings": { - "theme": {} - } + "profile": {"name": {}, "email": {}}, + "settings": {"theme": {}}, }, - "metadata": {} + "metadata": {}, } } assert properties_to_lookup_tree(properties) == expected @@ -261,27 +249,17 @@ def test_array_properties(self): properties = { "type": "object", "properties": { - "tags": { - "type": "array", - "items": {"type": "string"} - }, + "tags": {"type": "array", "items": {"type": "string"}}, "users": { "type": "array", "items": { "type": "object", - "properties": { - "name": {"type": "string"} - } - } - } - } - } - 
expected = { - "out": { - "tags": {}, - "users": {} - } + "properties": {"name": {"type": "string"}}, + }, + }, + }, } + expected = {"out": {"tags": {}, "users": {}}} assert properties_to_lookup_tree(properties) == expected def test_mixed_types(self): @@ -297,34 +275,24 @@ def test_mixed_types(self): "properties": { "host": {"type": "string"}, "port": {"type": "number"}, - "enabled": {"type": "boolean"} - } + "enabled": {"type": "boolean"}, + }, }, - "cache_servers": { - "type": "array", - "items": {"type": "string"} - } - } + "cache_servers": {"type": "array", "items": {"type": "string"}}, + }, }, "version": {"type": "string"}, - "features": { - "type": "array", - "items": {"type": "string"} - } - } + "features": {"type": "array", "items": {"type": "string"}}, + }, } expected = { "out": { "config": { - "database": { - "host": {}, - "port": {}, - "enabled": {} - }, - "cache_servers": {} + "database": {"host": {}, "port": {}, "enabled": {}}, + "cache_servers": {}, }, "version": {}, - "features": {} + "features": {}, } } assert properties_to_lookup_tree(properties) == expected @@ -339,8 +307,8 @@ def test_expected_attributes_interfaces_structure(self): "properties": { "cluster_name": {"type": "string"}, "version": {"type": "string"}, - "endpoint": {"type": "string"} - } + "endpoint": {"type": "string"}, + }, }, "interfaces": { "type": "object", @@ -351,8 +319,8 @@ def test_expected_attributes_interfaces_structure(self): "username": {"type": "string"}, "password": {"type": "string"}, "host": {"type": "string"}, - "port": {"type": "number"} - } + "port": {"type": "number"}, + }, }, "writer": { "type": "object", @@ -360,44 +328,27 @@ def test_expected_attributes_interfaces_structure(self): "username": {"type": "string"}, "password": {"type": "string"}, "host": {"type": "string"}, - "port": {"type": "number"} - } - } - } - } - } + "port": {"type": "number"}, + }, + }, + }, + }, + }, } expected = { "out": { - "attributes": { - "cluster_name": {}, - "version": {}, - 
"endpoint": {} - }, + "attributes": {"cluster_name": {}, "version": {}, "endpoint": {}}, "interfaces": { - "reader": { - "username": {}, - "password": {}, - "host": {}, - "port": {} - }, - "writer": { - "username": {}, - "password": {}, - "host": {}, - "port": {} - } - } + "reader": {"username": {}, "password": {}, "host": {}, "port": {}}, + "writer": {"username": {}, "password": {}, "host": {}, "port": {}}, + }, } } assert properties_to_lookup_tree(properties) == expected def test_empty_object(self): """Test with empty object properties.""" - properties = { - "type": "object", - "properties": {} - } + properties = {"type": "object", "properties": {}} expected = {"out": {}} assert properties_to_lookup_tree(properties) == expected @@ -420,12 +371,7 @@ def test_invalid_input_not_dict(self): def test_invalid_nested_schema(self): """Test with invalid nested schema object.""" - properties = { - "type": "object", - "properties": { - "invalid_field": "not_a_dict" - } - } + properties = {"type": "object", "properties": {"invalid_field": "not_a_dict"}} with pytest.raises(ValueError, match="Schema object must be a dictionary"): properties_to_lookup_tree(properties) @@ -440,8 +386,8 @@ def test_simple_object(self): "properties": { "name": {"type": "string"}, "age": {"type": "number"}, - "active": {"type": "boolean"} - } + "active": {"type": "boolean"}, + }, } result = transform_properties_to_terraform(properties) @@ -463,10 +409,10 @@ def test_nested_object(self): "type": "object", "properties": { "host": {"type": "string"}, - "port": {"type": "number"} - } + "port": {"type": "number"}, + }, } - } + }, } result = transform_properties_to_terraform(properties) @@ -480,18 +426,10 @@ def test_array_types(self): properties = { "type": "object", "properties": { - "tags": { - "type": "array", - "items": {"type": "string"} - }, - "counts": { - "type": "array", - "items": {"type": "number"} - }, - "simple_array": { - "type": "array" - } - } + "tags": {"type": "array", "items": 
{"type": "string"}}, + "counts": {"type": "array", "items": {"type": "number"}}, + "simple_array": {"type": "array"}, + }, } result = transform_properties_to_terraform(properties) @@ -529,19 +467,17 @@ def test_indentation_levels(self): "properties": { "level2": { "type": "object", - "properties": { - "field": {"type": "string"} - } + "properties": {"field": {"type": "string"}}, } - } + }, } - } + }, } result = transform_properties_to_terraform(properties, level=1) # Check indentation patterns (should have proper spacing) - lines = result.split('\n') + lines = result.split("\n") # Should have different indentation levels - assert any(' level1 = object({' in line for line in lines) - assert any(' level2 = object({' in line for line in lines) - assert any(' field = string' in line for line in lines) + assert any(" level1 = object({" in line for line in lines) + assert any(" level2 = object({" in line for line in lines) + assert any(" field = string" in line for line in lines) diff --git a/tests/test_utils_validation.py b/tests/test_utils_validation.py index 375ff02..0ea39bc 100644 --- a/tests/test_utils_validation.py +++ b/tests/test_utils_validation.py @@ -1,6 +1,9 @@ import pytest import click -from ftf_cli.utils import check_no_array_or_invalid_pattern_in_spec, check_conflicting_ui_properties +from ftf_cli.utils import ( + check_no_array_or_invalid_pattern_in_spec, + check_conflicting_ui_properties, +) def test_no_array_type_pass(): @@ -21,8 +24,8 @@ def test_pattern_properties_value_not_dict_raises(): with pytest.raises(click.UsageError) as excinfo: check_no_array_or_invalid_pattern_in_spec(spec) assert ( - 'patternProperties at spec.some_field with pattern "^pattern$" must be of type object or string' - in str(excinfo.value) + 'patternProperties at spec.some_field with pattern "^pattern$" must be of type object or string' + in str(excinfo.value) ) @@ -62,7 +65,7 @@ def test_no_conflicting_properties_pass(): "field2": {"type": "string", "x-ui-yaml-editor": True}, 
"field3": {"type": "object", "patternProperties": {"^.*$": {"type": "object"}}}, "field4": {"type": "string", "x-ui-override-disable": True}, - "field5": {"type": "string", "x-ui-overrides-only": True} + "field5": {"type": "string", "x-ui-overrides-only": True}, } # Should pass silently check_conflicting_ui_properties(spec) @@ -74,7 +77,7 @@ def test_pattern_properties_with_yaml_editor_raises(): "field": { "type": "object", "patternProperties": {"^.*$": {"type": "object"}}, - "x-ui-yaml-editor": True + "x-ui-yaml-editor": True, } } with pytest.raises(click.UsageError) as excinfo: @@ -91,7 +94,7 @@ def test_override_disable_with_overrides_only_raises(): "field": { "type": "string", "x-ui-override-disable": True, - "x-ui-overrides-only": True + "x-ui-overrides-only": True, } } with pytest.raises(click.UsageError) as excinfo: @@ -99,7 +102,9 @@ def test_override_disable_with_overrides_only_raises(): assert "Configuration conflict at spec.field" in str(excinfo.value) assert "x-ui-override-disable: true" in str(excinfo.value) assert "x-ui-overrides-only: true" in str(excinfo.value) - assert "cannot be overridden and will only have a default value" in str(excinfo.value) + assert "cannot be overridden and will only have a default value" in str( + excinfo.value + ) assert "must be specified at environment level via overrides" in str(excinfo.value) @@ -111,7 +116,7 @@ def test_nested_conflicting_properties_raises(): "field": { "type": "string", "x-ui-override-disable": True, - "x-ui-overrides-only": True + "x-ui-overrides-only": True, } } } @@ -127,7 +132,7 @@ def test_pattern_properties_with_yaml_editor_false_pass(): "field": { "type": "object", "patternProperties": {"^.*$": {"type": "object"}}, - "x-ui-yaml-editor": False + "x-ui-yaml-editor": False, } } # Should pass silently @@ -140,7 +145,7 @@ def test_override_disable_false_with_overrides_only_true_pass(): "field": { "type": "string", "x-ui-override-disable": False, - "x-ui-overrides-only": True + 
"x-ui-overrides-only": True, } } # Should pass silently @@ -153,7 +158,7 @@ def test_override_disable_true_with_overrides_only_false_pass(): "field": { "type": "string", "x-ui-override-disable": True, - "x-ui-overrides-only": False + "x-ui-overrides-only": False, } } # Should pass silently @@ -166,13 +171,13 @@ def test_multiple_conflicts_in_different_fields(): "field1": { "type": "object", "patternProperties": {"^.*$": {"type": "object"}}, - "x-ui-yaml-editor": True + "x-ui-yaml-editor": True, }, "field2": { "type": "string", "x-ui-override-disable": True, - "x-ui-overrides-only": True - } + "x-ui-overrides-only": True, + }, } with pytest.raises(click.UsageError) as excinfo: check_conflicting_ui_properties(spec) @@ -189,10 +194,6 @@ def test_empty_spec_pass(): def test_non_dict_values_ignored(): """Test that non-dict values are ignored gracefully.""" - spec = { - "field1": "string_value", - "field2": 123, - "field3": {"type": "string"} - } + spec = {"field1": "string_value", "field2": 123, "field3": {"type": "string"}} # Should pass silently check_conflicting_ui_properties(spec) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..75b983a --- /dev/null +++ b/tox.ini @@ -0,0 +1,21 @@ +[tox] +envlist = py311,py312,py313 +isolated_build = true + +[testenv] +deps = + pytest>=8.3.5 + pytest-mock + pyhcl>=0.4.5 +commands = python -m pytest tests +install_command = pip install {opts} {packages} + +[testenv:lint] +deps = flake8 +commands = + flake8 ftf_cli tests --count --select=E9,F63,F7,F82 --show-source --statistics + flake8 ftf_cli tests --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + +[testenv:format] +deps = black +commands = black --check ftf_cli tests