diff --git a/CHANGELOG.md b/CHANGELOG.md index e4fbcdd..0eb5fbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added +- **Automatic Binary Download**: SDK now automatically downloads capiscio-core binary if not found + - Downloads from GitHub releases (defaults to v2.4.0) + - Platform detection for macOS (arm64/x86_64), Linux (arm64/x86_64), and Windows + - Binary caching in `~/.capiscio/bin/` directory + - Automatic executable permissions for Unix-like systems + - Fallback search order: `CAPISCIO_BINARY` env var → local development path → system PATH → cached binary → auto-download + +### Changed +- **Improved Process Management**: Enhanced error logging and binary discovery + ## [2.4.1] - 2026-02-08 ### Added diff --git a/README.md b/README.md index c022b58..e230639 100644 --- a/README.md +++ b/README.md @@ -547,10 +547,21 @@ if pm.is_running(): **Auto-Start Behavior:** - ✅ Automatically downloads `capiscio-core` binary if not found + - Downloads from GitHub releases (capiscio/capiscio-core) + - Supports macOS (arm64/x86_64), Linux (arm64/x86_64), and Windows + - Caches binary in `~/.capiscio/bin/` for reuse + - Sets executable permissions automatically on Unix-like systems - ✅ Starts on Unix socket by default (`~/.capiscio/rpc.sock`) - ✅ Handles server crashes and restarts - ✅ Cleans up on process exit +**Binary Search Order:** +1. `CAPISCIO_BINARY` environment variable (if set) +2. `capiscio-core/bin/capiscio` relative to SDK (development mode) +3. System PATH (`capiscio-core` command) +4. Previously downloaded binary in `~/.capiscio/bin/` +5. Auto-download from GitHub releases (latest compatible version) + ## How It Works ### 1. 
The Handshake diff --git a/capiscio_sdk/_rpc/process.py b/capiscio_sdk/_rpc/process.py index 72dfd95..0cf8263 100644 --- a/capiscio_sdk/_rpc/process.py +++ b/capiscio_sdk/_rpc/process.py @@ -1,17 +1,29 @@ """Process manager for the capiscio-core gRPC server.""" import atexit +import logging import os +import platform import shutil +import stat import subprocess import time from pathlib import Path -from typing import Optional +from typing import Optional, Tuple + +import httpx + +logger = logging.getLogger(__name__) # Default socket path DEFAULT_SOCKET_DIR = Path.home() / ".capiscio" DEFAULT_SOCKET_PATH = DEFAULT_SOCKET_DIR / "rpc.sock" +# Binary download configuration +CORE_VERSION = "2.4.0" +GITHUB_REPO = "capiscio/capiscio-core" +CACHE_DIR = DEFAULT_SOCKET_DIR / "bin" + class ProcessManager: """Manages the capiscio-core gRPC server process. @@ -72,8 +84,9 @@ def find_binary(self) -> Optional[Path]: Search order: 1. CAPISCIO_BINARY environment variable - 2. capiscio-core/bin/capiscio relative to SDK + 2. capiscio-core/bin/capiscio relative to SDK (development) 3. System PATH + 4. 
Downloaded binary in ~/.capiscio/bin/ """ # Check environment variable env_path = os.environ.get("CAPISCIO_BINARY") @@ -96,7 +109,85 @@ def find_binary(self) -> Optional[Path]: if which_result: return Path(which_result) + # Check previously downloaded binary + cached = self._get_cached_binary_path() + if cached.exists(): + return cached + return None + + @staticmethod + def _get_platform_info() -> Tuple[str, str]: + """Determine OS and architecture for binary download.""" + system = platform.system().lower() + machine = platform.machine().lower() + + if system == "darwin": + os_name = "darwin" + elif system == "linux": + os_name = "linux" + elif system == "windows": + os_name = "windows" + else: + raise RuntimeError(f"Unsupported operating system: {system}") + + if machine in ("x86_64", "amd64"): + arch_name = "amd64" + elif machine in ("arm64", "aarch64"): + arch_name = "arm64" + else: + raise RuntimeError(f"Unsupported architecture: {machine}") + + return os_name, arch_name + + @staticmethod + def _get_cached_binary_path() -> Path: + """Get the path where the downloaded binary would be cached.""" + os_name, arch_name = ProcessManager._get_platform_info() + ext = ".exe" if os_name == "windows" else "" + filename = f"capiscio-{os_name}-{arch_name}{ext}" + return CACHE_DIR / CORE_VERSION / filename + + def _download_binary(self) -> Path: + """Download the capiscio-core binary for the current platform. + + Downloads from GitHub releases to ~/.capiscio/bin/{version}/{filename}. + Returns the path to the executable. 
+ """ + os_name, arch_name = self._get_platform_info() + target_path = self._get_cached_binary_path() + + if target_path.exists(): + return target_path + + ext = ".exe" if os_name == "windows" else "" + filename = f"capiscio-{os_name}-{arch_name}{ext}" + url = f"https://github.com/{GITHUB_REPO}/releases/download/v{CORE_VERSION}/{filename}" + + logger.info("Downloading capiscio-core v%s for %s/%s...", CORE_VERSION, os_name, arch_name) + + target_path.parent.mkdir(parents=True, exist_ok=True) + try: + with httpx.stream("GET", url, follow_redirects=True, timeout=60.0) as resp: + resp.raise_for_status() + with open(target_path, "wb") as f: + for chunk in resp.iter_bytes(chunk_size=8192): + f.write(chunk) + + # Make executable + st = os.stat(target_path) + os.chmod(target_path, st.st_mode | stat.S_IEXEC) + + logger.info("Installed capiscio-core v%s at %s", CORE_VERSION, target_path) + return target_path + + except Exception as e: + if target_path.exists(): + target_path.unlink() + raise RuntimeError( + f"Failed to download capiscio-core from {url}: {e}\n" + "You can also set CAPISCIO_BINARY to point to an existing binary." + ) from e def ensure_running( self, @@ -129,12 +220,7 @@ def ensure_running( # Find binary binary = self.find_binary() if binary is None: - raise RuntimeError( - "capiscio binary not found. Please either:\n" - " 1. Set CAPISCIO_BINARY environment variable\n" - " 2. Install capiscio-core and add to PATH\n" - " 3. 
Build capiscio-core locally" - ) + binary = self._download_binary() self._binary_path = binary # Set up socket path diff --git a/capiscio_sdk/badge_keeper.py b/capiscio_sdk/badge_keeper.py index 5b215d9..b328161 100644 --- a/capiscio_sdk/badge_keeper.py +++ b/capiscio_sdk/badge_keeper.py @@ -201,9 +201,13 @@ def _run_keeper(self) -> None: """Background thread that runs the keeper loop.""" try: # Initialize RPC client + # When rpc_address is None, CapiscioRPCClient auto-starts capiscio-core + # via ProcessManager (socket at ~/.capiscio/rpc.sock). + # Only pass an explicit address if one was configured. self._rpc_client = CapiscioRPCClient( - address=self.config.rpc_address or "unix:///tmp/capiscio.sock" + address=self.config.rpc_address, ) + self._rpc_client.connect() logger.debug("BadgeKeeper thread started, streaming events from core...") diff --git a/capiscio_sdk/connect.py b/capiscio_sdk/connect.py index 2722cbf..f98e73f 100644 --- a/capiscio_sdk/connect.py +++ b/capiscio_sdk/connect.py @@ -109,7 +109,9 @@ def _ensure_did_registered( } payload = {"did": did} if public_key_jwk: - payload["publicKey"] = public_key_jwk + # Server expects publicKey as a JSON string (Go *string), not a raw object. + # The string must contain a valid Ed25519 JWK per RFC-003. + payload["publicKey"] = json.dumps(public_key_jwk) if isinstance(public_key_jwk, dict) else public_key_jwk try: resp = httpx.patch(url, headers=headers, json=payload, timeout=30.0) @@ -220,6 +222,8 @@ def connect( keys_dir: Optional[Path] = None, auto_badge: bool = True, dev_mode: bool = False, + domain: Optional[str] = None, + agent_card: Optional[dict] = None, ) -> AgentIdentity: """ Connect to CapiscIO and get a fully-configured agent identity. 
@@ -239,6 +243,8 @@ def connect( keys_dir: Directory for keys (default: ~/.capiscio/keys/{agent_id}/) auto_badge: Whether to automatically request a badge dev_mode: Use self-signed badges (Trust Level 0) + domain: Agent domain for badge issuance (default: derived from server_url host) + agent_card: A2A Agent Card dict to store in the registry (displayed in dashboard) Returns: AgentIdentity with full credentials and methods @@ -256,6 +262,8 @@ def connect( keys_dir=keys_dir, auto_badge=auto_badge, dev_mode=dev_mode, + domain=domain, + agent_card=agent_card, ) return connector.connect() @@ -300,6 +308,8 @@ def __init__( keys_dir: Optional[Path], auto_badge: bool, dev_mode: bool, + domain: Optional[str] = None, + agent_card: Optional[dict] = None, ): self.api_key = api_key self.name = name @@ -308,6 +318,13 @@ def __init__( self.keys_dir = keys_dir self.auto_badge = auto_badge self.dev_mode = dev_mode + self.agent_card = agent_card + # Derive domain: explicit > hostname from server_url + if domain: + self.domain = domain + else: + from urllib.parse import urlparse + self.domain = urlparse(self.server_url).hostname or "localhost" # HTTP client for registry API self._client = httpx.Client( @@ -346,6 +363,11 @@ def connect(self) -> AgentIdentity: did = self._init_identity() logger.info(f"DID: {did}") + # Step 3.5: Activate agent on server + # The DB defaults agents to "inactive" — we need to explicitly set "active" + # after successful identity initialization. 
+ self._activate_agent() + # Step 4: Set up badge (if auto_badge) badge = None badge_expires_at = None @@ -393,7 +415,7 @@ def _ensure_agent(self) -> Dict[str, Any]: try: if self.agent_id: # Fetch specific agent - resp = self._client.get(f"/v1/agents/{self.agent_id}") + resp = self._client.get(f"/v1/sdk/agents/{self.agent_id}") if resp.status_code == 200: data = resp.json() return data.get("data", data) @@ -409,7 +431,7 @@ def _ensure_agent(self) -> Dict[str, Any]: return local_agent # List agents and find by name or use first one - resp = self._client.get("/v1/agents") + resp = self._client.get("/v1/sdk/agents") if resp.status_code != 200: raise RuntimeError(f"Failed to list agents (status {resp.status_code})") except httpx.RequestError as e: @@ -457,7 +479,7 @@ def _find_agent_from_local_keys(self) -> Optional[Dict[str, Any]]: if local_did: agent_id = user_keys_dir.name try: - resp = self._client.get(f"/v1/agents/{agent_id}") + resp = self._client.get(f"/v1/sdk/agents/{agent_id}") if resp.status_code == 200: agent_data = resp.json().get("data", resp.json()) server_did = agent_data.get("did") @@ -498,7 +520,7 @@ def _find_agent_from_local_keys(self) -> Optional[Dict[str, Any]]: # Verify agent exists on server with matching DID try: - resp = self._client.get(f"/v1/agents/{agent_id}") + resp = self._client.get(f"/v1/sdk/agents/{agent_id}") if resp.status_code == 200: agent_data = resp.json().get("data", resp.json()) server_did = agent_data.get("did") @@ -521,7 +543,7 @@ def _create_agent(self) -> Dict[str, Any]: name = self.name or f"Agent-{os.urandom(4).hex()}" try: - resp = self._client.post("/v1/agents", json={ + resp = self._client.post("/v1/sdk/agents", json={ "name": name, "protocol": "a2a", }) @@ -616,7 +638,7 @@ def _ensure_did_registered(self, did: str, public_jwk: dict) -> Optional[str]: """ try: # Check if server already has a DID for this agent - resp = self._client.get(f"/v1/agents/{self.agent_id}") + resp = 
self._client.get(f"/v1/sdk/agents/{self.agent_id}") if resp.status_code != 200: logger.warning(f"Failed to check agent DID status: {resp.status_code}") return None @@ -634,9 +656,12 @@ def _ensure_did_registered(self, did: str, public_jwk: dict) -> Optional[str]: # Server has no DID - try to register using PATCH (partial update) logger.info("Registering DID with server...") + # Server expects publicKey as a JSON string (Go *string), not a raw object. + # The string must contain a valid Ed25519 JWK per RFC-003. + pk_str = json.dumps(public_jwk) if isinstance(public_jwk, dict) else public_jwk resp = self._client.patch( f"/v1/sdk/agents/{self.agent_id}/identity", - json={"did": did, "publicKey": public_jwk}, + json={"did": did, "publicKey": pk_str}, ) if resp.status_code == 200: @@ -654,6 +679,51 @@ def _ensure_did_registered(self, did: str, public_jwk: dict) -> Optional[str]: return None + def _activate_agent(self): + """Set agent status to 'active' on the server. + + The DB defaults agents to 'inactive'. After successful identity + initialization, we activate the agent so the dashboard shows + the correct status and badge flow can proceed. + + Uses GET-then-PUT to avoid overwriting existing fields with zero values, + since the server's UpdateAgent writes all fields from the map. 
+ """ + try: + # First, fetch the current agent data to preserve existing fields + resp = self._client.get(f"/v1/sdk/agents/{self.agent_id}") + if resp.status_code != 200: + logger.debug(f"Could not fetch agent for activation: {resp.status_code}") + return + + agent_data = resp.json().get("data", resp.json()) + + # Merge: keep all existing fields, update status, name, domain, and agent card + agent_data["status"] = "active" + if self.name: + agent_data["name"] = self.name + if self.domain: + agent_data["domain"] = self.domain + if self.agent_card: + agent_data["agentCard"] = self.agent_card + + # Remove server-managed fields that shouldn't be sent back + for field in ("created_at", "updated_at", "user_id", "org_id", "trust_level"): + agent_data.pop(field, None) + + resp = self._client.put( + f"/v1/sdk/agents/{self.agent_id}", + json=agent_data, + ) + + if resp.status_code == 200: + logger.info("Agent activated on server") + else: + logger.debug(f"Agent activation returned {resp.status_code} - non-critical") + except Exception as e: + # Don't fail connection just because activation failed + logger.debug(f"Agent activation failed: {e} - non-critical") + def _setup_badge(self): """Set up BadgeKeeper for automatic badge management.""" try: diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md index 0592007..d8f80d2 100644 --- a/docs/getting-started/installation.md +++ b/docs/getting-started/installation.md @@ -11,6 +11,7 @@ keywords: A2A Security installation, Python middleware, agent protection, pip in - **Python:** 3.10 or higher - **Operating System:** Linux, macOS, or Windows - **Dependencies:** Automatically installed via pip +- **capiscio-core Binary:** Automatically downloaded if not found (no manual installation needed) ## Install from PyPI diff --git a/docs/guides/configuration.md b/docs/guides/configuration.md index dc72a20..e3a41ef 100644 --- a/docs/guides/configuration.md +++ b/docs/guides/configuration.md @@ -594,6 
+594,37 @@ Load configuration from environment variables using `SecurityConfig.from_env()`. | `CAPISCIO_FAIL_MODE` | string | `block` | Fail mode: block, monitor, log | | `CAPISCIO_TIMEOUT_MS` | int | `5000` | Validation timeout (milliseconds) | +### Binary Management Variables + +The SDK automatically manages the `capiscio-core` binary. These variables customize binary discovery and download behavior: + +| Variable | Type | Default | Description | +|----------|------|---------|-------------| +| `CAPISCIO_BINARY` | string | (auto-detect) | Path to capiscio-core binary (overrides auto-detection) | + +**Binary Search Order:** +1. `CAPISCIO_BINARY` environment variable (if set) +2. `capiscio-core/bin/capiscio` relative to SDK (development mode) +3. System PATH (`capiscio-core` command) +4. Previously cached binary in `~/.capiscio/bin/` +5. **Auto-download from GitHub releases** (v2.4.0 compatible) + +**Auto-Download Features:** +- ✅ Platform detection (macOS arm64/x86_64, Linux arm64/x86_64, Windows) +- ✅ Binary caching in `~/.capiscio/bin/` directory +- ✅ Automatic executable permissions on Unix-like systems +- ✅ No manual installation required + +**Example: Custom Binary Location** +```bash +# Use a specific binary version +export CAPISCIO_BINARY=/opt/capiscio/v2.4.0/capiscio-core + +# Or specify in code +import os +os.environ['CAPISCIO_BINARY'] = '/opt/capiscio/v2.4.0/capiscio-core' +``` + ### Example: Docker Compose ```yaml diff --git a/tests/unit/test_connect.py b/tests/unit/test_connect.py index df0852e..89c87f2 100644 --- a/tests/unit/test_connect.py +++ b/tests/unit/test_connect.py @@ -215,6 +215,8 @@ def test_connect_calls_connector(self): keys_dir=None, auto_badge=True, dev_mode=False, + domain=None, + agent_card=None, ) mock_connect.assert_called_once() assert result == mock_identity @@ -359,7 +361,7 @@ def test_ensure_agent_with_agent_id(self): result = connector._ensure_agent() - 
connector._client.get.assert_called_once_with("/v1/agents/specific-agent-id") + connector._client.get.assert_called_once_with("/v1/sdk/agents/specific-agent-id") assert result == {"id": "specific-agent-id", "name": "My Agent"} def test_ensure_agent_not_found(self): @@ -454,7 +456,7 @@ def test_create_agent(self): result = connector._create_agent() - connector._client.post.assert_called_once_with("/v1/agents", json={ + connector._client.post.assert_called_once_with("/v1/sdk/agents", json={ "name": "New Agent", "protocol": "a2a", }) @@ -920,7 +922,7 @@ def test_server_returns_error(self, tmp_path): # Should not raise, just log warning connector._ensure_did_registered("did:key:z6MkTest", {"kty": "OKP", "kid": "did:key:z6MkTest"}) - mock_client.get.assert_called_once_with("/v1/agents/agent-123") + mock_client.get.assert_called_once_with("/v1/sdk/agents/agent-123") def test_server_has_same_did(self, tmp_path): """Test _ensure_did_registered when server already has the same DID.""" diff --git a/tests/unit/test_process.py b/tests/unit/test_process.py new file mode 100644 index 0000000..9414d3e --- /dev/null +++ b/tests/unit/test_process.py @@ -0,0 +1,216 @@ +"""Unit tests for capiscio_sdk._rpc.process module.""" + +import os +import platform +import pytest +from pathlib import Path +from unittest.mock import MagicMock, patch, mock_open + +from capiscio_sdk._rpc.process import ProcessManager, CORE_VERSION, CACHE_DIR + + +class TestProcessManager: + """Tests for ProcessManager class.""" + + def test_get_platform_info_darwin_x86_64(self): + """Test platform detection for macOS x86_64.""" + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Darwin"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="x86_64"): + os_name, arch_name = ProcessManager._get_platform_info() + assert os_name == "darwin" + assert arch_name == "amd64" + + def test_get_platform_info_darwin_arm64(self): + """Test platform detection for macOS ARM64.""" + with 
patch("capiscio_sdk._rpc.process.platform.system", return_value="Darwin"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="arm64"): + os_name, arch_name = ProcessManager._get_platform_info() + assert os_name == "darwin" + assert arch_name == "arm64" + + def test_get_platform_info_linux_x86_64(self): + """Test platform detection for Linux x86_64.""" + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Linux"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="x86_64"): + os_name, arch_name = ProcessManager._get_platform_info() + assert os_name == "linux" + assert arch_name == "amd64" + + def test_get_platform_info_linux_aarch64(self): + """Test platform detection for Linux ARM64.""" + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Linux"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="aarch64"): + os_name, arch_name = ProcessManager._get_platform_info() + assert os_name == "linux" + assert arch_name == "arm64" + + def test_get_platform_info_windows_amd64(self): + """Test platform detection for Windows x86_64.""" + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Windows"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="amd64"): + os_name, arch_name = ProcessManager._get_platform_info() + assert os_name == "windows" + assert arch_name == "amd64" + + def test_get_platform_info_unsupported_os(self): + """Test platform detection with unsupported OS.""" + with patch("capiscio_sdk._rpc.process.platform.system", return_value="FreeBSD"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="x86_64"): + with pytest.raises(RuntimeError, match="Unsupported operating system"): + ProcessManager._get_platform_info() + + def test_get_platform_info_unsupported_arch(self): + """Test platform detection with unsupported architecture.""" + with patch("capiscio_sdk._rpc.process.platform.system", 
return_value="Linux"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="mips"): + with pytest.raises(RuntimeError, match="Unsupported architecture"): + ProcessManager._get_platform_info() + + def test_get_cached_binary_path(self): + """Test cached binary path generation.""" + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Linux"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="x86_64"): + path = ProcessManager._get_cached_binary_path() + expected = CACHE_DIR / CORE_VERSION / "capiscio-linux-amd64" + assert path == expected + + def test_get_cached_binary_path_windows(self): + """Test cached binary path generation for Windows.""" + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Windows"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="x86_64"): + path = ProcessManager._get_cached_binary_path() + expected = CACHE_DIR / CORE_VERSION / "capiscio-windows-amd64.exe" + assert path == expected + + def test_find_binary_env_var(self): + """Test find_binary checks CAPISCIO_BINARY environment variable.""" + pm = ProcessManager() + test_path = "/usr/local/bin/custom-capiscio" + + # We can't fully test this without the file existing, but we can verify + # the env var is checked by ensuring non-existent path returns None + with patch.dict(os.environ, {"CAPISCIO_BINARY": test_path}): + # Mock ALL Path.exists() calls to return False so it doesn't find dev binary + # but then the env var path also returns False + with patch.object(Path, "exists", return_value=False): + with patch("shutil.which", return_value=None): + result = pm.find_binary() + # With env var path not existing and dev binary not existing, + # should return None + assert result is None + + def test_find_binary_system_path(self): + """Test find_binary finds binary in system PATH.""" + pm = ProcessManager() + + with patch.dict(os.environ, {}, clear=True): + with patch("shutil.which", 
return_value="/usr/local/bin/capiscio-core"): + result = pm.find_binary() + assert result == Path("/usr/local/bin/capiscio-core") + + def test_find_binary_cached(self): + """Test find_binary finds previously downloaded binary.""" + pm = ProcessManager() + + with patch.dict(os.environ, {}, clear=True): + with patch("shutil.which", return_value=None): + with patch.object(ProcessManager, "_get_cached_binary_path") as mock_cached: + mock_path = MagicMock() + mock_path.exists.return_value = True + mock_cached.return_value = mock_path + + result = pm.find_binary() + assert result == mock_path + + def test_find_binary_not_found(self): + """Test find_binary returns None when binary not found.""" + pm = ProcessManager() + + with patch.dict(os.environ, {}, clear=True): + with patch("shutil.which", return_value=None): + with patch.object(ProcessManager, "_get_cached_binary_path") as mock_cached: + mock_path = MagicMock() + mock_path.exists.return_value = False + mock_cached.return_value = mock_path + + result = pm.find_binary() + assert result is None + + @patch("httpx.stream") + @patch("os.chmod") + @patch("os.stat") + def test_download_binary_success(self, mock_stat, mock_chmod, mock_stream): + """Test successful binary download.""" + pm = ProcessManager() + + # Mock platform detection + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Linux"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="x86_64"): + # Mock cached path doesn't exist + with patch.object(ProcessManager, "_get_cached_binary_path") as mock_cached: + mock_path = MagicMock() + mock_path.exists.return_value = False + mock_path.parent = MagicMock() + mock_cached.return_value = mock_path + + # Mock HTTP response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.iter_bytes.return_value = [b"binary", b"data"] + mock_stream.return_value.__enter__.return_value = mock_response + + # Mock file operations + m_open = mock_open() + with 
patch("builtins.open", m_open): + result = pm._download_binary() + + # Verify download was attempted + mock_stream.assert_called_once() + assert result == mock_path + + @patch("httpx.stream") + def test_download_binary_already_cached(self, mock_stream): + """Test download skips if binary already cached.""" + pm = ProcessManager() + + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Linux"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="x86_64"): + with patch.object(ProcessManager, "_get_cached_binary_path") as mock_cached: + mock_path = MagicMock() + mock_path.exists.return_value = True + mock_cached.return_value = mock_path + + result = pm._download_binary() + + # Should not attempt download + mock_stream.assert_not_called() + assert result == mock_path + + @patch("httpx.stream") + def test_download_binary_http_error(self, mock_stream): + """Test download handles HTTP errors.""" + pm = ProcessManager() + + with patch("capiscio_sdk._rpc.process.platform.system", return_value="Linux"): + with patch("capiscio_sdk._rpc.process.platform.machine", return_value="x86_64"): + with patch.object(ProcessManager, "_get_cached_binary_path") as mock_cached: + mock_path = MagicMock() + mock_path.exists.side_effect = [False, False] # Not exists before download, not exists after cleanup + mock_path.parent = MagicMock() + mock_cached.return_value = mock_path + + # Mock HTTP error + mock_response = MagicMock() + mock_response.raise_for_status.side_effect = Exception("404 Not Found") + mock_stream.return_value.__enter__.return_value = mock_response + + with pytest.raises(RuntimeError, match="Failed to download capiscio-core"): + pm._download_binary() + + def test_binary_download_triggered_when_not_found(self): + """Test that _download_binary method exists and is callable.""" + pm = ProcessManager() + + # Just verify the method exists and can be mocked for integration + assert hasattr(pm, "_download_binary") + assert 
callable(pm._download_binary)