Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions python/copilot/generated/rpc.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,8 +169,10 @@ class Supports:
@staticmethod
def from_dict(obj: Any) -> 'Supports':
assert isinstance(obj, dict)
reasoning_effort = from_bool(obj.get("reasoningEffort"))
vision = from_bool(obj.get("vision"))
reasoning_effort_raw = obj.get("reasoningEffort")
reasoning_effort = bool(reasoning_effort_raw) if reasoning_effort_raw is not None else False
vision_raw = obj.get("vision")
vision = bool(vision_raw) if vision_raw is not None else False
return Supports(reasoning_effort, vision)

def to_dict(self) -> dict:
Expand Down
12 changes: 8 additions & 4 deletions python/copilot/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -751,18 +751,21 @@ class ModelLimits:
"""Model limits"""

max_prompt_tokens: int | None = None
max_output_tokens: int | None = None
max_context_window_tokens: int | None = None
vision: ModelVisionLimits | None = None

@staticmethod
def from_dict(obj: Any) -> ModelLimits:
assert isinstance(obj, dict)
max_prompt_tokens = obj.get("max_prompt_tokens")
max_output_tokens = obj.get("max_output_tokens")
max_context_window_tokens = obj.get("max_context_window_tokens")
vision_dict = obj.get("vision")
vision = ModelVisionLimits.from_dict(vision_dict) if vision_dict else None
return ModelLimits(
max_prompt_tokens=max_prompt_tokens,
max_output_tokens=max_output_tokens,
max_context_window_tokens=max_context_window_tokens,
vision=vision,
)
Expand All @@ -771,6 +774,8 @@ def to_dict(self) -> dict:
result: dict = {}
if self.max_prompt_tokens is not None:
result["max_prompt_tokens"] = self.max_prompt_tokens
if self.max_output_tokens is not None:
result["max_output_tokens"] = self.max_output_tokens
if self.max_context_window_tokens is not None:
result["max_context_window_tokens"] = self.max_context_window_tokens
if self.vision is not None:
Expand All @@ -788,11 +793,10 @@ class ModelSupports:
@staticmethod
def from_dict(obj: Any) -> ModelSupports:
assert isinstance(obj, dict)
vision = obj.get("vision")
if vision is None:
raise ValueError("Missing required field 'vision' in ModelSupports")
vision_raw = obj.get("vision")
vision = bool(vision_raw) if vision_raw is not None else False
reasoning_effort = obj.get("reasoningEffort", False)
return ModelSupports(vision=bool(vision), reasoning_effort=bool(reasoning_effort))
return ModelSupports(vision=vision, reasoning_effort=bool(reasoning_effort))

def to_dict(self) -> dict:
result: dict = {}
Expand Down
106 changes: 106 additions & 0 deletions python/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,115 @@
import pytest

from copilot import CopilotClient
from copilot.types import ModelInfo
from e2e.testharness import CLI_PATH


class TestModelInfoFromDict:
    """Unit tests covering ModelInfo.from_dict tolerance of nullable/missing fields."""

    @staticmethod
    def _payload(model_id: str, display_name: str, supports: dict, limits: dict) -> dict:
        """Assemble a minimal wire-format model dict of the shape from_dict expects."""
        return {
            "id": model_id,
            "name": display_name,
            "capabilities": {"supports": supports, "limits": limits},
        }

    def test_parses_normal_model(self):
        """A fully-populated model round-trips its id and boolean capability flags."""
        parsed = ModelInfo.from_dict(
            self._payload(
                "gpt-4o",
                "GPT-4o",
                {"vision": True, "reasoningEffort": False},
                {"max_context_window_tokens": 128000},
            )
        )
        assert parsed.id == "gpt-4o"
        assert parsed.capabilities.supports.vision is True
        assert parsed.capabilities.supports.reasoning_effort is False

    def test_parses_model_with_null_vision(self):
        """A JSON null for vision is coerced to vision=False instead of failing."""
        parsed = ModelInfo.from_dict(
            self._payload(
                "gemini-3-flash",
                "Gemini 3 Flash (Preview)",
                {"vision": None, "reasoningEffort": False},
                {"max_context_window_tokens": 128000},
            )
        )
        assert parsed.id == "gemini-3-flash"
        assert parsed.capabilities.supports.vision is False

    def test_parses_model_with_missing_vision(self):
        """Omitting the vision key entirely defaults to vision=False."""
        parsed = ModelInfo.from_dict(
            self._payload(
                "grok-code-fast-1",
                "Groke Code Fast 1",
                {"reasoningEffort": False},
                {"max_context_window_tokens": 128000},
            )
        )
        assert parsed.id == "grok-code-fast-1"
        assert parsed.capabilities.supports.vision is False

    def test_parses_model_with_missing_reasoning_effort(self):
        """Omitting reasoningEffort defaults to reasoning_effort=False."""
        parsed = ModelInfo.from_dict(
            self._payload(
                "raptor-mini",
                "Raptor mini (Preview)",
                {"vision": False},
                {"max_context_window_tokens": 128000},
            )
        )
        assert parsed.id == "raptor-mini"
        assert parsed.capabilities.supports.reasoning_effort is False

    def test_parses_model_with_max_output_tokens(self):
        """max_output_tokens inside limits is surfaced on the parsed model."""
        parsed = ModelInfo.from_dict(
            self._payload(
                "gpt-4o",
                "GPT-4o",
                {"vision": True, "reasoningEffort": False},
                {"max_context_window_tokens": 128000, "max_output_tokens": 16384},
            )
        )
        assert parsed.capabilities.limits.max_output_tokens == 16384

    def test_list_comprehension_succeeds_with_mixed_models(self):
        """Parsing a batch succeeds even when some entries carry null/missing fields."""
        fixtures = [
            self._payload(
                "gpt-4o",
                "GPT-4o",
                {"vision": True, "reasoningEffort": False},
                {"max_context_window_tokens": 128000},
            ),
            self._payload(
                "gemini-3-flash",
                "Gemini 3 Flash (Preview)",
                {"vision": None},
                {"max_context_window_tokens": 100000},
            ),
            self._payload(
                "grok-code-fast-1",
                "Groke Code Fast 1",
                {},
                {"max_context_window_tokens": 131072},
            ),
        ]
        parsed_models = [ModelInfo.from_dict(fixture) for fixture in fixtures]
        assert len(parsed_models) == 3
        assert [m.id for m in parsed_models] == ["gpt-4o", "gemini-3-flash", "grok-code-fast-1"]


class TestHandleToolCallRequest:
@pytest.mark.asyncio
async def test_returns_failure_when_tool_not_registered(self):
Expand Down
7 changes: 7 additions & 0 deletions scripts/codegen/python.ts
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,13 @@ async function generateRpc(schemaPath?: string): Promise<void> {
typesCode = typesCode.replace(/except:/g, "except Exception:");
// Remove unnecessary pass when class has methods (quicktype generates pass for empty schemas)
typesCode = typesCode.replace(/^(\s*)pass\n\n(\s*@staticmethod)/gm, "$2");
// Fix from_bool() calls to handle missing/null boolean fields gracefully.
// The API may return null or omit boolean fields for some models; default to False.
typesCode = typesCode.replace(
/(\w+) = from_bool\(obj\.get\("([^"]+)"\)\)/g,
(_, varName, jsonKey) =>
`${varName}_raw = obj.get("${jsonKey}")\n ${varName} = bool(${varName}_raw) if ${varName}_raw is not None else False`
);

const lines: string[] = [];
lines.push(`"""
Expand Down