Skip to content

Commit 29a8a68

Browse files
committed
fix stats
1 parent 3ce099a commit 29a8a68

File tree

1 file changed

+8
-6
lines changed

1 file changed

+8
-6
lines changed

src/inferencesh/models/llm.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -237,13 +237,15 @@ def __init__(self):
237237
def update_from_chunk(self, chunk: Dict[str, Any], timing: Any) -> None:
238238
"""Update response state from a chunk."""
239239
# Update usage stats if present
240-
if "usage" in chunk and chunk["usage"] is not None:
240+
if "usage" in chunk:
241241
usage = chunk["usage"]
242-
self.usage_stats.update({
243-
"prompt_tokens": usage.get("prompt_tokens", self.usage_stats["prompt_tokens"]),
244-
"completion_tokens": usage.get("completion_tokens", self.usage_stats["completion_tokens"]),
245-
"total_tokens": usage.get("total_tokens", self.usage_stats["total_tokens"])
246-
})
242+
if usage is not None:
243+
self.usage_stats = {
244+
"prompt_tokens": usage.get("prompt_tokens", 0),
245+
"completion_tokens": usage.get("completion_tokens", 0),
246+
"total_tokens": usage.get("total_tokens", 0),
247+
"stop_reason": self.usage_stats["stop_reason"] # Preserve existing stop reason
248+
}
247249

248250
# Get the delta from the chunk
249251
delta = chunk.get("choices", [{}])[0]

0 commit comments

Comments (0)