1 parent e354925   commit efeefa9
src/inferencesh/models/llm.py
@@ -705,7 +705,7 @@ def _generate_worker():
             msg_type, data = response_queue.get(timeout=0.1)
             chunk_count += 1
             if chunk_count % 10 == 0:  # Log every 10th chunk to avoid spam
-                print(f"[DEBUG] Main loop received chunk {chunk_count}")
+                print(f"[DEBUG] Main loop received chunk {chunk_count} chunk sample: {data}")
         except Empty:
             continue
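For context, the changed line sits inside a consumer loop that polls a response queue and logs every tenth chunk; the commit only adds the chunk payload to that debug line. The sketch below is a minimal, self-contained reconstruction of that pattern, not the actual llm.py implementation: the function name, the stop_event, and the producer side are all assumptions.

import threading
from queue import Queue, Empty

def consume_stream(response_queue: Queue, stop_event: threading.Event) -> None:
    """Hypothetical stand-in for the streaming loop the diff touches."""
    chunk_count = 0
    while not stop_event.is_set():
        try:
            # Poll with a short timeout so the loop can notice stop_event.
            msg_type, data = response_queue.get(timeout=0.1)
        except Empty:
            continue  # nothing produced yet; poll again
        chunk_count += 1
        if chunk_count % 10 == 0:  # Log every 10th chunk to avoid spam
            # Post-commit form of the debug line: includes a sample of the data.
            print(f"[DEBUG] Main loop received chunk {chunk_count} chunk sample: {data}")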