We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent efeefa9 commit 1390931Copy full SHA for 1390931
src/inferencesh/models/llm.py
@@ -233,7 +233,7 @@ def render_message(msg: ContextMessage, allow_multipart: bool) -> str | List[dic
233
messages = [{"role": "system", "content": input_data.system_prompt}] if input_data.system_prompt is not None and input_data.system_prompt != "" else []
234
235
def merge_messages(messages: List[ContextMessage]) -> ContextMessage:
    """Collapse a run of context messages into a single message.

    Text parts are joined with a single space (messages with empty/None
    text are skipped). The merged message takes the role of the first
    message in the list.

    Args:
        messages: Non-empty list of messages to merge.

    Returns:
        A ContextMessage combining the texts and (at most one) image.

    Raises:
        ValueError: If ``messages`` is empty.
    """
    if not messages:
        # Fail loudly with a clear message instead of an opaque
        # IndexError from messages[0] below.
        raise ValueError("merge_messages() requires at least one message")
    text = " ".join(msg.text for msg in messages if msg.text)
    images = [msg.image for msg in messages if msg.image]
    # Only the first image survives the merge; later ones are dropped.
    image = images[0] if images else None  # TODO: handle multiple images
    return ContextMessage(role=messages[0].role, text=text, image=image)
0 commit comments