Skip to content

Commit 9645ded

Browse files
committed
Added gemma3 settings.chat_format handling to server/model.py
1 parent 297439d commit 9645ded

File tree

1 file changed

+14
-0
lines changed

1 file changed

+14
-0
lines changed

llama_cpp/server/model.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -171,6 +171,20 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
171171
chat_handler = llama_cpp.llama_chat_format.MiniCPMv26ChatHandler(
172172
clip_model_path=settings.clip_model_path, verbose=settings.verbose
173173
)
174+
elif settings.chat_format == "gemma3":
175+
assert settings.clip_model_path is not None, "clip model not found"
176+
if settings.hf_model_repo_id is not None:
177+
chat_handler = (
178+
llama_cpp.llama_chat_format.Gemma3ChatHandler.from_pretrained(
179+
repo_id=settings.hf_model_repo_id,
180+
filename=settings.clip_model_path,
181+
verbose=settings.verbose,
182+
)
183+
)
184+
else:
185+
chat_handler = llama_cpp.llama_chat_format.Gemma3ChatHandler(
186+
clip_model_path=settings.clip_model_path, verbose=settings.verbose
187+
)
174188
elif settings.chat_format == "qwen2.5-vl":
175189
assert settings.clip_model_path is not None, "clip model not found"
176190
if settings.hf_model_repo_id is not None:

0 commit comments

Comments
 (0)