We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 6bd1075 · commit 8fa2ef1 — Copy full SHA for 8fa2ef1
llama_cpp/llama.py
@@ -696,9 +696,7 @@ def _create_completion(
696
llama_cpp.llama_reset_timings(self.ctx)
697
698
if len(prompt_tokens) + max_tokens > self._n_ctx:
699
- raise ValueError(
700
- f"Requested tokens exceed context window of {self._n_ctx}"
701
- )
+ raise ValueError(f"Requested tokens exceed context window of {self._n_ctx}")
702
703
if stop != []:
704
stop_sequences = [s.encode("utf-8") for s in stop]
0 commit comments