@@ -964,7 +964,7 @@ def _create_completion(
                 )
             ],
             "text_offset": [text_offset],
-            "token_logprobs": [sorted_logprobs[int(token)][0]],
+            "token_logprobs": [current_logprobs[int(token)]],
             "top_logprobs": [top_logprob],
         }
         returned_tokens += 1
@@ -1039,7 +1039,7 @@ def _create_completion(
                 self.detokenize([token]).decode("utf-8", errors="ignore")
             ],
             "text_offset": [text_offset],
-            "token_logprobs": [sorted_logprobs[int(token)][0]],
+            "token_logprobs": [current_logprobs[int(token)]],
             "top_logprobs": [top_logprob],
         }

@@ -1163,7 +1163,7 @@ def _create_completion(
                 zip(logprobs_token, range(len(logprobs_token))), reverse=True
             )
         )
-        token_logprobs.append(sorted_logprobs[int(token)][0])
+        token_logprobs.append(logprobs_token[int(token)])
         top_logprob: Optional[Dict[str, float]] = {
             self.detokenize([i]).decode("utf-8", errors="ignore"): logprob
             for logprob, i in sorted_logprobs[:logprobs]
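All three hunks fix the same indexing bug: `sorted_logprobs` is a list of (logprob, token_id) pairs sorted by logprob, so `sorted_logprobs[int(token)][0]` reads the logprob at rank `token` in that ordering, not the logprob of token id `token`. The replacement indexes the per-token logprob array (`logprobs_token`, or `current_logprobs` in the first two hunks) directly by token id. A minimal standalone sketch with made-up values (not taken from the repository) showing the difference:

# Hypothetical per-token log-probabilities, indexed by token id,
# mirroring the role of logprobs_token / current_logprobs above.
logprobs_token = [-3.2, -0.1, -5.7, -1.4]
token = 1  # sampled token id

# Same construction as in the third hunk: pairs sorted by logprob, descending.
sorted_logprobs = list(
    sorted(zip(logprobs_token, range(len(logprobs_token))), reverse=True)
)

old_value = sorted_logprobs[int(token)][0]  # -1.4: logprob at rank 1, wrong token
new_value = logprobs_token[int(token)]      # -0.1: logprob of token id 1, correct

assert old_value != new_value

The `top_logprobs` entries shown above were already correct, since they iterate over `sorted_logprobs[:logprobs]` and carry each token id alongside its logprob.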