@@ -424,10 +424,13 @@ async def node_children_evaluation(self, node: LATSNode) -> None:
                 score = 0
             else:
                 trajectory = child.get_trajectory()
-                prompt = create_llm_prompt(trajectory, self.goal)
-                # , child.observation.image
-                result = score_trajectory_with_openai(prompt, openai_client, self.config.evaluation_model)
-                score = result["overall_score"]
+                if len(trajectory) == 0:
+                    score = 0
+                else:
+                    prompt = create_llm_prompt(trajectory, self.goal)
+                    # , child.observation.image
+                    result = score_trajectory_with_openai(prompt, openai_client, self.config.evaluation_model)
+                    score = result["overall_score"]
             scores.append(score)
 
         for child, score in zip(node.children, scores):
@@ -454,13 +457,16 @@ async def node_evaluation(self, node: LATSNode) -> None:
             if node.is_terminal:
                 score = 0
             else:
-                prompt = create_llm_prompt(trajectory, self.goal)
-                result = score_trajectory_with_openai(
-                    prompt,
-                    openai_client,
-                    model=self.config.evaluation_model
-                )
-                score = result["overall_score"]
+                if len(trajectory) == 0:
+                    score = 0
+                else:
+                    prompt = create_llm_prompt(trajectory, self.goal)
+                    result = score_trajectory_with_openai(
+                        prompt,
+                        openai_client,
+                        model=self.config.evaluation_model
+                    )
+                    score = result["overall_score"]
 
         except Exception as e:
             error_msg = f"Error scoring node {id(node)}: {str(e)}"