diff --git a/api/core/prompt/output_parser/suggested_questions_after_answer.py b/api/core/prompt/output_parser/suggested_questions_after_answer.py
index d8bb0809cf..e37142ec91 100644
--- a/api/core/prompt/output_parser/suggested_questions_after_answer.py
+++ b/api/core/prompt/output_parser/suggested_questions_after_answer.py
@@ -4,7 +4,6 @@ from typing import Any
 
 from langchain.schema import BaseOutputParser
 
-from core.model_runtime.errors.invoke import InvokeError
 from core.prompt.prompts import SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT
 
 
@@ -14,11 +13,11 @@ class SuggestedQuestionsAfterAnswerOutputParser(BaseOutputParser):
         return SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT
 
     def parse(self, text: str) -> Any:
-        json_string = text.strip()
-        action_match = re.search(r".*(\[\".+\"\]).*", json_string, re.DOTALL)
+        action_match = re.search(r"\[.*?\]", text.strip(), re.DOTALL)
         if action_match is not None:
-            json_obj = json.loads(action_match.group(1).strip(), strict=False)
+            json_obj = json.loads(action_match.group(0).strip())
         else:
-            raise InvokeError("Could not parse LLM output: {text}")
+            json_obj = []
+            print(f"Could not parse LLM output: {text}")
         return json_obj
 
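
A minimal standalone sketch of the behavior this patch introduces (the function name parse_suggested_questions is assumed here for illustration only; it is not part of the diff): the non-greedy regex pulls the first bracketed segment out of the LLM output and, when nothing parses, the code now falls back to an empty list instead of raising.

import json
import re


def parse_suggested_questions(text: str) -> list:
    # Grab the first [...] segment from the raw LLM output, if any.
    action_match = re.search(r"\[.*?\]", text.strip(), re.DOTALL)
    if action_match is not None:
        return json.loads(action_match.group(0).strip())
    # No parseable array: log and degrade gracefully rather than raising.
    print(f"Could not parse LLM output: {text}")
    return []


print(parse_suggested_questions('Sure! ["What is Dify?", "How does it work?"]'))  # ['What is Dify?', 'How does it work?']
print(parse_suggested_questions("no JSON here"))                                  # []

Two things worth noting about the new pattern: the non-greedy \[.*?\] stops at the first closing bracket, which is fine as long as the suggested questions themselves contain no "]", and unlike the old pattern it no longer insists on double-quoted items, accepting any JSON array the model emits.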