From a3d3e30e3a884b549fbfe3386f2a81562fb81ba5 Mon Sep 17 00:00:00 2001
From: Yingchun Lai
Date: Thu, 13 Feb 2025 10:24:05 +0800
Subject: [PATCH] fix: fix tongyi models blocking mode with incremental_output=stream (#13620)

---
 api/core/model_runtime/model_providers/tongyi/llm/llm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
index 1dce372bba..bb987d4998 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py
+++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
@@ -197,7 +197,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
         else:
             # nothing different between chat model and completion model in tongyi
             params["messages"] = self._convert_prompt_messages_to_tongyi_messages(prompt_messages)
-        response = Generation.call(**params, result_format="message", stream=stream, incremental_output=True)
+        response = Generation.call(**params, result_format="message", stream=stream, incremental_output=stream)
 
         if stream:
             return self._handle_generate_stream_response(model, credentials, response, prompt_messages)
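
Note (not part of the patch): the change ties incremental_output to the stream flag because incremental output is only meaningful for streaming calls; in blocking mode the dashscope SDK returns a single complete response, and, per the commit subject, forcing incremental_output=True broke blocking-mode requests. Below is a minimal sketch of the intended call pattern, assuming the dashscope Generation.call API; the model name and prompt are illustrative assumptions, not values taken from the patch.

    # Illustrative sketch only; requires a valid DASHSCOPE_API_KEY in the environment.
    from dashscope import Generation

    messages = [{"role": "user", "content": "Hello"}]

    # Blocking mode: a single complete response; incremental output does not apply,
    # so the flag mirrors stream (False), as in the fix.
    blocking_response = Generation.call(
        model="qwen-plus",  # assumed model name for illustration
        messages=messages,
        result_format="message",
        stream=False,
        incremental_output=False,
    )

    # Streaming mode: an iterator of chunks; with incremental_output=True each chunk
    # carries only the newly generated delta, again mirroring stream (True).
    for chunk in Generation.call(
        model="qwen-plus",  # assumed model name for illustration
        messages=messages,
        result_format="message",
        stream=True,
        incremental_output=True,
    ):
        pass  # handle each incremental chunk here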