fix: prompt for baichuan text generation models (#1299)
commit 8480b0197b
parent df07fb5951
@@ -37,6 +37,12 @@ class BaichuanModel(BaseLLM):
         prompts = self._get_prompt_from_messages(messages)
         return self._client.generate([prompts], stop, callbacks)
 
+    def prompt_file_name(self, mode: str) -> str:
+        if mode == 'completion':
+            return 'baichuan_completion'
+        else:
+            return 'baichuan_chat'
+
     def get_num_tokens(self, messages: List[PromptMessage]) -> int:
         """
         get num tokens of prompt messages.
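The added prompt_file_name override routes completion-mode requests to a Baichuan-specific completion template instead of the shared chat template. A minimal standalone sketch of that selection logic follows; the BaichuanModel class and the template files themselves are outside this diff, so only the branching is reproduced:

def prompt_file_name(mode: str) -> str:
    # Completion-style generation gets its own template file;
    # every other mode falls back to the chat template.
    if mode == 'completion':
        return 'baichuan_completion'
    return 'baichuan_chat'

assert prompt_file_name('completion') == 'baichuan_completion'
assert prompt_file_name('chat') == 'baichuan_chat'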