Support Xinference (#320)

### What problem does this PR solve?

Issue link: #299

This PR adds Xinference as a model provider, wiring it into the chat, embedding, and vision (CV) model registries so that locally deployed LLMs can be used.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
KevinHuSh 2024-04-11 18:22:25 +08:00 committed by GitHub
parent cb2cbf500c
commit 91068edf16
14 changed files with 81 additions and 7 deletions


@@ -172,6 +172,7 @@ $ docker compose up -d
## 🆕 Latest Features
- 2024-04-11 Support [Xinference](./docs/xinference.md) for local LLM deployment.
- 2024-04-10 Add a new layout recognition model for the 'Laws' method.
- 2024-04-08 Support [Ollama](./docs/ollama.md) for local LLM deployment.
- 2024-04-07 Support Chinese UI.
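
The Xinference integration in this commit talks to the server through its OpenAI-compatible API (see the `XinferenceChat` hunk below). A minimal sketch of that pattern, assuming a local server on Xinference's default port 9997 and a launched model named `qwen-chat` (both assumptions, not values from this commit):

```python
from openai import OpenAI

client = OpenAI(
    api_key="xxx",  # Xinference does not validate the key; any placeholder works
    base_url="http://localhost:9997/v1",  # assumed local Xinference endpoint
)
resp = client.chat.completions.create(
    model="qwen-chat",  # must match a model launched in Xinference
    messages=[{"role": "user", "content": "Hello!"}],
)
print(resp.choices[0].message.content)
```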


@@ -171,6 +171,8 @@ $ docker compose up -d
```
## 🆕 Latest Features
- 2024-04-11 Support [Xinference](./docs/xinference.md) for local LLM deployment.
- 2024-04-10 Add a new layout recognition model for the 'Laws' method.
- 2024-04-08 Support local deployment of large models using [Ollama](./docs/ollama.md).
- 2024-04-07 Support Chinese UI.


@@ -172,6 +172,7 @@ $ docker compose up -d
## 🆕 Latest Features
- 2024-04-11 Support [Xinference](./docs/xinference.md) for local LLM deployment.
- 2024-04-10 Add a new model for 'Laws' layout analysis.
- 2024-04-08 Support local deployment of large models with [Ollama](./docs/ollama.md).
- 2024-04-07 Support Chinese UI.


@@ -22,6 +22,7 @@ from werkzeug.wrappers.request import Request
from flask_cors import CORS
from api.db import StatusEnum
from api.db.db_models import close_connection
from api.db.services import UserService
from api.utils import CustomJSONEncoder
@@ -42,7 +43,7 @@ for h in access_logger.handlers:
Request.json = property(lambda self: self.get_json(force=True, silent=True))
app = Flask(__name__)
CORS(app, supports_credentials=True,max_age = 2592000)
CORS(app, supports_credentials=True,max_age=2592000)
app.url_map.strict_slashes = False
app.json_encoder = CustomJSONEncoder
app.errorhandler(Exception)(server_error_response)
@@ -94,8 +95,6 @@ client_urls_prefix = [
]
@login_manager.request_loader
def load_user(web_request):
jwt = Serializer(secret_key=SECRET_KEY)
@@ -112,4 +111,9 @@ def load_user(web_request):
stat_logger.exception(e)
return None
else:
return None
return None
@app.teardown_request
def _db_close(exc):
close_connection()
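
The new `teardown_request` hook recycles the database connection after every request. A minimal sketch of the same pattern in isolation, with `close_connection` as a stand-in for the real helper from `api.db.db_models`:

```python
from flask import Flask

app = Flask(__name__)

def close_connection():
    """Stand-in for api.db.db_models.close_connection."""

@app.teardown_request
def _db_close(exc):
    # Runs after every request, including ones that raised an exception,
    # so a worker never holds a stale database connection.
    close_connection()
```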


@@ -360,6 +360,7 @@ def use_sql(question, field_map, tenant_id, chat_mdl):
"|" for r in tbl["rows"]]
rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)
if not docid_idx or not docnm_idx:
chat_logger.warning("SQL missing field: " + sql)
return {
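
The added `re.sub` strips the time component that DATETIME values carry when result rows are rendered as a pipe-delimited table, keeping only the date. A small demonstration on an invented sample row:

```python
import re

row = "|2024-04-11T18:22:25| contract.pdf |"
# Drops "T18:22:25" (plus optional fractional seconds and a trailing Z)
# whenever it directly precedes a cell border.
print(re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", row))
# -> |2024-04-11| contract.pdf |
```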


@@ -109,6 +109,12 @@ factory_infos = [{
"logo": "",
"tags": "LLM,TEXT EMBEDDING",
"status": "1",
},
{
"name": "Xinference",
"logo": "",
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
"status": "1",
},
# {
# "name": "文心一言",


@@ -20,7 +20,6 @@ services:
- 443:443
volumes:
- ./service_conf.yaml:/ragflow/conf/service_conf.yaml
- ./entrypoint.sh:/ragflow/entrypoint.sh
- ./ragflow-logs:/ragflow/logs
- ./nginx/ragflow.conf:/etc/nginx/conf.d/ragflow.conf
- ./nginx/proxy.conf:/etc/nginx/proxy.conf


@@ -19,7 +19,6 @@ services:
- 443:443
volumes:
- ./service_conf.yaml:/ragflow/conf/service_conf.yaml
- ./entrypoint.sh:/ragflow/entrypoint.sh
- ./ragflow-logs:/ragflow/logs
- ./nginx/ragflow.conf:/etc/nginx/conf.d/ragflow.conf
- ./nginx/proxy.conf:/etc/nginx/proxy.conf


@@ -21,6 +21,7 @@ from .cv_model import *
EmbeddingModel = {
"Ollama": OllamaEmbed,
"OpenAI": OpenAIEmbed,
"Xinference": XinferenceEmbed,
"Tongyi-Qianwen": HuEmbedding, #QWenEmbed,
"ZHIPU-AI": ZhipuEmbed,
"Moonshot": HuEmbedding
@@ -30,6 +31,7 @@ EmbeddingModel = {
CvModel = {
"OpenAI": GptV4,
"Ollama": OllamaCV,
"Xinference": XinferenceCV,
"Tongyi-Qianwen": QWenCV,
"ZHIPU-AI": Zhipu4V,
"Moonshot": LocalCV
@@ -41,6 +43,7 @@ ChatModel = {
"ZHIPU-AI": ZhipuChat,
"Tongyi-Qianwen": QWenChat,
"Ollama": OllamaChat,
"Xinference": XinferenceChat,
"Moonshot": MoonshotChat
}
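
Each registry maps a provider name to its implementation class, so supporting a new provider costs one entry per capability. A hedged sketch of how a lookup might be used (the model name and base_url are placeholders, not values from this commit):

```python
from rag.llm import EmbeddingModel

embd_mdl = EmbeddingModel["Xinference"](
    key="xxx",  # Xinference ignores the key
    model_name="bge-base-en",
    base_url="http://localhost:9997/v1",
)
vectors, token_count = embd_mdl.encode(["hello", "world"])
```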


@@ -158,6 +158,28 @@ class OllamaChat(Base):
return "**ERROR**: " + str(e), 0
class XinferenceChat(Base):
def __init__(self, key=None, model_name="", base_url=""):
self.client = OpenAI(api_key="xxx", base_url=base_url)
self.model_name = model_name
def chat(self, system, history, gen_conf):
if system:
history.insert(0, {"role": "system", "content": system})
try:
response = self.client.chat.completions.create(
model=self.model_name,
messages=history,
**gen_conf)
ans = response.choices[0].message.content.strip()
if response.choices[0].finish_reason == "length":
ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
return ans, response.usage.completion_tokens
except openai.APIError as e:
return "**ERROR**: " + str(e), 0
class LocalLLM(Base):
class RPCProxy:
def __init__(self, host, port):
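
`XinferenceChat` above is a thin wrapper over the OpenAI client with a dummy key; `chat` prepends the system prompt and appends a "continue?" hint when the answer was cut off by the length limit. A hedged usage sketch (model name, URL, and generation settings are placeholders):

```python
from rag.llm.chat_model import XinferenceChat

mdl = XinferenceChat(model_name="qwen-chat",
                     base_url="http://localhost:9997/v1")
answer, completion_tokens = mdl.chat(
    system="You are a helpful assistant.",
    history=[{"role": "user", "content": "Summarize RAGFlow in one line."}],
    gen_conf={"temperature": 0.1, "max_tokens": 128},
)
```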


@@ -161,6 +161,22 @@ class OllamaCV(Base):
except Exception as e:
return "**ERROR**: " + str(e), 0
class XinferenceCV(Base):
def __init__(self, key, model_name="", lang="Chinese", base_url=""):
self.client = OpenAI(api_key=key, base_url=base_url)
self.model_name = model_name
self.lang = lang
def describe(self, image, max_tokens=300):
b64 = self.image2base64(image)
res = self.client.chat.completions.create(
model=self.model_name,
messages=self.prompt(b64),
max_tokens=max_tokens,
)
return res.choices[0].message.content.strip(), res.usage.total_tokens
class LocalCV(Base):
def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
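
`XinferenceCV.describe` leans on the `image2base64` and `prompt` helpers inherited from `Base`, which this hunk does not show. A hedged usage sketch; the model name and image path are placeholders, and passing raw bytes assumes `Base.image2base64` accepts them:

```python
from rag.llm.cv_model import XinferenceCV

mdl = XinferenceCV(key="xxx", model_name="qwen-vl-chat",
                   base_url="http://localhost:9997/v1")
with open("page_1.png", "rb") as f:  # placeholder image
    description, total_tokens = mdl.describe(f.read(), max_tokens=300)
print(description)
```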


@@ -170,3 +170,20 @@ class OllamaEmbed(Base):
res = self.client.embeddings(prompt=text,
model=self.model_name)
return np.array(res["embedding"]), 128
class XinferenceEmbed(Base):
def __init__(self, key, model_name="", base_url=""):
self.client = OpenAI(api_key="xxx", base_url=base_url)
self.model_name = model_name
def encode(self, texts: list, batch_size=32):
res = self.client.embeddings.create(input=texts,
model=self.model_name)
return np.array([d.embedding for d in res.data]
), res.usage.total_tokens
def encode_queries(self, text):
res = self.client.embeddings.create(input=[text],
model=self.model_name)
return np.array(res.data[0].embedding), res.usage.total_tokens
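
`encode` sends the whole text list in a single embeddings request (the `batch_size` parameter is accepted but unused here) and returns the vectors together with the total token usage; `encode_queries` is the single-string variant. A hedged sketch with placeholder model name and URL:

```python
from rag.llm.embedding_model import XinferenceEmbed

mdl = XinferenceEmbed(key="xxx", model_name="bge-base-en",
                      base_url="http://localhost:9997/v1")
vecs, tokens = mdl.encode(["alpha", "beta"])  # vecs.shape == (2, dim)
qvec, qtokens = mdl.encode_queries("gamma")   # qvec.shape == (dim,)
```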


@@ -34,7 +34,7 @@ LoggerFactory.set_directory(
"logs",
"rag"))
# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
LoggerFactory.LEVEL = 10
LoggerFactory.LEVEL = 30
es_logger = getLogger("es")
minio_logger = getLogger("minio")
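
Raising `LoggerFactory.LEVEL` from 10 to 30 moves the threshold from DEBUG to WARNING, following the numeric mapping in the comment above; the values coincide with the Python stdlib constants:

```python
import logging

assert logging.DEBUG == 10 and logging.WARNING == 30
# At level 30, debug() and info() records are filtered out;
# warning(), error(), and critical() still pass through.
```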


@@ -24,6 +24,8 @@ import sys
import time
import traceback
from functools import partial
from api.db.db_models import close_connection
from rag.settings import database_logger
from rag.settings import cron_logger, DOC_MAXIMUM_SIZE
from multiprocessing import Pool
@@ -302,3 +304,4 @@ if __name__ == "__main__":
comm = MPI.COMM_WORLD
while True:
main(int(sys.argv[2]), int(sys.argv[1]))
close_connection()
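
The matching change in the task executor calls `close_connection()` at the end of every polling iteration, so a long-lived worker never carries a stale database connection into the next `main(...)` pass.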