add support for Gemini (#1465)
### What problem does this PR solve?

#1036

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

Co-authored-by: Zhedong Cen <cenzhedong2@126.com>
@@ -621,3 +621,64 @@ class BedrockChat(Base):
            yield ans + f"ERROR: Can't invoke '{self.model_name}'. Reason: {e}"

        yield num_tokens_from_string(ans)


class GeminiChat(Base):

    def __init__(self, key, model_name, base_url=None):
        from google.generativeai import client, GenerativeModel

        client.configure(api_key=key)
        _client = client.get_default_generative_client()
        self.model_name = 'models/' + model_name
        self.model = GenerativeModel(model_name=self.model_name)
        # Reuse the client configured above instead of creating a new one.
        self.model._client = _client

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "user", "parts": system})
        if 'max_tokens' in gen_conf:
            # Gemini names the output limit 'max_output_tokens'.
            gen_conf['max_output_tokens'] = gen_conf['max_tokens']
        # Drop any generation options the Gemini API does not accept.
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_output_tokens"]:
                del gen_conf[k]
        # Remap OpenAI-style messages to Gemini's schema:
        # role 'assistant' becomes 'model', key 'content' becomes 'parts'.
        for item in history:
            if 'role' in item and item['role'] == 'assistant':
                item['role'] = 'model'
            if 'content' in item:
                item['parts'] = item.pop('content')

        try:
            response = self.model.generate_content(
                history,
                generation_config=gen_conf)
            ans = response.text
            return ans, response.usage_metadata.total_token_count
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "user", "parts": system})
        if 'max_tokens' in gen_conf:
            gen_conf['max_output_tokens'] = gen_conf['max_tokens']
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_output_tokens"]:
                del gen_conf[k]
        for item in history:
            if 'role' in item and item['role'] == 'assistant':
                item['role'] = 'model'
            if 'content' in item:
                item['parts'] = item.pop('content')
        ans = ""
        try:
            response = self.model.generate_content(
                history,
                generation_config=gen_conf, stream=True)
            for resp in response:
                ans += resp.text
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        # Total usage is reported on the last streamed chunk (private attribute).
        yield response._chunks[-1].usage_metadata.total_token_count
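For orientation, a minimal usage sketch of the new wrapper (not part of the commit). The model name "gemini-pro" and the GOOGLE_API_KEY environment variable are assumptions; the history uses the OpenAI-style schema that chat() remaps internally:

    import os

    mdl = GeminiChat(os.environ["GOOGLE_API_KEY"], "gemini-pro")  # assumed model name
    hist = [{"role": "user", "content": "Summarize retrieval-augmented generation in one sentence."}]

    # Non-streaming: returns the answer and Gemini's total token count.
    ans, used = mdl.chat("You are concise.", hist, {"temperature": 0.3, "max_tokens": 256})
    print(ans, used)

    # Streaming: yields the growing answer, then the token count as the last item.
    for chunk in mdl.chat_streamly("You are concise.",
                                   [{"role": "user", "content": "Now in two sentences."}],
                                   {"temperature": 0.3}):
        print(chunk)

Note that both methods mutate the history list in place (role and key renames), so pass a fresh list per call.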
@@ -203,6 +203,29 @@ class XinferenceCV(Base):
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens


class GeminiCV(Base):
    def __init__(self, key, model_name="gemini-1.0-pro-vision-latest", lang="Chinese", **kwargs):
        from google.generativeai import client, GenerativeModel

        client.configure(api_key=key)
        _client = client.get_default_generative_client()
        self.model_name = model_name
        self.model = GenerativeModel(model_name=self.model_name)
        self.model._client = _client
        self.lang = lang

    def describe(self, image, max_tokens=2048):
        from PIL.Image import open
        gen_config = {'max_output_tokens': max_tokens}
        prompt = "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。" if self.lang.lower() == "chinese" else \
            "Please describe the content of this picture, like where, when, who, and what happened. If it contains numeric data, please extract it."
        b64 = self.image2base64(image)
        img = open(BytesIO(base64.b64decode(b64)))
        # Gemini accepts a mixed list of text and PIL images as one prompt.
        inputs = [prompt, img]
        res = self.model.generate_content(
            inputs,
            generation_config=gen_config,
        )
        return res.text, res.usage_metadata.total_token_count


class LocalCV(Base):
    def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
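Again a hypothetical sketch (not in the commit), assuming GOOGLE_API_KEY is set, a local "photo.jpg" exists, and Base.image2base64 accepts raw image bytes the way the other CV wrappers are called:

    import os

    cv = GeminiCV(os.environ["GOOGLE_API_KEY"], lang="English")
    with open("photo.jpg", "rb") as f:
        caption, used = cv.describe(f.read(), max_tokens=1024)
    print(caption, used)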
@@ -31,7 +31,7 @@ import numpy as np
import asyncio
from api.utils.file_utils import get_home_cache_dir
from rag.utils import num_tokens_from_string, truncate

import google.generativeai as genai


class Base(ABC):
    def __init__(self, key, model_name):
@@ -419,3 +419,27 @@ class BedrockEmbed(Base):

        return np.array(embeddings), token_count


class GeminiEmbed(Base):
    def __init__(self, key, model_name='models/text-embedding-004',
                 **kwargs):
        genai.configure(api_key=key)
        # Only prepend 'models/' when it is missing, so the default value
        # does not become 'models/models/text-embedding-004'.
        self.model_name = model_name if model_name.startswith('models/') else 'models/' + model_name

    def encode(self, texts: list, batch_size=32):
        texts = [truncate(t, 2048) for t in texts]
        token_count = sum(num_tokens_from_string(text) for text in texts)
        result = genai.embed_content(
            model=self.model_name,
            content=texts,
            task_type="retrieval_document",
            title="Embedding of list of strings")
        return np.array(result['embedding']), token_count

    def encode_queries(self, text):
        # Queries reuse the document task type here; 'retrieval_query' would
        # also work but does not take a 'title' argument.
        result = genai.embed_content(
            model=self.model_name,
            content=truncate(text, 2048),
            task_type="retrieval_document",
            title="Embedding of single string")
        token_count = num_tokens_from_string(text)
        return np.array(result['embedding']), token_count
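A last hypothetical sketch (not in the commit), assuming GOOGLE_API_KEY is set; encode() returns one embedding row per input string, plus the summed token count:

    import os

    embd = GeminiEmbed(os.environ["GOOGLE_API_KEY"])

    vecs, used = embd.encode(["hello world", "ragflow"])
    print(vecs.shape, used)      # (2, dim) array, total input tokens

    qvec, qused = embd.encode_queries("What is RAGFlow?")
    print(qvec.shape, qused)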