apply PEP 8 formatting (#155)

Commit fd7fcb5baf (parent a02e836790)
Author: KevinHuSh
Date: 2024-03-27 11:33:46 +08:00
Committed by: GitHub
55 changed files with 1568 additions and 753 deletions
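
Every hunk below applies the same mechanical PEP 8 cleanup: one-line "if" suites moved onto their own indented line, spaces added around binary operators, long calls wrapped, and mid-file imports hoisted to the top of each module. A condensed before/after sketch of the recurring pattern, lifted from the hunks that follow:

    # Before: suite on the condition line, no spaces around "+"
    if system: history.insert(0, {"role": "system", "content": system})
    return "**ERROR**: "+str(e), 0

    # After (PEP 8): suite on its own indented line, operators spaced
    if system:
        history.insert(0, {"role": "system", "content": system})
    return "**ERROR**: " + str(e), 0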

View File

@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zhipuai import ZhipuAI
from dashscope import Generation
from abc import ABC
from openai import OpenAI
import openai
@@ -34,7 +36,8 @@ class GptTurbo(Base):
self.model_name = model_name
def chat(self, system, history, gen_conf):
if system: history.insert(0, {"role": "system", "content": system})
if system:
history.insert(0, {"role": "system", "content": system})
try:
response = self.client.chat.completions.create(
model=self.model_name,
@@ -46,16 +49,18 @@ class GptTurbo(Base):
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
return ans, response.usage.completion_tokens
except openai.APIError as e:
return "**ERROR**: "+str(e), 0
return "**ERROR**: " + str(e), 0
class MoonshotChat(GptTurbo):
def __init__(self, key, model_name="moonshot-v1-8k"):
self.client = OpenAI(api_key=key, base_url="https://api.moonshot.cn/v1",)
self.client = OpenAI(
api_key=key, base_url="https://api.moonshot.cn/v1",)
self.model_name = model_name
def chat(self, system, history, gen_conf):
if system: history.insert(0, {"role": "system", "content": system})
if system:
history.insert(0, {"role": "system", "content": system})
try:
response = self.client.chat.completions.create(
model=self.model_name,
@@ -67,10 +72,9 @@ class MoonshotChat(GptTurbo):
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
return ans, response.usage.completion_tokens
except openai.APIError as e:
return "**ERROR**: "+str(e), 0
return "**ERROR**: " + str(e), 0
from dashscope import Generation
class QWenChat(Base):
def __init__(self, key, model_name=Generation.Models.qwen_turbo):
import dashscope
@@ -79,7 +83,8 @@ class QWenChat(Base):
def chat(self, system, history, gen_conf):
from http import HTTPStatus
if system: history.insert(0, {"role": "system", "content": system})
if system:
history.insert(0, {"role": "system", "content": system})
response = Generation.call(
self.model_name,
messages=history,
@@ -92,20 +97,21 @@ class QWenChat(Base):
ans += response.output.choices[0]['message']['content']
tk_count += response.usage.output_tokens
if response.output.choices[0].get("finish_reason", "") == "length":
ans += "...\nFor the content length reason, it stopped, continue?" if is_english([ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
[ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
return ans, tk_count
return "**ERROR**: " + response.message, tk_count
from zhipuai import ZhipuAI
class ZhipuChat(Base):
def __init__(self, key, model_name="glm-3-turbo"):
self.client = ZhipuAI(api_key=key)
self.model_name = model_name
def chat(self, system, history, gen_conf):
if system: history.insert(0, {"role": "system", "content": system})
if system:
history.insert(0, {"role": "system", "content": system})
try:
response = self.client.chat.completions.create(
self.model_name,
@@ -120,6 +126,7 @@ class ZhipuChat(Base):
except Exception as e:
return "**ERROR**: " + str(e), 0
class LocalLLM(Base):
class RPCProxy:
def __init__(self, host, port):
@@ -129,14 +136,17 @@ class LocalLLM(Base):
def __conn(self):
from multiprocessing.connection import Client
self._connection = Client((self.host, self.port), authkey=b'infiniflow-token4kevinhu')
self._connection = Client(
(self.host, self.port), authkey=b'infiniflow-token4kevinhu')
def __getattr__(self, name):
import pickle
def do_rpc(*args, **kwargs):
for _ in range(3):
try:
self._connection.send(pickle.dumps((name, args, kwargs)))
self._connection.send(
pickle.dumps((name, args, kwargs)))
return pickle.loads(self._connection.recv())
except Exception as e:
self.__conn()
@@ -148,7 +158,8 @@ class LocalLLM(Base):
self.client = LocalLLM.RPCProxy("127.0.0.1", 7860)
def chat(self, system, history, gen_conf):
if system: history.insert(0, {"role": "system", "content": system})
if system:
history.insert(0, {"role": "system", "content": system})
try:
ans = self.client.chat(
history,
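
The LocalLLM class above routes chat() through an RPCProxy that talks to a separate model-serving process. For reference, a self-contained sketch of that retry-and-reconnect pattern, using the same address and authkey as the diff; the final raise is an assumption, since the hunk is truncated before the loop's fallback:

    import pickle
    from multiprocessing.connection import Client

    class RPCProxySketch:
        def __init__(self, host, port, authkey=b'infiniflow-token4kevinhu'):
            self.host = host
            self.port = port
            self.authkey = authkey
            self.__conn()

        def __conn(self):
            # (re)open the connection to the RPC server
            self._connection = Client((self.host, self.port),
                                      authkey=self.authkey)

        def __getattr__(self, name):
            # any unknown attribute becomes a remote call: pickle
            # (name, args, kwargs), send it, unpickle the reply
            def do_rpc(*args, **kwargs):
                for _ in range(3):  # three attempts, reconnecting on failure
                    try:
                        self._connection.send(
                            pickle.dumps((name, args, kwargs)))
                        return pickle.loads(self._connection.recv())
                    except Exception:
                        self.__conn()
                raise Exception("RPC connection lost!")  # assumed fallback
            return do_rpc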

View File

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zhipuai import ZhipuAI
import io
from abc import ABC
@@ -57,8 +58,8 @@ class Base(ABC):
},
},
{
"text": "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。" if self.lang.lower() == "chinese" else \
"Please describe the content of this picture, like where, when, who, what happen. If it has number data, please extract them out.",
"text": "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。" if self.lang.lower() == "chinese" else
"Please describe the content of this picture, like where, when, who, what happen. If it has number data, please extract them out.",
},
],
}
@@ -92,8 +93,9 @@ class QWenCV(Base):
def prompt(self, binary):
# stupid as hell
tmp_dir = get_project_base_directory("tmp")
if not os.path.exists(tmp_dir): os.mkdir(tmp_dir)
path = os.path.join(tmp_dir, "%s.jpg"%get_uuid())
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
path = os.path.join(tmp_dir, "%s.jpg" % get_uuid())
Image.open(io.BytesIO(binary)).save(path)
return [
{
@@ -103,8 +105,8 @@ class QWenCV(Base):
"image": f"file://{path}"
},
{
"text": "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。" if self.lang.lower() == "chinese" else \
"Please describe the content of this picture, like where, when, who, what happen. If it has number data, please extract them out.",
"text": "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。" if self.lang.lower() == "chinese" else
"Please describe the content of this picture, like where, when, who, what happen. If it has number data, please extract them out.",
},
],
}
@@ -120,9 +122,6 @@ class QWenCV(Base):
return response.message, 0
from zhipuai import ZhipuAI
class Zhipu4V(Base):
def __init__(self, key, model_name="glm-4v", lang="Chinese"):
self.client = ZhipuAI(api_key=key)
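
The QWenCV.prompt hunks above (the "stupid as hell" workaround) persist the raw image bytes to a temp file so dashscope's multimodal API can be handed a file:// URL. A standalone sketch of just that step; uuid.uuid4() stands in for the repo's get_uuid() and a local "tmp" directory for get_project_base_directory("tmp"):

    import io
    import os
    import uuid

    from PIL import Image

    def binary_to_file_url(binary: bytes, tmp_dir: str = "tmp") -> str:
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        # write the bytes out as a .jpg, exactly as QWenCV.prompt does
        path = os.path.join(tmp_dir, "%s.jpg" % uuid.uuid4().hex)
        Image.open(io.BytesIO(binary)).save(path)
        return "file://" + path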

View File

@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zhipuai import ZhipuAI
import os
from abc import ABC
@@ -40,11 +41,11 @@ flag_model = FlagModel(model_dir,
query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
use_fp16=torch.cuda.is_available())
class Base(ABC):
def __init__(self, key, model_name):
pass
def encode(self, texts: list, batch_size=32):
raise NotImplementedError("Please implement encode method!")
@@ -67,11 +68,11 @@ class HuEmbedding(Base):
"""
self.model = flag_model
def encode(self, texts: list, batch_size=32):
texts = [t[:2000] for t in texts]
token_count = 0
for t in texts: token_count += num_tokens_from_string(t)
for t in texts:
token_count += num_tokens_from_string(t)
res = []
for i in range(0, len(texts), batch_size):
res.extend(self.model.encode(texts[i:i + batch_size]).tolist())
@@ -90,7 +91,8 @@ class OpenAIEmbed(Base):
def encode(self, texts: list, batch_size=32):
res = self.client.embeddings.create(input=texts,
model=self.model_name)
return np.array([d.embedding for d in res.data]), res.usage.total_tokens
return np.array([d.embedding for d in res.data]
), res.usage.total_tokens
def encode_queries(self, text):
res = self.client.embeddings.create(input=[text],
@@ -111,7 +113,7 @@ class QWenEmbed(Base):
for i in range(0, len(texts), batch_size):
resp = dashscope.TextEmbedding.call(
model=self.model_name,
input=texts[i:i+batch_size],
input=texts[i:i + batch_size],
text_type="document"
)
embds = [[] for _ in range(len(resp["output"]["embeddings"]))]
@@ -123,14 +125,14 @@ class QWenEmbed(Base):
def encode_queries(self, text):
resp = dashscope.TextEmbedding.call(
model=self.model_name,
input=text[:2048],
text_type="query"
)
return np.array(resp["output"]["embeddings"][0]["embedding"]), resp["usage"]["total_tokens"]
model=self.model_name,
input=text[:2048],
text_type="query"
)
return np.array(resp["output"]["embeddings"][0]
["embedding"]), resp["usage"]["total_tokens"]
from zhipuai import ZhipuAI
class ZhipuEmbed(Base):
def __init__(self, key, model_name="embedding-2"):
self.client = ZhipuAI(api_key=key)
@@ -139,9 +141,10 @@ class ZhipuEmbed(Base):
def encode(self, texts: list, batch_size=32):
res = self.client.embeddings.create(input=texts,
model=self.model_name)
return np.array([d.embedding for d in res.data]), res.usage.total_tokens
return np.array([d.embedding for d in res.data]
), res.usage.total_tokens
def encode_queries(self, text):
res = self.client.embeddings.create(input=text,
model=self.model_name)
return np.array(res["data"][0]["embedding"]), res.usage.total_tokens
return np.array(res["data"][0]["embedding"]), res.usage.total_tokens
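
All of the encode() methods above share one batching pattern: truncate each input to the model's limit, then send slices of batch_size and sum the token usage. A minimal sketch of that pattern, with tiktoken assumed as a stand-in for the repo's num_tokens_from_string helper:

    import tiktoken

    def batched_encode(embed_fn, texts, batch_size=32, max_chars=2000):
        texts = [t[:max_chars] for t in texts]  # truncate, as HuEmbedding does
        enc = tiktoken.get_encoding("cl100k_base")
        token_count = sum(len(enc.encode(t)) for t in texts)
        vectors = []
        for i in range(0, len(texts), batch_size):  # fixed-size slices
            vectors.extend(embed_fn(texts[i:i + batch_size]))
        return vectors, token_count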

View File

@@ -9,7 +9,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
class RPCHandler:
def __init__(self):
self._functions = { }
self._functions = {}
def register_function(self, func):
self._functions[func.__name__] = func
@@ -21,12 +21,12 @@ class RPCHandler:
func_name, args, kwargs = pickle.loads(connection.recv())
# Run the RPC and send a response
try:
r = self._functions[func_name](*args,**kwargs)
r = self._functions[func_name](*args, **kwargs)
connection.send(pickle.dumps(r))
except Exception as e:
connection.send(pickle.dumps(e))
except EOFError:
pass
pass
def rpc_server(hdlr, address, authkey):
@@ -44,11 +44,17 @@ def rpc_server(hdlr, address, authkey):
models = []
tokenizer = None
def chat(messages, gen_conf):
global tokenizer
model = Model()
try:
conf = {"max_new_tokens": int(gen_conf.get("max_tokens", 256)), "temperature": float(gen_conf.get("temperature", 0.1))}
conf = {
"max_new_tokens": int(
gen_conf.get(
"max_tokens", 256)), "temperature": float(
gen_conf.get(
"temperature", 0.1))}
print(messages, conf)
text = tokenizer.apply_chat_template(
messages,
@@ -65,7 +71,8 @@ def chat(messages, gen_conf):
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return tokenizer.batch_decode(
generated_ids, skip_special_tokens=True)[0]
except Exception as e:
return str(e)
@@ -75,10 +82,15 @@ def Model():
random.seed(time.time())
return random.choice(models)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, help="Model name")
parser.add_argument("--port", default=7860, type=int, help="RPC serving port")
parser.add_argument(
"--port",
default=7860,
type=int,
help="RPC serving port")
args = parser.parse_args()
handler = RPCHandler()
@@ -93,4 +105,5 @@ if __name__ == "__main__":
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
# Run the server
rpc_server(handler, ('0.0.0.0', args.port), authkey=b'infiniflow-token4kevinhu')
rpc_server(handler, ('0.0.0.0', args.port),
authkey=b'infiniflow-token4kevinhu')
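
For completeness, a sketch of the accept loop implied by rpc_server() above. The Listener/threading body sits in a hunk this diff does not show, and handle_connection is a guessed name for RPCHandler's dispatch method, so treat this as an assumption rather than the repo's exact code:

    import threading
    from multiprocessing.connection import Listener

    def rpc_server_sketch(handler, address, authkey):
        with Listener(address, authkey=authkey) as sock:
            while True:
                client = sock.accept()  # one thread per connection
                t = threading.Thread(
                    target=handler.handle_connection,  # assumed method name
                    args=(client,))
                t.daemon = True
                t.start()

    # Hypothetical pairing with the client-side proxy sketched earlier:
    #   python rpc_server.py --model_name <hf-model-id> --port 7860
    #   proxy = RPCProxySketch("127.0.0.1", 7860)
    #   answer = proxy.chat(history, gen_conf)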