Fix: resolve hash collisions by switching to UUID & correct logic for always-true statements & Update GPT API integration & Support qianwen-deepresearch (#10208)

### What problem does this PR solve?

Fix: resolve hash collisions by switching to UUID & correct logic for
always-true statements, solved: #10165
Feat: Update GPT API integration, solved: #10204
Feat: Support qianwen-deepresearch, solved: #10163
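
A minimal sketch of the id-generation change described above, assuming the collisions came from deriving ids from content (the helper names and the `hashlib` baseline are illustrative, not the project's actual code): a content hash maps identical inputs to the same id, while `uuid.uuid4()` yields an independent random id per record.

```python
import hashlib
import uuid

def content_hash_id(text: str) -> str:
    # Hash-based id (illustrative old behaviour): identical content
    # always produces the same id, so duplicate records collide.
    return hashlib.md5(text.encode("utf-8")).hexdigest()

def uuid_id() -> str:
    # UUID-based id (the fix): a random 128-bit identifier, unique
    # regardless of the record's content.
    return uuid.uuid4().hex

print(content_hash_id("same text") == content_hash_id("same text"))  # True  -> collision
print(uuid_id() == uuid_id())                                         # False -> distinct ids
```
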
### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
- [x] New Feature (non-breaking change which adds functionality)

Billy Bao committed 2025-09-23 09:34:30 +08:00 (committed by GitHub)
parent c8b79dfed4
commit da82566304
5 changed files with 28 additions and 16 deletions

@@ -75,7 +75,7 @@ class Base(ABC):
     def chat(self, system, history, gen_conf, images=[], **kwargs):
         try:
-            response = self.client.chat.completions.create(
+            response = self.client.responses.create(
                 model=self.model_name,
                 messages=self._form_history(system, history, images)
             )
@@ -87,7 +87,7 @@ class Base(ABC):
         ans = ""
         tk_count = 0
         try:
-            response = self.client.chat.completions.create(
+            response = self.client.responses.create(
                 model=self.model_name,
                 messages=self._form_history(system, history, images),
                 stream=True
@@ -174,7 +174,8 @@ class GptV4(Base):
     def describe(self, image):
         b64 = self.image2base64(image)
-        res = self.client.chat.completions.create(
+        # Check if this is a GPT-5 model and use responses.create API
+        res = self.client.responses.create(
             model=self.model_name,
             messages=self.prompt(b64),
         )
@@ -182,7 +183,7 @@ class GptV4(Base):
     def describe_with_prompt(self, image, prompt=None):
         b64 = self.image2base64(image)
-        res = self.client.chat.completions.create(
+        res = self.client.responses.create(
             model=self.model_name,
             messages=self.vision_llm_prompt(b64, prompt),
         )
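
For context on the `chat.completions.create` -> `responses.create` switch shown in the hunks above, a hedged sketch of how the two OpenAI Python SDK calls are typically shaped; the model names are illustrative, and how the project's `_form_history`/`prompt` payloads are adapted to the Responses API is not visible in this excerpt.

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Chat Completions style (the call being replaced): a list of role/content
# messages, answer under choices[0].message.content.
chat_resp = client.chat.completions.create(
    model="gpt-4o",  # illustrative model name
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ],
)
print(chat_resp.choices[0].message.content)

# Responses API style (the call being introduced): the system prompt goes in
# `instructions`, the user prompt in `input`, answer under output_text.
resp = client.responses.create(
    model="gpt-5",  # illustrative model name
    instructions="You are a helpful assistant.",
    input="Say hello.",
)
print(resp.output_text)
```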