feat: allow users to change, via the config file, the amount of context text sent to the model per request

Rock Chin
2022-12-10 17:39:02 +08:00
parent d45bd83012
commit e3442512c4
3 changed files with 25 additions and 10 deletions


@@ -38,20 +38,29 @@ openai_config = {
completion_api_params = {
"model": "text-davinci-003",
"temperature": 0.8,
"max_tokens": 200,
"max_tokens": 200, # 每次向OpenAI请求的最大字符数
"top_p": 1,
"frequency_penalty": 0.2,
"presence_penalty": 0.4,
}
# Number of characters of conversation-history context sent to the OpenAI API per request
# Must not exceed (4096 - max_tokens) characters, where max_tokens is the value in completion_api_params above
# Note: a larger prompt_submit_length consumes the OpenAI account quota faster
prompt_submit_length = 1024
# Timeout for message processing
# Number of chat rounds of conversation-history context sent to the OpenAI API per request
# Not recommended to set this too high; when sending conversation context to the OpenAI API, the content is kept within prompt_submit_length characters
# and within prompt_submit_round_amount rounds
prompt_submit_round_amount = 8
# Timeout for message processing, in seconds
process_message_timeout = 20
# Number of retries when message processing times out
retry_times = 3
# Expiration time of each session
# Expiration time of each session, in seconds
# Default: 20 minutes
session_expire_time = 60 * 20
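For reference, the comments above tie prompt_submit_length to max_tokens: the submitted context plus the requested completion must fit in text-davinci-003's 4096-token window. A minimal sketch of that budget check, using the values from this diff; the helper name check_prompt_budget is illustrative, not part of the project:

# Minimal sketch of the budget described in the config comments above.
# Assumes the values shown in this diff; check_prompt_budget is hypothetical.
MODEL_CONTEXT_WINDOW = 4096          # text-davinci-003 context window

max_tokens = 200                     # completion_api_params["max_tokens"]
prompt_submit_length = 1024          # characters of chat history per request

def check_prompt_budget() -> None:
    # The comments above require prompt_submit_length <= 4096 - max_tokens.
    limit = MODEL_CONTEXT_WINDOW - max_tokens
    if prompt_submit_length > limit:
        raise ValueError(
            "prompt_submit_length ({}) exceeds {} ({} - max_tokens)".format(
                prompt_submit_length, limit, MODEL_CONTEXT_WINDOW))

check_prompt_budget()  # passes with the defaults: 1024 <= 3896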


@@ -94,10 +94,12 @@ class Session:
def append(self, text: str) -> str:
self.last_interact_timestamp = int(time.time())
max_length = config.prompt_submit_length if config.prompt_submit_length is not None else 1024
# request a completion from the API
response = pkg.openai.manager.get_inst().request_completion(self.cut_out(self.prompt + self.user_name + ':' +
text + '\n' + self.bot_name + ':',
7, 1024), self.user_name + ':')
7, max_length), self.user_name + ':')
self.prompt += self.user_name + ':' + text + '\n' + self.bot_name + ':'
# print(response)
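The change above makes Session.append read the character budget from config, falling back to 1024 when prompt_submit_length is unset. A standalone sketch of that fallback; resolve_prompt_limit is a hypothetical name, and getattr guards against an older config.py that lacks the new option:

# Sketch of the fallback used in Session.append above; not the project's code.
import config  # the project's config.py

def resolve_prompt_limit(default: int = 1024) -> int:
    # Use prompt_submit_length when it is present and not None, else the default.
    limit = getattr(config, "prompt_submit_length", None)
    return limit if limit is not None else default

The resolved limit is then passed to cut_out in place of the previously hard-coded 1024.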
@@ -119,7 +121,7 @@ class Session:
return res_ans
# Extract from the prompt the shortest string of no more than max_rounds rounds whose length exceeds max_tokens
# From the tail of the prompt, extract a string of no more than max_rounds rounds and at most max_tokens in length
# ensuring only complete dialogue rounds are kept
def cut_out(self, prompt: str, max_rounds: int, max_tokens: int) -> str:
# split the prompt into individual rounds
@@ -130,15 +132,17 @@ class Session:
checked_rounds = 0
# iterate from back to front, prepending each round to result and checking whether it still meets the limits
for i in range(len(rounds_spt_by_user_name) - 1, 0, -1):
result = self.user_name + ':' + rounds_spt_by_user_name[i] + result
result_temp = self.user_name + ':' + rounds_spt_by_user_name[i] + result
checked_rounds += 1
if checked_rounds >= max_rounds:
if checked_rounds > max_rounds:
break
if len(result) > max_tokens:
if len(result_temp) > max_tokens:
break
result = result_temp
logging.debug('cut_out: {}'.format(result))
return result
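The result_temp change above alters cut_out's contract: the old loop committed each round before checking, so the returned string could overshoot max_tokens, while the new loop only commits a candidate that passes both the round and length checks. A standalone sketch of that tail-truncation logic; cut_from_tail, rounds, and speaker are illustrative names, not the project's:

# Sketch of the truncation logic in cut_out above; names are illustrative.
def cut_from_tail(rounds, speaker, max_rounds, max_chars):
    # Keep whole rounds from the end until one more round would break a limit.
    result = ""
    kept = 0
    for piece in reversed(rounds):
        candidate = speaker + ':' + piece + result
        kept += 1
        if kept > max_rounds:
            break
        if len(candidate) > max_chars:
            break
        result = candidate  # commit only after both limits are satisfied
    return result

With this shape, the returned history never exceeds max_chars characters, which is what lets Session.append pass config.prompt_submit_length straight through.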


@@ -142,12 +142,14 @@ class QQBotManager:
reply = "[GPT]" + session.append(text_message)
except openai.error.APIConnectionError as e:
self.notify_admin("{}会话调用API失败:{}".format(session_name, e))
# logging.exception(e)
reply = "[bot]err:调用API失败请联系作者或等待修复"
except openai.error.RateLimitError as e:
# logging.exception(e)
self.notify_admin("API调用额度超限,请向OpenAI账户充值或在config.py中更换api_key")
reply = "[bot]err:API调用额度超额请联系作者或等待修复"
except openai.error.InvalidRequestError as e:
self.notify_admin("{}API调用参数错误:{}\n\n这可能是由于config.py中的prompt_submit_length参数或"
"completion_api_params中的max_tokens参数数值过大导致的请尝试将其降低".format(session_name, e))
reply = "[bot]err:API调用参数错误请联系作者或等待修复"
except Exception as e:
logging.exception(e)
reply = "[bot]err:{}".format(e)