|
|
|
|
import openai
|
|
|
|
|
import re
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def extract_score(response_text):
    """Extract a 1-5 score from a ChatGPT grading reply.

    The reply is expected to end with a Chinese sentence such as
    "因此,评分为4分。"; several regex patterns are tried from most to least
    specific and the first captured digit wins.

    Args:
        response_text: the raw model reply; coerced with str() so that
            None (a failed request) is tolerated.

    Returns:
        The score as a one-character string ('1'-'5'); '3' is returned as a
        neutral fallback when no pattern matches.
    """
    text = str(response_text)

    # Ordered from most to least specific; the first match is returned.
    patterns = (
        r"^评分为([1-5])分",
        r"评分:([1-5])分",
        r"评分为([1-5])",
    )
    for pat in patterns:
        match = re.search(pat, text)
        if match:
            return match.group(1)

    # Neutral default when the reply carries no recognizable score.
    return '3'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def request_gpt(prompt, retries=3):
    """Send a chat prompt to gpt-3.5-turbo, retrying on any failure.

    Args:
        prompt: list of chat messages ({"role": ..., "content": ...} dicts)
            passed straight to openai.ChatCompletion.create.
        retries: maximum number of attempts before giving up.

    Returns:
        The assistant reply text, or None when every attempt failed.
    """
    def _ordinal(n):
        # 11th-13th (and the rest of 10-20) always take "th"; otherwise the
        # suffix depends on the last digit. Replaces the old `and/or` hack.
        if 10 <= n % 100 <= 20:
            suffix = "th"
        else:
            suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
        return f"{n}{suffix}"

    for attempt in range(retries):
        try:
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=prompt,
            )
            return response.choices[0]['message']['content']
        except Exception as e:
            # Best-effort retry: log the failure and try again.
            print(f"An error occurred while scoring with ChatGPT: {e}, it's the {_ordinal(attempt + 1)} time.")
    return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class GPTScorer:
    """Grades large-model answers by delegating the judgement to ChatGPT.

    A Chinese system prompt is built for one of three evaluation dimensions
    ("accuracy", "fluency", "diff") and gpt-3.5-turbo is asked to reply with
    a rationale followed by a numeric score.
    """

    def __init__(self, api_key):
        # The key is registered globally on the openai module.
        openai.api_key = api_key
        # Evaluation dimension used until mode() is called.
        self.eval_mode = "accuracy"

    def mode(self, mode):
        """Select the evaluation dimension; returns self so calls can chain."""
        if mode not in ("accuracy", "fluency", "diff"):
            raise ValueError("Invalid mode. Must be one of 'accuracy', 'fluency' or 'diff'.")
        self.eval_mode = mode
        return self

    def score_with_chatgpt(self, question, model_result, reference, origin_model_result=None):
        """Ask ChatGPT to grade `model_result` against `reference`.

        Returns a (reply_text, score_string) pair; on failure the pair is
        (None, '2') so callers can both detect the error and keep a score.
        """
        messages = self.generate_scoring_prompt(question, model_result, reference, origin_model_result)
        try:
            reply = request_gpt(messages, retries=5)
            return reply, extract_score(reply)
        except Exception as e:
            print("An error occurred while extract score:", e)
            return None, '2'

    def generate_scoring_prompt(self, question, model_result, reference, origin_model_result=None):
        """Assemble the chat message list for the current evaluation mode.

        Raises:
            ValueError: in 'diff' mode when origin_model_result is missing.
        """
        # System instructions per evaluation dimension (verbatim prompts).
        system_texts = {
            "accuracy": "你是一个汽车领域专家,接下来将向你提供一个问题、一个参考答案和一个大模型生成的结果。"
                        "请对比参考答案和大模型生成结果,从信息准确性的角度评分以下生成的结果,以评估其质量。满分为4分。"
                        "信息的准确性应当被首要考虑,多余的未知真假的信息不应该带来加分。"
                        "评分标准为:模型回答正确——4分。模型回答模糊,但部分准确——3分。"
                        "模型无法给出解答,但明确表示无法解答——2分。模型给出错误或无法理解的回答/模型回答语句不完整——1分。"
                        "回复格式为:理由:xxx。因此,评分为x分。",
            "fluency": "你是一个汽车领域专家,接下来将向你提供一个问题、一个参考答案和一个大模型生成的结果。"
                       "请从语言流畅度的角度评分大模型生成的结果,以评估其质量。满分为3分。"
                       "评分标准为:模型回答流畅,符合日常语言习惯——3分。模型回答流畅,但存在突然中断等情况——2分。"
                       "模型回答无条理,可能重复输出某些单词——1分。"
                       "回复格式为:理由:xxx。因此,评分为x分。",
            "diff": "你是一个汽车领域专家,接下来将向你提供一个问题、一个参考答案、一个大模型生成的结果和一个微调后大模型生成结果。"
                    "请对比这些结果,判断微调后大模型的结果是否优于原模型。满分为3分。"
                    "信息的准确性应当被首要考虑,多余的未知真假的信息不应该带来加分。"
                    "对比时请关注结果和参考答案的契合度。"
                    "评分标准为:认为回答优于原模型——3分。认为回答与原模型持平——2分。"
                    "认为回答不如原模型——1分。"
                    "回复格式为:理由:xxx。因此,评分为x分。",
        }

        messages = []
        if self.eval_mode in system_texts:
            messages.append({"role": "system", "content": system_texts[self.eval_mode]})

        if self.eval_mode == "diff":
            # Comparing against the base model requires its original answer.
            if origin_model_result is None:
                raise ValueError("The original model result is required in 'diff' mode.")
            user_text = (f"问题:{question}\n\n原模型生成的结果:{origin_model_result}\n\n"
                         f"微调后模型生成的结果:{model_result}\n\n参考答案:{reference}")
        else:
            user_text = f"问题:{question}\n\n生成的结果:{model_result}\n\n参考答案:{reference}"

        messages.append({"role": "user", "content": user_text})
        return messages
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Example usage
if __name__ == "__main__":
    import os

    # SECURITY: never commit API keys to source control — the previous
    # revision hard-coded a live key here. Read it from the environment.
    my_api_key = os.environ.get("OPENAI_API_KEY", "")

    # Initialize the scorer (registers the key with the openai client).
    scorer = GPTScorer(my_api_key)

    # Quick offline sanity check of the score-extraction regexes.
    print(extract_score('理由:参考答案与生成的结果完全一致,信息准确无误。因此,评分为4分。'))

    # Sample grading call (requires a valid API key):
    # sample_question = "秦Plus-DMi车型的安全气囊有哪些类型?"
    # sample_model_result = ("截止到我最后更新知识的时候,关于秦Plus-DMi车型的具体安全气囊类型的信息我并没有。"
    #                        "通常来说,汽车的安全气囊系统可能包括驾驶员气囊、副驾驶气囊、侧面气囊、头部气囊等。"
    #                        "但具体车型的安全气囊配置可能会因地区、年份和车型的不同而有所差异。"
    #                        "建议您直接查询该车型的官方资料或者联系经销商以获取最准确的信息。")
    # sample_reference = "秦Plus-DMi配备有驾驶员安全气囊、前排乘员安全气囊、侧帘式安全气囊和座椅侧安全气囊。"
    #
    # # Obtain the ChatGPT grade.
    # response_text, score = scorer.mode('accuracy').score_with_chatgpt(sample_question, sample_model_result, sample_reference)
    # if response_text is not None:
    #     print("ChatGPT评分:", score, "\nChatGPT回复:", response_text)
    # else:
    #     print("无法获取ChatGPT评分。")
|