from knn_prompt import generate_prompt
from LLM import zero_shot
from save_response import save_responses_knn
import json

model_series = 'gpt'
model_name = 'deepseek-ai/DeepSeek-R1'
type = 'yes_no'
# Select the prompting setting; only the uncommented assignment takes effect.
# shot_type = 'one_shot'
# shot_type = 'two_shot'
shot_type = 'three_shot'
prompt = generate_prompt(type, './data/knn_'+type+'.json', example_num=3)

j = 0
q = 0
# # Count how many items have been processed so far (uncomment to resume a previous run).
# json_path = './output/knn/'+shot_type+'/'+model_name+'.json'
# j = len(json.load(open(json_path, 'r', encoding='utf-8')))
# q = len(json.load(open('./output/knn/'+shot_type+'_raw/'+model_name+'.json', 'r', encoding='utf-8')))
print(j)

# # Only continue when the parsed and raw outputs are in sync (q == j).
if q == j:
    for i in range(j, len(prompt)):
        # The appended Chinese instruction tells the model not to include any
        # unrelated explanation or meaningless filler in its reply.
        response = zero_shot(model_series, model_name, prompt[i]+'\n'+'不要包含任何其他无关解释和无意义回复')
        print(prompt[i])
        # print(response)
        # Parse the response and save it.
        save_responses_knn(prompt[i], type, i, response,
                           './output/knn/'+shot_type+'/'+model_name+'.json',
                           './output/knn/'+shot_type+'_raw/'+model_name+'.json')
else:
    print('q!=j')
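
# --- Optional sketch, not part of the original flow: resuming a run safely. ---
# A minimal helper, assuming the output JSON files may not exist yet on the
# first run; it returns 0 in that case instead of raising FileNotFoundError.
# The name `count_processed` is hypothetical and is not defined elsewhere in
# this repo; it mirrors the commented-out resume logic above.
import os

def count_processed(json_path):
    # Return how many responses have already been saved to json_path.
    if not os.path.exists(json_path):
        return 0
    with open(json_path, 'r', encoding='utf-8') as f:
        return len(json.load(f))

# Example usage (commented out so it does not change the script's behavior):
# j = count_processed('./output/knn/'+shot_type+'/'+model_name+'.json')
# q = count_processed('./output/knn/'+shot_type+'_raw/'+model_name+'.json')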