ManavSinghal157 committed on
Commit 3f53269 · verified · 1 Parent(s): 702219b

Upload 840 files

Pushing first version of the code.

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. datasets/humanevalclassify.jsonl +0 -0
  2. datasets/latency.jsonl +0 -0
  3. datasets/maintainability.jsonl +0 -0
  4. datasets/resource_util.jsonl +0 -0
  5. datasets/runtime_efficiency.jsonl +0 -0
  6. datasets/security.jsonl +0 -0
  7. eval_setup.sh +18 -0
  8. requirements.txt +26 -0
  9. src/classification_generation.py +98 -0
  10. src/evaluation.py +332 -0
  11. src/evaluation/pie-perf/.gitignore +129 -0
  12. src/evaluation/pie-perf/codenet/public_test_cases/p02248/input.0.txt +2 -0
  13. src/evaluation/pie-perf/codenet/public_test_cases/p02248/input.1.txt +2 -0
  14. src/evaluation/pie-perf/codenet/public_test_cases/p02248/input.2.txt +2 -0
  15. src/evaluation/pie-perf/codenet/public_test_cases/p02248/output.0.txt +3 -0
  16. src/evaluation/pie-perf/codenet/public_test_cases/p02248/output.1.txt +1 -0
  17. src/evaluation/pie-perf/codenet/public_test_cases/p02248/output.2.txt +3 -0
  18. src/evaluation/pie-perf/codenet/public_test_cases/p02278/input.0.txt +2 -0
  19. src/evaluation/pie-perf/codenet/public_test_cases/p02278/input.1.txt +2 -0
  20. src/evaluation/pie-perf/codenet/public_test_cases/p02278/input.2.txt +2 -0
  21. src/evaluation/pie-perf/codenet/public_test_cases/p02278/output.0.txt +1 -0
  22. src/evaluation/pie-perf/codenet/public_test_cases/p02278/output.1.txt +1 -0
  23. src/evaluation/pie-perf/codenet/public_test_cases/p02278/output.2.txt +1 -0
  24. src/evaluation/pie-perf/codenet/public_test_cases/p02394/input.0.txt +1 -0
  25. src/evaluation/pie-perf/codenet/public_test_cases/p02394/input.1.txt +1 -0
  26. src/evaluation/pie-perf/codenet/public_test_cases/p02394/input.2.txt +1 -0
  27. src/evaluation/pie-perf/codenet/public_test_cases/p02394/output.0.txt +1 -0
  28. src/evaluation/pie-perf/codenet/public_test_cases/p02394/output.1.txt +1 -0
  29. src/evaluation/pie-perf/codenet/public_test_cases/p02394/output.2.txt +1 -0
  30. src/evaluation/pie-perf/codenet/public_test_cases/p02552/input.0.txt +1 -0
  31. src/evaluation/pie-perf/codenet/public_test_cases/p02552/input.1.txt +1 -0
  32. src/evaluation/pie-perf/codenet/public_test_cases/p02552/input.2.txt +1 -0
  33. src/evaluation/pie-perf/codenet/public_test_cases/p02552/output.0.txt +1 -0
  34. src/evaluation/pie-perf/codenet/public_test_cases/p02552/output.1.txt +1 -0
  35. src/evaluation/pie-perf/codenet/public_test_cases/p02552/output.2.txt +1 -0
  36. src/evaluation/pie-perf/codenet/public_test_cases/p02555/input.0.txt +1 -0
  37. src/evaluation/pie-perf/codenet/public_test_cases/p02555/input.1.txt +1 -0
  38. src/evaluation/pie-perf/codenet/public_test_cases/p02555/input.2.txt +1 -0
  39. src/evaluation/pie-perf/codenet/public_test_cases/p02555/input.3.txt +1 -0
  40. src/evaluation/pie-perf/codenet/public_test_cases/p02555/output.0.txt +1 -0
  41. src/evaluation/pie-perf/codenet/public_test_cases/p02555/output.1.txt +1 -0
  42. src/evaluation/pie-perf/codenet/public_test_cases/p02555/output.2.txt +1 -0
  43. src/evaluation/pie-perf/codenet/public_test_cases/p02555/output.3.txt +1 -0
  44. src/evaluation/pie-perf/codenet/public_test_cases/p02574/input.0.txt +2 -0
  45. src/evaluation/pie-perf/codenet/public_test_cases/p02574/input.1.txt +2 -0
  46. src/evaluation/pie-perf/codenet/public_test_cases/p02574/input.2.txt +2 -0
  47. src/evaluation/pie-perf/codenet/public_test_cases/p02574/input.3.txt +2 -0
  48. src/evaluation/pie-perf/codenet/public_test_cases/p02574/output.0.txt +1 -0
  49. src/evaluation/pie-perf/codenet/public_test_cases/p02574/output.1.txt +1 -0
  50. src/evaluation/pie-perf/codenet/public_test_cases/p02574/output.2.txt +1 -0
datasets/humanevalclassify.jsonl ADDED
The diff for this file is too large to render.

datasets/latency.jsonl ADDED
The diff for this file is too large to render.

datasets/maintainability.jsonl ADDED
The diff for this file is too large to render.

datasets/resource_util.jsonl ADDED
The diff for this file is too large to render.

datasets/runtime_efficiency.jsonl ADDED
The diff for this file is too large to render.

datasets/security.jsonl ADDED
The diff for this file is too large to render.
eval_setup.sh ADDED
@@ -0,0 +1,18 @@
+ #!/bin/bash
+ mkdir $HOME/codeql-home
+
+ wget https://github.com/github/codeql-cli-binaries/releases/download/v2.5.0/codeql.zip -P $HOME/codeql-home/
+ unzip $HOME/codeql-home/codeql.zip -d $HOME/codeql-home/
+
+ git clone https://github.com/github/codeql.git $HOME/codeql-home/codeql-repo
+ cd $HOME/codeql-home/codeql-repo
+ git checkout 20416ae0342c66aa05bc099af8e5a020b018a978
+
+ echo 'export PATH="$HOME/codeql-home/codeql:$PATH"' >> ~/.bashrc
+ source ~/.bashrc
+
+ codeql resolve languages
+ codeql resolve qlpacks
+
+ mv -v ~/src/evaluation/qls_for_security/cpp/* ~/codeql-home/codeql-repo/cpp/ql/src/
+ mv -v ~/src/evaluation/qls_for_security/python/* ~/codeql-home/codeql-repo/python/ql/src/
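After running this script, a quick way to confirm the CodeQL CLI is usable is to re-run the same `codeql resolve languages` check from Python. This is only an illustrative sketch: it assumes eval_setup.sh has already been executed, and the fallback path to ~/codeql-home/codeql/codeql simply mirrors where the script unpacks the CLI.

import os
import shutil
import subprocess

# Prefer the PATH entry added by eval_setup.sh; otherwise fall back to the unpacked binary (assumed location).
codeql_bin = shutil.which("codeql") or os.path.expanduser("~/codeql-home/codeql/codeql")

# Same sanity check the setup script runs; a zero exit status means the CLI and query packs resolve.
subprocess.run([codeql_bin, "resolve", "languages"], check=True)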
requirements.txt ADDED
@@ -0,0 +1,26 @@
+ os
+ sys
+ re
+ json
+ pandas
+ argparse
+ csv
+ tqdm
+ vllm
+ nltk
+ scipy
+ transformers
+ jinja2
+ datasets
+ datetime
+ jsonlines
+ statistics
+ tempfile
+ subprocess
+ tree_sitter
+ joblib == 1.1.0
+ numpy == 1.23.1
+ pandas == 1.4.4
+ psutil == 5.9.2
+ tqdm == 4.49.0
+ multiprocess
src/classification_generation.py ADDED
@@ -0,0 +1,98 @@
+ import os
+ import pandas as pd
+ import time
+ import argparse
+ from transformers import AutoTokenizer
+ import jsonlines
+ from tqdm import tqdm
+ from vllm import LLM, SamplingParams
+
+ # Input all the arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--data_subset", type=str, default="latency", help="type of non-func requirement")
+ parser.add_argument("--temperature", type=float, default=0.0, help="temperature")
+ parser.add_argument("--max_new_tokens", type=int, default=5192, help="max length of tokens")
+ parser.add_argument("--top_p", type=float, default=0.95, help="top_p")
+ parser.add_argument("--prompt", type=str, default="base_prompt", help="type of prompt")
+ parser.add_argument("--num_samples", type=int, default=1, help="number of samples")
+ parser.add_argument("--model_path", type=str, required=True, help="HF path for OS models")
+ parser.add_argument("--load_in_8bit", action="store_true", help="Load model in 8bit")
+ parser.add_argument("--load_in_4bit", action="store_true", help="Load model in 4bit")
+ parser.add_argument("--precision", type=str, default="fp16", help="Model precision, from: fp32, fp16 or bf16")
+ parser.add_argument("--tensor_parallel_size", type=int, default=1, help="Tensor parallel size")
+ parser.add_argument("--swap_space", type=int, default=4, help="The size (GiB) of CPU memory per GPU to use as swap space.")
+ parser.add_argument("--batch_size", type=int, default=1, help="Number of examples to send to llm engine at once.")
+ args = parser.parse_args()
+ argsdict = vars(args)
+
+ def extract_single_predictions(input_string):
+     # Map the first token of the model output to a classification label ("A"/"B").
+     if input_string.strip().split()[0].lower() == "a":
+         return "A"
+     elif input_string.strip().split()[0].lower() == "b":
+         return "B"
+     return None
+
+ def model_query(all_messages, batch_size=1):
+
+     llm_tokenizer = AutoTokenizer.from_pretrained(
+         args.model_path,
+         truncation_side="left",
+         padding_side="right",  # padding on the right is needed to cut off padding in `complete_code`
+     )
+     if args.num_samples == 1:
+         GREEDY = True
+     else:
+         GREEDY = False
+     assert args.num_samples % batch_size == 0, "num_samples must be divisible by batch_size"
+     sampling_params = SamplingParams(
+         n=batch_size,  # for multisamples we sample multiple times
+         temperature=args.temperature if not GREEDY else 0.0,
+         top_p=args.top_p if not GREEDY else 1.0,
+         top_k=50 if not GREEDY else -1,
+         max_tokens=args.max_new_tokens,
+         stop_token_ids=[llm_tokenizer.eos_token_id])
+     llm = LLM(model=args.model_path,
+               tensor_parallel_size=args.tensor_parallel_size,
+               swap_space=args.swap_space,
+               trust_remote_code=True)
+     # tokenizer="hf-internal-testing/llama-tokenizer" if 'llama' in args.checkpoint_path.lower() else None,)
+     llm_outputs = llm.generate(all_messages, sampling_params)  # generate for the prompts that were passed in
+     predictions = [extract_single_predictions(output.outputs[0].text) for output in llm_outputs]
+     return predictions
+
+ dataset_path = os.path.join("datasets", f"{args.data_subset}.jsonl")
+
+ max_tokens = []
+ generations = []
+ left_prompts = []
+ right_prompts = []
+ data = []
+
+ with jsonlines.open(dataset_path) as data_file:
+     for data_item in data_file:
+         data.append(data_item)
+         left_prompts.append(data_item["classification_left_prompt"])
+         right_prompts.append(data_item["classification_right_prompt"])
+
+ print("Starting model inference...")
+ left_predictions = model_query(all_messages=left_prompts, batch_size=args.batch_size)
+ right_predictions = model_query(all_messages=right_prompts, batch_size=args.batch_size)
+
+ generations = []
+ for i, data_item in tqdm(enumerate(left_predictions)):
+     # Model inference: attach both predictions and drop the prompt fields before dumping.
+     curr_sample = data[i]
+     curr_sample["left_output"] = left_predictions[i]
+     curr_sample["right_output"] = right_predictions[i]
+     for prompt in ["base_prompt", "coding_concepts", "chain_of_thought", "one_shot", "classification_left_prompt", "classification_right_prompt"]:
+         if prompt in curr_sample:
+             del curr_sample[prompt]
+     generations.append(curr_sample)
+
+ generations = pd.DataFrame(generations)
+ path = os.path.join("generations", "classification", os.path.split(args.model_path)[1], args.data_subset, args.prompt, f"{args.num_samples}_samples")
+ if not os.path.exists(path):
+     os.makedirs(path)
+ path = os.path.join(path, "generated_outputs.jsonl")
+ generations.to_json(path, orient="records", lines=True)
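For reference, a hedged sketch of how this script might be invoked; the model path below is only an example value, and every flag corresponds to an argument defined in the argparse block above. Outputs land in generations/classification/<model>/<data_subset>/<prompt>/<num_samples>_samples/generated_outputs.jsonl.

import subprocess

# Illustrative invocation (the model path is an example; any HF causal-LM path should work the same way).
subprocess.run([
    "python3", "src/classification_generation.py",
    "--model_path", "bigcode/starcoderbase",
    "--data_subset", "latency",
    "--prompt", "base_prompt",
    "--num_samples", "1",
    "--tensor_parallel_size", "1",
], check=True)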
src/evaluation.py ADDED
@@ -0,0 +1,332 @@
+ import argparse
+ import os
+ import jsonlines
+ import pandas as pd
+ import time
+ from jinja2 import Environment, FileSystemLoader
+ import json
+ import csv
+ from statistics import mean
+ from utils import pass_at_k_continuous_vals, diff_bleu, post_process_generations, statistical_significance_test, remove_comments, remove_blank_lines, get_files_with_syntax_errors
+ from datasets import load_dataset
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--data_subset", type=str, default="latency", help="latency/resource_util/runtime_efficiency/maintainability/security")
+ parser.add_argument("--model", type=str, default="wizardcoder", help="model name")
+ parser.add_argument("--model_path", type=str, required=True, help="HF path for OS models")
+ parser.add_argument("--prompt", type=str, default="base_prompt", help="base_prompt/coding_concepts/chain_of_thought/one-shot")
+ parser.add_argument("--num_samples", type=int, default=1, help="Number of samples")
+ parser.add_argument("--score_k", type=str, default="1,5,10,20", help="K value for score@k (should not be greater than num_samples and can be comma-separated)")
+ parser.add_argument("--metric", type=str, default="runtime", help="runtime/diffbleu/codeql/codeql-diffbleu")
+ args = parser.parse_args()
+
+ # Use the basename of the HF model path as the model name for generation and result paths.
+ args.model = args.model_path.split("/")[-1]
+
+ generations_path = os.path.join("generations", args.data_subset, args.model, args.prompt, f"{args.num_samples}_samples", "generated_outputs.jsonl")
+
+ # To calculate runtimes (applicable for the runtime_efficiency subset)
+ if args.metric == "runtime":
+
+     start_time = time.time()
+
+     with jsonlines.open(generations_path) as reader:
+         samples = []
+         for generation in reader:
+             parsed_generations = []
+             for l in range(args.num_samples):
+                 generated_answers = post_process_generations(generated_answers=generation['generated_answers'][l], model=args.model, prompt=args.prompt, pl="Python")[1]
+                 parsed_generations.append(generated_answers)
+             samples.append(dict(problem_id=generation['problem_id'], submission_id_v0=generation['submission_id_v0'], cpu_time_v0=generation['cpu_time_v0'], cpu_time_v1=generation['cpu_time_v1'], input=generation['input'], target=generation['target'],
+                                 generated_answers=parsed_generations, inference_time=generation['inference_time']))
+
+     samples = pd.DataFrame(samples)
+     path = os.path.join("evaluation", "pie-perf", "generated_outputs.jsonl")
+     samples.to_json(path, orient="records", lines=True)
+
+     env = Environment(loader=FileSystemLoader(os.path.join("evaluation", "pie-perf", "data", "sample")))
+     template = env.get_template('sample_eval_config_template.yaml')
+     output_path = os.path.join("evaluation_results", args.data_subset, args.model, args.prompt, f"{args.num_samples}_samples", "generated_outputs.report")
+     rendered_yaml = template.render(output_path=output_path)
+     config_file_path = os.path.join("evaluation", "pie-perf", "data", "sample", "sample_eval_config.yaml")
+     f = open(config_file_path, "w")
+     f.write(rendered_yaml)
+     f.close()
+     path = os.path.split(output_path)[0]
+
+     if not os.path.exists(path):
+         os.makedirs(path)
+
+     run_file = os.path.join("evaluation", "pie-perf", "src", "codenet_eval", "run_eval.py")
+     os.system(f'python3 {run_file} --eval_config {config_file_path}')
+
+     k_values = list(map(int, args.score_k.split(",")))
+     scores = statistical_significance_test(output_path, args.num_samples, k_values)
+
+     results = {"model": args.model, "prompt": args.prompt, "num_samples": args.num_samples}
+
+     # `scores` holds three values (speedup / correct / improvement) per k.
+     for i, j in zip(range(2, len(scores), 3), range(len(k_values))):
+         results[f"Average Speedups@{k_values[j]},{args.num_samples}"] = scores[i-2]
+         results[f"Correct@{k_values[j]},{args.num_samples}"] = scores[i-1]
+         results[f"Improvements@{k_values[j]},{args.num_samples}"] = scores[i]
+
+     samples = pd.DataFrame([results])
+     samples.to_json(os.path.join(path, "results.jsonl"), orient="records", lines=True)
+     print(",".join(list(map(str, scores))))
+
+ # To calculate diffbleu (applicable for all splits)
+ elif args.metric == "diffbleu":
+     generations_path = "final_generations/android/starcoder/base_prompt/1_samples/2023-11-06/generated_outputs.jsonl"
+
+     k_values = list(map(int, args.score_k.split(",")))
+     overall_score = {}
+     for k in k_values:
+         overall_score[k] = []
+
+     passed = 0
+     count = 0
+
+     with jsonlines.open(generations_path) as reader:
+         for generation in reader:
+             count += 1
+             scores = []
+             for l in range(args.num_samples):
+                 generated_answers = post_process_generations(generated_answers=generation['generated_answers'][l], model=args.model, prompt=args.prompt, pl="Python")
+                 passed += generated_answers[0]
+                 diff_score_bleu = diff_bleu(source_code=generation['source_code'], target=generation['target'], generated_answers=generated_answers[1], pl="Python")
+                 scores.append(diff_score_bleu)
+
+             scores.sort(reverse=True)
+             for k in k_values:
+                 overall_score[k].append(pass_at_k_continuous_vals(n=args.num_samples, k=k, vals=scores))
+
+     scores = []
+     scores.append((passed*100)/(count*args.num_samples))
+     results = {"model": args.model, "prompt": args.prompt, "num_samples": args.num_samples}
+
+     for k in k_values:
+         results[f"Score@{k},{args.num_samples}"] = round(mean(overall_score[k])*100, 1)
+         scores.append(round(mean(overall_score[k])*100, 1))
+
+     results["Passed"] = (passed*100)/(count*args.num_samples)
+     samples = pd.DataFrame([results])
+     path = os.path.join("evaluation_results", args.data_subset, args.model, args.prompt, f"{args.num_samples}_samples")
+     if not os.path.exists(path):
+         os.makedirs(path)
+     samples.to_json(os.path.join(path, "results.jsonl"), orient="records", lines=True)
+     print("Pass Rate: {}, DiffBleu Score: {}".format(scores[0], scores[1]))
+
+ # To run codeql (applicable for security and maintainability)
+ elif args.metric == "codeql":
+     generations_path = "final_generations/security/starcoder/base_prompt/1_samples/2023-11-06/generated_outputs.jsonl"
+
+     all_check_paths = {}
+     query_lang = {}
+
+     with jsonlines.open(generations_path) as reader:
+         for generation in reader:
+             query = generation['codeql_check'].split("/")[-1].split(".ql")[0]
+
+             try:
+                 all_check_paths[query].append(generation['codeql_check'])
+             except:
+                 all_check_paths[query] = generation['codeql_check']
+
+             code_path = "evaluation_results/{}/{}/{}/{}_samples/generated_code/{}/".format(args.data_subset, args.model, args.prompt, args.num_samples, query)
+             if not os.path.exists(code_path):
+                 os.makedirs(code_path)
+
+             if (generation['pl'] == "python"):
+                 ext = ".py"
+                 pl = "Python"
+                 query_lang[query] = "python"
+             else:
+                 ext = ".c"
+                 pl = "C"
+                 query_lang[query] = "cpp"
+
+             for index in range(len(generation['generated_answers'])):
+                 code_path_indexed = code_path + "{}_{}{}".format(generation['code_file_path'].split("/")[-2]+"_"+generation['code_file_path'].split("/")[-1].split(ext)[0], index, ext)
+                 f = open(code_path_indexed, "w+")
+                 generated_answers = post_process_generations(generated_answers=generation['generated_answers'][index], model=args.model, prompt=args.prompt, pl=generation['pl'])[1]
+                 code = remove_comments(generated_answers, generation['pl'])
+                 # Fall back to the source program if the post-processed generation is empty.
+                 if remove_blank_lines(code).strip() == "":
+                     generated_answers = generation['source_code']
+                 f.write(generated_answers)
+                 f.close()
+
+             if (pl == "C"):
+                 f = open(code_path+"Makefile", "w+")
+                 f.write("SRCS=$(wildcard *.c)\nOBJS=$(SRCS:.c=.o)\n\nall: $(OBJS)\n\n%.o: %.c\n\tgcc -g -O -c $< -o $@ || (echo \"Deleting $<\" && echo \"$<\" >> rejected_files.log && mv $< $<.reject)\n\nclean:\n\trm -rf *.o")
+                 f.close()
+
+     for query in all_check_paths.keys():
+         code_path_generations = "evaluation_results/{}/{}/{}/{}_samples/generated_code/".format(args.data_subset, args.model, args.prompt, args.num_samples)
+
+         code_path_db = "evaluation_results/{}/{}/{}/{}_samples/generated_code_db/".format(args.data_subset, args.model, args.prompt, args.num_samples)
+         if not os.path.exists(code_path_db):
+             os.makedirs(code_path_db)
+
+         code_path_results = "evaluation_results/{}/{}/{}/{}_samples/generated_code_results/".format(args.data_subset, args.model, args.prompt, args.num_samples)
+         if not os.path.exists(code_path_results):
+             os.makedirs(code_path_results)
+
+         os.system("codeql-home/codeql/codeql database create --quiet --language={} --source-root={}{} {}{}".format(query_lang[query], code_path_generations, query, code_path_db, query))
+         os.system("codeql-home/codeql/codeql database analyze --rerun {}{} {} --format=csv --output={}{}.csv --threads=0".format(code_path_db, query, all_check_paths[query], code_path_results, query))
+
+     k_values = list(map(int, args.score_k.split(",")))
+     overall_score = {}
+     for k in k_values:
+         overall_score[k] = []
+     syntax_errors = {}
+     syn_errors = []
+     done = []
+     scores_dump = []
+
+     with jsonlines.open(generations_path) as reader:
+         parsed = 0
+         for generation in reader:
+             query = generation['codeql_check'].split("/")[-1].split(".ql")[0]
+
+             code_path = "evaluation_results/{}/{}/{}/{}_samples/generated_code/{}/".format(args.data_subset, args.model, args.prompt, args.num_samples, query)
+             scores = []
+             code_path_results = "evaluation_results/{}/{}/{}/{}_samples/generated_code_results/{}.csv".format(args.data_subset, args.model, args.prompt, args.num_samples, query)
+             code_path_generations = "evaluation_results/{}/{}/{}/{}_samples/generated_code/{}/".format(args.data_subset, args.model, args.prompt, args.num_samples, query)
+             code_path_db = "evaluation_results/{}/{}/{}/{}_samples/generated_code_db/".format(args.data_subset, args.model, args.prompt, args.num_samples)
+
+             errors = []
+             with open(code_path_results) as f:
+                 csvfile = csv.reader(f)
+                 for error in csvfile:
+                     errors.append(error[-5].split("/")[1])
+
+             errors = list(set(errors))
+             index = 0
+             scores = []
+             ans = []
+             syn = get_files_with_syntax_errors(generated_code_path=code_path_generations, codeql_db_path=code_path_db, query=query)
+
+             if (len(syn) > 0 and query not in done):
+                 syn_errors += syn
+                 try:
+                     syntax_errors[query] += syn
+                 except:
+                     syntax_errors[query] = syn
+                 done.append(query)
+
+             for index in range(len(generation['generated_answers'])):
+                 if (generation['pl'] == "python"):
+                     ext = ".py"
+                     pl = "Python"
+                 else:
+                     ext = ".c"
+                     pl = "C"
+
+                 filename = "{}_{}{}".format(generation['code_file_path'].split("/")[-2]+"_"+generation['code_file_path'].split("/")[-1].split(ext)[0], index, ext)
+                 index += 1
+
+                 if (filename in errors or filename in syn_errors):
+                     scores.append(0)
+                 else:
+                     scores.append(1)
+
+             scores.sort(reverse=True)
+             for k in k_values:
+                 overall_score[k].append(pass_at_k_continuous_vals(n=args.num_samples, k=k, vals=scores))
+             print(scores)
+             scores_dump.append(scores)
+             scores = []
+
+     path = os.path.join("evaluation_results", args.data_subset, args.model, args.prompt, f"{args.num_samples}_samples")
+     f = open(os.path.join(path, "results.txt"), 'w')
+     f.write(str(scores_dump))
+     f.close()
+     results = {"model": args.model, "prompt": args.prompt, "num_samples": args.num_samples}
+
+     for k in k_values:
+         results[f"Score@{k},{args.num_samples}"] = round(mean(overall_score[k])*100, 1)
+
+     results["syntax_errors"] = syntax_errors
+     results["no_of_syntax"] = len(syn_errors)
+     samples = pd.DataFrame([results])
+     path = os.path.join("evaluation_results", args.data_subset, args.model, args.prompt, f"{args.num_samples}_samples")
+     samples.to_json(os.path.join(path, "results.jsonl"), orient="records", lines=True)
+     print(",".join(list(map(str, scores))))
+
+ # get codeql*diffbleu numbers (applicable for security and maintainability)
+ elif args.metric == "codeql-diffbleu":
+     k_values = list(map(int, args.score_k.split(",")))
+     overall_score = {}
+     for k in k_values:
+         overall_score[k] = []
+     generations_path = os.path.join("generations", args.data_subset, args.model, args.prompt, f"{args.num_samples}_samples", "generated_outputs.jsonl")
+     passed = 0
+     count = 0
+     with jsonlines.open(generations_path) as reader:
+         # Per-sample 0/1 CodeQL scores written by the "codeql" metric above.
+         res_path = os.path.join("evaluation_results", args.data_subset, args.model, args.prompt, f"{args.num_samples}_samples", "results.txt")
+         codeql_results = eval(open(res_path).read())
+         for generation, res in zip(reader, codeql_results):
+             count += 1
+             scores = []
+             for l in range(len(generation['generated_answers'])):
+                 generated_answers = post_process_generations(generated_answers=generation['generated_answers'][l], model=args.model, prompt=args.prompt, pl=generation['pl'])
+                 passed += generated_answers[0]
+                 diff_score_bleu = res[l]*diff_bleu(source_code=generation['source_code'], target=generation['target'], generated_answers=generated_answers[1], pl=generation['pl'])
+                 scores.append(diff_score_bleu)
+
+             scores.sort(reverse=True)
+             for k in k_values:
+                 overall_score[k].append(pass_at_k_continuous_vals(n=args.num_samples, k=k, vals=scores))
+
+     scores = []
+     scores.append((passed*100)/(count*args.num_samples))
+     results = {"model": args.model, "prompt": args.prompt, "num_samples": args.num_samples}
+     for k in k_values:
+         results[f"Score@{k},{args.num_samples}"] = round(mean(overall_score[k])*100, 1)
+         scores.append(round(mean(overall_score[k])*100, 1))
+     results["Passed"] = (passed*100)/(count*args.num_samples)
+     scores.append((passed*100)/(count*args.num_samples))
+     samples = pd.DataFrame([results])
+     path = os.path.join("evaluation_results", args.data_subset, args.model, args.prompt, f"{args.num_samples}_samples")
+     samples.to_json(os.path.join(path, "results.jsonl"), orient="records", lines=True)
+     print(",".join(list(map(str, scores))))
src/evaluation/pie-perf/.gitignore ADDED
@@ -0,0 +1,129 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
src/evaluation/pie-perf/codenet/public_test_cases/p02248/input.0.txt ADDED
@@ -0,0 +1,2 @@
+ aabaaa
+ aa
src/evaluation/pie-perf/codenet/public_test_cases/p02248/input.1.txt ADDED
@@ -0,0 +1,2 @@
+ xyzz
+ yz
src/evaluation/pie-perf/codenet/public_test_cases/p02248/input.2.txt ADDED
@@ -0,0 +1,2 @@
+ aabaaa
+ aa
src/evaluation/pie-perf/codenet/public_test_cases/p02248/output.0.txt ADDED
@@ -0,0 +1,3 @@
+ 0
+ 3
+ 4
src/evaluation/pie-perf/codenet/public_test_cases/p02248/output.1.txt ADDED
@@ -0,0 +1 @@
+ 1
src/evaluation/pie-perf/codenet/public_test_cases/p02248/output.2.txt ADDED
@@ -0,0 +1,3 @@
+ 0
+ 3
+ 4
src/evaluation/pie-perf/codenet/public_test_cases/p02278/input.0.txt ADDED
@@ -0,0 +1,2 @@
+ 5
+ 1 5 3 4 2
src/evaluation/pie-perf/codenet/public_test_cases/p02278/input.1.txt ADDED
@@ -0,0 +1,2 @@
+ 4
+ 4 3 2 1
src/evaluation/pie-perf/codenet/public_test_cases/p02278/input.2.txt ADDED
@@ -0,0 +1,2 @@
+ 5
+ 1 5 3 4 2
src/evaluation/pie-perf/codenet/public_test_cases/p02278/output.0.txt ADDED
@@ -0,0 +1 @@
+ 7
src/evaluation/pie-perf/codenet/public_test_cases/p02278/output.1.txt ADDED
@@ -0,0 +1 @@
+ 10
src/evaluation/pie-perf/codenet/public_test_cases/p02278/output.2.txt ADDED
@@ -0,0 +1 @@
+ 7
src/evaluation/pie-perf/codenet/public_test_cases/p02394/input.0.txt ADDED
@@ -0,0 +1 @@
+ 5 4 2 2 1
src/evaluation/pie-perf/codenet/public_test_cases/p02394/input.1.txt ADDED
@@ -0,0 +1 @@
+ 5 4 2 2 1
src/evaluation/pie-perf/codenet/public_test_cases/p02394/input.2.txt ADDED
@@ -0,0 +1 @@
+ 5 4 2 4 1
src/evaluation/pie-perf/codenet/public_test_cases/p02394/output.0.txt ADDED
@@ -0,0 +1 @@
+ Yes
src/evaluation/pie-perf/codenet/public_test_cases/p02394/output.1.txt ADDED
@@ -0,0 +1 @@
+ Yes
src/evaluation/pie-perf/codenet/public_test_cases/p02394/output.2.txt ADDED
@@ -0,0 +1 @@
+ No
src/evaluation/pie-perf/codenet/public_test_cases/p02552/input.0.txt ADDED
@@ -0,0 +1 @@
+ 1
src/evaluation/pie-perf/codenet/public_test_cases/p02552/input.1.txt ADDED
@@ -0,0 +1 @@
+ 1
src/evaluation/pie-perf/codenet/public_test_cases/p02552/input.2.txt ADDED
@@ -0,0 +1 @@
+ 0
src/evaluation/pie-perf/codenet/public_test_cases/p02552/output.0.txt ADDED
@@ -0,0 +1 @@
+ 0
src/evaluation/pie-perf/codenet/public_test_cases/p02552/output.1.txt ADDED
@@ -0,0 +1 @@
+ 0
src/evaluation/pie-perf/codenet/public_test_cases/p02552/output.2.txt ADDED
@@ -0,0 +1 @@
+ 1
src/evaluation/pie-perf/codenet/public_test_cases/p02555/input.0.txt ADDED
@@ -0,0 +1 @@
+ 7
src/evaluation/pie-perf/codenet/public_test_cases/p02555/input.1.txt ADDED
@@ -0,0 +1 @@
+ 7
src/evaluation/pie-perf/codenet/public_test_cases/p02555/input.2.txt ADDED
@@ -0,0 +1 @@
+ 1729
src/evaluation/pie-perf/codenet/public_test_cases/p02555/input.3.txt ADDED
@@ -0,0 +1 @@
+ 2
src/evaluation/pie-perf/codenet/public_test_cases/p02555/output.0.txt ADDED
@@ -0,0 +1 @@
+ 3
src/evaluation/pie-perf/codenet/public_test_cases/p02555/output.1.txt ADDED
@@ -0,0 +1 @@
+ 3
src/evaluation/pie-perf/codenet/public_test_cases/p02555/output.2.txt ADDED
@@ -0,0 +1 @@
+ 294867501
src/evaluation/pie-perf/codenet/public_test_cases/p02555/output.3.txt ADDED
@@ -0,0 +1 @@
+ 0
src/evaluation/pie-perf/codenet/public_test_cases/p02574/input.0.txt ADDED
@@ -0,0 +1,2 @@
+ 3
+ 3 4 5
src/evaluation/pie-perf/codenet/public_test_cases/p02574/input.1.txt ADDED
@@ -0,0 +1,2 @@
+ 3
+ 6 10 16
src/evaluation/pie-perf/codenet/public_test_cases/p02574/input.2.txt ADDED
@@ -0,0 +1,2 @@
+ 3
+ 6 10 15
src/evaluation/pie-perf/codenet/public_test_cases/p02574/input.3.txt ADDED
@@ -0,0 +1,2 @@
+ 3
+ 3 4 5
src/evaluation/pie-perf/codenet/public_test_cases/p02574/output.0.txt ADDED
@@ -0,0 +1 @@
+ pairwise coprime
src/evaluation/pie-perf/codenet/public_test_cases/p02574/output.1.txt ADDED
@@ -0,0 +1 @@
+ not coprime
src/evaluation/pie-perf/codenet/public_test_cases/p02574/output.2.txt ADDED
@@ -0,0 +1 @@
+ setwise coprime