yazan-amer committed
Commit 88ad7fb · 1 Parent(s): a8fedb5

real scores pending

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .github/workflows/verify.yaml +20 -0
  2. .gitignore +2 -1
  3. app.py +47 -22
  4. bike_bench_internal/.gitignore +9 -0
  5. bike_bench_internal/README.md +1 -0
  6. bike_bench_internal/benchmark_models/__init__.py +0 -0
  7. bike_bench_internal/benchmark_models/baseline_dataset.ipynb +0 -0
  8. bike_bench_internal/benchmark_models/benchmarking_utils.py +201 -0
  9. bike_bench_internal/benchmark_models/generative_modeling_utils.py +641 -0
  10. bike_bench_internal/benchmark_models/libmoon/__init__.py +0 -0
  11. bike_bench_internal/benchmark_models/libmoon/example.py +4 -0
  12. bike_bench_internal/benchmark_models/libmoon/problem/__init__.py +0 -0
  13. bike_bench_internal/benchmark_models/libmoon/problem/mop.py +93 -0
  14. bike_bench_internal/benchmark_models/libmoon/problem/mtl/__init__.py +0 -0
  15. bike_bench_internal/benchmark_models/libmoon/problem/mtl/fair_classify.py +0 -0
  16. bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/__init__.py +1 -0
  17. bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/adult_loader.py +124 -0
  18. bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/compas_loader.py +101 -0
  19. bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/credit_loader.py +80 -0
  20. bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/multimnist_loader.py +101 -0
  21. bike_bench_internal/benchmark_models/libmoon/problem/mtl/mnist.py +189 -0
  22. bike_bench_internal/benchmark_models/libmoon/problem/mtl/model/__init__.py +0 -0
  23. bike_bench_internal/benchmark_models/libmoon/problem/mtl/model/simple.py +77 -0
  24. bike_bench_internal/benchmark_models/libmoon/problem/mtl/objectives.py +179 -0
  25. bike_bench_internal/benchmark_models/libmoon/problem/synthetic/__init__.py +4 -0
  26. bike_bench_internal/benchmark_models/libmoon/problem/synthetic/dtlz.py +150 -0
  27. bike_bench_internal/benchmark_models/libmoon/problem/synthetic/maf.py +44 -0
  28. bike_bench_internal/benchmark_models/libmoon/problem/synthetic/re.py +619 -0
  29. bike_bench_internal/benchmark_models/libmoon/problem/synthetic/re_original.py +1335 -0
  30. bike_bench_internal/benchmark_models/libmoon/problem/synthetic/vlmop.py +67 -0
  31. bike_bench_internal/benchmark_models/libmoon/problem/synthetic/wfg.py +3 -0
  32. bike_bench_internal/benchmark_models/libmoon/problem/synthetic/zdt.py +175 -0
  33. bike_bench_internal/benchmark_models/libmoon/solver/__init__.py +1 -0
  34. bike_bench_internal/benchmark_models/libmoon/solver/gradient/__init__.py +24 -0
  35. bike_bench_internal/benchmark_models/libmoon/solver/gradient/base_solver.py +82 -0
  36. bike_bench_internal/benchmark_models/libmoon/solver/gradient/core_solver.py +82 -0
  37. bike_bench_internal/benchmark_models/libmoon/solver/gradient/epo_solver.py +198 -0
  38. bike_bench_internal/benchmark_models/libmoon/solver/gradient/functions_evaluation.py +94 -0
  39. bike_bench_internal/benchmark_models/libmoon/solver/gradient/functions_hv_grad_3d.py +215 -0
  40. bike_bench_internal/benchmark_models/libmoon/solver/gradient/functions_hv_python3.py +274 -0
  41. bike_bench_internal/benchmark_models/libmoon/solver/gradient/gradhv.py +122 -0
  42. bike_bench_internal/benchmark_models/libmoon/solver/gradient/mgda_core.py +72 -0
  43. bike_bench_internal/benchmark_models/libmoon/solver/gradient/mgda_solver.py +71 -0
  44. bike_bench_internal/benchmark_models/libmoon/solver/gradient/min_norm_solvers_numpy.py +386 -0
  45. bike_bench_internal/benchmark_models/libmoon/solver/gradient/moosvgd.py +101 -0
  46. bike_bench_internal/benchmark_models/libmoon/solver/gradient/pmgda.py +14 -0
  47. bike_bench_internal/benchmark_models/libmoon/solver/gradient/pmtl.py +147 -0
  48. bike_bench_internal/benchmark_models/libmoon/solver/gradient/run/__init__.py +0 -0
  49. bike_bench_internal/benchmark_models/libmoon/solver/gradient/run/run_grad.py +99 -0
  50. bike_bench_internal/benchmark_models/libmoon/solver/gradient/utils/__init__.py +0 -0
.github/workflows/verify.yaml ADDED
@@ -0,0 +1,20 @@
+ name: Verify
+
+ on:
+   push:
+     branches: [ "*" ]
+
+ jobs:
+   build-backend:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Check out the repository
+         uses: actions/checkout@v5
+       - name: Set up Python 3.10
+         uses: actions/setup-python@v6
+         with:
+           python-version: '3.10'
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install -r requirements.txt
.gitignore CHANGED
@@ -1,3 +1,4 @@
  **/.idea
  **/.venv
- **/venv
+ **/venv
+ **/local-run-data
app.py CHANGED
@@ -1,21 +1,39 @@
  import datetime
- import random
+ import os.path
+ import sys
  import uuid
  from os import PathLike

  import gradio as gr
  import pandas as pd
+ import torch

  from config import APP_CONFIG
  from data_repository import REPOSITORY_INSTANCE, ModelScoringResult
  from designs_submission_validations import validate_github_link, validate_user_designs
+ from domain_constants import SCORE_NAMES_MAP, USER_GEN_DESIGNS_COLUMNS
+
+ sys.path.append(os.path.join(os.path.dirname(__file__), "bike_bench_internal/src/"))
+ from bikebench.benchmarking import benchmarking_utils
+

  def compute_scores(user_gen_designs: pd.DataFrame) -> ModelScoringResult:
+     user_gen_designs = pd.DataFrame(user_gen_designs, columns=USER_GEN_DESIGNS_COLUMNS)
+     designs_length = len(user_gen_designs)
+     if designs_length < 10_000:
+         raise Exception(f"Too few designs to evaluate. Expected at least 10,000, got {designs_length}")
+     data_tens = torch.tensor(user_gen_designs.values, dtype=torch.float32)
+     main_scores, detailed_scores, all_evaluation_scores = benchmarking_utils.evaluate(data_tens, device="cpu",
+                                                                                       evaluate_as_aggregate=False)
      return ModelScoringResult(
-         score=random.randint(50, 5000),
-         scoring_time=datetime.datetime.now(),
+         uuid=str(uuid.uuid4()),
          submission_time=datetime.datetime.now(),
-         uuid=str(uuid.uuid4())
+         design_quality=main_scores[SCORE_NAMES_MAP["design_quality"]],
+         diversity_dpp=main_scores[SCORE_NAMES_MAP["diversity_dpp"]],
+         mean_novelty=main_scores[SCORE_NAMES_MAP["mean_novelty"]],
+         sim_to_data_mmd=main_scores[SCORE_NAMES_MAP["sim_to_data_mmd"]],
+         mean_violations=main_scores[SCORE_NAMES_MAP["mean_violations"]],
+         binary_validity=main_scores[SCORE_NAMES_MAP["binary_validity"]],
      )


@@ -29,21 +47,28 @@ def process_generated_designs(github_link: str, file: PathLike[str]):
      return f"File uploaded successfully, uuid {scores.uuid}"


- with gr.Blocks() as gradio_app:
-     with gr.Tab("Bike Bench Leaderboard"):
-         gr.Markdown("Hello beautiful people!")
-         gr.Dataframe(REPOSITORY_INSTANCE.get_data_to_display, label="Scores of Previous Files")
-
-     with gr.Tab("Upload File"):
-         gr.Interface(
-             fn=process_generated_designs,
-             inputs=[
-                 gr.Textbox(label="Github Link"),
-                 gr.File(label="Upload a file"),
-             ],
-             outputs="text",
-             title="Bike Bench Leaderboard",
-             description="Upload a file to see the result."
-         )
-
- gradio_app.launch(debug=(not APP_CONFIG.production))
+ def build_approval_app():
+     pass
+
+
+ def build_app():
+     with gr.Blocks() as gradio_app:
+         with gr.Tab("Bike Bench Leaderboard"):
+             gr.Markdown("Hello beautiful people!")
+             gr.Dataframe(REPOSITORY_INSTANCE.get_data_to_display, label="Scores of Previous Files")
+
+         with gr.Tab("Upload File"):
+             gr.Interface(
+                 fn=process_generated_designs,
+                 inputs=[
+                     gr.Textbox(label="Github Link"),
+                     gr.File(label="Upload a file"),
+                 ],
+                 outputs="text",
+                 title="Bike Bench Leaderboard",
+                 description="Upload a file to see the result."
+             )
+     return gradio_app
+
+
+ build_app().launch(debug=(not APP_CONFIG.production))
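
Reviewer note: a minimal sketch of how the reworked `compute_scores` path could be exercised locally, assuming a CSV with at least 10,000 rows whose column order matches `USER_GEN_DESIGNS_COLUMNS`; the file path is hypothetical and not part of this commit.

import pandas as pd

from app import compute_scores  # note: importing app as-is also launches the Gradio UI at the bottom of the file

# Hypothetical path to a locally generated design set (>= 10,000 rows, USER_GEN_DESIGNS_COLUMNS order).
designs = pd.read_csv("local-run-data/sample_designs.csv")
result = compute_scores(designs)
print(result.uuid, result.design_quality, result.binary_validity)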
bike_bench_internal/.gitignore ADDED
@@ -0,0 +1,9 @@
+ **/__pycache__/
+ __pycache__/
+ src/resources/datasets/Generative_Modeling_Datasets/
+ src/resources/datasets/Predictive_Modeling_Datasets/
+ src/resources/datasets/Original_BIKED_Data/
+ src/resources/datasets/Real_Extended_Data/
+ src/resources/datasets/Synthetic_Extended_Data/
+ **/BikeCAD_17.1_configuration/
+ src/bikebench/introductory_notebooks/frames
bike_bench_internal/README.md ADDED
@@ -0,0 +1 @@
+ # Bike-Bench-Internal
bike_bench_internal/benchmark_models/__init__.py ADDED
File without changes
bike_bench_internal/benchmark_models/baseline_dataset.ipynb ADDED
The diff for this file is too large to render. See raw diff
bike_bench_internal/benchmark_models/benchmarking_utils.py ADDED
@@ -0,0 +1,201 @@
+ import os
+ import torch
+ import pandas as pd
+ from bikebench.design_evaluation.scoring import construct_scorer, MainScores, DetailedScores
+ from bikebench.design_evaluation.design_evaluation import get_standard_evaluations
+ from bikebench.conditioning import conditioning
+ from tqdm import trange, tqdm
+
+
+
+ def get_condition_by_idx(idx=0):
+     rider_condition = conditioning.sample_riders(10, split="test")
+     use_case_condition = conditioning.sample_use_case(10, split="test")
+     image_embeddings = conditioning.sample_image_embedding(10, split="test")
+     condition = {"Rider": rider_condition[idx], "Use Case": use_case_condition[idx], "Embedding": image_embeddings[idx]}
+     return condition
+
+ def get_conditions_10k():
+     rider_condition = conditioning.sample_riders(10000, split="test")
+     use_case_condition = conditioning.sample_use_case(10000, split="test")
+     image_embeddings = conditioning.sample_image_embedding(10000, split="test")
+     conditions = {"Rider": rider_condition, "Use Case": use_case_condition, "Embedding": image_embeddings}
+     return conditions
+
+ def evaluate_uncond(result_tens, name, cond_idx, data_columns, device, save=True):
+
+     condition = get_condition_by_idx(cond_idx)
+
+     result_dir = os.path.join("results", "unconditional", f"cond_{cond_idx}", name)
+     os.makedirs(result_dir, exist_ok=True)
+
+     main_scorer = construct_scorer(MainScores, get_standard_evaluations(device), data_columns)
+     detailed_scorer = construct_scorer(DetailedScores, get_standard_evaluations(device), data_columns)
+
+     main_scores = main_scorer(result_tens, condition)
+
+     detailed_scores = detailed_scorer(result_tens, condition)
+
+     if save:
+         result_tens = result_tens.cpu()
+         torch.save(result_tens, os.path.join(result_dir, "result_tens.pt"))
+         main_scores.to_csv(os.path.join(result_dir, "main_scores.csv"), index_label=False, header=False)
+         detailed_scores.to_csv(os.path.join(result_dir, "detailed_scores.csv"), index_label=False, header=False)
+     return main_scores, detailed_scores
+
+ def evaluate_cond(result_tens, name, data_columns, device, save=True):
+     condition = get_conditions_10k()
+
+     condition = {"Rider": condition["Rider"], "Use Case": condition["Use Case"], "Embedding": condition["Embedding"]}
+
+     result_dir = os.path.join("results", "conditional", name)
+     os.makedirs(result_dir, exist_ok=True)
+
+     main_scorer = construct_scorer(MainScores, get_standard_evaluations(device), data_columns, device)
+     detailed_scorer = construct_scorer(DetailedScores, get_standard_evaluations(device), data_columns, device)
+
+     main_scores = main_scorer(result_tens, condition)
+     detailed_scores = detailed_scorer(result_tens, condition)
+
+     if save:
+         result_tens = result_tens.cpu()
+         torch.save(result_tens, os.path.join(result_dir, "result_tens.pt"))
+         main_scores.to_csv(os.path.join(result_dir, "main_scores.csv"), index_label=False, header=False)
+         detailed_scores.to_csv(os.path.join(result_dir, "detailed_scores.csv"), index_label=False, header=False)
+
+     return main_scores, detailed_scores
+
+
+ def create_score_report_conditional():
+     """
+     Looks through the results folder and creates a score report for each conditional result.
+     """
+     all_scores = []
+     result_dir = os.path.join("results", "conditional")
+     for name in os.listdir(result_dir):
+         if os.path.isdir(os.path.join(result_dir, name)):
+             main_scores = pd.read_csv(os.path.join(result_dir, name, "main_scores.csv"), header=None)
+             main_scores.columns = ["Metric", "Score"]
+             main_scores["Model"] = name
+             all_scores.append(main_scores)
+     all_scores = pd.concat(all_scores, axis=0)
+     # make metric names the columns, make models the rows
+     all_scores = all_scores.pivot(index="Model", columns="Metric", values="Score")
+     # drop the index name and the column name
+     all_scores.columns.name = None
+     all_scores.index.name = None
+
+     return all_scores
+
+ def create_score_report_unconditional():
+     """
+     Looks through the results folder and creates a score report for each unconditional result.
+     """
+     all_scores = []
+     result_dir = os.path.join("results", "unconditional")
+     for i in range(10):
+         c_dir = os.path.join(result_dir, f"cond_{i}")
+         for name in os.listdir(c_dir):
+             dirname = os.path.join(c_dir, name)
+             if os.path.isdir(dirname):
+                 main_scores = pd.read_csv(os.path.join(dirname, "main_scores.csv"), header=None)
+                 main_scores.columns = ["Metric", "Score"]
+                 main_scores["Model"] = name
+                 main_scores["Condition"] = i
+                 all_scores.append(main_scores)
+     all_scores = pd.concat(all_scores, axis=0)
+     # average over condition
+     all_scores = all_scores.groupby(["Model", "Metric"]).mean().reset_index()
+     # make metric names the columns, make models the rows
+     all_scores = all_scores.pivot(index="Model", columns="Metric", values="Score")
+     # drop the index name and the column name
+     all_scores.columns.name = None
+     all_scores.index.name = None
+     return all_scores
+
+
+ def rescore_unconditional(data_columns, device, cond_idxs=None, model_names=None, results_root="results/unconditional"):
+     """
+     Recompute main and detailed scores for all unconditional results.
+     Overwrites only the CSV score files, leaves result_tens.pt untouched.
+     """
+
+     evals = get_standard_evaluations(device)
+     main_scorer = construct_scorer(MainScores, evals, data_columns, device)
+     detailed_scorer = construct_scorer(DetailedScores, evals, data_columns, device)
+     device = torch.device(device)
+     if cond_idxs is None:
+         cond_idxs = range(10)
+     for cond_idx in tqdm(cond_idxs):
+         cond_dir = os.path.join(results_root, f"cond_{cond_idx}")
+         if not os.path.isdir(cond_dir):
+             continue
+         # fetch the one shared condition for this index
+         condition = get_condition_by_idx(cond_idx)
+
+         if model_names is not None:
+             models = model_names
+         else:
+             models = os.listdir(cond_dir)
+
+         for model_name in models:
+             model_dir = os.path.join(cond_dir, model_name)
+             tensor_path = os.path.join(model_dir, "result_tens.pt")
+             if not os.path.isdir(model_dir) or not os.path.isfile(tensor_path):
+                 continue
+
+             # load results
+             result_tens = torch.load(tensor_path, map_location=device)
+
+             # rescore
+             main_scores = main_scorer(result_tens, condition)
+             detailed_scores = detailed_scorer(result_tens, condition)
+
+             # overwrite only the CSVs
+             main_scores.to_csv(
+                 os.path.join(model_dir, "main_scores.csv"), header=False
+             )
+             detailed_scores.to_csv(
+                 os.path.join(model_dir, "detailed_scores.csv"), header=False
+             )
+
+
+ def rescore_conditional(data_columns, device, model_names, results_root="results/conditional"):
+     """
+     Recompute main and detailed scores for all conditional results.
+     Overwrites only the CSV score files, leaves result_tens.pt untouched.
+     """
+     device = torch.device(device)
+     # fetch the full 10k-point condition set once
+     condition = get_conditions_10k()
+
+     # build scorers
+     evals = get_standard_evaluations(device)
+     main_scorer = construct_scorer(MainScores, evals, data_columns, device)
+     detailed_scorer = construct_scorer(DetailedScores, evals, data_columns, device)
+
+
+     if model_names is not None:
+         models = model_names
+     else:
+         models = os.listdir(results_root)
+     for model_name in models:
+         model_dir = os.path.join(results_root, model_name)
+         tensor_path = os.path.join(model_dir, "result_tens.pt")
+         if not os.path.isdir(model_dir) or not os.path.isfile(tensor_path):
+             continue
+
+         # load results
+         result_tens = torch.load(tensor_path, map_location=device)
+
+         # rescore
+         main_scores = main_scorer(result_tens, condition)
+         detailed_scores = detailed_scorer(result_tens, condition)
+
+         # overwrite only the CSVs
+         main_scores.to_csv(
+             os.path.join(model_dir, "main_scores.csv"), header=False
+         )
+         detailed_scores.to_csv(
+             os.path.join(model_dir, "detailed_scores.csv"), header=False
+         )
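
Reviewer note: a hedged sketch of how the helpers above are expected to compose — `evaluate_cond` writes per-model CSVs under `results/conditional/<name>/`, and `create_score_report_conditional` pivots them into one model-by-metric table. The CSV path and the import location of this module are illustrative assumptions, not part of the commit.

import pandas as pd
import torch

from bike_bench_internal.benchmark_models import benchmarking_utils  # assumed import path

# Hypothetical: 10,000 generated designs saved locally, columns in dataset order.
designs_df = pd.read_csv("local-run-data/my_model_designs.csv")
designs = torch.tensor(designs_df.values, dtype=torch.float32)
data_columns = designs_df.columns.tolist()

# Scores the model against the shared 10k test conditions and writes
# results/conditional/my_model/{result_tens.pt, main_scores.csv, detailed_scores.csv}.
main_scores, detailed_scores = benchmarking_utils.evaluate_cond(
    designs, "my_model", data_columns, device="cpu"
)

# Collects every main_scores.csv under results/conditional into one table.
print(benchmarking_utils.create_score_report_conditional())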
bike_bench_internal/benchmark_models/generative_modeling_utils.py ADDED
@@ -0,0 +1,641 @@
1
+ from tqdm import tqdm, trange
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.utils.data import DataLoader, Dataset, TensorDataset
5
+ import numpy as np
6
+ from torch.autograd import grad
7
+ from diffusers import DDPMScheduler
8
+ from torch.nn import MSELoss
9
+
10
+
11
+ from bikebench.conditioning import conditioning
12
+ from bikebench.design_evaluation.design_evaluation import *
13
+ from bikebench.conditioning import conditioning
14
+ from bikebench.design_evaluation import scoring
15
+
16
+ class TorchScaler:
17
+ def __init__(self, data):
18
+ self.data = data
19
+ self.mean = torch.mean(data, dim=0)
20
+ self.std = torch.std(data, dim=0)
21
+
22
+ def scale(self, x):
23
+ return (x - self.mean) / self.std
24
+
25
+ def unscale(self, x):
26
+ return x * self.std + self.mean
27
+
28
+ def sample_continuous(num_samples, split="test", randomize = False):
29
+ emb = conditioning.sample_image_embedding(num_samples, split, randomize)
30
+ rider = conditioning.sample_riders(num_samples, split, randomize)
31
+ use_case = conditioning.sample_use_case(num_samples, split, randomize)
32
+ all = torch.cat((emb, rider, use_case), dim=1)
33
+ return all
34
+
35
+ def sample_continuous_text(text_strings, device, split="test", randomize = False):
36
+ embedding_model = clip_embedding_calculator.ClipEmbeddingCalculator(
37
+ device=device, batch_size=64
38
+ )
39
+ text_embedding = embedding_model.embed_texts(text_strings)
40
+ num_samples = text_embedding.shape[0]
41
+ rider = conditioning.sample_riders(num_samples, split, randomize)
42
+ use_case = conditioning.sample_use_case(num_samples, split, randomize)
43
+ all = torch.cat((text_embedding, rider, use_case), dim=1)
44
+ return all
45
+
46
+ def sample_standard(num_samples, split="test", randomize = False):
47
+ emb = conditioning.sample_image_embedding(num_samples, split, randomize)
48
+ rider = conditioning.sample_riders(num_samples, split, randomize)
49
+ use_case = conditioning.sample_use_case(num_samples, split, randomize)
50
+ condition = {"Rider": rider, "Use Case": use_case, "Embedding": emb}
51
+ return condition
52
+
53
+ def parse_continuous_condition(condition):
54
+ image_embeddings = condition[:, :512]
55
+ use_case_condition = condition[:, -3:]
56
+ rider_condition = condition[:, 512:-3]
57
+ condition = {"Rider": rider_condition, "Use Case": use_case_condition, "Embedding": image_embeddings}
58
+ return condition
59
+
60
+ def piecewise_constraint_score(constraint_scores, constraint_falloff = 10):
61
+ constraint_scores_safexp = torch.clamp(constraint_scores, max=0.0)
62
+ piece1 = torch.exp(constraint_scores_safexp * constraint_falloff)/constraint_falloff
63
+ piece2 = constraint_scores + 1/constraint_falloff
64
+ mask = constraint_scores < 0.0
65
+ mask = mask.float()
66
+ result = piece1 * mask + piece2 * (1 - mask)
67
+ return result
68
+
69
+ def get_composite_score_fn(scaler, columns, evaluations, constrant_vs_objective_weight = 10.0, constraint_falloff=10.0, device="cpu"):
70
+ evaluator, requirement_names, requirement_types = construct_tensor_evaluator(evaluations, columns)
71
+
72
+ isobjective = torch.tensor(requirement_types) == 1
73
+
74
+ weights = scoring.get_ref_point(evaluator, requirement_names, requirement_names, reduction="meanabs", device=device)
75
+ weights = torch.tensor(weights, dtype=torch.float32, device=device)
76
+
77
+ assert weights.min() > 0, "Ref point should be greater than 0"
78
+
79
+ def composite_score_fn(x, continuous_condition, evaluator = evaluator, scaler = scaler):
80
+ #print if there are any NaN values in x
81
+ if torch.isnan(x).any():
82
+ print("NaN values in x")
83
+ x = scaler.unscale(x)
84
+ condition = parse_continuous_condition(continuous_condition)
85
+ eval_scores = evaluator(x, condition)
86
+ scaled_scores = eval_scores / weights
87
+ objective_scores = scaled_scores[:, isobjective]
88
+ constraint_scores_raw = scaled_scores[:, ~isobjective]
89
+ constraint_scores = piecewise_constraint_score(constraint_scores_raw, constraint_falloff)
90
+ total_scores = torch.sum(objective_scores, dim=1) + torch.sum(constraint_scores, dim=1) * constrant_vs_objective_weight
91
+ composite_scores = total_scores / constrant_vs_objective_weight
92
+
93
+ quality_scores = 1/composite_scores
94
+ # print("Quality scores: ", quality_scores)
95
+ if torch.isnan(quality_scores).any():
96
+ print("NaN values in quality scores")
97
+
98
+ mean_comp_scores = torch.mean(composite_scores)
99
+ constraint_satisfaction_rate = torch.mean(torch.all(constraint_scores_raw <= 0, dim=1).float())
100
+ report = {"CSR": constraint_satisfaction_rate, "MCS": mean_comp_scores}
101
+ return quality_scores, report
102
+
103
+ return composite_score_fn
104
+
105
+ def get_uneven_batch_sizes(total_data_points, batch_size):
106
+ """
107
+ Given the total number of data points and a target batch size,
108
+ returns a list of batch sizes that sum up to the total number of data points.
109
+ The batch sizes are distributed as evenly as possible but may be uneven.
110
+
111
+ :param total_data_points: Total number of data points (int).
112
+ :param batch_size: Target batch size (int).
113
+ :return: A list of batch sizes (list of int).
114
+ """
115
+ # Calculate the number of batches needed
116
+ num_batches = total_data_points // batch_size
117
+ remainder = total_data_points % batch_size
118
+
119
+ # Initialize the batch sizes
120
+ batch_sizes = [batch_size] * num_batches
121
+
122
+ # Distribute the remainder across the batches
123
+ for i in range(remainder):
124
+ batch_sizes[i%num_batches] += 1
125
+
126
+ return batch_sizes
127
+
128
+
129
+ def get_diversity_loss_fn(scaler:TorchScaler, columns, evaluations, diversity_weight=0.1, score_weight=0.1, constraint_vs_objective_weight=10.0, constraint_falloff=10.0, dpp_batch=16, device="cpu"):
130
+ composite_score_fn = get_composite_score_fn(scaler, columns, evaluations, constrant_vs_objective_weight = constraint_vs_objective_weight, constraint_falloff = constraint_falloff, device=device)
131
+
132
+ def diversity_loss_fn(x, condition, diversity_weight=diversity_weight, score_weight=score_weight):
133
+ if diversity_weight == 0:
134
+ return torch.tensor(0.0), {"DIV-OFF": 0.0}
135
+ scores, report= composite_score_fn(x, condition)
136
+
137
+ # Initialize the total loss
138
+ total_loss = 0.0
139
+
140
+ # Get uneven batch sizes based on the total number of data points
141
+ batch_sizes = get_uneven_batch_sizes(x.size(0), dpp_batch)
142
+
143
+ # Split the data into uneven batches
144
+ start_idx = 0
145
+ for batch_size in batch_sizes:
146
+ # Get the current batch
147
+ end_idx = start_idx + batch_size
148
+ x_batch = x[start_idx:end_idx]
149
+ scores_batch = scores[start_idx:end_idx]
150
+ # Compute pairwise squared Euclidean distances for the batch
151
+ r = torch.sum(x_batch ** 2, dim=1, keepdim=True)
152
+ D = r - 2 * torch.matmul(x_batch, x_batch.T) + r.T
153
+ D_norm = D / x_batch.size(1) # Normalize by the number of features
154
+ # Compute the similarity matrix using RBF for the batch
155
+ S = torch.exp(-0.5 * D_norm ** 2) / 2
156
+
157
+ # Compute the quality matrix for the batch
158
+ Q = torch.matmul(scores_batch, scores_batch.T)
159
+ Q = torch.pow(Q, score_weight)
160
+ L = S * Q
161
+
162
+ L = (L + L.T) / 2.0
163
+
164
+ L_stable = L + 1e-6 * torch.eye(L.size(0), device=L.device)
165
+
166
+ # Compute the eigenvalues of the similarity matrix for the batch
167
+ try:
168
+ eig_val = torch.linalg.eigvalsh(L_stable)
169
+ except:
170
+ print(f"Eigenvalue computation failed for batch with size {batch_size}")
171
+ eig_val = torch.ones(x_batch.size(0), device=x.device)
172
+ if torch.isnan(eig_val).any():
173
+ print("NaNs detected in eig_val")
174
+ # if (eig_val <= 0).any():
175
+ # print("Nonpositive eigenvalues:", eig_val)
176
+ # Compute the loss for the batch as the negative mean log of the eigenvalues
177
+ if torch.isinf(torch.log(eig_val)).any():
178
+ print("Log produced inf! Min/max eig_val:", eig_val.min().item(), eig_val.max().item())
179
+
180
+ batch_loss = -torch.mean(torch.log(torch.clamp(eig_val, min=1e-6, max=1e6)))
181
+
182
+ total_loss += batch_loss
183
+
184
+ # Update the start index for the next batch
185
+ start_idx = end_idx
186
+ # Compute the final loss by averaging across batches
187
+ loss = total_loss / len(batch_sizes)* diversity_weight
188
+ return loss, report
189
+ return diversity_loss_fn
190
+
191
+ class Down_Model(nn.Module):
192
+ def __init__(self, in_dim, out_dim, hidden_dim=400, num_hidden_layers=2):
193
+ super(Down_Model, self).__init__()
194
+
195
+ self.layers = nn.ModuleList([nn.Linear(in_dim, hidden_dim), nn.LeakyReLU()])
196
+
197
+ for _ in range(num_hidden_layers - 1):
198
+ self.layers.append(nn.Linear(hidden_dim, hidden_dim))
199
+ self.layers.append(nn.LeakyReLU())
200
+
201
+ self.layers.append(nn.Linear(hidden_dim, out_dim))
202
+
203
+ def forward(self, inputs):
204
+ x = inputs
205
+ for layer in self.layers:
206
+ x = layer(x)
207
+ return x
208
+
209
+ class Up_Model(nn.Module):
210
+ def __init__(self, in_dim, out_dim, hidden_dim=400, num_hidden_layers=2):
211
+ super(Up_Model, self).__init__()
212
+
213
+ self.layers = nn.ModuleList([nn.Linear(in_dim, hidden_dim), nn.LeakyReLU()])
214
+
215
+ for _ in range(num_hidden_layers - 1):
216
+ self.layers.append(nn.Linear(hidden_dim, hidden_dim))
217
+ self.layers.append(nn.LeakyReLU())
218
+
219
+ self.layers.append(nn.Linear(hidden_dim, out_dim))
220
+
221
+ def forward(self, inputs):
222
+ x = inputs
223
+ for layer in self.layers:
224
+ x = layer(x)
225
+ return x
226
+
227
+
228
+
229
+ def GAN_step(D, G, D_opt, G_opt, data_batch, cond_batch, noise_batch, batch_size, device, auxiliary_loss_fn):
230
+ criterion = nn.BCEWithLogitsLoss()
231
+ D.zero_grad()
232
+ real_label = torch.full((batch_size,), 1, dtype=torch.float, device=device)
233
+ fake_label = torch.full((batch_size,), 0, dtype=torch.float, device=device)
234
+
235
+ data_and_condition = torch.cat([data_batch, cond_batch], dim=1)
236
+ noise_and_condition = torch.cat([noise_batch, cond_batch], dim=1)
237
+
238
+ output = D(data_and_condition).view(-1)
239
+ L_D_real = criterion(output, real_label)
240
+
241
+ fake_data = G(noise_and_condition)
242
+ fake_data_and_condition = torch.cat([fake_data, cond_batch], dim=1)
243
+ output = D(fake_data_and_condition.detach()).view(-1)
244
+ L_D_fake = criterion(output, fake_label)
245
+
246
+ L_D_tot = L_D_real + L_D_fake
247
+ L_D_tot.backward()
248
+ D_opt.step()
249
+
250
+ G.zero_grad()
251
+ fake_data = G(noise_and_condition)
252
+ fake_data_and_condition = torch.cat([fake_data, cond_batch], dim=1)
253
+ output = D(fake_data_and_condition).view(-1)
254
+ L_G = criterion(output, real_label)
255
+
256
+ if auxiliary_loss_fn is not None:
257
+ L_aux, rep= auxiliary_loss_fn(fake_data, cond_batch)
258
+ L_G_tot = L_G + L_aux
259
+
260
+ report = {"L_D_real": L_D_real.item(), "L_D_fake": L_D_fake.item(), "L_G": L_G.item(), "L_aux": L_aux.item()}
261
+ report.update(rep)
262
+ else:
263
+ L_G_tot = L_G
264
+ report = {"L_D_real": L_D_real.item(), "L_D_fake": L_D_fake.item(), "L_G": L_G.item()}
265
+
266
+ L_G_tot.backward()
267
+
268
+ torch.nn.utils.clip_grad_norm_(G.parameters(), max_norm=20)
269
+
270
+ G_opt.step()
271
+
272
+ return report
273
+
274
+
275
+ def VAE_step(D, G, D_opt, G_opt, data_batch, cond_batch, noise_batch, batch_size, device, auxiliary_loss_fn):
276
+
277
+ D.zero_grad()
278
+ G.zero_grad()
279
+
280
+ alpha = 0.2
281
+
282
+ data_and_condition = torch.cat([data_batch, cond_batch], dim=1)
283
+
284
+ encoded = D(data_and_condition)
285
+ latent_dim = encoded.shape[1] // 2
286
+ mu = encoded[:, :latent_dim]
287
+ logvar = encoded[:, latent_dim:]
288
+
289
+ std = torch.exp(0.5 * logvar)
290
+ eps = torch.randn_like(std)
291
+ z = mu + eps * std # z = mu + sigma * epsilon
292
+
293
+ # Forward pass through decoder (G)
294
+ noise_and_condition = torch.cat([z, cond_batch], dim=1)
295
+
296
+ reconstructed = G(noise_and_condition)
297
+
298
+ # Compute losses
299
+ L_R = nn.MSELoss()(reconstructed, data_batch)
300
+ L_KL = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) / data_batch.size(0)
301
+
302
+ if auxiliary_loss_fn is not None:
303
+ L_aux, rep = auxiliary_loss_fn(reconstructed, cond_batch)
304
+
305
+ L_tot = alpha * L_KL + L_R + L_aux
306
+
307
+ report = {"L_KL": L_KL.item(), "L_R": L_R.item(), "L_tot": L_tot.item(), "L_aux": L_aux.item()}
308
+ report.update(rep)
309
+ else:
310
+ L_tot = alpha * L_KL + L_R
311
+
312
+ report = {"L_KL": L_KL.item(), "L_R": L_R.item(), "L_tot": L_tot.item()}
313
+
314
+ L_tot.backward()
315
+
316
+ # total_norm_D = torch.sqrt(sum(p.grad.norm()**2 for p in D.parameters() if p.grad is not None))
317
+ # total_norm_G = torch.sqrt(sum(p.grad.norm()**2 for p in G.parameters() if p.grad is not None))
318
+ # print(f"Gradient norm for D: {total_norm_D.item():.4f}")
319
+ # print(f"Gradient norm for G: {total_norm_G.item():.4f}")
320
+
321
+ torch.nn.utils.clip_grad_norm_(D.parameters(), max_norm=20)
322
+ torch.nn.utils.clip_grad_norm_(G.parameters(), max_norm=20)
323
+
324
+ D_opt.step()
325
+ G_opt.step()
326
+
327
+ return report
328
+
329
+
330
+ def DDPM_step_wrapper(scheduler: DDPMScheduler):
331
+ def DDPM_step(D, G, D_opt, G_opt, data_batch, cond_batch, noise_batch, batch_size, device, auxiliary_loss_fn):
332
+ # sample random t
333
+ t = torch.randint(0, scheduler.config.num_train_timesteps, (data_batch.size(0),), device=device).long()
334
+
335
+ # compute x_t using q_sample
336
+ noise = torch.randn_like(data_batch, device=device)
337
+ x_t = scheduler.add_noise(data_batch, noise, t)
338
+
339
+ # embed timestep and concat with cond
340
+ t_embedded = t.unsqueeze(-1).float() / scheduler.config.num_train_timesteps
341
+ x_input = torch.cat([x_t, t_embedded], dim=-1)
342
+
343
+ # predict noise
344
+ noise_pred = D(x_input)
345
+
346
+ # MSE loss with optional weighting (scheduler does not expose beta_t directly)
347
+ mse = MSELoss(reduction="none")(noise_pred, noise)
348
+ base_loss = mse.mean()
349
+
350
+ # reconstruct x0 and compute auxiliary loss
351
+ alpha_cumprod = scheduler.alphas_cumprod.to(device)
352
+ sqrt_alpha_cumprod_t = alpha_cumprod[t].sqrt().unsqueeze(-1)
353
+ sqrt_one_minus_alpha_cumprod_t = (1 - alpha_cumprod[t]).sqrt().unsqueeze(-1)
354
+ x0_pred = (x_t - sqrt_one_minus_alpha_cumprod_t * noise_pred) / sqrt_alpha_cumprod_t
355
+
356
+ if auxiliary_loss_fn is not None:
357
+ thresh = int(0.9 * scheduler.config.num_train_timesteps)
358
+ valid = (t < thresh)
359
+
360
+ total_loss = base_loss
361
+ report = {
362
+ "loss": base_loss.item(),
363
+ }
364
+ else:
365
+ total_loss = base_loss
366
+ report = {"loss": base_loss.item()}
367
+
368
+ D.zero_grad()
369
+ total_loss.backward()
370
+ D_opt.step()
371
+
372
+ return report
373
+
374
+ return DDPM_step
375
+
376
+
377
+ def DDPM_step_cond_wrapper(scheduler: DDPMScheduler):
378
+ def DDPM_step_cond(D, G, D_opt, G_opt, data_batch, cond_batch, noise_batch, batch_size, device, auxiliary_loss_fn):
379
+ # sample random t
380
+ t = torch.randint(0, scheduler.config.num_train_timesteps, (data_batch.size(0),), device=device).long()
381
+
382
+ # compute x_t using q_sample
383
+ noise = torch.randn_like(data_batch, device=device)
384
+ x_t = scheduler.add_noise(data_batch, noise, t)
385
+
386
+ # embed timestep and concat with cond
387
+ t_embedded = t.unsqueeze(-1).float() / scheduler.config.num_train_timesteps
388
+ x_input = torch.cat([x_t, cond_batch, t_embedded], dim=-1)
389
+
390
+ # predict noise
391
+ noise_pred = D(x_input)
392
+
393
+ # MSE loss with optional weighting (scheduler does not expose beta_t directly)
394
+ mse = MSELoss(reduction="none")(noise_pred, noise)
395
+ base_loss = mse.mean()
396
+
397
+ # reconstruct x0 and compute auxiliary loss
398
+ alpha_cumprod = scheduler.alphas_cumprod.to(device)
399
+ sqrt_alpha_cumprod_t = alpha_cumprod[t].sqrt().unsqueeze(-1)
400
+ sqrt_one_minus_alpha_cumprod_t = (1 - alpha_cumprod[t]).sqrt().unsqueeze(-1)
401
+ x0_pred = (x_t - sqrt_one_minus_alpha_cumprod_t * noise_pred) / sqrt_alpha_cumprod_t
402
+
403
+ if auxiliary_loss_fn is not None:
404
+ thresh = int(0.5 * scheduler.config.num_train_timesteps)
405
+ valid = (t < thresh)
406
+
407
+ if valid.any():
408
+ x0_sub, cond_sub = x0_pred[valid], cond_batch[valid]
409
+ L_aux, rep = auxiliary_loss_fn(x0_sub, cond_sub)
410
+ else:
411
+ L_aux = torch.tensor(0.0, device=device)
412
+ rep = {}
413
+
414
+ total_loss = base_loss + L_aux
415
+ report = {
416
+ "loss": base_loss.item(),
417
+ "L_aux": L_aux.item(),
418
+ **rep
419
+ }
420
+ else:
421
+ total_loss = base_loss
422
+ report = {"loss": base_loss.item()}
423
+
424
+ D.zero_grad()
425
+ total_loss.backward()
426
+ D_opt.step()
427
+
428
+ return report
429
+
430
+ return DDPM_step_cond
431
+
432
+
433
+ class ReusableDataLoader:
434
+ def __init__(self, dataset, batch_size, shuffle=True):
435
+ self.dataset = dataset
436
+ self.batch_size = batch_size
437
+ self.shuffle = shuffle
438
+ self.indices = list(range(len(self.dataset)))
439
+ self.previous_indices = []
440
+
441
+ def _shuffle_indices(self):
442
+ self.indices = torch.randperm(len(self.dataset)).tolist()
443
+
444
+ def get_batch(self):
445
+ queued = self.previous_indices
446
+ while len(queued) < self.batch_size:
447
+ if self.shuffle:
448
+ self._shuffle_indices()
449
+ queued.extend(self.indices) # Add individual elements to queued list
450
+
451
+ self.previous_indices = queued[self.batch_size:] # Store remaining indices for the next batch
452
+ batch_indices = queued[:self.batch_size] # Get the batch of the correct size
453
+ return torch.stack([self.dataset[i][0] for i in batch_indices])
454
+
455
+ def train(D, G, D_opt, G_opt, loader, num_steps, batch_size, noise_dim, train_step_fn, device, auxiliary_loss_fn, condition_sampler):
456
+ # Loss function
457
+ D.train()
458
+ G.train()
459
+ steps_range = trange(num_steps, position=0, leave=True)
460
+
461
+ for step in steps_range:
462
+ data_batch = loader.get_batch().to(device)
463
+ noise_batch = torch.randn(batch_size, noise_dim).to(device)
464
+ cond_batch = condition_sampler(batch_size).to(device)
465
+ effective_auxiliary_loss_fn = auxiliary_loss_fn
466
+ report = train_step_fn(D, G, D_opt, G_opt, data_batch, cond_batch, noise_batch, batch_size, device, effective_auxiliary_loss_fn)
467
+ postfix = {key: "{:.4f}".format(value) for key, value in report.items()}
468
+ steps_range.set_postfix(postfix)
469
+ return D, G
470
+
471
+
472
+ def get_DDPM_generate_cond(scheduler: DDPMScheduler, data_dim, batch_size=64):
473
+ def DDPM_generate_cond(D, G, cond_batch, latent_dim, device, batch_size=batch_size):
474
+ with torch.no_grad():
475
+ results = []
476
+ numgen = cond_batch.shape[0]
477
+ for start_idx in range(0, numgen, batch_size):
478
+ end_idx = min(start_idx + batch_size, numgen)
479
+ current_batch_size = end_idx - start_idx
480
+
481
+ x = torch.randn(current_batch_size, data_dim).to(device)
482
+
483
+ for t in reversed(range(scheduler.config.num_train_timesteps)):
484
+ t_tensor = torch.full((current_batch_size,), t, dtype=torch.long, device=device)
485
+ t_embedded = t_tensor.unsqueeze(-1).float() / scheduler.config.num_train_timesteps
486
+ x_input = torch.cat([x, cond_batch, t_embedded], dim=-1)
487
+
488
+ with torch.no_grad():
489
+ noise_pred = D(x_input)
490
+
491
+ x = scheduler.step(model_output=noise_pred, timestep=t, sample=x).prev_sample
492
+ results.append(x)
493
+
494
+ return torch.cat(results, dim=0)
495
+ return DDPM_generate_cond
496
+
497
+ def get_DDPM_generate_guided(scheduler: DDPMScheduler, data_dim, auxiliary_loss_fn, batch_size=64):
498
+ def DDPM_generate_guided(D, G, cond_batch, latent_dim, device, auxiliary_loss_fn=auxiliary_loss_fn, batch_size=batch_size):
499
+ results = []
500
+ numgen = cond_batch.shape[0]
501
+
502
+ num_guided_timesteps = int(0.5 * scheduler.config.num_train_timesteps)
503
+
504
+ for start_idx in range(0, numgen, batch_size):
505
+ end_idx = min(start_idx + batch_size, numgen)
506
+ current_batch_size = end_idx - start_idx
507
+
508
+ # Start with pure noise
509
+ x = torch.randn(current_batch_size, data_dim, device=device)
510
+
511
+ for t in tqdm(reversed(range(scheduler.config.num_train_timesteps))):
512
+ t_tensor = torch.full((current_batch_size,), t, dtype=torch.long, device=device)
513
+ t_embedded = t_tensor.unsqueeze(-1).float() / scheduler.config.num_train_timesteps
514
+ x_input = torch.cat([x, t_embedded], dim=-1)
515
+
516
+ # Model prediction
517
+ noise_pred = D(x_input)
518
+
519
+ # Apply auxiliary loss only if t < threshold (later timesteps)
520
+ threshold = int(0.5 * scheduler.config.num_train_timesteps)
521
+ if t < threshold:
522
+ alpha_cumprod = scheduler.alphas_cumprod.to(device)
523
+ sqrt_alpha_cumprod_t = alpha_cumprod[t].sqrt().unsqueeze(-1)
524
+ sqrt_one_minus_alpha_cumprod_t = (1 - alpha_cumprod[t]).sqrt().unsqueeze(-1)
525
+ x0_pred = (x - sqrt_one_minus_alpha_cumprod_t * noise_pred) / sqrt_alpha_cumprod_t
526
+
527
+ aux_loss, _ = auxiliary_loss_fn(x0_pred, cond_batch)
528
+ aux_loss.backward(retain_graph=True)
529
+ grad = x.grad / num_guided_timesteps
530
+
531
+ # Update x based on this local gradient only
532
+ x = (x - grad).detach().requires_grad_(True)
533
+ x.retain_grad()
534
+
535
+ # Always apply scheduler step
536
+ x = scheduler.step(model_output=noise_pred, timestep=t, sample=x).prev_sample
537
+ x.retain_grad()
538
+
539
+ results.append(x)
540
+
541
+ return torch.cat(results, dim=0)
542
+
543
+ return DDPM_generate_guided
544
+
545
+
546
+
547
+
548
+
549
+
550
+ def VAE_generate(D, G, cond_batch, latent_dim, device):
551
+ with torch.no_grad():
552
+ numgen = cond_batch.shape[0]
553
+ z = torch.randn(numgen, latent_dim).to(device)
554
+ z_and_condition = torch.cat([z, cond_batch], dim=1)
555
+ generated_data = G(z_and_condition)
556
+ return generated_data
557
+
558
+ # def VAE_generate_cond(D, G, cond_batch, latent_dim, device):
559
+ # numgen = cond_batch.shape[0]
560
+ # z = torch.randn(numgen, latent_dim).to(device)
561
+ # labels = torch.ones(numgen, 1).to(device)
562
+ # z = torch.cat([z, labels], dim=1)
563
+ # generated_data = G(z)
564
+ # return generated_data
565
+
566
+ def GAN_generate(D, G, cond_batch, noise_dim, device):
567
+ with torch.no_grad():
568
+ numgen = cond_batch.shape[0]
569
+ noise = torch.randn(numgen, noise_dim).to(device)
570
+ noise_and_condition = torch.cat([noise, cond_batch], dim=1)
571
+ labels = torch.ones(numgen, 1).to(device)
572
+ generated_data = G(noise_and_condition)
573
+ return generated_data
574
+
575
+ def train_model(data, model_type, train_params, auxiliary_loss_fn, condition_sampler, device):
576
+ batch_size, disc_lr, gen_lr, noise_dim, num_epochs, n_hidden, layer_size= train_params
577
+
578
+
579
+
580
+ data = torch.tensor(data).float()
581
+ sample_condition = sample_continuous(data.shape[0], randomize=True).to(device)
582
+
583
+ loader = ReusableDataLoader(TensorDataset(data), batch_size)
584
+
585
+ data_dim = data.shape[1]
586
+ cond_dim = sample_condition.shape[1]
587
+
588
+ if model_type in ["GAN"]:
589
+ train_step = GAN_step
590
+ generate_fn = GAN_generate
591
+ D_in = data_dim + cond_dim
592
+ D_out = 1
593
+ G_in = noise_dim +cond_dim
594
+ G_out = data_dim
595
+ elif model_type in ["VAE"]:
596
+ train_step = VAE_step
597
+ generate_fn = VAE_generate
598
+ D_in = data_dim + cond_dim
599
+ D_out = 2*noise_dim
600
+ G_in = noise_dim + cond_dim
601
+ G_out = data_dim
602
+ elif model_type in ["DDPM_guided"]:
603
+ scheduler = DDPMScheduler(num_train_timesteps=100)
604
+ train_step = DDPM_step_wrapper(scheduler)
605
+ generate_fn = get_DDPM_generate_guided(scheduler, data_dim, auxiliary_loss_fn, batch_size=batch_size)
606
+ D_in = data_dim + 1
607
+ D_out = data_dim
608
+ G_in = 1 #unused
609
+ G_out = 1 #unused
610
+ elif model_type in ["DDPM_conditional"]:
611
+ scheduler = DDPMScheduler(num_train_timesteps=1000)
612
+ train_step = DDPM_step_cond_wrapper(scheduler)
613
+ generate_fn = get_DDPM_generate_cond(scheduler, data_dim, batch_size=batch_size)
614
+ D_in = data_dim + cond_dim + 1
615
+ D_out = data_dim
616
+ G_in = 1 #unused
617
+ G_out = 1 #unused
618
+ # else:
619
+ # raise ValueError("Invalid model_type")
620
+
621
+
622
+ D = Down_Model(D_in, D_out, layer_size, n_hidden)
623
+ G = Up_Model(G_in, G_out, layer_size, n_hidden)
624
+
625
+
626
+ D.to(device)
627
+ G.to(device)
628
+ D_opt = torch.optim.Adam(D.parameters(), lr=disc_lr, betas=(0.5,0.999))
629
+ G_opt = torch.optim.Adam(G.parameters(), lr=gen_lr, betas=(0.5,0.999))
630
+
631
+
632
+
633
+ if num_epochs>0:
634
+ num_steps = num_epochs*len(data)//batch_size
635
+ else:
636
+ num_steps = -num_epochs #hacky way to specify fixed number of steps rather than epochs
637
+
638
+
639
+ train(D, G, D_opt, G_opt, loader, num_steps, batch_size, noise_dim, train_step, device, auxiliary_loss_fn, condition_sampler)
640
+
641
+ return D, G, generate_fn
bike_bench_internal/benchmark_models/libmoon/__init__.py ADDED
File without changes
bike_bench_internal/benchmark_models/libmoon/example.py ADDED
@@ -0,0 +1,4 @@
+ def add_one(number):
+     return number + 1
+
+
bike_bench_internal/benchmark_models/libmoon/problem/__init__.py ADDED
File without changes
bike_bench_internal/benchmark_models/libmoon/problem/mop.py ADDED
@@ -0,0 +1,93 @@
1
+ import numpy as np
2
+ import torch
3
+
4
+ class mop():
5
+ def __init__(self,
6
+ n_var: int,
7
+ n_obj: int,
8
+ lbound: np.ndarray,
9
+ ubound: np.ndarray,
10
+ n_cons: int=0,
11
+ ) -> None:
12
+
13
+ self.n_var = n_var
14
+ self.n_obj = n_obj
15
+ self.n_cons = n_cons
16
+
17
+ self.lbound=lbound
18
+ self.ubound=ubound
19
+
20
+
21
+ @property
22
+ def get_number_variable(self) -> int:
23
+ return self.n_var
24
+
25
+ @property
26
+ def get_number_objective(self) -> int:
27
+ return self.n_obj
28
+
29
+ @property
30
+ def get_lower_bound(self) -> np.ndarray:
31
+ return self.lbound
32
+
33
+ @property
34
+ def get_upper_bound(self) -> np.ndarray:
35
+ return self.ubound
36
+
37
+ @property
38
+ def has_constraint(self) -> bool:
39
+ return self.n_cons > 0
40
+
41
+ def evaluate(self, x):
42
+ raise NotImplementedError("Subclasses should implement this method.")
43
+
44
+ def __call__(self, x):
45
+ return self.evaluate(x)
46
+
47
+ def evaluate(self, x: any) -> any:
48
+ """
49
+ Evaluate the objectives for x
50
+ Parameters
51
+ ----------
52
+ x : any
53
+ Tensor or ndarray
54
+ Returns
55
+ -------
56
+ any
57
+ Tensor or ndarray correspondingly
58
+ Raises
59
+ ------
60
+ ValueError
61
+ wrong type of x
62
+ """
63
+
64
+ if type(x) == torch.Tensor:
65
+ return self._evaluate_torch(torch.atleast_2d(x))
66
+ elif isinstance(x, np.ndarray):
67
+ return self._evaluate_numpy(np.atleast_2d(x))
68
+ else:
69
+ raise ValueError("Input has to be in the form of Tensor or ndarray!")
70
+
71
+ def get_pf(self, n_points: int=100) -> np.ndarray:
72
+ """
73
+ Get Pareto front
74
+ Parameters
75
+ ----------
76
+ num_points : int, optional
77
+ _description_, by default 100
78
+ Returns
79
+ -------
80
+ np.ndarray
81
+ _description_
82
+ """
83
+ # TODO
84
+ # if method=='uniform':
85
+ if hasattr(self, "_get_pf"): return self._get_pf(n_points)
86
+ else: raise NotImplementedError("Subclasses should implement this method.")
87
+
88
+
89
+
90
+ class mop_noCons(mop):
91
+
92
+ def __init__(self, n_var: int, n_obj: int, lbound: np.ndarray, ubound: np.ndarray, n_cons: int = 0) -> None:
93
+ super().__init__(n_var, n_obj, lbound, ubound, n_cons)
bike_bench_internal/benchmark_models/libmoon/problem/mtl/__init__.py ADDED
File without changes
bike_bench_internal/benchmark_models/libmoon/problem/mtl/fair_classify.py ADDED
File without changes
bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/__init__.py ADDED
@@ -0,0 +1 @@
+ from .multimnist_loader import MultiMNISTData
bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/adult_loader.py ADDED
@@ -0,0 +1,124 @@
1
+ import os
2
+ import pandas as pd
3
+ import torch
4
+
5
+ from sklearn.model_selection import train_test_split
6
+ from sklearn.preprocessing import StandardScaler
7
+ from torch.utils import data
8
+ from libmoon.util_global.constant import root_name
9
+
10
+
11
+
12
+
13
+ def load_dataset(path, s_label):
14
+ # print()
15
+ data = pd.read_csv(path)
16
+ # Preprocessing taken from https://www.kaggle.com/islomjon/income-prediction-with-ensembles-of-decision-trees
17
+ # replace missing values with majority class
18
+ data['workclass'] = data['workclass'].replace('?','Private')
19
+ data['occupation'] = data['occupation'].replace('?','Prof-specialty')
20
+ data['native-country'] = data['native-country'].replace('?','United-States')
21
+
22
+ # education category
23
+ data.education = data.education.replace(['Preschool','1st-4th','5th-6th','7th-8th','9th','10th','11th','12th'],'left')
24
+ data.education = data.education.replace('HS-grad','school')
25
+ data.education = data.education.replace(['Assoc-voc','Assoc-acdm','Prof-school','Some-college'],'higher')
26
+ data.education = data.education.replace('Bachelors','undergrad')
27
+ data.education = data.education.replace('Masters','grad')
28
+ data.education = data.education.replace('Doctorate','doc')
29
+
30
+ # marital status
31
+ data['marital-status'] = data['marital-status'].replace(['Married-civ-spouse','Married-AF-spouse'],'married')
32
+ data['marital-status'] = data['marital-status'].replace(['Never-married','Divorced','Separated','Widowed', 'Married-spouse-absent'], 'not-married')
33
+
34
+ # income
35
+ data.income = data.income.replace('<=50K', 0)
36
+ data.income = data.income.replace('>50K', 1)
37
+
38
+ # sex
39
+ data.gender = data.gender.replace('Male', 0)
40
+ data.gender = data.gender.replace('Female', 1)
41
+
42
+ # mtldata.race = mtldata.race.replace('White', 0)
43
+ # mtldata.race = mtldata.race.replace('Black', 1)
44
+ # mtldata.race = mtldata.race.astype(int)
45
+
46
+ # encode categorical values
47
+ data1 = data.copy()
48
+ data1 = pd.get_dummies(data1)
49
+ data1 = data1.drop(['income', s_label], axis=1)
50
+ # data1 = data1.drop(['income', s_label], axis=1)
51
+
52
+ X = StandardScaler().fit(data1).transform(data1)
53
+ y = data['income'].values
54
+ s1 = data[s_label].values
55
+ # s2 = mtldata['race'].values
56
+
57
+ return X, y, s1
58
+
59
+
60
+
61
+
62
+ class ADULT(data.Dataset):
63
+
64
+
65
+ def __init__(self, split="train", sensible_attribute="gender"):
66
+ assert split in ["train", "val", "test"]
67
+
68
+ # folder_name = os.path.dirname( os.path.dirname(__file__) )
69
+
70
+
71
+
72
+
73
+ path = os.path.join(root_name, 'mtldata', "adult.csv")
74
+
75
+ x, y, s1 = load_dataset(path, sensible_attribute)
76
+
77
+
78
+ x = torch.from_numpy(x).float()
79
+ y = torch.from_numpy(y).long()
80
+ s1 = torch.from_numpy(s1).long()
81
+ # s2 = torch.from_numpy(s2).long()
82
+
83
+ # train/val/test split: 70/10/20 %
84
+ x_train, x_test, y_train, y_test, s1_train, s1_test = train_test_split(x, y, s1,test_size=.2, random_state=1)
85
+ x_train, x_val, y_train, y_val, s1_train, s1_val= train_test_split(x_train, y_train, s1_train, test_size=.125, random_state=1)
86
+
87
+ if split == 'train':
88
+ self.x = x_train
89
+ self.y = y_train
90
+ self.s1 = s1_train
91
+ # self.s2 = s2_train
92
+
93
+ elif split == 'val':
94
+ self.x = x_val
95
+ self.y = y_val
96
+ self.s1 = s1_val
97
+ # self.s2 = s2_val
98
+ elif split == 'test':
99
+ self.x = x_test
100
+ self.y = y_test
101
+ self.s1 = s1_test
102
+ # self.s2 = s2_test
103
+
104
+ print("loaded {} instances for split {}. y positives={}, {} positives={}".format(
105
+ len(self.y), split, sum(self.y), sensible_attribute, sum(self.s1)))
106
+
107
+ def __len__(self):
108
+ """__len__"""
109
+ return len(self.x)
110
+
111
+ def __getitem__(self, index):
112
+ return dict(data=self.x[index], labels=self.y[index], sensible_attribute=self.s1[index])
113
+
114
+ def task_names(self):
115
+ return None
116
+
117
+
118
+
119
+ if __name__ == "__main__":
120
+ dataset = ADULT(split="train")
121
+ trainloader = data.DataLoader(dataset, batch_size=256, num_workers=0)
122
+
123
+ for i, data in enumerate(trainloader):
124
+ break
bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/compas_loader.py ADDED
@@ -0,0 +1,101 @@
1
+ import torch
2
+ import os
3
+ import pandas as pd
4
+
5
+ from datetime import datetime
6
+ from sklearn.model_selection import train_test_split
7
+ from sklearn.preprocessing import StandardScaler
8
+
9
+
10
+ def load_dataset(path, s_label):
11
+
12
+ # following the preprocessing on
13
+ # https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
14
+ raw_data = pd.read_csv(path)
15
+
16
+ data = raw_data[(
17
+ (raw_data['days_b_screening_arrest'] <= 30) &
18
+ (raw_data['days_b_screening_arrest'] >= -30) &
19
+ (raw_data['is_recid'] != -1) &
20
+ (raw_data['c_charge_degree'] != 'O') &
21
+ (raw_data['score_text'] != 'N/A')
22
+ )]
23
+
24
+ # Only some columns are relevant
25
+ data = data[['age', 'c_charge_degree', 'race', 'age_cat', 'score_text', 'sex', 'priors_count',
26
+ 'days_b_screening_arrest', 'decile_score', 'is_recid', 'two_year_recid', 'c_jail_in', 'c_jail_out']]
27
+
28
+ # convert c_jail_in and c_jail_out to time_in_jail, mesured in hours
29
+ def date_from_str(s):
30
+ return datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
31
+
32
+ data['c_jail_in'] = data['c_jail_in'].apply(date_from_str)
33
+ data['c_jail_out'] = data['c_jail_out'].apply(date_from_str)
34
+
35
+ data['length_of_stay'] = data['c_jail_out'] - data['c_jail_in']
36
+
37
+ # data['length_of_stay'] = data['length_of_stay'].astype('timedelta64[h]') # modified by xz, 11.6
38
+ data['length_of_stay'] = data['length_of_stay'].dt.days
39
+
40
+
41
+ data = data.drop(['c_jail_in', 'c_jail_out'], axis=1)
42
+
43
+ # encode sex
44
+ data['sex'] = data['sex'].replace('Male', 0)
45
+ data['sex'] = data['sex'].replace('Female', 1)
46
+
47
+ # one-hot encode categorical variables
48
+ data1 = data.copy()
49
+ data1 = data1.drop(['two_year_recid', 'sex'], axis=1)
50
+ data1 = pd.get_dummies(data1)
51
+
52
+ x = StandardScaler().fit(data1).transform(data1)
53
+ y = data['two_year_recid'].values
54
+ s = data['sex'].values
55
+
56
+ return x, y, s
57
+
58
+
59
+ class Compas(torch.utils.data.Dataset):
60
+
61
+ def __init__(self, split, sensible_attribute='sex'):
62
+ assert split in ['train', 'val', 'test']
63
+
64
+ folder_name = os.path.dirname(os.path.dirname(__file__))
65
+ path = os.path.join(folder_name, 'mtldata', "compas.csv")
66
+
67
+ x, y, s = load_dataset(path, sensible_attribute)
68
+
69
+ x = torch.from_numpy(x).float()
70
+ y = torch.from_numpy(y).long()
71
+ s = torch.from_numpy(s).long()
72
+
73
+ # train/val/test split: 70/10/20 %
74
+ x_train, x_test, y_train, y_test, s_train, s_test = train_test_split(x, y, s, test_size=.2, random_state=1)
75
+ x_train, x_val, y_train, y_val, s_train, s_val = train_test_split(x_train, y_train, s_train, test_size=.125, random_state=1)
76
+
77
+ if split == 'train':
78
+ self.x = x_train
79
+ self.y = y_train
80
+ self.s = s_train
81
+ elif split == 'val':
82
+ self.x = x_val
83
+ self.y = y_val
84
+ self.s = s_val
85
+ elif split == 'test':
86
+ self.x = x_test
87
+ self.y = y_test
88
+ self.s = s_test
89
+
90
+ print("loaded {} instances for split {}. y positives={}, {} positives={}".format(
91
+ len(self.y), split, sum(self.y), sensible_attribute, sum(self.s)))
92
+
93
+
94
+ def __len__(self):
95
+ return len(self.y)
96
+
97
+ def __getitem__(self, index):
98
+ return dict(data=self.x[index], labels=self.y[index], sensible_attribute=self.s[index])
99
+
100
+ def task_names(self):
101
+ return None
bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/credit_loader.py ADDED
@@ -0,0 +1,80 @@
1
+ import torch
2
+ import os
3
+ import pandas as pd
4
+
5
+ from sklearn.model_selection import train_test_split
6
+ from sklearn.preprocessing import StandardScaler
7
+
8
+
9
+
10
+ def load_dataset(path, s_label):
11
+ data = pd.read_csv( path )
12
+
13
+ # convert categorical columns
14
+ to_categorical = [
15
+ 'EDUCATION', 'MARRIAGE', 'PAY_0', 'PAY_2',
16
+ 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6'
17
+ ]
18
+ for column in to_categorical:
19
+ data[column] = data[column].astype('category')
20
+
21
+ data.SEX = data.SEX.replace(1, 0) # male
22
+ data.SEX = data.SEX.replace(2, 1) # female
23
+
24
+ # Scale and split
25
+ data1 = data.copy()
26
+ data1 = data1.drop(['default.payment.next.month', s_label], axis=1)
27
+ data1 = pd.get_dummies(data1)
28
+
29
+ x = StandardScaler().fit(data1).transform(data1)
30
+ y = data['default.payment.next.month'].values
31
+ s = data[s_label].values
32
+
33
+ return x, y, s
34
+
35
+
36
+ class Credit(torch.utils.data.Dataset):
37
+
38
+ def __init__(self, split, sensible_attribute='SEX'):
39
+ assert split in ['train', 'val', 'test']
40
+
41
+ folder_name = os.path.dirname(os.path.dirname(__file__))
42
+ path = os.path.join(folder_name, 'mtldata', "credit.csv")
43
+
44
+ x, y, s = load_dataset(path, sensible_attribute)
45
+
46
+ x = torch.from_numpy(x).float()
47
+ y = torch.from_numpy(y).long()
48
+ s = torch.from_numpy(s).long()
49
+
50
+ # train/val/test split: 70/10/20 %
51
+ x_train, x_test, y_train, y_test, s_train, s_test = train_test_split(x, y, s, test_size=.2, random_state=1)
52
+ x_train, x_val, y_train, y_val, s_train, s_val = train_test_split(x_train, y_train, s_train, test_size=.125, random_state=1)
53
+
54
+ if split == 'train':
55
+ self.x = x_train
56
+ self.y = y_train
57
+ self.s = s_train
58
+ elif split == 'val':
59
+ self.x = x_val
60
+ self.y = y_val
61
+ self.s = s_val
62
+ elif split == 'test':
63
+ self.x = x_test
64
+ self.y = y_test
65
+ self.s = s_test
66
+
67
+ print("loaded {} instances for split {}. y positives={}, {} positives={}".format(
68
+ len(self.y), split, sum(self.y), sensible_attribute, sum(self.s)))
69
+
70
+
71
+ def __len__(self):
72
+ return len(self.y)
73
+
74
+ def __getitem__(self, index):
75
+ return dict(data=self.x[index], labels=self.y[index], sensible_attribute=self.s[index])
76
+
77
+ def task_names(self):
78
+ return None
79
+
80
+
bike_bench_internal/benchmark_models/libmoon/problem/mtl/loaders/multimnist_loader.py ADDED
@@ -0,0 +1,101 @@
1
+ import torch
2
+ import pickle
3
+ from sklearn.model_selection import train_test_split
4
+ # taken from https://github.com/Xi-L/ParetoMTL and adapted
5
+ import os
6
+
7
+ from libmoon.util_global.constant import root_name
8
+
9
+
10
+ class MultiMNISTData(torch.utils.data.Dataset):
11
+ """
12
+ The datasets from ParetoMTL
13
+ """
14
+ def __init__(self, dataset, split, root='data/multi', **kwargs):
15
+ assert dataset in ['mnist', 'fashion', 'fmnist']
16
+ assert split in ['train', 'val', 'test']
17
+
18
+ # a fraction of the training data is held out for validation (val_size below)
19
+ train_split = .9
20
+
21
+ if dataset == 'mnist':
22
+ self.path = os.path.join(root_name, 'problem', 'mtl', 'data', 'multimnist', 'mnist.pickle')
23
+
24
+ elif dataset == 'fashion':
25
+ self.path = os.path.join(root_name, 'problem', 'mtl', 'data', 'multimnist', 'fashion.pickle')
26
+
27
+ elif dataset == 'fmnist':
28
+ self.path = os.path.join(root_name, 'problem', 'mtl', 'data', 'multimnist', 'fmnist.pickle')
29
+
30
+
31
+ self.val_size = .1
32
+ with open(self.path, 'rb') as f:
33
+ trainX, trainLabel, testX, testLabel = pickle.load(f)
34
+
35
+ n_train = len(trainX)
36
+ if self.val_size > 0:
37
+ trainX, valX, trainLabel, valLabel = train_test_split(
38
+ trainX, trainLabel, test_size=self.val_size, random_state=42
39
+ )
40
+ n_train = len(trainX)
41
+ n_val = len(valX)
42
+
43
+ trainX = torch.from_numpy(trainX.reshape(n_train, 1, 36, 36)).float()
44
+ trainLabel = torch.from_numpy(trainLabel).long()
45
+ testX = torch.from_numpy(testX.reshape(20000, 1, 36, 36)).float()
46
+ testLabel = torch.from_numpy(testLabel).long()
47
+
48
+ if self.val_size > 0:
49
+ valX = torch.from_numpy(valX.reshape(n_val, 1, 36, 36)).float()
50
+ valLabel = torch.from_numpy(valLabel).long()
51
+
52
+ if split in ['train', 'val']:
53
+ n = int(len(trainX) * train_split)
54
+ if split == 'val':
55
+ self.X = valX
56
+ self.y = valLabel
57
+ elif split == 'train':
58
+ self.X = trainX
59
+ self.y = trainLabel
60
+ elif split == 'test':
61
+ self.X = testX
62
+ self.y = testLabel
63
+
64
+ def __getitem__(self, index):
65
+ return dict(data=self.X[index], labels_l=self.y[index, 0], labels_r=self.y[index, 1])
66
+
67
+ def __len__(self):
68
+ return len(self.X)
69
+
70
+ def task_names(self):
71
+ return ['l', 'r']
72
+
73
+
74
+
75
+
76
+
77
+
78
+
79
+ if __name__ == '__main__':
80
+ import matplotlib.pyplot as plt
81
+ dst = MultiMNISTData(dataset='mnist', split='val')
82
+ loader = torch.utils.data.DataLoader(dst, batch_size=10, shuffle=True, num_workers=0)
83
+
84
+ for dat in loader:
85
+ ims = dat['data'].view(10, 36, 36).numpy()
86
+ labs_l = dat['labels_l']
87
+ labs_r = dat['labels_r']
88
+
89
+ f, axarr = plt.subplots(2, 5)
90
+ for j in range(5):
91
+ for i in range(2):
92
+ axarr[i][j].imshow(ims[j * 2 + i, :, :], cmap='gray')
93
+ axarr[i][j].set_title('{}_{}'.format(labs_l[j * 2 + i], labs_r[j * 2 + i]))
94
+ plt.show()
95
+ a = input()
96
+
97
+ if a == 'ex':
98
+ break
99
+ else:
100
+ plt.close()
101
+
bike_bench_internal/benchmark_models/libmoon/problem/mtl/mnist.py ADDED
@@ -0,0 +1,189 @@
1
+ import matplotlib.pyplot as plt
2
+ from libmoon.util_global.constant import root_name
3
+ from libmoon.problem.mtl.loaders.multimnist_loader import MultiMNISTData
4
+ import torch
5
+
6
+ from libmoon.problem.mtl.objectives import CrossEntropyLoss
7
+ from libmoon.problem.mtl.model.simple import MultiLeNet
8
+ from libmoon.util_global.weight_factor.funs import uniform_pref
9
+ from libmoon.util_global.constant import FONT_SIZE
10
+
11
+ loss_1 = CrossEntropyLoss(label_name='labels_l', logits_name='logits_l')
12
+ loss_2 = CrossEntropyLoss(label_name='labels_r', logits_name='logits_r')
13
+
14
+ from tqdm import tqdm
15
+ import numpy as np
16
+ from numpy import array
17
+ import os
18
+ from solver.gradient import get_core_solver
19
+ from solver.gradient.utils.util import get_grads_from_model, numel_params
20
+ from libmoon.util_global.constant import is_pref_based
21
+ import itertools
22
+
23
+ class MultiMnistProblem:
24
+
25
+ # Trains one model per preference vector (or a single set-based solver) on both MultiMNIST tasks at the same time.
26
+ def __init__(self, args, prefs):
27
+ self.dataset = MultiMNISTData('mnist', 'train')
28
+ self.args = args
29
+ self.loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.args.batch_size, shuffle=True,
30
+ num_workers=0)
31
+ self.dataset_test = MultiMNISTData('mnist', 'test')
32
+ self.loader_test = torch.utils.data.DataLoader(self.dataset_test, batch_size=args.batch_size, shuffle=True,
33
+ num_workers=0)
34
+
35
+ self.lr = args.lr
36
+ self.prefs = prefs
37
+ self.n_prob = len(prefs)
38
+
39
+ self.model_arr = [MultiLeNet([1, 36, 36]) for _ in range(self.n_prob)]
40
+
41
+ num_params = numel_params(self.model_arr[0])
42
+ print('num_params: ', num_params)
43
+
44
+ for model in self.model_arr:
45
+ model.to(args.device)
46
+
47
+ self.is_pref_flag = is_pref_based(args.mtd)
48
+
49
+ if self.is_pref_flag:
50
+ self.core_solver_arr = [get_core_solver(args, pref) for pref in prefs]
51
+ self.optimizer_arr = [torch.optim.Adam(self.model_arr[idx].parameters(), lr=self.lr) for idx in
52
+ range(self.n_prob)]
53
+ else:
54
+ self.set_core_solver = get_core_solver(args)
55
+
56
+ params = [model.parameters() for model in self.model_arr]
57
+ self.set_optimizer = torch.optim.Adam(itertools.chain(*params), lr=0.01)
58
+
59
+
60
+ def optimize(self):
61
+ loss_all = []
62
+ for _ in tqdm(range(self.args.num_epoch)):
63
+ if self.is_pref_flag:
64
+ loss_history = [[] for i in range(self.n_prob)]
65
+ else:
66
+ loss_history = []
67
+
68
+ for data in self.loader:
69
+ data_ = {k: v.to(self.args.device) for k, v in data.items()}
70
+
71
+ # pref based mtd
72
+ if self.is_pref_flag:
73
+ for pref_idx, (pref, model, optimizer) in enumerate(
74
+ zip(self.prefs, self.model_arr, self.optimizer_arr)):
75
+ logits_dict = self.model_arr[pref_idx](data_)
76
+ logits_dict['labels_l'] = data_['labels_l']
77
+ logits_dict['labels_r'] = data_['labels_r']
78
+ l1 = loss_1(**logits_dict)
79
+ l2 = loss_2(**logits_dict)
80
+
81
+ l_contains_grad = [l1, l2]
82
+ G = get_grads_from_model(l_contains_grad, model)
83
+
84
+ l1_np = np.array(l1.cpu().detach().numpy(), copy=True)
85
+ l2_np = np.array(l2.cpu().detach().numpy(), copy=True)
86
+ losses = array([l1_np, l2_np])
87
+ alpha = self.core_solver_arr[pref_idx].get_alpha(G = G, losses=losses)
88
+ self.optimizer_arr[pref_idx].zero_grad()
89
+ (alpha[0] * l1 + alpha[1] * l2).backward()
90
+ self.optimizer_arr[pref_idx].step()
91
+ l1_np = np.array(l1.cpu().detach().numpy(), copy=True)
92
+ l2_np = np.array(l2.cpu().detach().numpy(), copy=True)
93
+ loss_history[pref_idx].append([l1_np, l2_np])
94
+ else:
95
+ # set based method is more complicated.
96
+ losses = [0,] * self.n_prob
97
+ losses_ts = [0] * self.n_prob
98
+
99
+ for model_idx, model in enumerate(self.model_arr):
100
+ logits_dict = self.model_arr[model_idx](data_)
101
+ logits_dict['labels_l'] = data_['labels_l']
102
+ logits_dict['labels_r'] = data_['labels_r']
103
+ l1 = loss_1(**logits_dict)
104
+ l2 = loss_2(**logits_dict)
105
+
106
+ losses_ts[model_idx] = torch.stack([l1, l2])
107
+
108
+ l1_np, l2_np = np.array(l1.cpu().detach().numpy(), copy=True), np.array(l2.cpu().detach().numpy(), copy=True)
109
+ losses[model_idx] = [l1_np, l2_np]
110
+
111
+ losses_ts = torch.stack(losses_ts)
112
+ losses = np.array(losses)
113
+ alpha = self.set_core_solver.get_alpha(losses).to(self.args.device)
114
+ self.set_optimizer.zero_grad()
115
+ torch.sum(alpha * losses_ts).backward()
116
+ self.set_optimizer.step()
117
+ loss_history.append(losses)
118
+ loss_history = np.array(loss_history)
119
+ if self.is_pref_flag:
120
+ loss_history_mean = np.mean(loss_history, axis=1)
121
+ else:
122
+ loss_history_mean = np.mean(loss_history, axis=0)
123
+ loss_all.append(loss_history_mean)
124
+ return loss_all
125
+
126
+
127
+
128
+ if __name__ == '__main__':
129
+ import argparse
130
+ parser = argparse.ArgumentParser()
131
+
132
+ parser.add_argument('--problem', default='mnist', type=str)  # the dataset/problem name is always stored in args.problem
133
+ parser.add_argument('--split', default='train', type=str)
134
+ parser.add_argument('--batch_size', default=512, type=int)
135
+ parser.add_argument('--shuffle', default=True, type=bool)
136
+ parser.add_argument('--lr', default=1e-2, type=float)
137
+ parser.add_argument('--num_epoch', default=10, type=int)
138
+ parser.add_argument('--use-cuda', default=True, type=bool)
139
+
140
+ parser.add_argument('--mtd', default='hvgrad', type=str)
141
+ parser.add_argument('--agg-mtd', default='ls', type=str) # This att is only valid when args.mtd=agg.
142
+ parser.add_argument('--n-obj', default=2, type=int) # This att is only valid when args.mtd=agg.
143
+
144
+ args = parser.parse_args()
145
+ args.is_pref_based = is_pref_based(args.mtd)
146
+ if torch.cuda.is_available() and args.use_cuda:
147
+ args.device = torch.device("cuda") # Use the GPU
148
+ print('cuda is available')
149
+ else:
150
+ args.device = torch.device("cpu") # Use the CPU
151
+ print('cuda is not available')
152
+
153
+ prefs = uniform_pref(n_partition=10, n_obj=2, clip_eps=0.1)
154
+ args.n_prob = len(prefs)
155
+
156
+ problem = MultiMnistProblem(args, prefs)
157
+ # args.n_obj = problem.n_obj
158
+
159
+ loss_history = problem.optimize()
160
+ loss_history = np.array(loss_history)
161
+
162
+ final_solution = loss_history[-1,:,:]
163
+ # plt.scatter(final_solution[:,0], final_solution[:,1], label='final solution')
164
+ for idx in range(loss_history.shape[1]):
165
+ plt.plot(loss_history[:,idx,0], loss_history[:,idx,1], 'o-', label='pref {}'.format(idx))
166
+
167
+ plt.plot(final_solution[:,0], final_solution[:,1], color='k', linewidth=3)
168
+
169
+ plt.legend(fontsize=FONT_SIZE)
170
+ # draw pref
171
+ solution_norm = np.linalg.norm(final_solution, axis=1, keepdims=True)
172
+ prefs_norm = prefs / np.linalg.norm(prefs, axis=1, keepdims=True) * solution_norm
173
+
174
+ if args.is_pref_based:
175
+ for pref in prefs_norm:
176
+ plt.plot([0, pref[0]], [0, pref[1]], color='k')
177
+
178
+
179
+ plt.xlabel('$L_1$', fontsize=FONT_SIZE)
180
+ plt.ylabel('$L_2$', fontsize=FONT_SIZE)
181
+
182
+
183
+ folder_name = os.path.join( root_name, 'output', args.problem, args.mtd)
184
+ os.makedirs(folder_name, exist_ok=True)
185
+ fig_name = os.path.join(folder_name, 'final_solution.svg')
186
+ plt.savefig(fig_name)
187
+ print('saved in ', fig_name)
188
+
189
+ plt.show()
bike_bench_internal/benchmark_models/libmoon/problem/mtl/model/__init__.py ADDED
File without changes
bike_bench_internal/benchmark_models/libmoon/problem/mtl/model/simple.py ADDED
@@ -0,0 +1,77 @@
1
+ import torch.nn as nn
2
+
3
+ '''
4
+ Model architectures used by the MTL problems.
5
+ '''
6
+
7
+
8
+
9
+ class MultiLeNet(nn.Module):
10
+
11
+ def __init__(self, dim, **kwargs):
12
+
13
+ '''
14
+ :param dim: a 3d-array: [channel, height, width]
15
+ :param kwargs:
16
+ '''
17
+ super().__init__()
18
+ self.shared = nn.Sequential(
19
+ nn.Conv2d(dim[0], 10, kernel_size=5),
20
+ nn.MaxPool2d(kernel_size=2),
21
+ nn.ReLU(),
22
+ nn.Conv2d(10, 20, kernel_size=5),
23
+ nn.MaxPool2d(kernel_size=2),
24
+ nn.ReLU(),
25
+ nn.Flatten(),
26
+ nn.Linear(720, 50),
27
+ nn.ReLU(),
28
+ )
29
+ self.private_left = nn.Linear(50, 10)
30
+ self.private_right = nn.Linear(50, 10)
31
+
32
+
33
+ def forward(self, batch):
34
+ x = batch['data']
35
+ x = self.shared(x)
36
+ return dict(logits_l=self.private_left(x), logits_r=self.private_right(x))
37
+
38
+ def private_params(self):
39
+ return ['private_left.weight', 'private_left.bias', 'private_right.weight', 'private_right.bias']
40
+
41
+
42
+
43
+
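+ # Shape note for MultiLeNet (a sketch of the assumed 1x36x36 MultiMNIST input):
+ # conv(5) -> pool(2) -> conv(5) -> pool(2) maps 36x36 to 20 channels of 6x6, i.e. 720
+ # features, which is why the first linear layer is nn.Linear(720, 50).
+ # Hypothetical usage:
+ #     out = MultiLeNet([1, 36, 36])({'data': torch.randn(8, 1, 36, 36)})
+ #     # out['logits_l'].shape == out['logits_r'].shape == torch.Size([8, 10])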
44
+
45
+ class FullyConnected(nn.Module):
46
+ def __init__(self, dim, **kwargs):
47
+ super().__init__()
48
+ self.f = nn.Sequential(
49
+ nn.Linear(dim[0], 60),
50
+ nn.ReLU(),
51
+ nn.Linear(60, 25),
52
+ nn.ReLU(),
53
+ nn.Linear(25, 1),
54
+ )
55
+
56
+ def forward(self, batch):
57
+ x = batch['data']
58
+ return dict(logits=self.f(x))
59
+
60
+
61
+
62
+
63
+
64
+ if __name__ == '__main__':
65
+ from libmoon.util_global.constant import root_name
66
+ import os
67
+ import pickle
68
+
69
+
70
+
71
+
72
+ pickle_name = os.path.join(root_name, 'problem', 'mtl', 'data', 'multimnist', 'mnist.pickle')
73
+ with open(pickle_name, 'rb') as f:
74
+ data = pickle.load(f)
75
+
76
+ model = MultiLeNet([3, 32, 32])
77
+ print('hello world')
bike_bench_internal/benchmark_models/libmoon/problem/mtl/objectives.py ADDED
@@ -0,0 +1,179 @@
1
+ import torch
2
+
3
+ def from_name(names, task_names):
4
+ objectives = {
5
+ 'CrossEntropyLoss': CrossEntropyLoss,
6
+ 'BinaryCrossEntropyLoss': BinaryCrossEntropyLoss,
7
+ 'L1Regularization': L1Regularization,
8
+ 'L2Regularization': L2Regularization,
9
+ 'ddp': DDPHyperbolicTangentRelaxation,
10
+ 'deo': DEOHyperbolicTangentRelaxation,
11
+ }
12
+
13
+ if task_names is not None:
14
+ return [objectives[n]("labels_{}".format(t), "logits_{}".format(t)) for n, t in zip(names, task_names)]
15
+ else:
16
+ return [ objectives[n]() for n in names ]
17
+
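+ # Example (hypothetical call, mirroring how the MultiMNIST heads are named):
+ #     from_name(['CrossEntropyLoss', 'CrossEntropyLoss'], ['l', 'r'])
+ # returns two losses bound to labels_l/logits_l and labels_r/logits_r respectively.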
18
+
19
+ class CrossEntropyLoss(torch.nn.CrossEntropyLoss):
20
+ def __init__(self, label_name='labels', logits_name='logits'):
21
+ super().__init__(reduction='mean')
22
+ self.label_name = label_name
23
+ self.logits_name = logits_name
24
+
25
+ def __call__(self, **kwargs):
26
+ logits = kwargs[self.logits_name]
27
+ labels = kwargs[self.label_name]
28
+ return super().__call__(logits, labels)
29
+
30
+
31
+
32
+ class BinaryCrossEntropyLoss(torch.nn.BCEWithLogitsLoss):
33
+
34
+ def __init__(self, label_name='labels', logits_name='logits', pos_weight=None):
35
+ super().__init__(reduction='mean', pos_weight=torch.Tensor([pos_weight]).cuda() if pos_weight else None)
36
+ self.label_name = label_name
37
+ self.logits_name = logits_name
38
+
39
+ def __call__(self, **kwargs):
40
+ logits = kwargs[self.logits_name]
41
+ labels = kwargs[self.label_name]
42
+ if logits.ndim == 2:
43
+ logits = torch.squeeze(logits)
44
+ if labels.dtype != torch.float:
45
+ labels = labels.float()
46
+ return super().__call__(logits, labels)
47
+
48
+
49
+
50
+
51
+
52
+ class MSELoss(torch.nn.MSELoss):
53
+
54
+ def __init__(self, label_name='labels'):
55
+ super().__init__()
56
+ self.label_name = label_name
57
+
58
+ def __call__(self, **kwargs):
59
+ logits = kwargs['logits']
60
+ labels = kwargs[self.label_name]
61
+ if logits.ndim == 2:
62
+ logits = torch.squeeze(logits)
63
+ return super().__call__(logits, labels)
64
+
65
+
66
+ class L1Regularization():
67
+
68
+ def __call__(self, **kwargs):
69
+ model = kwargs['model']
70
+ return torch.linalg.norm(torch.cat([p.view(-1) for p in model.parameters()]), ord=1)
71
+
72
+
73
+ class L2Regularization():
74
+
75
+ def __call__(self, **kwargs):
76
+ model = kwargs['model']
77
+ return torch.linalg.norm(torch.cat([p.view(-1) for p in model.parameters()]), ord=2)
78
+
79
+
80
+ class DDPHyperbolicTangentRelaxation():
81
+
82
+ def __init__(self, label_name='labels', logits_name='logits', s_name='sensible_attribute', c=1):
83
+ self.label_name = label_name
84
+ self.logits_name = logits_name
85
+ self.s_name = s_name
86
+ self.c = c
87
+
88
+ def __call__(self, **kwargs):
89
+ logits = kwargs[self.logits_name]
90
+ labels = kwargs[self.label_name]
91
+ sensible_attribute = kwargs[self.s_name]
92
+
93
+ n = logits.shape[0]
94
+ logits = torch.sigmoid(logits)
95
+ s_negative = logits[sensible_attribute.bool()]
96
+ s_positive = logits[~sensible_attribute.bool()]
97
+
98
+ return 1 / n * torch.abs(torch.sum(torch.tanh(self.c * torch.relu(s_positive))) - torch.sum(
99
+ torch.tanh(self.c * torch.relu(s_negative))))
100
+
101
+
102
+ class DEOHyperbolicTangentRelaxation():
103
+
104
+ def __init__(self, label_name='labels', logits_name='logits', s_name='sensible_attribute', c=1):
105
+ self.label_name = label_name
106
+ self.logits_name = logits_name
107
+ self.s_name = s_name
108
+ self.c = c
109
+
110
+ def __call__(self, **kwargs):
111
+ logits = kwargs[self.logits_name]
112
+ labels = kwargs[self.label_name]
113
+ sensible_attribute = kwargs[self.s_name]
114
+
115
+ n = logits.shape[0]
116
+ logits = torch.sigmoid(logits)
117
+ s_negative = logits[(sensible_attribute.bool()) & (labels == 1)]
118
+ s_positive = logits[(~sensible_attribute.bool()) & (labels == 1)]
119
+
120
+ return 1 / n * torch.abs(torch.sum(torch.tanh(self.c * torch.relu(s_positive))) - torch.sum(
121
+ torch.tanh(self.c * torch.relu(s_negative))))
122
+
123
+
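+ # Minimal sketch of how the fairness relaxations above are evaluated (the tensors
+ # below are made-up stand-ins, not data from the benchmark):
+ #     ddp = DDPHyperbolicTangentRelaxation()
+ #     value = ddp(logits=torch.randn(16), labels=torch.randint(0, 2, (16,)),
+ #                 sensible_attribute=torch.randint(0, 2, (16,)))
+ # The result is the absolute difference of tanh-relaxed positive rates between the
+ # two sensitive groups, scaled by 1/n.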
124
+ """
125
+ Popular problem proposed by
126
+
127
+ Carlos Manuel Mira da Fonseca. Multiobjective genetic algorithms with
128
+ application to control engineering problems. PhD thesis, University of Sheffield, 1995.
129
+
130
+ with a concave pareto front.
131
+
132
+ $ \mathcal{L}_1(\theta) = 1 - \exp\{ -\lVert \theta - 1/\sqrt{d} \rVert^2 \} $
133
+ $ \mathcal{L}_2(\theta) = 1 - \exp\{ -\lVert \theta + 1/\sqrt{d} \rVert^2 \} $
134
+
135
+ with $\theta \in R^d$ and $ d = 100$
136
+ """
137
+
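+ # A minimal, self-contained sketch of the two Fonseca objectives described above,
+ # written with torch so it is differentiable end to end. It is illustrative only and
+ # the name fonseca_losses is hypothetical, not part of the original module.
+ def fonseca_losses(theta):
+     d = theta.shape[-1]
+     shift = 1.0 / (d ** 0.5)
+     l1 = 1 - torch.exp(-torch.sum((theta - shift) ** 2, dim=-1))
+     l2 = 1 - torch.exp(-torch.sum((theta + shift) ** 2, dim=-1))
+     return torch.stack([l1, l2], dim=-1)
+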
138
+
139
+ #
140
+ # class Fonseca1():
141
+ #
142
+ # def f1(theta):
143
+ # d = len(theta)
144
+ # sum1 = autograd.numpy.sum([(theta[i] - 1.0 / autograd.numpy.sqrt(d)) ** 2 for i in range(d)])
145
+ # f1 = 1 - autograd.numpy.exp(- sum1)
146
+ # return f1
147
+ #
148
+ # f1_dx = autograd.grad(f1)
149
+ #
150
+ # def __call__(self, **kwargs):
151
+ # return f1(kwargs['parameters'])
152
+ #
153
+ # def gradient(self, **kwargs):
154
+ # return f1_dx(kwargs['parameters'])
155
+ #
156
+ #
157
+ # class Fonseca2():
158
+ #
159
+ # def f2(theta):
160
+ # d = len(theta)
161
+ # sum1 = autograd.numpy.sum([(theta[i] + 1.0 / autograd.numpy.sqrt(d)) ** 2 for i in range(d)])
162
+ # f1 = 1 - autograd.numpy.exp(- sum1)
163
+ # return f1
164
+ #
165
+ # f2_dx = autograd.grad(f2)
166
+ #
167
+ # def __call__(self, **kwargs):
168
+ # return f2(kwargs['parameters'])
169
+ #
170
+ # def gradient(self, **kwargs):
171
+ # return f2_dx(kwargs['parameters'])
172
+
173
+
174
+
175
+
176
+
177
+ #
178
+ # if __name__ == '__main__':
179
+ # problem = Fonseca1()
bike_bench_internal/benchmark_models/libmoon/problem/synthetic/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from .vlmop import VLMOP1, VLMOP2
2
+ from .zdt import ZDT1, ZDT2, ZDT3, ZDT4, ZDT6
3
+ from .maf import MAF1
4
+
bike_bench_internal/benchmark_models/libmoon/problem/synthetic/dtlz.py ADDED
@@ -0,0 +1,150 @@
1
+
2
+ import numpy as np
3
+ import torch
4
+ from ..mop import mop
5
+
6
+
7
+
8
+
9
+ class DTLZ1(mop):
10
+
11
+ def __init__(self, n_var=30, n_obj=3, lbound=np.zeros(30),
12
+ ubound=np.ones(30)):
13
+
14
+ super().__init__(n_var=n_var,
15
+ n_obj=n_obj,
16
+ lbound=lbound,
17
+ ubound=ubound)
18
+ self.problem_name= 'DTLZ1'
19
+
20
+ def _evaluate_torch(self, x: torch.Tensor):
21
+ x1 = x[:,0]
22
+ x2 = x[:,1]
23
+ xm = x[:, 2:]
24
+
25
+ g = 100 * (self.n_var - 2 + torch.sum(torch.pow(xm - 0.5, 2) -
26
+ torch.cos(20 * np.pi * (xm - 0.5)), dim=1))
27
+
28
+ f1 = 0.5 * x1 * x2 * (1+g)
29
+ f2 = 0.5 * x1 * (1 - x2) * (1+g)
30
+ f3 = 0.5 * (1 - x1) * (1+g)
31
+ return torch.stack((f1, f2, f3), dim=1)
32
+
33
+ def _evaluate_numpy(self, x: np.ndarray):
34
+ x1 = x[:, 0]
35
+ x2 = x[:, 1]
36
+ xm = x[:, 2:]
37
+
38
+ g = 100 * (self.n_var - 2 + np.sum(np.power(xm - 0.5, 2) -
39
+ np.cos(20 * np.pi * (xm - 0.5)), axis=1))
40
+
41
+ f1 = 0.5 * x1 * x2 * (1+g)
42
+ f2 = 0.5 * x1 * (1 - x2) * (1+g)
43
+ f3 = 0.5 * (1 - x1) * (1+g)
44
+ return np.stack((f1, f2, f3), axis=1)
45
+
46
+
47
+
48
+ class DTLZ2(mop):
49
+ def __init__(self, n_var=30, n_obj=3, lbound=np.zeros(30),
50
+ ubound=np.ones(30)):
51
+ super().__init__(n_var=n_var,
52
+ n_obj=n_obj,
53
+ lbound=lbound,
54
+ ubound=ubound, )
55
+ self.problem_name = 'DTLZ2'
56
+
57
+ def _evaluate_torch(self, x):
58
+ xm = x[:, 2:]
59
+ g = torch.sum(torch.pow(xm - 0.5, 2), dim=1)
60
+ f1 = torch.cos(x[:, 0] * np.pi / 2) * torch.cos(x[:, 1] * np.pi / 2) * (1 + g)
61
+ f2 = torch.cos(x[:, 0] * np.pi / 2) * torch.sin(x[:, 1] * np.pi / 2) * (1 + g)
62
+ f3 = torch.sin(x[:, 0] * np.pi / 2) * (1 + g)
63
+ return torch.stack((f1, f2, f3), dim=1)
64
+
65
+ def _evaluate_numpy(self, x):
66
+ xm = x[:, 2:]
67
+ g = np.sum(np.power(xm - 0.5, 2), axis=1)
68
+ f1 = np.cos(x[:, 0] * np.pi / 2) * np.cos(x[:, 1] * np.pi / 2) * (1 + g)
69
+ f2 = np.cos(x[:, 0] * np.pi / 2) * np.sin(x[:, 1] * np.pi / 2) * (1 + g)
70
+ f3 = np.sin(x[:, 0] * np.pi / 2) * (1 + g)
71
+ return np.stack((f1, f2, f3), axis=1)
72
+
73
+
74
+ class DTLZ3(mop):
75
+ def __init__(self, n_var=30, n_obj=3, lbound=np.zeros(30),
76
+ ubound=np.ones(30)):
77
+ super().__init__(n_var=n_var,
78
+ n_obj=n_obj,
79
+ lbound=lbound,
80
+ ubound=ubound, )
81
+ self.problem_name = 'DTLZ3'
82
+
83
+
84
+ def _evaluate_torch(self, x):
85
+ xm = x[:, 2:]
86
+ g = 100 * (self.n_var - 2 + torch.sum(torch.pow(xm - 0.5, 2) -
87
+ torch.cos(20 * np.pi * (xm - 0.5)), dim=1))
88
+ f1 = torch.cos(x[:, 0] * np.pi / 2) * torch.cos(x[:, 1] * np.pi / 2) * (1 + g)
89
+ f2 = torch.cos(x[:, 0] * np.pi / 2) * torch.sin(x[:, 1] * np.pi / 2) * (1 + g)
90
+ f3 = torch.sin(x[:, 0] * np.pi / 2) * (1 + g)
91
+ return torch.stack((f1, f2, f3), dim=1)
92
+
93
+ def _evaluate_numpy(self, x):
94
+ xm = x[:, 2:]
95
+
96
+ g = 100 * (self.n_var - 2 + np.sum(np.power(xm - 0.5, 2) -
97
+ np.cos(20 * np.pi * (xm - 0.5)), axis=1))
98
+
99
+ f1 = np.cos(x[:, 0] * np.pi / 2) * np.cos(x[:, 1] * np.pi / 2) * (1 + g)
100
+ f2 = np.cos(x[:, 0] * np.pi / 2) * np.sin(x[:, 1] * np.pi / 2) * (1 + g)
101
+ f3 = np.sin(x[:, 0] * np.pi / 2) * (1 + g)
102
+ return np.stack((f1, f2, f3), axis=1)
103
+
104
+
105
+ class DTLZ4(mop):
106
+ def __init__(self, n_var=30, n_obj=3, lbound=np.zeros(30),
107
+ ubound=np.ones(30)):
108
+ super().__init__(n_var=n_var,
109
+ n_obj=n_obj,
110
+ lbound=lbound,
111
+ ubound=ubound, )
112
+ self.problem_name = 'DTLZ4'
113
+ self.alpha = 20
114
+
115
+ def _evaluate_torch(self, x):
116
+ xm = x[:, 2:]
117
+ g = torch.sum(torch.pow(xm - 0.5, 2), dim=1)
118
+ # alpha = 1
119
+
120
+ f1 = torch.cos(x[:, 0] ** self.alpha * np.pi / 2) * torch.cos(x[:, 1] ** self.alpha * np.pi / 2) * (1 + g)
121
+ f2 = torch.cos(x[:, 0] ** self.alpha * np.pi / 2) * torch.sin(x[:, 1] ** self.alpha * np.pi / 2) * (1 + g)
122
+ f3 = torch.sin(x[:, 0] ** self.alpha * np.pi / 2) * (1 + g)
123
+ return torch.stack((f1, f2, f3), dim=1)
124
+
125
+ def _evaluate_numpy(self, x):
126
+ xm = x[:, 2:]
127
+ g = np.sum(np.power(xm - 0.5, 2), axis=1)
128
+
129
+ f1 = np.cos(x[:, 0] ** self.alpha * np.pi / 2) * np.cos(x[:, 1] ** self.alpha * np.pi / 2) * (1 + g)
130
+ f2 = np.cos(x[:, 0] ** self.alpha * np.pi / 2) * np.sin(x[:, 1] ** self.alpha * np.pi / 2) * (1 + g)
131
+ f3 = np.sin(x[:, 0] ** self.alpha * np.pi / 2) * (1 + g)
132
+ return np.stack((f1, f2, f3), axis=1 )
133
+
134
+ # DTLZ5 and DTLZ6 are omitted here:
135
+ # their Pareto fronts are degenerate.
136
+
137
+
138
+ # DTLZ7 has disjoint Pareto front.
139
+
140
+
141
+
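+ # Sketch (standard DTLZ2 property, stated for illustration): Pareto-optimal points of
+ # DTLZ2 have x[:, 2:] = 0.5, so g = 0 and the objectives lie on the unit sphere:
+ #     x = torch.rand(100, 30); x[:, 2:] = 0.5
+ #     f = DTLZ2().evaluate(x)        # torch.sum(f ** 2, dim=1) is approximately 1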
142
+ if __name__ == '__main__':
143
+ x = torch.rand(100, 30)
144
+ problem = DTLZ4()
145
+
146
+ y = problem.evaluate(x)
147
+ print( y )
148
+
149
+
150
+
bike_bench_internal/benchmark_models/libmoon/problem/synthetic/maf.py ADDED
@@ -0,0 +1,44 @@
1
+
2
+ from numpy import array
3
+ import torch
4
+
5
+
6
+
7
+ class MAF1:
8
+ def __init__(self):
9
+ '''
10
+ n_obj can be set to any number. For simplicity, we set it to 3.
11
+ '''
12
+ self.n_obj = 3
13
+ self.n_var = 30
14
+ self.lb = 0
15
+ self.ub = 1
16
+
17
+ def evaluate(self, x):
18
+ if type(x) == torch.Tensor:
19
+
20
+ g = torch.sum( torch.pow(x[:, 2:] - 0.5, 2), dim=1 )
21
+
22
+ f1 = (1 - x[:,0] * x[:,1]) * (1 + g)
23
+ f2 = (1 - x[:,0] * (1 - x[:,1]) ) * (1 + g)
24
+ f3 = x[:,0] * (1 + g)
25
+
26
+ return torch.stack((f1, f2, f3), dim=1)
27
+
28
+ else:
29
+ assert False
30
+
31
+
32
+
33
+ def get_pf(self):
34
+ return array([[0.0, 0.0, 0.0]])
35
+
36
+
37
+
38
+ if __name__ == '__main__':
39
+ x = torch.rand(100, 30)
40
+ problem = MAF1()
41
+
42
+ y = problem.evaluate(x)
43
+ print(y)
44
+
bike_bench_internal/benchmark_models/libmoon/problem/synthetic/re.py ADDED
@@ -0,0 +1,619 @@
1
+
2
+ import numpy as np
3
+ import torch
4
+ from ..mop import mop
5
+
6
+ from numpy import array
7
+
8
+
9
+ class RE21(mop):
10
+ def __init__(self, n_var=4, n_obj=2, lbound=np.zeros(4), ubound=np.ones(4)):
11
+ self.problem_name = 'RE21'
12
+ self.n_var = n_var
13
+ self.n_obj = n_obj
14
+ self.n_cons = 0
15
+
16
+ self.n_original_constraints = 0
17
+
18
+ self.ideal = array([1237.8414230005742, 0.002761423749158419])
19
+ # self.nadir = array([2086.36956042, 0.00341421356237])
20
+ self.nadir = np.array([2886.3695604236013, 0.039999999999998245])
21
+
22
+ F = 10.0
23
+ sigma = 10.0
24
+ tmp_val = F / sigma
25
+ self.ubound = np.full(self.n_var, 3 * tmp_val)
26
+ self.lbound = np.zeros(self.n_var)
27
+ self.lbound[0] = tmp_val
28
+ self.lbound[1] = np.sqrt(2.0) * tmp_val
29
+ self.lbound[2] = np.sqrt(2.0) * tmp_val
30
+ self.lbound[3] = tmp_val
31
+
32
+
33
+ def _evaluate_numpy(self, x):
34
+ n_sub = len(x)
35
+
36
+ x1 = x[:,0]
37
+ x2 = x[:,1]
38
+ x3 = x[:,2]
39
+ x4 = x[:,3]
40
+ f = np.zeros((n_sub, self.n_obj) )
41
+
42
+ F = 10.0
43
+ sigma = 10.0
44
+ E = 2.0 * 1e5
45
+ L = 200.0
46
+
47
+ f[:,0] = L * ((2 * x1) + np.sqrt(2.0) * x2 + np.sqrt(x3) + x4)
48
+ f[:,1] = ((F * L) / E) * ((2.0 / x1) + (2.0 * np.sqrt(2.0) / x2) - (2.0 * np.sqrt(2.0) / x3) + (2.0 / x4))
49
+
50
+ # f_arr = np.stack((f1,f2), axis=1)
51
+
52
+ f_arr_norm = (f - self.ideal) / (self.nadir - self.ideal)
53
+ # f_arr_norm =
54
+ f_arr_norm[:, 0] = 0.5 * f_arr_norm[:, 0]
55
+
56
+ return f_arr_norm
57
+
58
+
59
+ def _evaluate_torch(self, x):
60
+ x1 = x[:, 0]
61
+ x2 = x[:, 1]
62
+ x3 = x[:, 2]
63
+ x4 = x[:, 3]
64
+
65
+ F = 10.0
66
+ sigma = 10.0
67
+ E = 2.0 * 1e5
68
+ L = 200.0
69
+
70
+ f1 = L * ( (2 * x1) + np.sqrt(2.0) * x2 + torch.sqrt(x3) + x4 )
71
+ f2 = ((F * L) / E) * ((2.0 / x1) + (2.0 * np.sqrt(2.0) / x2) - (2.0 * np.sqrt(2.0) / x3) + (2.0 / x4))
72
+ f_arr = torch.stack((f1, f2), dim=1)
73
+ f_arr_norm = (f_arr - self.ideal) / (self.nadir - self.ideal)
74
+ f_arr_norm[:, 0] = 0.5 * f_arr_norm[:, 0]
75
+ return f_arr_norm
76
+
77
+
78
+ class RE22(mop):
79
+ def __init__(self, n_var=3, n_obj=2, lbound=np.zeros(30),
80
+ ubound=np.ones(30)):
81
+
82
+
83
+ self.n_var=n_var
84
+ self.n_obj=n_obj
85
+ self.problem_name = 'RE22'
86
+ self.n_cons = 0
87
+ self.n_original_constraints = 2
88
+
89
+ self.ideal = np.array([5.88, 0.0])
90
+ self.nadir = np.array([361.262944647, 180.01547])
91
+
92
+
93
+ self.ubound = np.zeros(self.n_var)
94
+ self.lbound = np.zeros(self.n_var)
95
+
96
+ self.lbound[0] = 0.2
97
+ self.lbound[1] = 0.0
98
+ self.lbound[2] = 0.0
99
+ self.ubound[0] = 15
100
+ self.ubound[1] = 20
101
+ self.ubound[2] = 40
102
+
103
+ self.n_var = n_var
104
+ self.n_obj = n_obj
105
+
106
+ self.feasible_vals = np.array(
107
+ [0.20, 0.31, 0.40, 0.44, 0.60, 0.62, 0.79, 0.80, 0.88, 0.93, 1.0, 1.20, 1.24, 1.32, 1.40, 1.55, 1.58, 1.60,
108
+ 1.76, 1.80, 1.86, 2.0, 2.17, 2.20, 2.37, 2.40, 2.48, 2.60, 2.64, 2.79, 2.80, 3.0, 3.08, 3.10, 3.16, 3.41,
109
+ 3.52, 3.60, 3.72, 3.95, 3.96, 4.0, 4.03, 4.20, 4.34, 4.40, 4.65, 4.74, 4.80, 4.84, 5.0, 5.28, 5.40, 5.53,
110
+ 5.72, 6.0, 6.16, 6.32, 6.60, 7.11, 7.20, 7.80, 7.90, 8.0, 8.40, 8.69, 9.0, 9.48, 10.27, 11.0, 11.06, 11.85,
111
+ 12.0, 13.0, 14.0, 15.0])
112
+
113
+
114
+ def _evaluate_numpy(self, x):
115
+ n_sub = len(x)
116
+
117
+ f = np.zeros( (n_sub, self.n_obj) )
118
+ g = np.zeros( (n_sub, self.n_original_constraints) )
119
+ # Reference: getNearestValue_sample2.py (https://gist.github.com/icchi-h/1d0bb1c52ebfdd31f14b3e811328390a)
120
+ idx_arr = [np.abs(np.asarray(self.feasible_vals) - x0).argmin() for x0 in x[:,0]]
121
+ x1 = array([self.feasible_vals[idx] for idx in idx_arr])
122
+ x2 = x[:,1]
123
+ x3 = x[:,2]
124
+
125
+ # First original objective function
126
+ f[:,0] = (29.4 * x1) + (0.6 * x2 * x3)
127
+ # Original constraint functions
128
+ g[:,0] = (x1 * x3) - 7.735 * ((x1 * x1) / x2) - 180.0
129
+ g[:,1] = 4.0 - (x3 / x2)
130
+ g = np.where(g < 0, -g, 0)
131
+ f[:,1] = g[:,0] + g[:,1]
132
+ f_norm = (f - self.ideal) / (self.nadir - self.ideal)
133
+ f_norm[:, 0] = 0.5 * f_norm[:, 0]
134
+
135
+ return f_norm
136
+
137
+ def _evaluate_torch(self, x):
138
+ pass
139
+
140
+
141
+ class RE23(mop):
142
+ def __init__(self, n_var=4, n_obj=2, lbound=np.zeros(2),
143
+ ubound=np.ones(2)):
144
+ self.problem_name = 'RE23'
145
+ self.n_obj = n_obj
146
+ self.n_var = n_var
147
+ self.n_cons = 0
148
+ self.n_original_constraints = 3
149
+
150
+ self.ideal = array([15.9018007813, 0.0])
151
+ self.nadir = array([481.608088535, 44.2819047619])
152
+
153
+ self.ubound = np.zeros(self.n_var)
154
+ self.lbound = np.zeros(self.n_var)
155
+ self.lbound[0] = 1
156
+ self.lbound[1] = 1
157
+ self.lbound[2] = 10
158
+ self.lbound[3] = 10
159
+ self.ubound[0] = 100
160
+ self.ubound[1] = 100
161
+ self.ubound[2] = 200
162
+ self.ubound[3] = 240
163
+
164
+ def _evaluate_numpy(self, x):
165
+
166
+ f = np.zeros( (len(x), self.n_obj) )
167
+ g = np.zeros( (len(x), self.n_original_constraints))
168
+
169
+ x1 = 0.0625 * np.round(x[:,0]).astype(np.int32)
170
+ x2 = 0.0625 * np.round(x[:,1]).astype(np.int32)
171
+
172
+ x3 = x[:,2]
173
+ x4 = x[:,3]
174
+
175
+ # First original objective function
176
+ f[:,0] = (0.6224 * x1 * x3 * x4) + (1.7781 * x2 * x3 * x3) + (3.1661 * x1 * x1 * x4) + (19.84 * x1 * x1 * x3)
177
+
178
+ # Original constraint functions
179
+ g[:,0] = x1 - (0.0193 * x3)
180
+ g[:,1] = x2 - (0.00954 * x3)
181
+ g[:,2] = (np.pi * x3 * x3 * x4) + ((4.0 / 3.0) * (np.pi * x3 * x3 * x3)) - 1296000
182
+ g = np.where(g < 0, -g, 0)
183
+ f[:,1] = g[:,0] + g[:,1] + g[:,2]
184
+
185
+ f_norm = (f - self.ideal) / (self.nadir - self.ideal)
186
+
187
+ return f_norm
188
+
189
+
190
+
191
+
192
+
193
+ class RE24(mop):
194
+ def __init__(self, n_var=2, n_obj=2, lbound=np.zeros(2),
195
+ ubound=np.ones(2)):
196
+ super().__init__(n_var=n_var,
197
+ n_obj=n_obj,
198
+ lbound=lbound,
199
+ ubound=ubound, )
200
+
201
+ self.problem_name = 'RE24'
202
+ self.n_obj = 2
203
+ self.n_var = 2
204
+
205
+ self.n_cons = 0
206
+ self.n_original_constraints = 4
207
+
208
+ self.ubound = np.zeros(self.n_var)
209
+ self.lbound = np.zeros(self.n_var)
210
+
211
+ self.lbound[0] = 0.5
212
+ self.lbound[1] = 0.5
213
+
214
+ self.ubound[0] = 4
215
+ self.ubound[1] = 50
216
+
217
+ self.ideal = np.array([60.5, 0.0])
218
+ self.nadir = np.array([481.608088535, 44.2819047619])
219
+
220
+
221
+
222
+
223
+ def _evaluate_numpy(self, x):
224
+ n_sub = len(x)
225
+ # f = np.zeros(self.n_objectives)
226
+ g = np.zeros( (n_sub, self.n_original_constraints) )
227
+
228
+ x1 = x[:,0]
229
+ x2 = x[:,1]
230
+
231
+ # First original objective function
232
+ f1 = x1 + (120 * x2)
233
+
234
+ E = 700000
235
+ sigma_b_max = 700
236
+ tau_max = 450
237
+ delta_max = 1.5
238
+ sigma_k = (E * x1 * x1) / 100
239
+ sigma_b = 4500 / (x1 * x2)
240
+ tau = 1800 / x2
241
+ delta = (56.2 * 10000) / (E * x1 * x2 * x2)
242
+
243
+ g[:,0] = 1 - (sigma_b / sigma_b_max)
244
+ g[:,1] = 1 - (tau / tau_max)
245
+ g[:,2] = 1 - (delta / delta_max)
246
+ g[:,3] = 1 - (sigma_b / sigma_k)
247
+ g = np.where(g < 0, -g, 0)
248
+ f2 = g[:,0] + g[:,1] + g[:,2] + g[:,3]
249
+
250
+ f_arr = np.stack((f1, f2), axis=1)
251
+ f_norm = (f_arr - self.ideal) / (self.nadir - self.ideal)
252
+
253
+ return f_norm
254
+
255
+
256
+ def _evaluate_torch(self, x):
257
+ pass
258
+
259
+
260
+ class RE25(mop):
261
+ def __init__(self, n_var=3, n_obj=2):
262
+ self.problem_name = 'RE25'
263
+ self.n_obj = n_obj
264
+ self.n_var = n_var
265
+
266
+ self.n_cons = 0
267
+ self.n_original_constraints = 6
268
+
269
+ self.ideal = array([0.037591349242869145, 0.0])
270
+ self.nadir = array([0.40397042546, 2224669.22419])
271
+
272
+ self.ubound = np.zeros( self.n_var )
273
+ self.lbound = np.zeros( self.n_var )
274
+ self.lbound[0] = 1
275
+ self.lbound[1] = 0.6
276
+ self.lbound[2] = 0.09
277
+ self.ubound[0] = 70
278
+ self.ubound[1] = 3
279
+ self.ubound[2] = 0.5
280
+
281
+ self.feasible_vals = np.array(
282
+ [0.009, 0.0095, 0.0104, 0.0118, 0.0128, 0.0132, 0.014, 0.015, 0.0162, 0.0173, 0.018, 0.02, 0.023, 0.025,
283
+ 0.028, 0.032, 0.035, 0.041, 0.047, 0.054, 0.063, 0.072, 0.08, 0.092, 0.105, 0.12, 0.135, 0.148, 0.162,
284
+ 0.177, 0.192, 0.207, 0.225, 0.244, 0.263, 0.283, 0.307, 0.331, 0.362, 0.394, 0.4375, 0.5])
285
+
286
+ def _evaluate_numpy(self, x):
287
+ n_sub = len(x)
288
+ f = np.zeros( (n_sub, self.n_obj) )
289
+ g = np.zeros( (n_sub, self.n_original_constraints) )
290
+ x1 = np.round(x[:,0])
291
+ x2 = x[:,1]
292
+
293
+ # Reference: getNearestValue_sample2.py (https://gist.github.com/icchi-h/1d0bb1c52ebfdd31f14b3e811328390a)
294
+ idx_array = array([np.abs(np.asarray(self.feasible_vals) - x2).argmin() for x2 in x[:,2]])
295
+
296
+ x3 = array( [self.feasible_vals[idx] for idx in idx_array] )
297
+
298
+ # first original objective function
299
+ f[:,0] = (np.pi * np.pi * x2 * x3 * x3 * (x1 + 2)) / 4.0
300
+
301
+ # constraint functions
302
+ Cf = ((4.0 * (x2 / x3) - 1) / (4.0 * (x2 / x3) - 4)) + (0.615 * x3 / x2)
303
+ Fmax = 1000.0
304
+ S = 189000.0
305
+ G = 11.5 * 1e+6
306
+ K = (G * x3 * x3 * x3 * x3) / (8 * x1 * x2 * x2 * x2)
307
+ lmax = 14.0
308
+ lf = (Fmax / K) + 1.05 * (x1 + 2) * x3
309
+ dmin = 0.2
310
+ Dmax = 3
311
+ Fp = 300.0
312
+ sigmaP = Fp / K
313
+ sigmaPM = 6
314
+ sigmaW = 1.25
315
+
316
+ g[:,0] = -((8 * Cf * Fmax * x2) / (np.pi * x3 * x3 * x3)) + S
317
+ g[:,1] = -lf + lmax
318
+ g[:,2] = -3 + (x2 / x3)
319
+ g[:,3] = -sigmaP + sigmaPM
320
+ g[:,4] = -sigmaP - ((Fmax - Fp) / K) - 1.05 * (x1 + 2) * x3 + lf
321
+ g[:,5] = sigmaW - ((Fmax - Fp) / K)
322
+
323
+ g = np.where(g < 0, -g, 0)
324
+ f[:,1] = g[:,0] + g[:,1] + g[:,2] + g[:,3] + g[:,4] + g[:,5]
325
+
326
+ f_norm = (f - self.ideal) / (self.nadir - self.ideal)
327
+ return f_norm
328
+
329
+ def _evaluate_torch(self, x):
330
+ pass
331
+
332
+
333
+ class RE31(mop):
334
+ def __init__(self, n_obj=3, n_var=3):
335
+ self.problem_name = 'RE31'
336
+ self.n_obj = n_obj
337
+ self.n_var = n_var
338
+ self.n_cons = 0
339
+ self.n_original_constraints = 3
340
+
341
+ self.ubound = np.zeros(self.n_var)
342
+ self.lbound = np.zeros(self.n_var)
343
+ self.lbound[0] = 0.00001
344
+ self.lbound[1] = 0.00001
345
+ self.lbound[2] = 1.0
346
+ self.ubound[0] = 100.0
347
+ self.ubound[1] = 100.0
348
+ self.ubound[2] = 3.0
349
+
350
+ self.ideal = np.array([5.53731918799e-05, 0.333333333333, 0.0])
351
+ self.nadir = np.array([500.002668442, 8246211.25124, 19359919.7502])
352
+
353
+
354
+ def _evaluate_numpy(self, x):
355
+ n_sub = len(x)
356
+ f = np.zeros( (n_sub, self.n_obj) )
357
+ g = np.zeros( (n_sub, self.n_original_constraints) )
358
+
359
+ x1 = x[:,0]
360
+ x2 = x[:,1]
361
+ x3 = x[:,2]
362
+
363
+ # First original objective function
364
+ f[:,0] = x1 * np.sqrt(16.0 + (x3 * x3)) + x2 * np.sqrt(1.0 + x3 * x3)
365
+ # Second original objective function
366
+ f[:,1] = (20.0 * np.sqrt(16.0 + (x3 * x3))) / (x1 * x3)
367
+
368
+ # Constraint functions
369
+ g[:,0] = 0.1 - f[:,0]
370
+ g[:,1] = 100000.0 - f[:,1]
371
+ g[:,2] = 100000 - ((80.0 * np.sqrt(1.0 + x3 * x3)) / (x3 * x2))
372
+ g = np.where(g < 0, -g, 0)
373
+ f[:,2] = g[:,0] + g[:,1] + g[:,2]
374
+
375
+ f_norm = (f - self.ideal) / (self.nadir - self.ideal)
376
+ return f_norm
377
+
378
+ def _evaluate_torch(self, x):
379
+ pass
380
+
381
+
382
+ class RE37(mop):
383
+ def __init__(self, n_obj=3, n_var=4):
384
+ self.problem_name = 'RE37'
385
+ self.n_obj = n_obj
386
+ self.n_var = n_var
387
+ self.n_cons = 0
388
+ self.n_original_constraints = 0
389
+
390
+ self.lbound = np.full(self.n_var, 0)
391
+ self.ubound = np.full(self.n_var, 1)
392
+
393
+ self.ideal = np.array([0.00889341391106, 0.00488, -0.431499999825])
394
+ self.nadir = np.array([0.98949120096, 0.956587924661, 0.987530948586])
395
+
396
+
397
+ def _evaluate_numpy(self, x):
398
+ n_sub = len(x)
399
+ f = np.zeros( (n_sub, self.n_obj) )
400
+
401
+ xAlpha = x[:,0]
402
+ xHA = x[:,1]
403
+ xOA = x[:,2]
404
+ xOPTT = x[:,3]
405
+
406
+ # f1 (TF_max)
407
+ f[:,0] = 0.692 + (0.477 * xAlpha) - (0.687 * xHA) - (0.080 * xOA) - (0.0650 * xOPTT) - (
408
+ 0.167 * xAlpha * xAlpha) - (0.0129 * xHA * xAlpha) + (0.0796 * xHA * xHA) - (
409
+ 0.0634 * xOA * xAlpha) - (0.0257 * xOA * xHA) + (0.0877 * xOA * xOA) - (
410
+ 0.0521 * xOPTT * xAlpha) + (0.00156 * xOPTT * xHA) + (0.00198 * xOPTT * xOA) + (
411
+ 0.0184 * xOPTT * xOPTT)
412
+ # f2 (X_cc)
413
+ f[:,1] = 0.153 - (0.322 * xAlpha) + (0.396 * xHA) + (0.424 * xOA) + (0.0226 * xOPTT) + (
414
+ 0.175 * xAlpha * xAlpha) + (0.0185 * xHA * xAlpha) - (0.0701 * xHA * xHA) - (
415
+ 0.251 * xOA * xAlpha) + (0.179 * xOA * xHA) + (0.0150 * xOA * xOA) + (
416
+ 0.0134 * xOPTT * xAlpha) + (0.0296 * xOPTT * xHA) + (0.0752 * xOPTT * xOA) + (
417
+ 0.0192 * xOPTT * xOPTT)
418
+ # f3 (TT_max)
419
+ f[:,2] = 0.370 - (0.205 * xAlpha) + (0.0307 * xHA) + (0.108 * xOA) + (1.019 * xOPTT) - (
420
+ 0.135 * xAlpha * xAlpha) + (0.0141 * xHA * xAlpha) + (0.0998 * xHA * xHA) + (
421
+ 0.208 * xOA * xAlpha) - (0.0301 * xOA * xHA) - (0.226 * xOA * xOA) + (
422
+ 0.353 * xOPTT * xAlpha) - (0.0497 * xOPTT * xOA) - (0.423 * xOPTT * xOPTT) + (
423
+ 0.202 * xHA * xAlpha * xAlpha) - (0.281 * xOA * xAlpha * xAlpha) - (
424
+ 0.342 * xHA * xHA * xAlpha) - (0.245 * xHA * xHA * xOA) + (0.281 * xOA * xOA * xHA) - (
425
+ 0.184 * xOPTT * xOPTT * xAlpha) - (0.281 * xHA * xAlpha * xOA)
426
+
427
+ f_norm = (f - self.ideal) / (self.nadir - self.ideal)
428
+ return f_norm
429
+
430
+
431
+
432
+ class RE41(mop):
433
+ def __init__(self, n_obj=4, n_var=7):
434
+ self.problem_name = 'RE41'
435
+ self.n_obj = n_obj
436
+ self.n_var = n_var
437
+ self.n_cons = 0
438
+ self.n_original_constraints = 10
439
+
440
+ self.lbound = np.zeros(self.n_var)
441
+ self.ubound = np.zeros(self.n_var)
442
+ self.lbound[0] = 0.5
443
+ self.lbound[1] = 0.45
444
+ self.lbound[2] = 0.5
445
+ self.lbound[3] = 0.5
446
+ self.lbound[4] = 0.875
447
+ self.lbound[5] = 0.4
448
+ self.lbound[6] = 0.4
449
+ self.ubound[0] = 1.5
450
+ self.ubound[1] = 1.35
451
+ self.ubound[2] = 1.5
452
+ self.ubound[3] = 1.5
453
+ self.ubound[4] = 2.625
454
+ self.ubound[5] = 1.2
455
+ self.ubound[6] = 1.2
456
+
457
+ self.ideal = np.array([15.576004, 3.58525, 10.61064375, 0.0])
458
+ self.nadir = np.array([39.2905121788, 4.42725, 13.09138125, 9.49401929991])
459
+
460
+
461
+ def _evaluate_numpy(self, x):
462
+ n_sub = len(x)
463
+
464
+ f = np.zeros( (n_sub, self.n_obj) )
465
+ g = np.zeros( (n_sub, self.n_original_constraints) )
466
+
467
+ x1 = x[:,0]
468
+ x2 = x[:,1]
469
+ x3 = x[:,2]
470
+ x4 = x[:,3]
471
+ x5 = x[:,4]
472
+ x6 = x[:,5]
473
+ x7 = x[:,6]
474
+
475
+ # First original objective function
476
+ f[:,0] = 1.98 + 4.9 * x1 + 6.67 * x2 + 6.98 * x3 + 4.01 * x4 + 1.78 * x5 + 0.00001 * x6 + 2.73 * x7
477
+ # Second original objective function
478
+ f[:,1] = 4.72 - 0.5 * x4 - 0.19 * x2 * x3
479
+ # Third original objective function
480
+ Vmbp = 10.58 - 0.674 * x1 * x2 - 0.67275 * x2
481
+ Vfd = 16.45 - 0.489 * x3 * x7 - 0.843 * x5 * x6
482
+ f[:,2] = 0.5 * (Vmbp + Vfd)
483
+
484
+ # Constraint functions
485
+ g[:,0] = 1 - (1.16 - 0.3717 * x2 * x4 - 0.0092928 * x3)
486
+ g[:,1] = 0.32 - (0.261 - 0.0159 * x1 * x2 - 0.06486 * x1 - 0.019 * x2 * x7 + 0.0144 * x3 * x5 + 0.0154464 * x6)
487
+ g[:,2] = 0.32 - (
488
+ 0.214 + 0.00817 * x5 - 0.045195 * x1 - 0.0135168 * x1 + 0.03099 * x2 * x6 - 0.018 * x2 * x7 + 0.007176 * x3 + 0.023232 * x3 - 0.00364 * x5 * x6 - 0.018 * x2 * x2)
489
+ g[:,3] = 0.32 - (0.74 - 0.61 * x2 - 0.031296 * x3 - 0.031872 * x7 + 0.227 * x2 * x2)
490
+ g[:,4] = 32 - (28.98 + 3.818 * x3 - 4.2 * x1 * x2 + 1.27296 * x6 - 2.68065 * x7)
491
+ g[:,5] = 32 - (33.86 + 2.95 * x3 - 5.057 * x1 * x2 - 3.795 * x2 - 3.4431 * x7 + 1.45728)
492
+ g[:,6] = 32 - (46.36 - 9.9 * x2 - 4.4505 * x1)
493
+ g[:,7] = 4 - f[:,1]
494
+ g[:,8] = 9.9 - Vmbp
495
+ g[:,9] = 15.7 - Vfd
496
+ g = np.where(g < 0, -g, 0)
497
+ f[:,3] = g[:,0] + g[:,1] + g[:,2] + g[:,3] + g[:,4] + g[:,5] + g[:,6] + g[:,7] + g[:,8] + g[:,9]
498
+ f_norm = (f - self.ideal) / (self.nadir - self.ideal)
499
+ return f_norm
500
+
501
+ def _evaluate_torch(self, x):
502
+ pass
503
+
504
+
505
+ class RE42(mop):
506
+ def __init__(self):
507
+ self.problem_name = 'RE42'
508
+
509
+ self.n_obj = 4
510
+ self.n_var = 6
511
+ self.n_cons = 0
512
+ self.n_original_constraints = 9
513
+
514
+ self.lbound = np.zeros(self.n_var )
515
+ self.ubound = np.zeros(self.n_var )
516
+ self.lbound[0] = 150.0
517
+ self.lbound[1] = 20.0
518
+ self.lbound[2] = 13.0
519
+ self.lbound[3] = 10.0
520
+ self.lbound[4] = 14.0
521
+ self.lbound[5] = 0.63
522
+ self.ubound[0] = 274.32
523
+ self.ubound[1] = 32.31
524
+ self.ubound[2] = 25.0
525
+ self.ubound[3] = 11.71
526
+ self.ubound[4] = 18.0
527
+ self.ubound[5] = 0.75
528
+
529
+ self.ideal = np.array([-2756.2590400638524, 3962.557843228888, 1947.880856925791, 0.0])
530
+ self.nadir = np.array([-1010.5229595219643, 13827.138456300128, 2611.9668107424536, 12.437669929732023 ])
531
+
532
+
533
+
534
+ def _evaluate_numpy(self, x):
535
+ n_sub = len(x)
536
+
537
+ f = np.zeros( (n_sub, self.n_obj) )
538
+ # constraint violations (stored in constraintFuncs rather than g)
539
+ constraintFuncs = np.zeros( (n_sub, self.n_original_constraints) )
540
+
541
+ x_L = x[:,0]
542
+ x_B = x[:,1]
543
+ x_D = x[:,2]
544
+ x_T = x[:,3]
545
+ x_Vk = x[:,4]
546
+ x_CB = x[:,5]
547
+
548
+ displacement = 1.025 * x_L * x_B * x_T * x_CB
549
+ V = 0.5144 * x_Vk
550
+ g = 9.8065
551
+ Fn = V / np.power(g * x_L, 0.5)
552
+ a = (4977.06 * x_CB * x_CB) - (8105.61 * x_CB) + 4456.51
553
+ b = (-10847.2 * x_CB * x_CB) + (12817.0 * x_CB) - 6960.32
554
+
555
+ power = (np.power(displacement, 2.0 / 3.0) * np.power(x_Vk, 3.0)) / (a + (b * Fn))
556
+ outfit_weight = 1.0 * np.power(x_L, 0.8) * np.power(x_B, 0.6) * np.power(x_D, 0.3) * np.power(x_CB, 0.1)
557
+ steel_weight = 0.034 * np.power(x_L, 1.7) * np.power(x_B, 0.7) * np.power(x_D, 0.4) * np.power(x_CB, 0.5)
558
+ machinery_weight = 0.17 * np.power(power, 0.9)
559
+ light_ship_weight = steel_weight + outfit_weight + machinery_weight
560
+
561
+ ship_cost = 1.3 * ((2000.0 * np.power(steel_weight, 0.85)) + (3500.0 * outfit_weight) + (
562
+ 2400.0 * np.power(power, 0.8)))
563
+ capital_costs = 0.2 * ship_cost
564
+
565
+ DWT = displacement - light_ship_weight
566
+
567
+ running_costs = 40000.0 * np.power(DWT, 0.3)
568
+
569
+ round_trip_miles = 5000.0
570
+ sea_days = (round_trip_miles / 24.0) * x_Vk
571
+ handling_rate = 8000.0
572
+
573
+ daily_consumption = ((0.19 * power * 24.0) / 1000.0) + 0.2
574
+ fuel_price = 100.0
575
+ fuel_cost = 1.05 * daily_consumption * sea_days * fuel_price
576
+ port_cost = 6.3 * np.power(DWT, 0.8)
577
+
578
+ fuel_carried = daily_consumption * (sea_days + 5.0)
579
+ miscellaneous_DWT = 2.0 * np.power(DWT, 0.5)
580
+
581
+ cargo_DWT = DWT - fuel_carried - miscellaneous_DWT
582
+ port_days = 2.0 * ((cargo_DWT / handling_rate) + 0.5)
583
+ RTPA = 350.0 / (sea_days + port_days)
584
+
585
+ voyage_costs = (fuel_cost + port_cost) * RTPA
586
+ annual_costs = capital_costs + running_costs + voyage_costs
587
+ annual_cargo = cargo_DWT * RTPA
588
+
589
+ f[:,0] = annual_costs / annual_cargo
590
+ f[:,1] = light_ship_weight
591
+ # f_2 is dealt as a minimization problem
592
+ f[:,2] = -annual_cargo
593
+
594
+ # Reformulated objective functions
595
+ constraintFuncs[:,0] = (x_L / x_B) - 6.0
596
+ constraintFuncs[:,1] = -(x_L / x_D) + 15.0
597
+ constraintFuncs[:,2] = -(x_L / x_T) + 19.0
598
+ constraintFuncs[:,3] = 0.45 * np.power(DWT, 0.31) - x_T
599
+ constraintFuncs[:,4] = 0.7 * x_D + 0.7 - x_T
600
+ constraintFuncs[:,5] = 500000.0 - DWT
601
+ constraintFuncs[:,6] = DWT - 3000.0
602
+ constraintFuncs[:,7] = 0.32 - Fn
603
+
604
+ KB = 0.53 * x_T
605
+ BMT = ((0.085 * x_CB - 0.002) * x_B * x_B) / (x_T * x_CB)
606
+ KG = 1.0 + 0.52 * x_D
607
+ constraintFuncs[:,8] = (KB + BMT - KG) - (0.07 * x_B)
608
+
609
+ constraintFuncs = np.where(constraintFuncs < 0, -constraintFuncs, 0)
610
+ f[:,3] = constraintFuncs[:,0] + constraintFuncs[:,1] + constraintFuncs[:,2] + constraintFuncs[:,3] + constraintFuncs[:,4] + \
611
+ constraintFuncs[:,5] + constraintFuncs[:,6] + constraintFuncs[:,7] + constraintFuncs[:,8]
612
+
613
+ f_norm = (f - self.ideal) / (self.nadir - self.ideal)
614
+ return f_norm
615
+
616
+
617
+
618
+
619
+
bike_bench_internal/benchmark_models/libmoon/problem/synthetic/re_original.py ADDED
@@ -0,0 +1,1335 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ A real-world multi-objective problem suite (the RE benchmark set)
4
+ Reference:
5
+ Ryoji Tanabe, Hisao Ishibuchi, "An Easy-to-use Real-world Multi-objective Problem Suite" Applied Soft Computing. 89: 106078 (2020)
6
+ Copyright (c) 2020 Ryoji Tanabe
7
+
8
+ I re-implemented the RE problem set by referring to its C source code (reproblem.c). While variables directly copied from the C source code are written in CamelCase, the other variables are written in snake_case. It is somewhat awkward.
9
+
10
+ This program is free software: you can redistribute it and/or modify
11
+ it under the terms of the GNU General Public License as published by
12
+ the Free Software Foundation, either version 3 of the License, or
13
+ (at your option) any later version.
14
+
15
+ This program is distributed in the hope that it will be useful,
16
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
17
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18
+ GNU General Public License for more details.
19
+
20
+ You should have received a copy of the GNU General Public License
21
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
22
+ """
23
+ import numpy as np
24
+
25
+ class RE21():
26
+ def __init__(self, n_var=4, n_obj=2, lower_bound=np.zeros(30),
27
+ upper_bound=np.ones(30)):
28
+ self.problem_name = 'RE21'
+ self.n_objectives = n_obj
+ self.n_variables = n_var
29
+ self.n_constraints = 0
30
+ self.n_original_constraints = 0
31
+
32
+ F = 10.0
33
+ sigma = 10.0
34
+ tmp_val = F / sigma
35
+
36
+ self.ubound = np.full(self.n_variables, 3 * tmp_val)
37
+ self.lbound = np.zeros(self.n_variables)
38
+ self.lbound[0] = tmp_val
39
+ self.lbound[1] = np.sqrt(2.0) * tmp_val
40
+ self.lbound[2] = np.sqrt(2.0) * tmp_val
41
+ self.lbound[3] = tmp_val
42
+
43
+ def evaluate(self, x):
44
+ f = np.zeros(self.n_objectives)
45
+ x1 = x[0]
46
+ x2 = x[1]
47
+ x3 = x[2]
48
+ x4 = x[3]
49
+
50
+ F = 10.0
51
+ sigma = 10.0
52
+ E = 2.0 * 1e5
53
+ L = 200.0
54
+
55
+ f[0] = L * ((2 * x1) + np.sqrt(2.0) * x2 + np.sqrt(x3) + x4)
56
+ f[1] = ((F * L) / E) * ((2.0 / x1) + (2.0 * np.sqrt(2.0) / x2) - (2.0 * np.sqrt(2.0) / x3) + (2.0 / x4))
57
+
58
+ return f
59
+
60
+
61
+ class RE22():
62
+ def __init__(self):
63
+ self.problem_name = 'RE22'
64
+ self.n_objectives = 2
65
+ self.n_variables = 3
66
+
67
+ self.n_constraints = 0
68
+ self.n_original_constraints = 2
69
+
70
+ self.ubound = np.zeros(self.n_variables)
71
+ self.lbound = np.zeros(self.n_variables)
72
+ self.lbound[0] = 0.2
73
+ self.lbound[1] = 0.0
74
+ self.lbound[2] = 0.0
75
+ self.ubound[0] = 15
76
+ self.ubound[1] = 20
77
+ self.ubound[2] = 40
78
+
79
+ self.feasible_vals = np.array(
80
+ [0.20, 0.31, 0.40, 0.44, 0.60, 0.62, 0.79, 0.80, 0.88, 0.93, 1.0, 1.20, 1.24, 1.32, 1.40, 1.55, 1.58, 1.60,
81
+ 1.76, 1.80, 1.86, 2.0, 2.17, 2.20, 2.37, 2.40, 2.48, 2.60, 2.64, 2.79, 2.80, 3.0, 3.08, 3, 10, 3.16, 3.41,
82
+ 3.52, 3.60, 3.72, 3.95, 3.96, 4.0, 4.03, 4.20, 4.34, 4.40, 4.65, 4.74, 4.80, 4.84, 5.0, 5.28, 5.40, 5.53,
83
+ 5.72, 6.0, 6.16, 6.32, 6.60, 7.11, 7.20, 7.80, 7.90, 8.0, 8.40, 8.69, 9.0, 9.48, 10.27, 11.0, 11.06, 11.85,
84
+ 12.0, 13.0, 14.0, 15.0])
85
+
86
+ def evaluate(self, x):
87
+ f = np.zeros(self.n_objectives)
88
+ g = np.zeros(self.n_original_constraints)
89
+ # Reference: getNearestValue_sample2.py (https://gist.github.com/icchi-h/1d0bb1c52ebfdd31f14b3e811328390a)
90
+ idx = np.abs(np.asarray(self.feasible_vals) - x[0]).argmin()
91
+ x1 = self.feasible_vals[idx]
92
+ x2 = x[1]
93
+ x3 = x[2]
94
+
95
+ # First original objective function
96
+ f[0] = (29.4 * x1) + (0.6 * x2 * x3)
97
+
98
+ # Original constraint functions
99
+ g[0] = (x1 * x3) - 7.735 * ((x1 * x1) / x2) - 180.0
100
+ g[1] = 4.0 - (x3 / x2)
101
+ g = np.where(g < 0, -g, 0)
102
+ f[1] = g[0] + g[1]
103
+
104
+ return f
105
+
106
+
107
+ class RE23():
108
+ def __init__(self):
109
+ self.problem_name = 'RE23'
110
+ self.n_objectives = 2
111
+ self.n_variables = 4
112
+ self.n_constraints = 0
113
+ self.n_original_constraints = 3
114
+
115
+ self.ubound = np.zeros(self.n_variables)
116
+ self.lbound = np.zeros(self.n_variables)
117
+ self.lbound[0] = 1
118
+ self.lbound[1] = 1
119
+ self.lbound[2] = 10
120
+ self.lbound[3] = 10
121
+ self.ubound[0] = 100
122
+ self.ubound[1] = 100
123
+ self.ubound[2] = 200
124
+ self.ubound[3] = 240
125
+
126
+ def evaluate(self, x):
127
+ f = np.zeros(self.n_objectives)
128
+ g = np.zeros(self.n_original_constraints)
129
+
130
+ x1 = 0.0625 * int(np.round(x[0]))
131
+ x2 = 0.0625 * int(np.round(x[1]))
132
+ x3 = x[2]
133
+ x4 = x[3]
134
+
135
+ # First original objective function
136
+ f[0] = (0.6224 * x1 * x3 * x4) + (1.7781 * x2 * x3 * x3) + (3.1661 * x1 * x1 * x4) + (19.84 * x1 * x1 * x3)
137
+
138
+ # Original constraint functions
139
+ g[0] = x1 - (0.0193 * x3)
140
+ g[1] = x2 - (0.00954 * x3)
141
+ g[2] = (np.pi * x3 * x3 * x4) + ((4.0 / 3.0) * (np.pi * x3 * x3 * x3)) - 1296000
142
+ g = np.where(g < 0, -g, 0)
143
+ f[1] = g[0] + g[1] + g[2]
144
+
145
+ return f
146
+
147
+
148
+ class RE24():
149
+ def __init__(self):
150
+ self.problem_name = 'RE24'
151
+ self.n_objectives = 2
152
+ self.n_variables = 2
153
+ self.n_constraints = 0
154
+ self.n_original_constraints = 4
155
+
156
+ self.ubound = np.zeros(self.n_variables)
157
+ self.lbound = np.zeros(self.n_variables)
158
+ self.lbound[0] = 0.5
159
+ self.lbound[1] = 0.5
160
+ self.ubound[0] = 4
161
+ self.ubound[1] = 50
162
+
163
+ def evaluate(self, x):
164
+ f = np.zeros(self.n_objectives)
165
+ g = np.zeros(self.n_original_constraints)
166
+
167
+ x1 = x[0]
168
+ x2 = x[1]
169
+
170
+ # First original objective function
171
+ f[0] = x1 + (120 * x2)
172
+
173
+ E = 700000
174
+ sigma_b_max = 700
175
+ tau_max = 450
176
+ delta_max = 1.5
177
+ sigma_k = (E * x1 * x1) / 100
178
+ sigma_b = 4500 / (x1 * x2)
179
+ tau = 1800 / x2
180
+ delta = (56.2 * 10000) / (E * x1 * x2 * x2)
181
+
182
+ g[0] = 1 - (sigma_b / sigma_b_max)
183
+ g[1] = 1 - (tau / tau_max)
184
+ g[2] = 1 - (delta / delta_max)
185
+ g[3] = 1 - (sigma_b / sigma_k)
186
+ g = np.where(g < 0, -g, 0)
187
+ f[1] = g[0] + g[1] + g[2] + g[3]
188
+
189
+ return f
190
+
191
+
192
+ class RE25():
193
+ def __init__(self):
194
+ self.problem_name = 'RE25'
195
+ self.n_objectives = 2
196
+ self.n_variables = 3
197
+ self.n_constraints = 0
198
+ self.n_original_constraints = 6
199
+
200
+ self.ubound = np.zeros(self.n_variables)
201
+ self.lbound = np.zeros(self.n_variables)
202
+ self.lbound[0] = 1
203
+ self.lbound[1] = 0.6
204
+ self.lbound[2] = 0.09
205
+ self.ubound[0] = 70
206
+ self.ubound[1] = 3
207
+ self.ubound[2] = 0.5
208
+
209
+ self.feasible_vals = np.array(
210
+ [0.009, 0.0095, 0.0104, 0.0118, 0.0128, 0.0132, 0.014, 0.015, 0.0162, 0.0173, 0.018, 0.02, 0.023, 0.025,
211
+ 0.028, 0.032, 0.035, 0.041, 0.047, 0.054, 0.063, 0.072, 0.08, 0.092, 0.105, 0.12, 0.135, 0.148, 0.162,
212
+ 0.177, 0.192, 0.207, 0.225, 0.244, 0.263, 0.283, 0.307, 0.331, 0.362, 0.394, 0.4375, 0.5])
213
+
214
+ def evaluate(self, x):
215
+ f = np.zeros(self.n_objectives)
216
+ g = np.zeros(self.n_original_constraints)
217
+
218
+ x1 = np.round(x[0])
219
+ x2 = x[1]
220
+ # Reference: getNearestValue_sample2.py (https://gist.github.com/icchi-h/1d0bb1c52ebfdd31f14b3e811328390a)
221
+ idx = np.abs(np.asarray(self.feasible_vals) - x[2]).argmin()
222
+ x3 = self.feasible_vals[idx]
223
+
224
+ # first original objective function
225
+ f[0] = (np.pi * np.pi * x2 * x3 * x3 * (x1 + 2)) / 4.0
226
+
227
+ # constraint functions
228
+ Cf = ((4.0 * (x2 / x3) - 1) / (4.0 * (x2 / x3) - 4)) + (0.615 * x3 / x2)
229
+ Fmax = 1000.0
230
+ S = 189000.0
231
+ G = 11.5 * 1e+6
232
+ K = (G * x3 * x3 * x3 * x3) / (8 * x1 * x2 * x2 * x2)
233
+ lmax = 14.0
234
+ lf = (Fmax / K) + 1.05 * (x1 + 2) * x3
235
+ dmin = 0.2
236
+ Dmax = 3
237
+ Fp = 300.0
238
+ sigmaP = Fp / K
239
+ sigmaPM = 6
240
+ sigmaW = 1.25
241
+
242
+ g[0] = -((8 * Cf * Fmax * x2) / (np.pi * x3 * x3 * x3)) + S
243
+ g[1] = -lf + lmax
244
+ g[2] = -3 + (x2 / x3)
245
+ g[3] = -sigmaP + sigmaPM
246
+ g[4] = -sigmaP - ((Fmax - Fp) / K) - 1.05 * (x1 + 2) * x3 + lf
247
+ g[5] = sigmaW - ((Fmax - Fp) / K)
248
+
249
+ g = np.where(g < 0, -g, 0)
250
+ f[1] = g[0] + g[1] + g[2] + g[3] + g[4] + g[5]
251
+
252
+ return f
253
+
254
+
255
+ class RE31():
256
+ def __init__(self):
257
+ self.problem_name = 'RE31'
258
+ self.n_objectives = 3
259
+ self.n_variables = 3
260
+ self.n_constraints = 0
261
+ self.n_original_constraints = 3
262
+
263
+ self.ubound = np.zeros(self.n_variables)
264
+ self.lbound = np.zeros(self.n_variables)
265
+ self.lbound[0] = 0.00001
266
+ self.lbound[1] = 0.00001
267
+ self.lbound[2] = 1.0
268
+ self.ubound[0] = 100.0
269
+ self.ubound[1] = 100.0
270
+ self.ubound[2] = 3.0
271
+
272
+ def evaluate(self, x):
273
+ f = np.zeros(self.n_objectives)
274
+ g = np.zeros(self.n_original_constraints)
275
+
276
+ x1 = x[0]
277
+ x2 = x[1]
278
+ x3 = x[2]
279
+
280
+ # First original objective function
281
+ f[0] = x1 * np.sqrt(16.0 + (x3 * x3)) + x2 * np.sqrt(1.0 + x3 * x3)
282
+ # Second original objective function
283
+ f[1] = (20.0 * np.sqrt(16.0 + (x3 * x3))) / (x1 * x3)
284
+
285
+ # Constraint functions
286
+ g[0] = 0.1 - f[0]
287
+ g[1] = 100000.0 - f[1]
288
+ g[2] = 100000 - ((80.0 * np.sqrt(1.0 + x3 * x3)) / (x3 * x2))
289
+ g = np.where(g < 0, -g, 0)
290
+ f[2] = g[0] + g[1] + g[2]
291
+
292
+ return f
293
+
294
+
295
+ class RE32():
296
+ def __init__(self):
297
+ self.problem_name = 'RE32'
298
+ self.n_objectives = 3
299
+ self.n_variables = 4
300
+ self.n_constraints = 0
301
+ self.n_original_constraints = 4
302
+
303
+ self.ubound = np.zeros(self.n_variables)
304
+ self.lbound = np.zeros(self.n_variables)
305
+ self.lbound[0] = 0.125
306
+ self.lbound[1] = 0.1
307
+ self.lbound[2] = 0.1
308
+ self.lbound[3] = 0.125
309
+ self.ubound[0] = 5.0
310
+ self.ubound[1] = 10.0
311
+ self.ubound[2] = 10.0
312
+ self.ubound[3] = 5.0
313
+
314
+ def evaluate(self, x):
315
+ f = np.zeros(self.n_objectives)
316
+ g = np.zeros(self.n_original_constraints)
317
+
318
+ x1 = x[0]
319
+ x2 = x[1]
320
+ x3 = x[2]
321
+ x4 = x[3]
322
+
323
+ P = 6000
324
+ L = 14
325
+ E = 30 * 1e6
326
+
327
+ # // deltaMax = 0.25
328
+ G = 12 * 1e6
329
+ tauMax = 13600
330
+ sigmaMax = 30000
331
+
332
+ # First original objective function
333
+ f[0] = (1.10471 * x1 * x1 * x2) + (0.04811 * x3 * x4) * (14.0 + x2)
334
+ # Second original objective function
335
+ f[1] = (4 * P * L * L * L) / (E * x4 * x3 * x3 * x3)
336
+
337
+ # Constraint functions
338
+ M = P * (L + (x2 / 2))
339
+ tmpVar = ((x2 * x2) / 4.0) + np.power((x1 + x3) / 2.0, 2)
340
+ R = np.sqrt(tmpVar)
341
+ tmpVar = ((x2 * x2) / 12.0) + np.power((x1 + x3) / 2.0, 2)
342
+ J = 2 * np.sqrt(2) * x1 * x2 * tmpVar
343
+
344
+ tauDashDash = (M * R) / J
345
+ tauDash = P / (np.sqrt(2) * x1 * x2)
346
+ tmpVar = tauDash * tauDash + ((2 * tauDash * tauDashDash * x2) / (2 * R)) + (tauDashDash * tauDashDash)
347
+ tau = np.sqrt(tmpVar)
348
+ sigma = (6 * P * L) / (x4 * x3 * x3)
349
+ tmpVar = 4.013 * E * np.sqrt((x3 * x3 * x4 * x4 * x4 * x4 * x4 * x4) / 36.0) / (L * L)
350
+ tmpVar2 = (x3 / (2 * L)) * np.sqrt(E / (4 * G))
351
+ PC = tmpVar * (1 - tmpVar2)
352
+
353
+ g[0] = tauMax - tau
354
+ g[1] = sigmaMax - sigma
355
+ g[2] = x4 - x1
356
+ g[3] = PC - P
357
+ g = np.where(g < 0, -g, 0)
358
+ f[2] = g[0] + g[1] + g[2] + g[3]
359
+
360
+ return f
361
+
362
+
363
+ class RE33():
364
+ def __init__(self):
365
+ self.problem_name = 'RE33'
366
+ self.n_objectives = 3
367
+ self.n_variables = 4
368
+ self.n_constraints = 0
369
+ self.n_original_constraints = 4
370
+
371
+ self.ubound = np.zeros(self.n_variables)
372
+ self.lbound = np.zeros(self.n_variables)
373
+ self.lbound[0] = 55
374
+ self.lbound[1] = 75
375
+ self.lbound[2] = 1000
376
+ self.lbound[3] = 11
377
+ self.ubound[0] = 80
378
+ self.ubound[1] = 110
379
+ self.ubound[2] = 3000
380
+ self.ubound[3] = 20
381
+
382
+ def evaluate(self, x):
383
+ f = np.zeros(self.n_objectives)
384
+ g = np.zeros(self.n_original_constraints)
385
+
386
+ x1 = x[0]
387
+ x2 = x[1]
388
+ x3 = x[2]
389
+ x4 = x[3]
390
+
391
+ # First original objective function
392
+ f[0] = 4.9 * 1e-5 * (x2 * x2 - x1 * x1) * (x4 - 1.0)
393
+ # Second original objective function
394
+ f[1] = ((9.82 * 1e6) * (x2 * x2 - x1 * x1)) / (x3 * x4 * (x2 * x2 * x2 - x1 * x1 * x1))
395
+
396
+ # Reformulated objective functions
397
+ g[0] = (x2 - x1) - 20.0
398
+ g[1] = 0.4 - (x3 / (3.14 * (x2 * x2 - x1 * x1)))
399
+ g[2] = 1.0 - (2.22 * 1e-3 * x3 * (x2 * x2 * x2 - x1 * x1 * x1)) / np.power((x2 * x2 - x1 * x1), 2)
400
+ g[3] = (2.66 * 1e-2 * x3 * x4 * (x2 * x2 * x2 - x1 * x1 * x1)) / (x2 * x2 - x1 * x1) - 900.0
401
+ g = np.where(g < 0, -g, 0)
402
+ f[2] = g[0] + g[1] + g[2] + g[3]
403
+
404
+ return f
405
+
406
+
407
+ class RE34():
408
+ def __init__(self):
409
+ self.problem_name = 'RE34'
410
+ self.n_objectives = 3
411
+ self.n_variables = 5
412
+ self.n_constraints = 0
413
+ self.n_original_constraints = 0
414
+
415
+ self.lbound = np.full(self.n_variables, 1)
416
+ self.ubound = np.full(self.n_variables, 3)
417
+
418
+ def evaluate(self, x):
419
+ f = np.zeros(self.n_objectives)
420
+ g = np.zeros(self.n_original_constraints)
421
+
422
+ x1 = x[0]
423
+ x2 = x[1]
424
+ x3 = x[2]
425
+ x4 = x[3]
426
+ x5 = x[4]
427
+
428
+ f[0] = 1640.2823 + (2.3573285 * x1) + (2.3220035 * x2) + (4.5688768 * x3) + (7.7213633 * x4) + (4.4559504 * x5)
429
+ f[1] = 6.5856 + (1.15 * x1) - (1.0427 * x2) + (0.9738 * x3) + (0.8364 * x4) - (0.3695 * x1 * x4) + (
430
+ 0.0861 * x1 * x5) + (0.3628 * x2 * x4) - (0.1106 * x1 * x1) - (0.3437 * x3 * x3) + (
431
+ 0.1764 * x4 * x4)
432
+ f[2] = -0.0551 + (0.0181 * x1) + (0.1024 * x2) + (0.0421 * x3) - (0.0073 * x1 * x2) + (0.024 * x2 * x3) - (
433
+ 0.0118 * x2 * x4) - (0.0204 * x3 * x4) - (0.008 * x3 * x5) - (0.0241 * x2 * x2) + (0.0109 * x4 * x4)
434
+
435
+ return f
436
+
437
+
438
+ class RE35():
439
+ def __init__(self):
440
+ self.problem_name = 'RE35'
441
+ self.n_objectives = 3
442
+ self.n_variables = 7
443
+ self.n_constraints = 0
444
+ self.n_original_constraints = 11
445
+
446
+ self.lbound = np.zeros(self.n_variables)
447
+ self.ubound = np.zeros(self.n_variables)
448
+ self.lbound[0] = 2.6
449
+ self.lbound[1] = 0.7
450
+ self.lbound[2] = 17
451
+ self.lbound[3] = 7.3
452
+ self.lbound[4] = 7.3
453
+ self.lbound[5] = 2.9
454
+ self.lbound[6] = 5.0
455
+ self.ubound[0] = 3.6
456
+ self.ubound[1] = 0.8
457
+ self.ubound[2] = 28
458
+ self.ubound[3] = 8.3
459
+ self.ubound[4] = 8.3
460
+ self.ubound[5] = 3.9
461
+ self.ubound[6] = 5.5
462
+
463
+ def evaluate(self, x):
464
+ f = np.zeros(self.n_objectives)
465
+ g = np.zeros(self.n_original_constraints)
466
+
467
+ x1 = x[0]
468
+ x2 = x[1]
469
+ x3 = np.round(x[2])
470
+ x4 = x[3]
471
+ x5 = x[4]
472
+ x6 = x[5]
473
+ x7 = x[6]
474
+
475
+ # First original objective function (weight)
476
+ f[0] = 0.7854 * x1 * (x2 * x2) * (((10.0 * x3 * x3) / 3.0) + (14.933 * x3) - 43.0934) - 1.508 * x1 * (
477
+ x6 * x6 + x7 * x7) + 7.477 * (x6 * x6 * x6 + x7 * x7 * x7) + 0.7854 * (x4 * x6 * x6 + x5 * x7 * x7)
478
+
479
+ # Second original objective function (stress)
480
+ tmpVar = np.power((745.0 * x4) / (x2 * x3), 2.0) + 1.69 * 1e7
481
+ f[1] = np.sqrt(tmpVar) / (0.1 * x6 * x6 * x6)
482
+
483
+ # Constraint functions
484
+ g[0] = -(1.0 / (x1 * x2 * x2 * x3)) + 1.0 / 27.0
485
+ g[1] = -(1.0 / (x1 * x2 * x2 * x3 * x3)) + 1.0 / 397.5
486
+ g[2] = -(x4 * x4 * x4) / (x2 * x3 * x6 * x6 * x6 * x6) + 1.0 / 1.93
487
+ g[3] = -(x5 * x5 * x5) / (x2 * x3 * x7 * x7 * x7 * x7) + 1.0 / 1.93
488
+ g[4] = -(x2 * x3) + 40.0
489
+ g[5] = -(x1 / x2) + 12.0
490
+ g[6] = -5.0 + (x1 / x2)
491
+ g[7] = -1.9 + x4 - 1.5 * x6
492
+ g[8] = -1.9 + x5 - 1.1 * x7
493
+ g[9] = -f[1] + 1300.0
494
+ tmpVar = np.power((745.0 * x5) / (x2 * x3), 2.0) + 1.575 * 1e8
495
+ g[10] = -np.sqrt(tmpVar) / (0.1 * x7 * x7 * x7) + 1100.0
496
+ g = np.where(g < 0, -g, 0)
497
+ f[2] = g[0] + g[1] + g[2] + g[3] + g[4] + g[5] + g[6] + g[7] + g[8] + g[9] + g[10]
498
+
499
+ return f
500
+
501
+
502
+ class RE36():
503
+ def __init__(self):
504
+ self.problem_name = 'RE36'
505
+ self.n_objectives = 3
506
+ self.n_variables = 4
507
+ self.n_constraints = 0
508
+ self.n_original_constraints = 1
509
+
510
+ self.lbound = np.full(self.n_variables, 12)
511
+ self.ubound = np.full(self.n_variables, 60)
512
+
513
+ def evaluate(self, x):
514
+ f = np.zeros(self.n_objectives)
515
+ g = np.zeros(self.n_original_constraints)
516
+
517
+ # all four variables must take integer values
518
+ x1 = np.round(x[0])
519
+ x2 = np.round(x[1])
520
+ x3 = np.round(x[2])
521
+ x4 = np.round(x[3])
522
+
523
+ # First original objective function
524
+ f[0] = np.abs(6.931 - ((x3 / x1) * (x4 / x2)))
525
+ # Second original objective function (the maximum value among the four variables)
526
+ l = [x1, x2, x3, x4]
527
+ f[1] = max(l)
528
+
529
+ g[0] = 0.5 - (f[0] / 6.931)
530
+ g = np.where(g < 0, -g, 0)
531
+ f[2] = g[0]
532
+
533
+ return f
534
+
535
+
536
+ class RE37():
537
+ def __init__(self):
538
+ self.problem_name = 'RE37'
539
+ self.n_objectives = 3
540
+ self.n_variables = 4
541
+ self.n_constraints = 0
542
+ self.n_original_constraints = 0
543
+
544
+ self.lbound = np.full(self.n_variables, 0)
545
+ self.ubound = np.full(self.n_variables, 1)
546
+
547
+ def evaluate(self, x):
548
+ f = np.zeros(self.n_objectives)
549
+
550
+ xAlpha = x[0]
551
+ xHA = x[1]
552
+ xOA = x[2]
553
+ xOPTT = x[3]
554
+
555
+ # f1 (TF_max)
556
+ f[0] = 0.692 + (0.477 * xAlpha) - (0.687 * xHA) - (0.080 * xOA) - (0.0650 * xOPTT) - (
557
+ 0.167 * xAlpha * xAlpha) - (0.0129 * xHA * xAlpha) + (0.0796 * xHA * xHA) - (
558
+ 0.0634 * xOA * xAlpha) - (0.0257 * xOA * xHA) + (0.0877 * xOA * xOA) - (
559
+ 0.0521 * xOPTT * xAlpha) + (0.00156 * xOPTT * xHA) + (0.00198 * xOPTT * xOA) + (
560
+ 0.0184 * xOPTT * xOPTT)
561
+ # f2 (X_cc)
562
+ f[1] = 0.153 - (0.322 * xAlpha) + (0.396 * xHA) + (0.424 * xOA) + (0.0226 * xOPTT) + (
563
+ 0.175 * xAlpha * xAlpha) + (0.0185 * xHA * xAlpha) - (0.0701 * xHA * xHA) - (
564
+ 0.251 * xOA * xAlpha) + (0.179 * xOA * xHA) + (0.0150 * xOA * xOA) + (
565
+ 0.0134 * xOPTT * xAlpha) + (0.0296 * xOPTT * xHA) + (0.0752 * xOPTT * xOA) + (
566
+ 0.0192 * xOPTT * xOPTT)
567
+ # f3 (TT_max)
568
+ f[2] = 0.370 - (0.205 * xAlpha) + (0.0307 * xHA) + (0.108 * xOA) + (1.019 * xOPTT) - (
569
+ 0.135 * xAlpha * xAlpha) + (0.0141 * xHA * xAlpha) + (0.0998 * xHA * xHA) + (
570
+ 0.208 * xOA * xAlpha) - (0.0301 * xOA * xHA) - (0.226 * xOA * xOA) + (
571
+ 0.353 * xOPTT * xAlpha) - (0.0497 * xOPTT * xOA) - (0.423 * xOPTT * xOPTT) + (
572
+ 0.202 * xHA * xAlpha * xAlpha) - (0.281 * xOA * xAlpha * xAlpha) - (
573
+ 0.342 * xHA * xHA * xAlpha) - (0.245 * xHA * xHA * xOA) + (0.281 * xOA * xOA * xHA) - (
574
+ 0.184 * xOPTT * xOPTT * xAlpha) - (0.281 * xHA * xAlpha * xOA)
575
+
576
+ return f
577
+
578
+
579
+ class RE41():
580
+ def __init__(self):
581
+ self.problem_name = 'RE41'
582
+ self.n_objectives = 4
583
+ self.n_variables = 7
584
+ self.n_constraints = 0
585
+ self.n_original_constraints = 10
586
+
587
+ self.lbound = np.zeros(self.n_variables)
588
+ self.ubound = np.zeros(self.n_variables)
589
+ self.lbound[0] = 0.5
590
+ self.lbound[1] = 0.45
591
+ self.lbound[2] = 0.5
592
+ self.lbound[3] = 0.5
593
+ self.lbound[4] = 0.875
594
+ self.lbound[5] = 0.4
595
+ self.lbound[6] = 0.4
596
+ self.ubound[0] = 1.5
597
+ self.ubound[1] = 1.35
598
+ self.ubound[2] = 1.5
599
+ self.ubound[3] = 1.5
600
+ self.ubound[4] = 2.625
601
+ self.ubound[5] = 1.2
602
+ self.ubound[6] = 1.2
603
+
604
+ def evaluate(self, x):
605
+ f = np.zeros(self.n_objectives)
606
+ g = np.zeros(self.n_original_constraints)
607
+
608
+ x1 = x[0]
609
+ x2 = x[1]
610
+ x3 = x[2]
611
+ x4 = x[3]
612
+ x5 = x[4]
613
+ x6 = x[5]
614
+ x7 = x[6]
615
+
616
+ # First original objective function
617
+ f[0] = 1.98 + 4.9 * x1 + 6.67 * x2 + 6.98 * x3 + 4.01 * x4 + 1.78 * x5 + 0.00001 * x6 + 2.73 * x7
618
+ # Second original objective function
619
+ f[1] = 4.72 - 0.5 * x4 - 0.19 * x2 * x3
620
+ # Third original objective function
621
+ Vmbp = 10.58 - 0.674 * x1 * x2 - 0.67275 * x2
622
+ Vfd = 16.45 - 0.489 * x3 * x7 - 0.843 * x5 * x6
623
+ f[2] = 0.5 * (Vmbp + Vfd)
624
+
625
+ # Constraint functions
626
+ g[0] = 1 - (1.16 - 0.3717 * x2 * x4 - 0.0092928 * x3)
627
+ g[1] = 0.32 - (0.261 - 0.0159 * x1 * x2 - 0.06486 * x1 - 0.019 * x2 * x7 + 0.0144 * x3 * x5 + 0.0154464 * x6)
628
+ g[2] = 0.32 - (
629
+ 0.214 + 0.00817 * x5 - 0.045195 * x1 - 0.0135168 * x1 + 0.03099 * x2 * x6 - 0.018 * x2 * x7 + 0.007176 * x3 + 0.023232 * x3 - 0.00364 * x5 * x6 - 0.018 * x2 * x2)
630
+ g[3] = 0.32 - (0.74 - 0.61 * x2 - 0.031296 * x3 - 0.031872 * x7 + 0.227 * x2 * x2)
631
+ g[4] = 32 - (28.98 + 3.818 * x3 - 4.2 * x1 * x2 + 1.27296 * x6 - 2.68065 * x7)
632
+ g[5] = 32 - (33.86 + 2.95 * x3 - 5.057 * x1 * x2 - 3.795 * x2 - 3.4431 * x7 + 1.45728)
633
+ g[6] = 32 - (46.36 - 9.9 * x2 - 4.4505 * x1)
634
+ g[7] = 4 - f[1]
635
+ g[8] = 9.9 - Vmbp
636
+ g[9] = 15.7 - Vfd
637
+
638
+ g = np.where(g < 0, -g, 0)
639
+ f[3] = g[0] + g[1] + g[2] + g[3] + g[4] + g[5] + g[6] + g[7] + g[8] + g[9]
640
+
641
+ return f
642
+
643
+
644
+ class RE42():
645
+ def __init__(self):
646
+ self.problem_name = 'RE42'
647
+ self.n_objectives = 4
648
+ self.n_variables = 6
649
+ self.n_constraints = 0
650
+ self.n_original_constraints = 9
651
+
652
+ self.lbound = np.zeros(self.n_variables)
653
+ self.ubound = np.zeros(self.n_variables)
654
+ self.lbound[0] = 150.0
655
+ self.lbound[1] = 20.0
656
+ self.lbound[2] = 13.0
657
+ self.lbound[3] = 10.0
658
+ self.lbound[4] = 14.0
659
+ self.lbound[5] = 0.63
660
+ self.ubound[0] = 274.32
661
+ self.ubound[1] = 32.31
662
+ self.ubound[2] = 25.0
663
+ self.ubound[3] = 11.71
664
+ self.ubound[4] = 18.0
665
+ self.ubound[5] = 0.75
666
+
667
+ def evaluate(self, x):
668
+ f = np.zeros(self.n_objectives)
669
+ # NOT g
670
+ constraintFuncs = np.zeros(self.n_original_constraints)
671
+
672
+ x_L = x[0]
673
+ x_B = x[1]
674
+ x_D = x[2]
675
+ x_T = x[3]
676
+ x_Vk = x[4]
677
+ x_CB = x[5]
678
+
679
+ displacement = 1.025 * x_L * x_B * x_T * x_CB
680
+ V = 0.5144 * x_Vk
681
+ g = 9.8065
682
+ Fn = V / np.power(g * x_L, 0.5)
683
+ a = (4977.06 * x_CB * x_CB) - (8105.61 * x_CB) + 4456.51
684
+ b = (-10847.2 * x_CB * x_CB) + (12817.0 * x_CB) - 6960.32
685
+
686
+ power = (np.power(displacement, 2.0 / 3.0) * np.power(x_Vk, 3.0)) / (a + (b * Fn))
687
+ outfit_weight = 1.0 * np.power(x_L, 0.8) * np.power(x_B, 0.6) * np.power(x_D, 0.3) * np.power(x_CB, 0.1)
688
+ steel_weight = 0.034 * np.power(x_L, 1.7) * np.power(x_B, 0.7) * np.power(x_D, 0.4) * np.power(x_CB, 0.5)
689
+ machinery_weight = 0.17 * np.power(power, 0.9)
690
+ light_ship_weight = steel_weight + outfit_weight + machinery_weight
691
+
692
+ ship_cost = 1.3 * ((2000.0 * np.power(steel_weight, 0.85)) + (3500.0 * outfit_weight) + (
693
+ 2400.0 * np.power(power, 0.8)))
694
+ capital_costs = 0.2 * ship_cost
695
+
696
+ DWT = displacement - light_ship_weight
697
+
698
+ running_costs = 40000.0 * np.power(DWT, 0.3)
699
+
700
+ round_trip_miles = 5000.0
701
+ sea_days = (round_trip_miles / 24.0) * x_Vk
702
+ handling_rate = 8000.0
703
+
704
+ daily_consumption = ((0.19 * power * 24.0) / 1000.0) + 0.2
705
+ fuel_price = 100.0
706
+ fuel_cost = 1.05 * daily_consumption * sea_days * fuel_price
707
+ port_cost = 6.3 * np.power(DWT, 0.8)
708
+
709
+ fuel_carried = daily_consumption * (sea_days + 5.0)
710
+ miscellaneous_DWT = 2.0 * np.power(DWT, 0.5)
711
+
712
+ cargo_DWT = DWT - fuel_carried - miscellaneous_DWT
713
+ port_days = 2.0 * ((cargo_DWT / handling_rate) + 0.5)
714
+ RTPA = 350.0 / (sea_days + port_days)
715
+
716
+ voyage_costs = (fuel_cost + port_cost) * RTPA
717
+ annual_costs = capital_costs + running_costs + voyage_costs
718
+ annual_cargo = cargo_DWT * RTPA
719
+
720
+ f[0] = annual_costs / annual_cargo
721
+ f[1] = light_ship_weight
722
+ # annual cargo is to be maximized, so f_2 is negated and treated as a minimization objective
723
+ f[2] = -annual_cargo
724
+
725
+ # Reformulated objective functions
726
+ constraintFuncs[0] = (x_L / x_B) - 6.0
727
+ constraintFuncs[1] = -(x_L / x_D) + 15.0
728
+ constraintFuncs[2] = -(x_L / x_T) + 19.0
729
+ constraintFuncs[3] = 0.45 * np.power(DWT, 0.31) - x_T
730
+ constraintFuncs[4] = 0.7 * x_D + 0.7 - x_T
731
+ constraintFuncs[5] = 500000.0 - DWT
732
+ constraintFuncs[6] = DWT - 3000.0
733
+ constraintFuncs[7] = 0.32 - Fn
734
+
735
+ KB = 0.53 * x_T
736
+ BMT = ((0.085 * x_CB - 0.002) * x_B * x_B) / (x_T * x_CB)
737
+ KG = 1.0 + 0.52 * x_D
738
+ constraintFuncs[8] = (KB + BMT - KG) - (0.07 * x_B)
739
+
740
+ constraintFuncs = np.where(constraintFuncs < 0, -constraintFuncs, 0)
741
+ f[3] = constraintFuncs[0] + constraintFuncs[1] + constraintFuncs[2] + constraintFuncs[3] + constraintFuncs[4] + \
742
+ constraintFuncs[5] + constraintFuncs[6] + constraintFuncs[7] + constraintFuncs[8]
743
+
744
+ return f
745
+
746
+
747
+ class RE61():
748
+ def __init__(self):
749
+ self.problem_name = 'RE61'
750
+ self.n_objectives = 6
751
+ self.n_variables = 3
752
+ self.n_constraints = 0
753
+ self.n_original_constraints = 7
754
+
755
+ self.lbound = np.zeros(self.n_variables)
756
+ self.ubound = np.zeros(self.n_variables)
757
+ self.lbound[0] = 0.01
758
+ self.lbound[1] = 0.01
759
+ self.lbound[2] = 0.01
760
+ self.ubound[0] = 0.45
761
+ self.ubound[1] = 0.10
762
+ self.ubound[2] = 0.10
763
+
764
+ def evaluate(self, x):
765
+ f = np.zeros(self.n_objectives)
766
+ g = np.zeros(self.n_original_constraints)
767
+
768
+ # First original objective function
769
+ f[0] = 106780.37 * (x[1] + x[2]) + 61704.67
770
+ # Second original objective function
771
+ f[1] = 3000 * x[0]
772
+ # Third original objective function
773
+ f[2] = 305700 * 2289 * x[1] / np.power(0.06 * 2289, 0.65)
774
+ # Fourth original objective function
775
+ f[3] = 250 * 2289 * np.exp(-39.75 * x[1] + 9.9 * x[2] + 2.74)
776
+ # Fifth original objective function
777
+ f[4] = 25 * (1.39 / (x[0] * x[1]) + 4940 * x[2] - 80)
778
+
779
+ # Constraint functions
780
+ g[0] = 1 - (0.00139 / (x[0] * x[1]) + 4.94 * x[2] - 0.08)
781
+ g[1] = 1 - (0.000306 / (x[0] * x[1]) + 1.082 * x[2] - 0.0986)
782
+ g[2] = 50000 - (12.307 / (x[0] * x[1]) + 49408.24 * x[2] + 4051.02)
783
+ g[3] = 16000 - (2.098 / (x[0] * x[1]) + 8046.33 * x[2] - 696.71)
784
+ g[4] = 10000 - (2.138 / (x[0] * x[1]) + 7883.39 * x[2] - 705.04)
785
+ g[5] = 2000 - (0.417 * x[0] * x[1] + 1721.26 * x[2] - 136.54)
786
+ g[6] = 550 - (0.164 / (x[0] * x[1]) + 631.13 * x[2] - 54.48)
787
+
788
+ g = np.where(g < 0, -g, 0)
789
+ f[5] = g[0] + g[1] + g[2] + g[3] + g[4] + g[5] + g[6]
790
+
791
+ return f
792
+
793
+
794
+ class RE91():
795
+ def __init__(self):
796
+ self.problem_name = 'RE91'
797
+ self.n_objectives = 9
798
+ self.n_variables = 7
799
+ self.n_constraints = 0
800
+ self.n_original_constraints = 0
801
+
802
+ self.lbound = np.zeros(self.n_variables)
803
+ self.ubound = np.zeros(self.n_variables)
804
+ self.lbound[0] = 0.5
805
+ self.lbound[1] = 0.45
806
+ self.lbound[2] = 0.5
807
+ self.lbound[3] = 0.5
808
+ self.lbound[4] = 0.875
809
+ self.lbound[5] = 0.4
810
+ self.lbound[6] = 0.4
811
+ self.ubound[0] = 1.5
812
+ self.ubound[1] = 1.35
813
+ self.ubound[2] = 1.5
814
+ self.ubound[3] = 1.5
815
+ self.ubound[4] = 2.625
816
+ self.ubound[5] = 1.2
817
+ self.ubound[6] = 1.2
818
+
819
+ def evaluate(self, x):
820
+ f = np.zeros(self.n_objectives)
821
+ g = np.zeros(self.n_original_constraints)
822
+
823
+ x1 = x[0]
824
+ x2 = x[1]
825
+ x3 = x[2]
826
+ x4 = x[3]
827
+ x5 = x[4]
828
+ x6 = x[5]
829
+ x7 = x[6]
830
+ # stochastic variables
831
+ x8 = 0.006 * (np.random.normal(0, 1)) + 0.345
832
+ x9 = 0.006 * (np.random.normal(0, 1)) + 0.192
833
+ x10 = 10 * (np.random.normal(0, 1)) + 0.0
834
+ x11 = 10 * (np.random.normal(0, 1)) + 0.0
835
+
836
+ # First function
837
+ f[0] = 1.98 + 4.9 * x1 + 6.67 * x2 + 6.98 * x3 + 4.01 * x4 + 1.75 * x5 + 0.00001 * x6 + 2.73 * x7
838
+ # Second function
839
+ f[1] = max(0.0, (1.16 - 0.3717 * x2 * x4 - 0.00931 * x2 * x10 - 0.484 * x3 * x9 + 0.01343 * x6 * x10) / 1.0)
840
+ # Third function
841
+ f[2] = max(0.0, (
842
+ 0.261 - 0.0159 * x1 * x2 - 0.188 * x1 * x8 - 0.019 * x2 * x7 + 0.0144 * x3 * x5 + 0.87570001 * x5 * x10 + 0.08045 * x6 * x9 + 0.00139 * x8 * x11 + 0.00001575 * x10 * x11) / 0.32)
843
+ # Fourth function
844
+ f[3] = max(0.0, (
845
+ 0.214 + 0.00817 * x5 - 0.131 * x1 * x8 - 0.0704 * x1 * x9 + 0.03099 * x2 * x6 - 0.018 * x2 * x7 + 0.0208 * x3 * x8 + 0.121 * x3 * x9 - 0.00364 * x5 * x6 + 0.0007715 * x5 * x10 - 0.0005354 * x6 * x10 + 0.00121 * x8 * x11 + 0.00184 * x9 * x10 - 0.018 * x2 * x2) / 0.32)
846
+ # Fifth function
847
+ f[4] = max(0.0, (
848
+ 0.74 - 0.61 * x2 - 0.163 * x3 * x8 + 0.001232 * x3 * x10 - 0.166 * x7 * x9 + 0.227 * x2 * x2) / 0.32)
849
+ # Sixth function
850
+ tmp = ((
851
+ 28.98 + 3.818 * x3 - 4.2 * x1 * x2 + 0.0207 * x5 * x10 + 6.63 * x6 * x9 - 7.77 * x7 * x8 + 0.32 * x9 * x10) + (
852
+ 33.86 + 2.95 * x3 + 0.1792 * x10 - 5.057 * x1 * x2 - 11 * x2 * x8 - 0.0215 * x5 * x10 - 9.98 * x7 * x8 + 22 * x8 * x9) + (
853
+ 46.36 - 9.9 * x2 - 12.9 * x1 * x8 + 0.1107 * x3 * x10)) / 3
854
+ f[5] = max(0.0, tmp / 32)
855
+ # Seventh function
856
+ f[6] = max(0.0, (
857
+ 4.72 - 0.5 * x4 - 0.19 * x2 * x3 - 0.0122 * x4 * x10 + 0.009325 * x6 * x10 + 0.000191 * x11 * x11) / 4.0)
858
+ # Eighth function
859
+ f[7] = max(0.0, (
860
+ 10.58 - 0.674 * x1 * x2 - 1.95 * x2 * x8 + 0.02054 * x3 * x10 - 0.0198 * x4 * x10 + 0.028 * x6 * x10) / 9.9)
861
+ # Ninth function
862
+ f[8] = max(0.0, (
863
+ 16.45 - 0.489 * x3 * x7 - 0.843 * x5 * x6 + 0.0432 * x9 * x10 - 0.0556 * x9 * x11 - 0.000786 * x11 * x11) / 15.7)
864
+
865
+ return f
866
+
867
+
868
+ class CRE21():
869
+ def __init__(self):
870
+ self.problem_name = 'CRE21'
871
+ self.n_objectives = 2
872
+ self.n_variables = 3
873
+ self.n_constraints = 3
874
+
875
+ self.ubound = np.zeros(self.n_variables)
876
+ self.lbound = np.zeros(self.n_variables)
877
+ self.lbound[0] = 0.00001
878
+ self.lbound[1] = 0.00001
879
+ self.lbound[2] = 1.0
880
+ self.ubound[0] = 100.0
881
+ self.ubound[1] = 100.0
882
+ self.ubound[2] = 3.0
883
+
884
+ def evaluate(self, x):
885
+ f = np.zeros(self.n_objectives)
886
+ g = np.zeros(self.n_constraints)
887
+
888
+ x1 = x[0]
889
+ x2 = x[1]
890
+ x3 = x[2]
891
+
892
+ # First original objective function
893
+ f[0] = x1 * np.sqrt(16.0 + (x3 * x3)) + x2 * np.sqrt(1.0 + x3 * x3)
894
+ # Second original objective function
895
+ f[1] = (20.0 * np.sqrt(16.0 + (x3 * x3))) / (x1 * x3)
896
+
897
+ # Constraint functions
898
+ g[0] = 0.1 - f[0]
899
+ g[1] = 100000.0 - f[1]
900
+ g[2] = 100000 - ((80.0 * np.sqrt(1.0 + x3 * x3)) / (x3 * x2))
901
+ g = np.where(g < 0, -g, 0)
902
+
903
+ return f, g
904
+
905
+
906
+ class CRE22():
907
+ def __init__(self):
908
+ self.problem_name = 'CRE22'
909
+ self.n_objectives = 2
910
+ self.n_variables = 4
911
+ self.n_constraints = 4
912
+
913
+ self.ubound = np.zeros(self.n_variables)
914
+ self.lbound = np.zeros(self.n_variables)
915
+ self.lbound[0] = 0.125
916
+ self.lbound[1] = 0.1
917
+ self.lbound[2] = 0.1
918
+ self.lbound[3] = 0.125
919
+ self.ubound[0] = 5.0
920
+ self.ubound[1] = 10.0
921
+ self.ubound[2] = 10.0
922
+ self.ubound[3] = 5.0
923
+
924
+ def evaluate(self, x):
925
+ f = np.zeros(self.n_objectives)
926
+ g = np.zeros(self.n_constraints)
927
+
928
+ x1 = x[0]
929
+ x2 = x[1]
930
+ x3 = x[2]
931
+ x4 = x[3]
932
+
933
+ P = 6000
934
+ L = 14
935
+ E = 30 * 1e6
936
+
937
+ # // deltaMax = 0.25
938
+ G = 12 * 1e6
939
+ tauMax = 13600
940
+ sigmaMax = 30000
941
+
942
+ # First original objective function
943
+ f[0] = (1.10471 * x1 * x1 * x2) + (0.04811 * x3 * x4) * (14.0 + x2)
944
+ # Second original objective function
945
+ f[1] = (4 * P * L * L * L) / (E * x4 * x3 * x3 * x3)
946
+
947
+ # Constraint functions
948
+ M = P * (L + (x2 / 2))
949
+ tmpVar = ((x2 * x2) / 4.0) + np.power((x1 + x3) / 2.0, 2)
950
+ R = np.sqrt(tmpVar)
951
+ tmpVar = ((x2 * x2) / 12.0) + np.power((x1 + x3) / 2.0, 2)
952
+ J = 2 * np.sqrt(2) * x1 * x2 * tmpVar
953
+
954
+ tauDashDash = (M * R) / J
955
+ tauDash = P / (np.sqrt(2) * x1 * x2)
956
+ tmpVar = tauDash * tauDash + ((2 * tauDash * tauDashDash * x2) / (2 * R)) + (tauDashDash * tauDashDash)
957
+ tau = np.sqrt(tmpVar)
958
+ sigma = (6 * P * L) / (x4 * x3 * x3)
959
+ tmpVar = 4.013 * E * np.sqrt((x3 * x3 * x4 * x4 * x4 * x4 * x4 * x4) / 36.0) / (L * L)
960
+ tmpVar2 = (x3 / (2 * L)) * np.sqrt(E / (4 * G))
961
+ PC = tmpVar * (1 - tmpVar2)
962
+
963
+ g[0] = tauMax - tau
964
+ g[1] = sigmaMax - sigma
965
+ g[2] = x4 - x1
966
+ g[3] = PC - P
967
+ g = np.where(g < 0, -g, 0)
968
+
969
+ return f, g
970
+
971
+
972
+ class CRE23():
973
+ def __init__(self):
974
+ self.problem_name = 'CRE23'
975
+ self.n_objectives = 2
976
+ self.n_variables = 4
977
+ self.n_constraints = 4
978
+
979
+ self.ubound = np.zeros(self.n_variables)
980
+ self.lbound = np.zeros(self.n_variables)
981
+ self.lbound[0] = 55
982
+ self.lbound[1] = 75
983
+ self.lbound[2] = 1000
984
+ self.lbound[3] = 11
985
+ self.ubound[0] = 80
986
+ self.ubound[1] = 110
987
+ self.ubound[2] = 3000
988
+ self.ubound[3] = 20
989
+
990
+ def evaluate(self, x):
991
+ f = np.zeros(self.n_objectives)
992
+ g = np.zeros(self.n_constraints)
993
+
994
+ x1 = x[0]
995
+ x2 = x[1]
996
+ x3 = x[2]
997
+ x4 = x[3]
998
+
999
+ # First original objective function
1000
+ f[0] = 4.9 * 1e-5 * (x2 * x2 - x1 * x1) * (x4 - 1.0)
1001
+ # Second original objective function
1002
+ f[1] = ((9.82 * 1e6) * (x2 * x2 - x1 * x1)) / (x3 * x4 * (x2 * x2 * x2 - x1 * x1 * x1))
1003
+
1004
+ # Reformulated objective functions
1005
+ g[0] = (x2 - x1) - 20.0
1006
+ g[1] = 0.4 - (x3 / (3.14 * (x2 * x2 - x1 * x1)))
1007
+ g[2] = 1.0 - (2.22 * 1e-3 * x3 * (x2 * x2 * x2 - x1 * x1 * x1)) / np.power((x2 * x2 - x1 * x1), 2)
1008
+ g[3] = (2.66 * 1e-2 * x3 * x4 * (x2 * x2 * x2 - x1 * x1 * x1)) / (x2 * x2 - x1 * x1) - 900.0
1009
+ g = np.where(g < 0, -g, 0)
1010
+
1011
+ return f, g
1012
+
1013
+
1014
+ class CRE24():
1015
+ def __init__(self):
1016
+ self.problem_name = 'CRE24'
1017
+ self.n_objectives = 2
1018
+ self.n_variables = 7
1019
+ self.n_constraints = 11
1020
+
1021
+ self.lbound = np.zeros(self.n_variables)
1022
+ self.ubound = np.zeros(self.n_variables)
1023
+
1024
+ self.lbound[0] = 2.6
1025
+ self.lbound[1] = 0.7
1026
+ self.lbound[2] = 17
1027
+ self.lbound[3] = 7.3
1028
+ self.lbound[4] = 7.3
1029
+ self.lbound[5] = 2.9
1030
+ self.lbound[6] = 5.0
1031
+ self.ubound[0] = 3.6
1032
+ self.ubound[1] = 0.8
1033
+ self.ubound[2] = 28
1034
+ self.ubound[3] = 8.3
1035
+ self.ubound[4] = 8.3
1036
+ self.ubound[5] = 3.9
1037
+ self.ubound[6] = 5.5
1038
+
1039
+ def evaluate(self, x):
1040
+ f = np.zeros(self.n_objectives)
1041
+ g = np.zeros(self.n_constraints)
1042
+
1043
+ x1 = x[0]
1044
+ x2 = x[1]
1045
+ x3 = np.round(x[2])
1046
+ x4 = x[3]
1047
+ x5 = x[4]
1048
+ x6 = x[5]
1049
+ x7 = x[6]
1050
+
1051
+ # First original objective function (weight)
1052
+ f[0] = 0.7854 * x1 * (x2 * x2) * (((10.0 * x3 * x3) / 3.0) + (14.933 * x3) - 43.0934) - 1.508 * x1 * (
1053
+ x6 * x6 + x7 * x7) + 7.477 * (x6 * x6 * x6 + x7 * x7 * x7) + 0.7854 * (x4 * x6 * x6 + x5 * x7 * x7)
1054
+
1055
+ # Second original objective function (stress)
1056
+ tmpVar = np.power((745.0 * x4) / (x2 * x3), 2.0) + 1.69 * 1e7
1057
+ f[1] = np.sqrt(tmpVar) / (0.1 * x6 * x6 * x6)
1058
+
1059
+ # Constraint functions
1060
+ g[0] = -(1.0 / (x1 * x2 * x2 * x3)) + 1.0 / 27.0
1061
+ g[1] = -(1.0 / (x1 * x2 * x2 * x3 * x3)) + 1.0 / 397.5
1062
+ g[2] = -(x4 * x4 * x4) / (x2 * x3 * x6 * x6 * x6 * x6) + 1.0 / 1.93
1063
+ g[3] = -(x5 * x5 * x5) / (x2 * x3 * x7 * x7 * x7 * x7) + 1.0 / 1.93
1064
+ g[4] = -(x2 * x3) + 40.0
1065
+ g[5] = -(x1 / x2) + 12.0
1066
+ g[6] = -5.0 + (x1 / x2)
1067
+ g[7] = -1.9 + x4 - 1.5 * x6
1068
+ g[8] = -1.9 + x5 - 1.1 * x7
1069
+ g[9] = -f[1] + 1300.0
1070
+ tmpVar = np.power((745.0 * x5) / (x2 * x3), 2.0) + 1.575 * 1e8
1071
+ g[10] = -np.sqrt(tmpVar) / (0.1 * x7 * x7 * x7) + 1100.0
1072
+ g = np.where(g < 0, -g, 0)
1073
+
1074
+ return f, g
1075
+
1076
+
1077
+ class CRE25():
1078
+ def __init__(self):
1079
+ self.problem_name = 'CRE25'
1080
+ self.n_objectives = 2
1081
+ self.n_variables = 4
1082
+ self.n_constraints = 1
1083
+
1084
+ self.lbound = np.full(self.n_variables, 12)
1085
+ self.ubound = np.full(self.n_variables, 60)
1086
+
1087
+ def evaluate(self, x):
1088
+ f = np.zeros(self.n_objectives)
1089
+ g = np.zeros(self.n_constraints)
1090
+
1091
+ # all four variables must take integer values
1092
+ x1 = np.round(x[0])
1093
+ x2 = np.round(x[1])
1094
+ x3 = np.round(x[2])
1095
+ x4 = np.round(x[3])
1096
+
1097
+ # First original objective function
1098
+ f[0] = np.abs(6.931 - ((x3 / x1) * (x4 / x2)))
1099
+ # Second original objective function (the maximum value among the four variables)
1100
+ l = [x1, x2, x3, x4]
1101
+ f[1] = max(l)
1102
+
1103
+ g[0] = 0.5 - (f[0] / 6.931)
1104
+ g = np.where(g < 0, -g, 0)
1105
+
1106
+ return f, g
1107
+
1108
+
1109
+ class CRE31():
1110
+ def __init__(self):
1111
+ self.problem_name = 'CRE31'
1112
+ self.n_objectives = 3
1113
+ self.n_variables = 7
1114
+ self.n_constraints = 10
1115
+
1116
+ self.lbound = np.zeros(self.n_variables)
1117
+ self.ubound = np.zeros(self.n_variables)
1118
+ self.lbound[0] = 0.5
1119
+ self.lbound[1] = 0.45
1120
+ self.lbound[2] = 0.5
1121
+ self.lbound[3] = 0.5
1122
+ self.lbound[4] = 0.875
1123
+ self.lbound[5] = 0.4
1124
+ self.lbound[6] = 0.4
1125
+ self.ubound[0] = 1.5
1126
+ self.ubound[1] = 1.35
1127
+ self.ubound[2] = 1.5
1128
+ self.ubound[3] = 1.5
1129
+ self.ubound[4] = 2.625
1130
+ self.ubound[5] = 1.2
1131
+ self.ubound[6] = 1.2
1132
+
1133
+ def evaluate(self, x):
1134
+ f = np.zeros(self.n_objectives)
1135
+ g = np.zeros(self.n_constraints)
1136
+
1137
+ x1 = x[0]
1138
+ x2 = x[1]
1139
+ x3 = x[2]
1140
+ x4 = x[3]
1141
+ x5 = x[4]
1142
+ x6 = x[5]
1143
+ x7 = x[6]
1144
+
1145
+ # First original objective function
1146
+ f[0] = 1.98 + 4.9 * x1 + 6.67 * x2 + 6.98 * x3 + 4.01 * x4 + 1.78 * x5 + 0.00001 * x6 + 2.73 * x7
1147
+ # Second original objective function
1148
+ f[1] = 4.72 - 0.5 * x4 - 0.19 * x2 * x3
1149
+ # Third original objective function
1150
+ Vmbp = 10.58 - 0.674 * x1 * x2 - 0.67275 * x2
1151
+ Vfd = 16.45 - 0.489 * x3 * x7 - 0.843 * x5 * x6
1152
+ f[2] = 0.5 * (Vmbp + Vfd)
1153
+
1154
+ # Constraint functions
1155
+ g[0] = 1 - (1.16 - 0.3717 * x2 * x4 - 0.0092928 * x3)
1156
+ g[1] = 0.32 - (0.261 - 0.0159 * x1 * x2 - 0.06486 * x1 - 0.019 * x2 * x7 + 0.0144 * x3 * x5 + 0.0154464 * x6)
1157
+ g[2] = 0.32 - (
1158
+ 0.214 + 0.00817 * x5 - 0.045195 * x1 - 0.0135168 * x1 + 0.03099 * x2 * x6 - 0.018 * x2 * x7 + 0.007176 * x3 + 0.023232 * x3 - 0.00364 * x5 * x6 - 0.018 * x2 * x2)
1159
+ g[3] = 0.32 - (0.74 - 0.61 * x2 - 0.031296 * x3 - 0.031872 * x7 + 0.227 * x2 * x2)
1160
+ g[4] = 32 - (28.98 + 3.818 * x3 - 4.2 * x1 * x2 + 1.27296 * x6 - 2.68065 * x7)
1161
+ g[5] = 32 - (33.86 + 2.95 * x3 - 5.057 * x1 * x2 - 3.795 * x2 - 3.4431 * x7 + 1.45728)
1162
+ g[6] = 32 - (46.36 - 9.9 * x2 - 4.4505 * x1)
1163
+ g[7] = 4 - f[1]
1164
+ g[8] = 9.9 - Vmbp
1165
+ g[9] = 15.7 - Vfd
1166
+ g = np.where(g < 0, -g, 0)
1167
+
1168
+ return f, g
1169
+
1170
+
1171
+ class CRE32():
1172
+ def __init__(self):
1173
+ self.problem_name = 'CRE32'
1174
+ self.n_objectives = 3
1175
+ self.n_variables = 6
1176
+ self.n_constraints = 9
1177
+
1178
+ self.lbound = np.zeros(self.n_variables)
1179
+ self.ubound = np.zeros(self.n_variables)
1180
+ self.lbound[0] = 150.0
1181
+ self.lbound[1] = 20.0
1182
+ self.lbound[2] = 13.0
1183
+ self.lbound[3] = 10.0
1184
+ self.lbound[4] = 14.0
1185
+ self.lbound[5] = 0.63
1186
+ self.ubound[0] = 274.32
1187
+ self.ubound[1] = 32.31
1188
+ self.ubound[2] = 25.0
1189
+ self.ubound[3] = 11.71
1190
+ self.ubound[4] = 18.0
1191
+ self.ubound[5] = 0.75
1192
+
1193
+ def evaluate(self, x):
1194
+ f = np.zeros(self.n_objectives)
1195
+ # NOT g
1196
+ constraintFuncs = np.zeros(self.n_constraints)
1197
+
1198
+ x_L = x[0]
1199
+ x_B = x[1]
1200
+ x_D = x[2]
1201
+ x_T = x[3]
1202
+ x_Vk = x[4]
1203
+ x_CB = x[5]
1204
+
1205
+ displacement = 1.025 * x_L * x_B * x_T * x_CB
1206
+ V = 0.5144 * x_Vk
1207
+ g = 9.8065
1208
+ Fn = V / np.power(g * x_L, 0.5)
1209
+ a = (4977.06 * x_CB * x_CB) - (8105.61 * x_CB) + 4456.51
1210
+ b = (-10847.2 * x_CB * x_CB) + (12817.0 * x_CB) - 6960.32
1211
+
1212
+ power = (np.power(displacement, 2.0 / 3.0) * np.power(x_Vk, 3.0)) / (a + (b * Fn))
1213
+ outfit_weight = 1.0 * np.power(x_L, 0.8) * np.power(x_B, 0.6) * np.power(x_D, 0.3) * np.power(x_CB, 0.1)
1214
+ steel_weight = 0.034 * np.power(x_L, 1.7) * np.power(x_B, 0.7) * np.power(x_D, 0.4) * np.power(x_CB, 0.5)
1215
+ machinery_weight = 0.17 * np.power(power, 0.9)
1216
+ light_ship_weight = steel_weight + outfit_weight + machinery_weight
1217
+
1218
+ ship_cost = 1.3 * ((2000.0 * np.power(steel_weight, 0.85)) + (3500.0 * outfit_weight) + (
1219
+ 2400.0 * np.power(power, 0.8)))
1220
+ capital_costs = 0.2 * ship_cost
1221
+
1222
+ DWT = displacement - light_ship_weight
1223
+
1224
+ running_costs = 40000.0 * np.power(DWT, 0.3)
1225
+
1226
+ round_trip_miles = 5000.0
1227
+ sea_days = (round_trip_miles / 24.0) * x_Vk
1228
+ handling_rate = 8000.0
1229
+
1230
+ daily_consumption = ((0.19 * power * 24.0) / 1000.0) + 0.2
1231
+ fuel_price = 100.0
1232
+ fuel_cost = 1.05 * daily_consumption * sea_days * fuel_price
1233
+ port_cost = 6.3 * np.power(DWT, 0.8)
1234
+
1235
+ fuel_carried = daily_consumption * (sea_days + 5.0)
1236
+ miscellaneous_DWT = 2.0 * np.power(DWT, 0.5)
1237
+
1238
+ cargo_DWT = DWT - fuel_carried - miscellaneous_DWT
1239
+ port_days = 2.0 * ((cargo_DWT / handling_rate) + 0.5)
1240
+ RTPA = 350.0 / (sea_days + port_days)
1241
+
1242
+ voyage_costs = (fuel_cost + port_cost) * RTPA
1243
+ annual_costs = capital_costs + running_costs + voyage_costs
1244
+ annual_cargo = cargo_DWT * RTPA
1245
+
1246
+ f[0] = annual_costs / annual_cargo
1247
+ f[1] = light_ship_weight
1248
+ # annual cargo is to be maximized, so f_2 is negated and treated as a minimization objective
1249
+ f[2] = -annual_cargo
1250
+
1251
+ # Reformulated objective functions
1252
+ constraintFuncs[0] = (x_L / x_B) - 6.0
1253
+ constraintFuncs[1] = -(x_L / x_D) + 15.0
1254
+ constraintFuncs[2] = -(x_L / x_T) + 19.0
1255
+ constraintFuncs[3] = 0.45 * np.power(DWT, 0.31) - x_T
1256
+ constraintFuncs[4] = 0.7 * x_D + 0.7 - x_T
1257
+ constraintFuncs[5] = 500000.0 - DWT
1258
+ constraintFuncs[6] = DWT - 3000.0
1259
+ constraintFuncs[7] = 0.32 - Fn
1260
+
1261
+ KB = 0.53 * x_T
1262
+ BMT = ((0.085 * x_CB - 0.002) * x_B * x_B) / (x_T * x_CB)
1263
+ KG = 1.0 + 0.52 * x_D
1264
+ constraintFuncs[8] = (KB + BMT - KG) - (0.07 * x_B)
1265
+ constraintFuncs = np.where(constraintFuncs < 0, -constraintFuncs, 0)
1266
+
1267
+ return f, constraintFuncs
1268
+
1269
+
1270
+ class CRE51():
1271
+ def __init__(self):
1272
+ self.problem_name = 'CRE51'
1273
+ self.n_objectives = 5
1274
+ self.n_variables = 3
1275
+ self.n_constraints = 7
1276
+
1277
+ self.lbound = np.zeros(self.n_variables)
1278
+ self.ubound = np.zeros(self.n_variables)
1279
+ self.lbound[0] = 0.01
1280
+ self.lbound[1] = 0.01
1281
+ self.lbound[2] = 0.01
1282
+ self.ubound[0] = 0.45
1283
+ self.ubound[1] = 0.10
1284
+ self.ubound[2] = 0.10
1285
+
1286
+ def evaluate(self, x):
1287
+ f = np.zeros(self.n_objectives)
1288
+ g = np.zeros(self.n_constraints)
1289
+
1290
+ # First original objective function
1291
+ f[0] = 106780.37 * (x[1] + x[2]) + 61704.67
1292
+ # Second original objective function
1293
+ f[1] = 3000 * x[0]
1294
+ # Third original objective function
1295
+ f[2] = 305700 * 2289 * x[1] / np.power(0.06 * 2289, 0.65)
1296
+ # Fourth original objective function
1297
+ f[3] = 250 * 2289 * np.exp(-39.75 * x[1] + 9.9 * x[2] + 2.74)
1298
+ # Fifth original objective function
1299
+ f[4] = 25 * (1.39 / (x[0] * x[1]) + 4940 * x[2] - 80)
1300
+
1301
+ # Constraint functions
1302
+ g[0] = 1 - (0.00139 / (x[0] * x[1]) + 4.94 * x[2] - 0.08)
1303
+ g[1] = 1 - (0.000306 / (x[0] * x[1]) + 1.082 * x[2] - 0.0986)
1304
+ g[2] = 50000 - (12.307 / (x[0] * x[1]) + 49408.24 * x[2] + 4051.02)
1305
+ g[3] = 16000 - (2.098 / (x[0] * x[1]) + 8046.33 * x[2] - 696.71)
1306
+ g[4] = 10000 - (2.138 / (x[0] * x[1]) + 7883.39 * x[2] - 705.04)
1307
+ g[5] = 2000 - (0.417 * x[0] * x[1] + 1721.26 * x[2] - 136.54)
1308
+ g[6] = 550 - (0.164 / (x[0] * x[1]) + 631.13 * x[2] - 54.48)
1309
+ g = np.where(g < 0, -g, 0)
1310
+
1311
+ return f, g
1312
+
1313
+
1314
+
1315
+
1316
+ if __name__ == '__main__':
1317
+ np.random.seed(seed=1)
1318
+ fun = RE21()
1319
+
1320
+ x = fun.lbound + (fun.ubound - fun.lbound) * np.random.rand(fun.n_variables)
1321
+ print("Problem = {}".format(fun.problem_name))
1322
+ print("Number of objectives = {}".format(fun.n_objectives))
1323
+ print("Number of variables = {}".format(fun.n_variables))
1324
+ print("Number of constraints = {}".format(fun.n_constraints))
1325
+ print("Lower bounds = ", fun.lbound)
1326
+ print("Upper bounds = ", fun.ubound)
1327
+ print("x = ", x)
1328
+
1329
+ if 'CRE' in fun.problem_name:
1330
+ f, g = fun.evaluate(x)
1331
+ print("f(x) = {}".format(f))
1332
+ print("g(x) = {}".format(g))
1333
+ else:
1334
+ f = fun.evaluate(x)
1335
+ print("f(x) = {}".format(f))
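Note on the pattern used throughout this module: constraint values g are clipped to their violation magnitude with np.where(g < 0, -g, 0), so 0 means the constraint is satisfied; the RE classes then fold these magnitudes into their last objective, while the CRE classes return them alongside f. A minimal usage sketch (not part of the file; the helper name sample_and_split is illustrative) for evaluating a batch of random designs of a CRE problem and splitting it by feasibility:

    import numpy as np

    def sample_and_split(problem, n_samples=128, seed=0):
        # sample uniformly inside the box bounds, evaluate, and split by feasibility
        rng = np.random.default_rng(seed)
        X = problem.lbound + (problem.ubound - problem.lbound) * rng.random((n_samples, problem.n_variables))
        F, G = zip(*(problem.evaluate(x) for x in X))
        F, G = np.asarray(F), np.asarray(G)
        feasible = np.all(G == 0, axis=1)   # violations are reported as non-negative magnitudes
        return F[feasible], F[~feasible]

    # feasible_F, infeasible_F = sample_and_split(CRE21())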
bike_bench_internal/benchmark_models/libmoon/problem/synthetic/vlmop.py ADDED
@@ -0,0 +1,67 @@
1
+ import matplotlib.pyplot as plt
2
+ import torch
3
+ import numpy as np
4
+
5
+ from ..mop import mop
6
+
7
+
8
+
9
+ class VLMOP1(mop):
10
+ def __init__( self, n_var=10, n_obj=2, lbound=-np.ones(10), ubound=np.ones(10) ):
11
+ super().__init__(n_var=n_var,
12
+ n_obj=n_obj,
13
+ lbound=lbound,
14
+ ubound=ubound, )
15
+ self.problem_name = 'VLMOP1'
16
+
17
+ def _evaluate_torch(self, x):
18
+ f1 = 1 - torch.exp(-1 * torch.sum((x - 1 / np.sqrt(self.n_var))**2, dim=1))
19
+ f2 = 1 - torch.exp(-1 * torch.sum((x + 1 / np.sqrt(self.n_var))**2, dim=1))
20
+ return torch.stack((f1, f2), dim=1)
21
+
22
+ def _evaluate_numpy(self, x):
23
+ f1 = 1 - np.exp(-1 * np.sum((x - 1 / np.sqrt(self.n_var)) ** 2, axis=1 ) )
24
+ f2 = 1 - np.exp(-1 * np.sum((x + 1 / np.sqrt(self.n_var)) ** 2, axis=1 ) )
25
+ return np.stack((f1, f2), axis=1)
26
+
27
+
28
+ def get_pf(self):
29
+ x = torch.linspace(-1 / np.sqrt(self.n_var), 1 / np.sqrt(self.n_var), 100)
30
+ x = torch.tile(x.unsqueeze(1), (1, self.n_var))
31
+ with torch.no_grad():
32
+ return self._evaluate_torch(x).numpy()
33
+
34
+
35
+
36
+
37
+ class VLMOP2(mop):
38
+ def __init__(self, n_var=10, n_obj=2, lbound=-np.ones(10), ubound=np.ones(10)):
39
+ super().__init__(n_var=n_var,
40
+ n_obj=n_obj,
41
+ lbound=lbound,
42
+ ubound=ubound, )
43
+ self.problem_name = 'VLMOP2'
44
+
45
+ def _evaluate_torch(self, x):
46
+ f1 = torch.norm(x - 1 / np.sqrt(self.n_var), dim=1)**2 / 4
47
+ f2 = torch.norm(x + 1 / np.sqrt(self.n_var), dim=1)**2 / 4
48
+ return torch.stack((f1, f2), dim=1)
49
+
50
+ def _evaluate_numpy(self, x):
51
+ f1 = np.linalg.norm(x - 1 / np.sqrt(self.n_var), axis=1)**2 / 4
52
+ f2 = np.linalg.norm(x + 1 / np.sqrt(self.n_var), axis=1)**2 / 4
53
+ return np.stack((f1, f2), axis=1)
54
+
55
+ def get_pf(self):
56
+ x = torch.linspace(-1 / np.sqrt(self.n_var), 1 / np.sqrt(self.n_var), 100)
57
+ x = torch.tile(x.unsqueeze(1), (1, self.n_var))
58
+ with torch.no_grad():
59
+ return self._evaluate_torch(x).numpy()
60
+
61
+
62
+ if __name__ == '__main__':
63
+ problem = VLMOP2()
64
+ pf = problem.get_pf()
65
+ plt.plot(pf[:, 0], pf[:, 1], '-')
66
+ plt.show()
67
+ print()
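A quick consistency sketch (not part of the file) checking that the torch and numpy paths of VLMOP2 agree on the same batch; it assumes, as the other problems in this package do, that the mop base class exposes n_var:

    import numpy as np
    import torch

    problem = VLMOP2()
    x = np.random.rand(8, problem.n_var) * 2 - 1                  # points in [-1, 1]^n
    f_np = problem._evaluate_numpy(x)
    f_torch = problem._evaluate_torch(torch.tensor(x)).numpy()    # same points through the torch path
    assert np.allclose(f_np, f_torch, atol=1e-6)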
bike_bench_internal/benchmark_models/libmoon/problem/synthetic/wfg.py ADDED
@@ -0,0 +1,3 @@
1
+
2
+
3
+
bike_bench_internal/benchmark_models/libmoon/problem/synthetic/zdt.py ADDED
@@ -0,0 +1,175 @@
1
+ """
2
+ ZDT test suite for multi-objective problem
3
+
4
+ Reference
5
+ ----------
6
+ Zitzler, E., Deb, K., & Thiele, L. (2000). Comparison of multiobjective
7
+ evolutionary algorithms: Empirical results. Evolutionary computation,
8
+ 8(2), 173-195. DOI: 10.1162/106365600568202
9
+ """
10
+ import numpy as np
11
+ import torch
12
+ from matplotlib import pyplot as plt
13
+ from ..mop import mop
14
+
15
+ class ZDT1(mop):
16
+
17
+ def __init__(self, n_var=30, n_obj=2, lbound=np.zeros(30),
18
+ ubound=np.ones(30)):
19
+ super().__init__(n_var=n_var,
20
+ n_obj=n_obj,
21
+ lbound=lbound,
22
+ ubound=ubound, )
23
+ self.problem_name = 'ZDT1'
24
+
25
+
26
+ def _evaluate_torch(self, x: torch.Tensor):
27
+ f1 = x[:, 0]
28
+ n = self.n_var  # number of decision variables, not the batch size
29
+ g = 1 + 9/(n-1) * torch.sum(x[:, 1:], dim=1)
30
+ h = 1 - torch.sqrt(f1 / g)
31
+ f2 = g * h
32
+ return torch.stack((f1, f2), dim=1)
33
+
34
+ def _evaluate_numpy(self, x: np.ndarray):
35
+ n = self.n_var  # number of decision variables, not the batch size
36
+ f1 = x[:, 0]
37
+ g = 1 + 9 / (n-1) * np.sum(x[:, 1:], axis=1)
38
+ f2 = g * (1 - np.sqrt(f1 / g))
39
+ return np.stack((f1, f2), axis=1)
40
+
41
+ def _get_pf(self, n_points: int = 100):
42
+ f1 = np.linspace(0, 1, n_points)
43
+ f2 = 1 - np.sqrt(f1)
44
+ return np.stack((f1, f2), axis=1)
45
+
46
+
47
+ class ZDT2(mop):
48
+
49
+ def __init__(self, n_var=30, n_obj=2, lbound=np.zeros(30), ubound=np.ones(30)):
50
+ super().__init__(n_var=n_var,
51
+ n_obj=n_obj,
52
+ lbound=lbound,
53
+ ubound=ubound)
54
+ self.problem_name = 'ZDT2'
55
+
56
+ def _evaluate_torch(self, x: torch.Tensor):
57
+ f1 = x[:, 0]
58
+ g = 1 + 9 * torch.mean(x[:, 1:], dim=1)
59
+ f2 = g * (1 - (f1 / g) ** 2)
60
+ return torch.stack((f1, f2), dim=1)
61
+
62
+ def _evaluate_numpy(self, x: np.ndarray):
63
+ f1 = x[:, 0]
64
+ g = 1 + 9 * np.mean(x[:, 1:], axis=1)
65
+ f2 = g * (1 - (f1 / g) ** 2)
66
+ return np.stack((f1, f2), axis=1)
67
+
68
+ def _get_pf(self, n_points: int = 100):
69
+ f1 = np.linspace(0, 1, n_points)
70
+ f2 = 1 - f1 ** 2
71
+ return np.stack((f1, f2), axis=1)
72
+
73
+
74
+ class ZDT3(mop):
75
+
76
+ def __init__(self, n_var=30, n_obj=2, lbound=np.zeros(30), ubound=np.ones(30)):
77
+ super().__init__(n_var=n_var,
78
+ n_obj=n_obj,
79
+ lbound=lbound,
80
+ ubound=ubound, )
81
+ self.problem_name = 'ZDT3'
82
+
83
+
84
+ def _evaluate_torch(self, x: torch.Tensor):
85
+ f1 = x[:, 0]
86
+ g = 1 + 9 * torch.mean(x[:, 1:], dim=1)
87
+ f2 = g * (1 - torch.sqrt(f1 / g) - f1 / g * torch.sin(10 * np.pi * f1))
88
+ return torch.stack((f1, f2), dim=1)
89
+
90
+ def _evaluate_numpy(self, x: np.ndarray):
91
+ f1 = x[:, 0]
92
+ g = 1 + 9 * np.mean(x[:, 1:], axis=1)
93
+ f2 = g * (1 - np.sqrt(f1 / g) - f1 / g * np.sin(10 * np.pi * f1))
94
+ return np.stack((f1, f2), axis=1)
95
+
96
+ def _get_pf(self, n_points: int = 100):
97
+ f1 = np.hstack([np.linspace(0, 0.0830, int(n_points / 5)),
98
+ np.linspace(0.1822, 0.2578, int(n_points / 5)),
99
+ np.linspace(0.4093, 0.4539, int(n_points / 5)),
100
+ np.linspace(0.6183, 0.6525, int(n_points / 5)),
101
+ np.linspace(0.8233, 0.8518, n_points - 4 * int(n_points / 5))])
102
+ f2 = 1 - np.sqrt(f1) - f1 * np.sin(10 * np.pi * f1)
103
+ return np.stack((f1, f2), axis=1)
104
+
105
+
106
+ class ZDT4(mop):
107
+
108
+ def __init__(self, n_var=10, n_obj=2, lbound=-5*np.ones(10), ubound=5*np.ones(10)):
109
+ lbound[0] = 0
110
+ ubound[0] = 1
111
+
112
+ super().__init__(n_var=n_var,
113
+ n_obj=n_obj,
114
+ lbound=lbound,
115
+ ubound=ubound, )
116
+ self.problem_name = 'ZDT4'
117
+
118
+
119
+ def _evaluate_torch(self, x: torch.Tensor):
120
+ f1 = x[:, 0]
121
+ g = 1 + 10 * (self.n_var - 1) + torch.sum(x[:, 1:] ** 2 - 10 * torch.cos(4 * np.pi * x[:, 1:]), dim=1)
122
+ f2 = g * (1 - torch.sqrt(f1 / g))
123
+ return torch.stack((f1, f2), dim=1)
124
+
125
+ def _evaluate_numpy(self, x: np.ndarray):
126
+ f1 = x[:, 0]
127
+ g = 1 + 10 * (self.n_var - 1) + np.sum(x[:, 1:] ** 2 - 10 * np.cos(4 * np.pi * x[:, 1:]), axis=1)
128
+ f2 = g * (1 - np.sqrt(f1 / g))
129
+ return np.stack((f1, f2), axis=1)
130
+
131
+ def _get_pf(self, n_points: int = 100):
132
+ f1 = np.linspace(0, 1, n_points)
133
+ f2 = 1 - np.sqrt(f1)
134
+ return np.stack((f1, f2), axis=1)
135
+
136
+
137
+ class ZDT6(mop):
138
+
139
+ def __init__(self, n_var=30, n_obj=2, lbound=np.zeros(30), ubound=np.ones(30) ) -> None:
140
+ super().__init__(n_var=n_var,
141
+ n_obj=n_obj,
142
+ lbound=lbound,
143
+ ubound=ubound, )
144
+ self.problem_name = 'ZDT6'
145
+
146
+
147
+ def _evaluate_torch(self, x: torch.Tensor):
148
+ f1 = 1 - torch.exp(-4 * x[:, 0]) * (torch.sin(6 * np.pi * x[:, 0])) ** 6
149
+ g = 1 + 9 * (torch.sum(x[:, 1:], dim=1) / (self.n_var - 1)) ** 0.25
150
+ f2 = g * (1 - (f1 / g) ** 2)
151
+ return torch.stack((f1, f2), dim=1)
152
+
153
+ def _evaluate_numpy(self, x: np.ndarray):
154
+ f1 = 1 - np.exp(-4 * x[:, 0]) * (np.sin(6 * np.pi * x[:, 0])) ** 6
155
+ g = 1 + 9 * (np.sum(x[:, 1:], axis=1) / (self.n_var - 1)) ** 0.25
156
+ f2 = g * (1 - (f1 / g) ** 2)
157
+ return np.stack((f1, f2), axis=1)
158
+
159
+ def _get_pf(self, n_points: int = 100):
160
+ f1 = np.linspace(0, 1, n_points)
161
+ f2 = 1 - f1 ** 2
162
+ return np.stack((f1, f2), axis=1)
163
+
164
+
165
+
166
+ if __name__ == '__main__':
167
+ problem = ZDT3()
168
+
169
+ res = problem.evaluate(torch.rand(10, problem.get_number_variable))
170
+ pf = problem.get_pf()
171
+ x = np.random.rand(10, problem.get_number_variable)
172
+ y = problem.evaluate(x)
173
+
174
+ plt.scatter(pf[:, 0], pf[:, 1], c='none', edgecolors='r')
175
+ plt.show()
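For reference, on the ZDT1 Pareto set the tail variables x_2..x_n are zero, so g(x) = 1 and the front is f2 = 1 - sqrt(f1), which is exactly what _get_pf returns. A small sanity sketch (not part of the file):

    import numpy as np

    problem = ZDT1()
    x = np.zeros((50, problem.n_var))
    x[:, 0] = np.linspace(0.0, 1.0, 50)                   # tail variables at 0  =>  g(x) = 1
    f = problem._evaluate_numpy(x)
    assert np.allclose(f[:, 1], 1.0 - np.sqrt(f[:, 0]))   # matches the analytic front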
bike_bench_internal/benchmark_models/libmoon/solver/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # from gradient.epo_solver import EPOSolver
bike_bench_internal/benchmark_models/libmoon/solver/gradient/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ from .base_solver import GradAggSolver
2
+ from .mgda_solver import MGDASolver
3
+ from .epo_solver import EPOSolver
4
+ from .moosvgd import MOOSVGDSolver
5
+ from .gradhv import GradHVSolver
6
+ from .pmtl import PMTLSolver
7
+
8
+
9
+
10
+ from .core_solver import CoreAgg, CoreMGDA, CoreEPO, CoreMOOSVGD, CoreHVGrad
11
+
12
+ def get_core_solver(args, pref=None):
13
+ if args.mtd == 'agg':
14
+ return CoreAgg(pref=pref, agg_mtd=args.agg_mtd)
15
+ elif args.mtd == 'mgda':
16
+ return CoreMGDA()
17
+ elif args.mtd == 'epo':
18
+ return CoreEPO(pref=pref)
19
+ elif args.mtd == 'moosvgd':
20
+ return CoreMOOSVGD(args=args)
21
+ elif args.mtd == 'hvgrad':
22
+ return CoreHVGrad(args=args)
23
+ else:
24
+ assert False, 'not implemented'
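A minimal sketch (not part of the file) of driving the dispatcher above; the Namespace fields simply mirror the attributes the selected core solver reads, and 'ls' linear scalarization is assumed for the aggregation method:

    import numpy as np
    from argparse import Namespace

    args = Namespace(mtd='agg', agg_mtd='ls')
    core = get_core_solver(args, pref=np.array([0.5, 0.5]))
    alpha = core.get_alpha(G=None, losses=None)   # for 'ls' the weights are just the preference vector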
bike_bench_internal/benchmark_models/libmoon/solver/gradient/base_solver.py ADDED
@@ -0,0 +1,82 @@
1
+ from numpy import array
2
+ from torch.autograd import Variable
3
+ from torch.optim import SGD
4
+ from torch import Tensor
5
+ from ...util_global.constant import scalar_dict, solution_eps, get_hv_ref_dict
6
+ import torch
7
+ from tqdm import tqdm
8
+ from pymoo.indicators.hv import HV
9
+
10
+
11
+ class GradBaseSolver:
12
+ def __init__(self, step_size, max_iter, tol):
13
+ self.step_size = step_size
14
+ self.max_iter = max_iter
15
+ self.tol = tol
16
+
17
+ def solve(self, problem, x, prefs, args):
18
+ '''
19
+ :param problem:
20
+ :param x:
21
+ :param prefs: preference vectors, one row per subproblem
22
+ :param args: options namespace; args.agg selects the aggregation function
23
+ :return: a dict with keys: x, y
24
+ '''
25
+
26
+ # The abstract class cannot be implemented directly.
27
+ raise NotImplementedError
28
+
29
+
30
+
31
+ class GradAggSolver(GradBaseSolver):
32
+ def __init__(self, step_size, max_iter, tol, device='cpu'):
33
+ self.device = device
34
+ super().__init__(step_size, max_iter, tol)
35
+
36
+ def solve(self, problem, x, prefs, args, ref_point):
37
+ x = torch.tensor(x, dtype=torch.float32, requires_grad=True, device=self.device)
38
+
39
+ # ref_point = array([2.0, 2.0])
40
+ # ind = HV(ref_point = get_hv_ref_dict(args.problem_name))
41
+ # ind = HV(ref_point = array([1.0, 1.0]))
42
+
43
+ # ind = HV(ref_point = ref_point)
44
+
45
+
46
+
47
+
48
+ # hv_arr = []
49
+ y_arr = []
50
+
51
+ if not isinstance(prefs, torch.Tensor):
52
+ prefs = torch.tensor(prefs, dtype=torch.float32, device=self.device)
53
+ else:
54
+ prefs = prefs.to(dtype=torch.float32, device=self.device)
55
+
56
+ # prefs = Tensor(prefs)
57
+ optimizer = SGD([x], lr=self.step_size)
58
+ agg_func = scalar_dict[args.agg]
59
+ res = {}
60
+ for i in tqdm(range(self.max_iter)):
61
+ y = problem.evaluate(x)
62
+
63
+ # hv_arr.append(ind.do(y.detach().cpu().numpy()))
64
+
65
+ agg_val = agg_func(y, prefs)
66
+ optimizer.zero_grad()
67
+ torch.sum(agg_val).backward()
68
+ optimizer.step()
69
+
70
+ y_arr.append(y.detach().cpu().numpy())
71
+
72
+ if 'lbound' in dir(problem):
73
+ x.data = torch.clamp(x.data,
74
+ torch.tensor(problem.lbound, device=x.device, dtype=torch.float32) + solution_eps,
75
+ torch.tensor(problem.ubound, device=x.device, dtype=torch.float32) - solution_eps)
76
+
77
+
78
+ res['x'] = x.detach().cpu().numpy()
79
+ res['y'] = y.detach().cpu().numpy()
80
+ # res['hv_arr'] = hv_arr
81
+ res['y_arr'] = y_arr
82
+ return res
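A sketch (not part of the file) of how GradAggSolver might be driven, assuming a problem whose evaluate is torch-differentiable and assuming 'ls' is a valid key of scalar_dict; the shapes follow the solver's own usage, with one row of x and prefs per subproblem:

    import numpy as np
    from argparse import Namespace

    problem = ZDT1()                                   # any problem with a differentiable evaluate
    prefs = np.random.dirichlet(np.ones(2), size=5)    # 5 preference vectors on the simplex
    x0 = np.random.rand(5, problem.n_var)
    solver = GradAggSolver(step_size=1e-2, max_iter=200, tol=1e-6)
    res = solver.solve(problem, x0, prefs, Namespace(agg='ls'), ref_point=np.array([1.1, 1.1]))
    print(res['y'].shape)                              # (5, 2): final objective values per preference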
bike_bench_internal/benchmark_models/libmoon/solver/gradient/core_solver.py ADDED
@@ -0,0 +1,82 @@
1
+ import numpy as np
2
+ from .mgda_core import solve_mgda
3
+ from .epo_solver import EPO_LP
4
+ import torch
5
+ from .gradhv import HvMaximization
6
+ from ...util_global.constant import get_hv_ref_dict
7
+
8
+
9
+
10
+ class CoreHVGrad:
11
+ def __init__(self, args):
12
+ self.args = args
13
+ self.hv_solver = HvMaximization(args.n_prob, args.n_obj, get_hv_ref_dict(args.problem))
14
+
15
+ def get_alpha(self, losses):
16
+ alpha = self.hv_solver.compute_weights(losses).T
17
+ return alpha
18
+
19
+
20
+
21
+ class CoreMOOSVGD:
22
+ def __init__(self, args=None):
23
+ self.args = args  # get_core_solver constructs this as CoreMOOSVGD(args=args)
24
+
25
+ def get_alpha(self):
26
+ return 0
27
+
28
+
29
+ class CoreMGDA:
30
+ def __init__(self):
31
+ pass
32
+
33
+ def get_alpha(self, G, losses=None, pref=None):
34
+ _, alpha = solve_mgda(G, return_coeff=True)
35
+ return alpha
36
+
37
+
38
+ class CoreGrad:
39
+ def __init__(self):
40
+ pass
41
+
42
+
43
+
44
+ class CoreEPO(CoreGrad):
45
+ def __init__(self, pref):
46
+ self.pref = pref
47
+ self.epo_lp = EPO_LP(m=len(pref), n=1, r=1/np.array(pref))
48
+
49
+
50
+ def get_alpha(self, G, losses):
51
+ if type(G) == torch.Tensor:
52
+ G = G.detach().cpu().numpy().copy()
53
+ GG = G @ G.T
54
+
55
+ alpha = self.epo_lp.get_alpha(losses, G=GG, C=True)
56
+ return alpha
57
+
58
+
59
+
60
+
61
+ class CoreAgg(CoreGrad):
62
+ def __init__(self, pref, agg_mtd='ls'):
63
+ self.agg_mtd = agg_mtd
64
+ self.pref = pref
65
+
66
+ def get_alpha(self, G, losses):
67
+ if self.agg_mtd == 'ls':
68
+ alpha = self.pref
69
+ elif self.agg_mtd == 'mtche':
70
+ idx = np.argmax(losses)
71
+ alpha = np.zeros_like(self.pref )
72
+ alpha[idx] = 1.0
73
+ else:
74
+ assert False
75
+ return alpha
76
+
77
+
78
+
79
+ if __name__ == '__main__':
80
+ agg = CoreAgg( pref=np.array([1, 0]) )
81
+
82
+
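The __main__ above only constructs a CoreAgg; a small worked example (not part of the file) of the 'mtche' branch, which puts all of the weight on the currently worst objective:

    import numpy as np

    core = CoreAgg(pref=np.array([0.5, 0.5]), agg_mtd='mtche')
    alpha = core.get_alpha(G=None, losses=np.array([0.2, 0.7]))
    print(alpha)   # [0. 1.]  -- the argmax of the losses gets weight 1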
bike_bench_internal/benchmark_models/libmoon/solver/gradient/epo_solver.py ADDED
@@ -0,0 +1,198 @@
1
+ import numpy as np
2
+ import cvxpy as cp
3
+ import cvxopt
4
+
5
+ from .base_solver import GradBaseSolver
6
+ from torch.autograd import Variable
7
+ from tqdm import tqdm
8
+ import torch
9
+ from torch.optim import SGD
10
+ from numpy import array
11
+ from pymoo.indicators.hv import HV
12
+ import warnings
13
+ warnings.filterwarnings("ignore")
14
+
15
+ from ...util_global.constant import solution_eps
16
+
17
+
18
+
19
+ class EPO_LP(object):
20
+ def __init__(self, m, n, r, eps=1e-4):
21
+ cvxopt.glpk.options["msg_lev"] = "GLP_MSG_OFF"
22
+ self.m = m
23
+ self.n = n
24
+ self.r = r
25
+ self.eps = eps
26
+ self.last_move = None
27
+ self.a = cp.Parameter(m) # Adjustments
28
+ self.C = cp.Parameter((m, m)) # C: Gradient inner products, G^T G
29
+ self.Ca = cp.Parameter(m) # d_bal^TG
30
+ self.rhs = cp.Parameter(m) # RHS of constraints for balancing
31
+
32
+ self.alpha = cp.Variable(m) # Variable to optimize
33
+
34
+ obj_bal = cp.Maximize(self.alpha @ self.Ca) # objective for balance
35
+ constraints_bal = [self.alpha >= 0, cp.sum(self.alpha) == 1, # Simplex
36
+ self.C @ self.alpha >= self.rhs]
37
+ self.prob_bal = cp.Problem(obj_bal, constraints_bal) # LP balance
38
+
39
+ obj_dom = cp.Maximize(cp.sum(self.alpha @ self.C)) # obj for descent
40
+ constraints_res = [self.alpha >= 0, cp.sum(self.alpha) == 1, # Restrict
41
+ self.alpha @ self.Ca >= -cp.neg(cp.max(self.Ca)),
42
+ self.C @ self.alpha >= 0]
43
+ constraints_rel = [self.alpha >= 0, cp.sum(self.alpha) == 1, # Relaxed
44
+ self.C @ self.alpha >= 0]
45
+ self.prob_dom = cp.Problem(obj_dom, constraints_res) # LP dominance
46
+ self.prob_rel = cp.Problem(obj_dom, constraints_rel) # LP dominance
47
+
48
+ self.gamma = 0 # Stores the latest Optimum value of the LP problem
49
+ self.mu_rl = 0 # Stores the latest non-uniformity
50
+
51
+
52
+ def get_alpha(self, l, G, r=None, C=False, relax=False):
53
+
54
+ r = self.r if r is None else r
55
+ assert len(l) == len(G) == len(r) == self.m, "length != m"
56
+
57
+ rl, self.mu_rl, self.a.value = adjustments(l, r)
58
+
59
+ self.C.value = G if C else G @ G.T
60
+ self.Ca.value = self.C.value @ self.a.value
61
+
62
+
63
+ if self.mu_rl > self.eps:
64
+ J = self.Ca.value > 0
65
+ if len(np.where(J)[0]) > 0:
66
+ J_star_idx = np.where(rl == np.max(rl))[0]
67
+ self.rhs.value = self.Ca.value.copy()
68
+ self.rhs.value[J] = -np.inf # Not efficient; but works.
69
+ self.rhs.value[J_star_idx] = 0
70
+ else:
71
+ self.rhs.value = np.zeros_like(self.Ca.value)
72
+ self.gamma = self.prob_bal.solve(solver=cp.GLPK, verbose=False)
73
+ # self.gamma = self.prob_bal.solve(verbose=False)
74
+ self.last_move = "bal"
75
+ else:
76
+ if relax:
77
+ self.gamma = self.prob_rel.solve(solver=cp.GLPK, verbose=False)
78
+ else:
79
+ self.gamma = self.prob_dom.solve(solver=cp.GLPK, verbose=False)
80
+ # self.gamma = self.prob_dom.solve(verbose=False)
81
+ self.last_move = "dom"
82
+
83
+ return self.alpha.value
84
+
85
+
86
+ def mu(rl, normed=False):
87
+ if len(np.where(rl < 0)[0]):
88
+ raise ValueError(f"rl<0 \n rl={rl}")
89
+ return None
90
+ m = len(rl)
91
+ l_hat = rl if normed else rl / rl.sum()
92
+ eps = np.finfo(rl.dtype).eps
93
+ l_hat = l_hat[l_hat > eps]
94
+ return np.sum(l_hat * np.log(l_hat * m))
95
+
96
+
97
+ def adjustments(l, r=1):
98
+ m = len(l)
99
+ rl = r * l
100
+ l_hat = rl / rl.sum()
101
+ mu_rl = mu(l_hat, normed=True)
102
+
103
+ eps = 1e-3 # clipping by eps is to avoid log(0), zxy Dec. 5.
104
+ a = r * ( np.log( np.clip(l_hat * m, eps, np.inf) ) - mu_rl)
105
+ return rl, mu_rl, a
106
+
107
+
108
+
109
+ def solve_epo(grad_arr, losses, pref, epo_lp):
110
+
111
+ '''
112
+ input: grad_arr: (m,n).
113
+ losses : (m,).
114
+ pref: (m,) inv.
115
+
116
+ return : gw: (n,).
117
+ '''
118
+ if type(pref) == torch.Tensor:
119
+ pref = pref.numpy()
120
+
121
+ pref = np.array(pref)
122
+ G = grad_arr.detach().clone().numpy()
123
+
124
+ if type(losses) == torch.Tensor:
125
+ losses_np = losses.detach().clone().numpy().squeeze()
126
+ else:
127
+ losses_np = losses
128
+
129
+ m = G.shape[0]
130
+ n = G.shape[1]
131
+ GG = G @ G.T
132
+
133
+ # epo_lp = EPO_LP(m=m, n=n, r=np.array(pref))
134
+
135
+ alpha = epo_lp.get_alpha(losses_np, G=GG, C=True)
136
+ if alpha is None: # A patch for the issue in cvxpy
137
+ alpha = pref / pref.sum()
138
+ gw = alpha @ G
139
+
140
+ # return torch.Tensor(gw).unsqueeze(0)
141
+ return torch.Tensor(gw), alpha
142
+
143
+
144
+
145
+
146
+
147
+
148
+
149
+ class EPOSolver(GradBaseSolver):
150
+ def __init__(self, step_size, max_iter, tol):
151
+ super().__init__(step_size, max_iter, tol)
152
+
153
+
154
+ def solve(self, problem, x, prefs, args, ref_point):
155
+ x = Variable(x, requires_grad=True)
156
+
157
+ epo_arr = [ EPO_LP(m=args.n_obj, n=args.n_var, r=np.array( 1/pref )) for pref in prefs ]
158
+ optimizer = SGD([x], lr=self.step_size)
159
+
160
+ ind = HV(ref_point=ref_point)
161
+ hv_arr = []
162
+ y_arr = []
163
+
164
+
165
+ for i in tqdm( range(self.max_iter) ):
166
+
167
+ # optimizer.zero_grad()
168
+ y = problem.evaluate(x)
169
+ y_arr.append(y.detach().numpy() )
170
+
171
+ alpha_arr = [0] * args.n_prob
172
+ for prob_idx in range( args.n_prob ):
173
+ grad_arr = [0] * args.n_obj
174
+ for obj_idx in range(args.n_obj):
175
+ y[prob_idx][obj_idx].backward(retain_graph=True)
176
+ grad_arr[obj_idx] = x.grad[prob_idx].clone()
177
+ x.grad.zero_()
178
+
179
+ grad_arr = torch.stack(grad_arr)
180
+ _, alpha = solve_epo(grad_arr, losses=y[prob_idx], pref=prefs[prob_idx], epo_lp=epo_arr[prob_idx])
181
+ alpha_arr[prob_idx] = alpha
182
+
183
+ optimizer.zero_grad()
184
+ alpha_arr = torch.Tensor( np.array(alpha_arr) )
185
+ torch.sum(alpha_arr * y).backward()
186
+ optimizer.step()
187
+
188
+ if 'lbound' in dir(problem):
189
+ x.data = torch.clamp(x.data, torch.Tensor(problem.lbound) + solution_eps, torch.Tensor(problem.ubound)-solution_eps )
190
+
191
+
192
+ res = {}
193
+ res['x'] = x.detach().numpy()
194
+ res['y'] = y.detach().numpy()
195
+ res['hv_arr'] = [0]
196
+ res['y_arr'] = y_arr
197
+
198
+ return res
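# Illustrative sketch of a single EPO step on made-up gradients; it assumes the
# definitions above are in scope and that cvxpy can reach a GLPK backend.
pref = np.array([0.5, 0.5])
epo_lp = EPO_LP(m=2, n=3, r=1.0 / pref)
grad_arr = torch.tensor([[0.1, -0.2, 0.3],
                         [0.4, 0.1, -0.1]])   # (m=2 objectives, n=3 variables)
losses = torch.tensor([0.6, 0.3])
gw, alpha = solve_epo(grad_arr, losses=losses, pref=pref, epo_lp=epo_lp)
# gw is the combined update direction of shape (n,); alpha holds the objective weights.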
bike_bench_internal/benchmark_models/libmoon/solver/gradient/functions_evaluation.py ADDED
@@ -0,0 +1,94 @@
1
+ """
2
+ The function fastNonDominatedSort is based on the sorting algorithm described by
3
+ Deb, Kalyanmoy, et al.
4
+ "A fast and elitist multiobjective genetic algorithm: NSGA-II."
5
+ IEEE transactions on evolutionary computation 6.2 (2002): 182-197.
6
+ """
7
+
8
+
9
+ import numpy as np
10
+ import pdb
11
+ from .functions_hv_python3 import HyperVolume
12
+
13
+
14
+ def determine_non_dom_mo_sol(mo_obj_val):
15
+ # get set of non-dominated solutions, returns indices of non-dominated and booleans of dominated mo_sol
16
+ n_mo_sol = mo_obj_val.shape[1]
17
+ domination_rank = fastNonDominatedSort(mo_obj_val)
18
+ non_dom_indices = np.where(domination_rank == 0)
19
+ non_dom_indices = non_dom_indices[0] # np.where returns a tuple, so we need to get the array inside the tuple
20
+ # non_dom_mo_sol = mo_sol[:,non_dom_indices]
21
+ # non_dom_mo_obj_val = mo_obj_val[:,non_dom_indices]
22
+ mo_sol_is_non_dominated = np.zeros(n_mo_sol,dtype = bool)
23
+ mo_sol_is_non_dominated[non_dom_indices] = True
24
+ mo_sol_is_dominated = np.bitwise_not(mo_sol_is_non_dominated)
25
+ return(non_dom_indices,mo_sol_is_dominated)
26
+
27
+ def fastNonDominatedSort(objVal):
28
+ # Based on Deb et al. (2002) NSGA-II
29
+ N_OBJECTIVES = objVal.shape[0]
30
+ N_SOLUTIONS = objVal.shape[1]
31
+
32
+ rankIndArray = - 999 * np.ones(N_SOLUTIONS, dtype = int) # -999 indicates unassigned rank
33
+ solIndices = np.arange(0,N_SOLUTIONS) # array of 0 1 2 ... N_SOLUTIONS
34
+ ## compute the entire domination matrix
35
+ # dominationMatrix: (i,j) is True if solution i dominates solution j
36
+ dominationMatrix = np.zeros((N_SOLUTIONS,N_SOLUTIONS), dtype = bool)
37
+ for p in solIndices:
38
+ objValA = objVal[:,p][:,None] # add [:,None] to preserve dimensions
39
+ # objValArray = np.delete(objVal, obj = p axis = 1) # dont delete solution p because it messes up indices
40
+ dominates = checkDomination(objValA,objVal)
41
+ dominationMatrix[p,:] = dominates
42
+
43
+ # count the number of times a solution is dominated
44
+ dominationCounter = np.sum(dominationMatrix, axis = 0)
45
+
46
+ ## find rank 0 solutions to initialize loop
47
+ isRankZero = (dominationCounter == 0) # column and row binary indices of solutions that are rank 0
48
+
49
+ rankZeroRowInd = solIndices[isRankZero]
50
+ # mark rank 0's solutions by -99 so that they are not considered as members of next rank
51
+ dominationCounter[rankZeroRowInd] = -99
52
+ # initialize rank counter at 0
53
+ rankCounter = 0
54
+ # assign solutions in rank 0 rankIndArray = 0
55
+ rankIndArray[isRankZero] = rankCounter
56
+
57
+ isInCurRank = isRankZero
58
+ # while the current rank is not empty
59
+ while not (np.sum(isInCurRank) == 0):
60
+ curRankRowInd = solIndices[isInCurRank] # column and row numbers of solutions that are in current rank
61
+ # for each solution in current rank
62
+ for p in curRankRowInd:
63
+ # decrease domination counter of each solution dominated by solution p which is in the current rank
64
+ dominationCounter[dominationMatrix[p,:]] -= 1 #dominationMatrix[p,:] contains indices of the solutions dominated by p
65
+ # all solutions that now have dominationCounter == 0, are in the next rank
66
+ isInNextRank = (dominationCounter == 0)
67
+ rankIndArray[isInNextRank] = rankCounter + 1
68
+ # mark next rank's solutions by -99 so that they are not considered as members of future ranks
69
+ dominationCounter[isInNextRank] = -99
70
+ # increase front counter
71
+ rankCounter += 1
72
+ # check which solutions are in current rank (next rank became current rank)
73
+ isInCurRank = (rankIndArray == rankCounter)
74
+ if not np.all(isInNextRank == isInCurRank): # DEBUGGING, if it works fine, replace above assignment
75
+ pdb.set_trace()
76
+ return(rankIndArray)
77
+
78
+ def checkDomination(objValA,objValArray):
79
+ dominates = ( np.any(objValA < objValArray, axis = 0) & np.all(objValA <= objValArray , axis = 0) )
80
+ return(dominates)
81
+
82
+ def compute_hv_in_higher_dimensions(mo_obj_val,ref_point):
83
+ n_mo_obj = mo_obj_val.shape[0]
84
+ n_mo_sol = mo_obj_val.shape[1]
85
+ assert len(ref_point) == n_mo_obj
86
+ # initialize hv computation instance
87
+ hv_computation_instance = HyperVolume(tuple(ref_point))
88
+ # turn numpy array to list of tuples
89
+ list_of_mo_obj_val = list()
90
+ for i_mo_sol in range(n_mo_sol):
91
+ list_of_mo_obj_val.append(tuple(mo_obj_val[:,i_mo_sol]))
92
+
93
+ hv = float(hv_computation_instance.compute(list_of_mo_obj_val))
94
+ return(hv)
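# Illustrative sketch: non-dominated sorting and hypervolume of a tiny 2-objective
# population (columns are solutions, rows are objectives, minimization assumed);
# the functions above and numpy are assumed to be in scope.
mo_obj_val = np.array([[1.0, 2.0, 3.0],
                       [3.0, 2.0, 1.0]])
non_dom_idx, is_dominated = determine_non_dom_mo_sol(mo_obj_val)
# all three columns are mutually non-dominated, so non_dom_idx == [0, 1, 2]
hv = compute_hv_in_higher_dimensions(mo_obj_val, ref_point=np.array([4.0, 4.0]))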
bike_bench_internal/benchmark_models/libmoon/solver/gradient/functions_hv_grad_3d.py ADDED
@@ -0,0 +1,215 @@
1
+ '''
2
+ The function grad_multi_sweep and its subfunctions in this file are based on the algorithm described in:
3
+ Emmerich, Michael, and André Deutz.
4
+ "Time complexity and zeros of the hypervolume indicator gradient field."
5
+ EVOLVE-a bridge between probability, set oriented numerics,
6
+ and evolutionary computation III. Springer, Heidelberg, 2014. 169-193.
7
+ '''
8
+ import numpy as np
9
+ import copy
10
+ from .functions_hv_python3 import HyperVolume
11
+ from .functions_evaluation import determine_non_dom_mo_sol
12
+
13
+ def determine_mo_sol_in_exterior(mo_obj_val,ref_point):
14
+ # select only mo-solutions that are in the exterior
15
+ ref_point_temp = ref_point[:,None] # add axis so that comparison works
16
+ exterior_booleans = np.any(mo_obj_val > ref_point_temp, axis = 0)
17
+ exterior_indices = np.where(exterior_booleans == True)
18
+ return(exterior_indices,exterior_booleans)
19
+
20
+ def determine_mo_sol_in_interior(mo_obj_val,ref_point):
21
+ # select only mo-solutions that are in the interior
22
+ ref_point_temp = ref_point[:,None] # add axis so that comparison works
23
+ interior_booleans = np.all(mo_obj_val < ref_point_temp, axis = 0)
24
+ interior_indices = np.where(interior_booleans == True)
25
+ return(interior_indices,interior_booleans)
26
+
27
+ def determine_mo_sol_on_ref_boundary(mo_obj_val,ref_point):
28
+ # select only mo-solutions that are on the reference boundary
29
+ ref_point_temp = ref_point[:,None] # add axis so that comparison works
30
+ boundary_booleans = np.logical_and( np.all(mo_obj_val <= ref_point_temp, axis = 0) , np.any(mo_obj_val == ref_point_temp, axis = 0) )
31
+ boundary_indices = np.where(boundary_booleans == True)
32
+ return(boundary_indices,boundary_booleans)
33
+
34
+ def compute_domination_properties(mo_obj_val):
35
+ '''
36
+ compute properties needed for HV gradient computation
37
+ '''
38
+ n_mo_sol = mo_obj_val.shape[1]
39
+ # mo_sol i strictly dominates j (all entries in i < j) if strong_domination_matrix[i,j] = True
40
+ strong_domination_matrix = np.zeros((n_mo_sol,n_mo_sol), dtype = np.bool_)
41
+ for i in range(0,n_mo_sol):
42
+ cur_col = mo_obj_val[:,i][:,None]
43
+ strong_domination_matrix[i,:] = np.all(cur_col < mo_obj_val,axis = 0)
44
+ # mo_sol i weakly dominates j (all entries in i <= j and at least one entry i < j ) if weak_domination_matrix[i,j] = True
45
+ weak_domination_matrix = np.zeros((n_mo_sol,n_mo_sol), dtype = np.bool_)
46
+ for i in range(0,n_mo_sol):
47
+ cur_col = mo_obj_val[:,i][:,None]
48
+ weak_domination_matrix[i,:] = np.logical_and( np.all(cur_col <= mo_obj_val,axis = 0) , np.any(cur_col < mo_obj_val,axis = 0) )
49
+ # a mo_sol i is strongly dominated if any other solutions j strongly dominates it (any True in the column strong_domination_matrix[:,i] )
50
+ is_strongly_dominated = np.any(strong_domination_matrix, axis = 0)
51
+ # a mo_sol i is weakly dominated if any other solutions j weakly dominates it (any True in the column weak_domination_matrix[:,i] )
52
+ is_weakly_dominated = np.any(weak_domination_matrix, axis = 0)
53
+ # weakly but not strongly dominated
54
+ is_weakly_but_not_strongly_dominated = np.logical_and( np.logical_not(is_strongly_dominated) , np.any(weak_domination_matrix, axis = 0) )
55
+
56
+ # no other solution weakly dominates it
57
+ is_not_weakly_dominated = np.logical_not(is_weakly_dominated)
58
+
59
+ # mo_sol i shares coordinate with j if at least 1 entry i = j
60
+ coordinate_sharing_matrix = np.zeros((n_mo_sol,n_mo_sol), dtype = np.bool_)
61
+ for i in range(0,n_mo_sol):
62
+ cur_col = mo_obj_val[:,i][:,None]
63
+ coordinate_sharing_matrix[i,:] = np.any(cur_col == mo_obj_val,axis = 0)
64
+
65
+ ## is not weakly dominated but share some coordinate with another not weakly dominated mo_sol
66
+ # set diagonal entries of coordinate_sharing_matrix to zero to make check work (we do not care of shared coordinates with itself)
67
+ subset_of_coordinate_sharing_matrix_with_diag_zero = copy.copy(coordinate_sharing_matrix)
68
+ np.fill_diagonal(subset_of_coordinate_sharing_matrix_with_diag_zero,False) # inplace operation
69
+ # select subset of columns so that the comparison is for all mo-solutions but only with respect to all non-weakly dominated solutions
70
+ subset_of_coordinate_sharing_matrix_with_diag_zero = subset_of_coordinate_sharing_matrix_with_diag_zero[:,is_not_weakly_dominated]
71
+ # check per row if any coordinate is shared with a non-weakly dominated solution
72
+ has_shared_coordinates = np.any(subset_of_coordinate_sharing_matrix_with_diag_zero,axis = 1)
73
+ is_not_weakly_dominated_and_shares_coordinates_with_other_not_weakly_dominated = np.logical_and( is_not_weakly_dominated , has_shared_coordinates )
74
+
75
+ ## is not weakly dominated and share no coordinate with another not weakly dominated mo_sol
76
+ is_not_weakly_dominated_and_shares_no_coordinates_with_other_not_weakly_dominated = np.logical_and( is_not_weakly_dominated , np.logical_not(has_shared_coordinates) )
77
+
78
+
79
+ return(is_strongly_dominated,is_weakly_dominated,is_weakly_but_not_strongly_dominated,is_not_weakly_dominated,is_not_weakly_dominated_and_shares_coordinates_with_other_not_weakly_dominated,is_not_weakly_dominated_and_shares_no_coordinates_with_other_not_weakly_dominated)
80
+
81
+
82
+ def compute_subsets(mo_obj_val,ref_point):
83
+ is_strongly_dominated,is_weakly_dominated,is_weakly_but_not_strongly_dominated, \
84
+ is_not_weakly_dominated,is_not_weakly_dominated_and_shares_coordinates_with_other_not_weakly_dominated, \
85
+ is_not_weakly_dominated_and_shares_no_coordinates_with_other_not_weakly_dominated = compute_domination_properties(mo_obj_val)
86
+ ## Z: indices of mo-solutions for which all partial derivatives are zero
87
+ # E: in exterior of reference space
88
+ E,_ = determine_mo_sol_in_exterior(mo_obj_val,ref_point)
89
+ # S: that are strictly dominated
90
+ S = np.where(is_strongly_dominated == True)
91
+ # Z = E cup S
92
+ Z = np.union1d(E,S)
93
+
94
+ ## U: indices of mo-solutions for which some partial derivatives are undefined
95
+ # D: that are non-dominated but have duplicate coordinates
96
+ D = np.where(is_not_weakly_dominated_and_shares_coordinates_with_other_not_weakly_dominated == True)
97
+ # W: that are weakly dominated, i.e. not strictly dominated but there is a weakly better (Pareto dominating) solution
98
+ W = np.where(is_weakly_but_not_strongly_dominated == True)
99
+ # B: that are on the boundary of the reference space
100
+ B,_ = determine_mo_sol_on_ref_boundary(mo_obj_val,ref_point)
101
+ # # U = D cup (W \ E) cup (B \ S)
102
+ # U = np.union1d( np.union1d(D, np.setdiff1d(W,E)) , np.setdiff1d(B,S) )
103
+ #DEVIATION FROM EMMERICH & DEUTZ PAPER:
104
+ # U = (D \ E) cup (W \ E) cup (B \ S)
105
+ U = np.union1d( np.union1d( np.setdiff1d(D,E) , np.setdiff1d(W,E) ) , np.setdiff1d(B,S) )
106
+
107
+ ## P: indices of mo-solutions for which all partial derivatives are positive (negative??? which one makes sense in our case)
108
+ # N: that are not Pareto dominated AND have no duplicate coordinate with any other non-dominated solution
109
+ N = np.where(is_not_weakly_dominated_and_shares_no_coordinates_with_other_not_weakly_dominated == True)
110
+ # I: that are in the interior of the reference space
111
+ I,_ = determine_mo_sol_in_interior(mo_obj_val,ref_point)
112
+ # P = N intersect I
113
+ P = np.intersect1d(N,I)
114
+
115
+ return(P,U,Z)
116
+
117
+ def grad_multi_sweep_with_duplicate_handling(mo_obj_val,ref_point):
118
+ # find unique mo_obj_val (it also sorts columns which is unnecessary but gets fixed in when using mapping_indices)
119
+ unique_mo_obj_val, mapping_indices = np.unique(mo_obj_val, axis = 1, return_inverse = True)
120
+ # compute hv_grad for unique mo-solutions
121
+ unique_hv_grad = grad_multi_sweep(unique_mo_obj_val,ref_point)
122
+ # assign the same gradients to duplicate mo_obj_val (and undo the unnecessary sorting)
123
+ hv_grad = unique_hv_grad[:,mapping_indices]
124
+ return(hv_grad)
125
+
126
+ def grad_multi_sweep(mo_obj_val,ref_point):
127
+ '''
128
+ Based on:
129
+ Emmerich, Michael, and André Deutz.
130
+ "Time complexity and zeros of the hypervolume indicator gradient field."
131
+ EVOLVE-a bridge between probability, set oriented numerics,
132
+ and evolutionary computation III. Springer, Heidelberg, 2014. 169-193.
133
+ '''
134
+ n_obj = mo_obj_val.shape[0]
135
+ n_mo_sol = mo_obj_val.shape[1]
136
+ assert n_obj == len(ref_point)
137
+ hv_grad = np.zeros_like(mo_obj_val)
138
+
139
+
140
+
141
+ P, U, Z = compute_subsets(mo_obj_val,ref_point)
142
+ #####
143
+ if not (len(U) == 0):
144
+ # raise ValueError("Partial derivatives might be only one-sided in indice" + str(U))
145
+ print("Partial derivatives might be only one-sided in indices " + str(U))
146
+ print(mo_obj_val[:,U])
147
+ hv_grad[:,U] = 0
148
+
149
+ for k in range(0,n_obj):
150
+ temp_ref_point = copy.copy(ref_point)
151
+ temp_ref_point = np.delete(temp_ref_point,k,axis = 0)
152
+ temp_ref_point = tuple(temp_ref_point)
153
+ hv_instance = HyperVolume(temp_ref_point)
154
+ sorted_P = copy.copy(mo_obj_val[:,P])
155
+ # descending order sorting
156
+ sort_order= np.argsort(-sorted_P[k,:])
157
+ sorted_P = sorted_P[:,sort_order]
158
+ # remove k-th row
159
+ sorted_P = np.delete(sorted_P,k,0)
160
+ # initialize queue by turning array of columns into list of columns
161
+ Q = sorted_P.T.tolist() # it should be possible to delete this row, Q is overwritten in the next line
162
+ Q = list()
163
+ for i in range(sorted_P.shape[1]):
164
+ Q.append(tuple(sorted_P[:,i]))
165
+ queue_index = len(P) # this initialization is actually index of last queue entry +1. The +1 is convenient because the while loop will always update the index by -1, so it all matches up in the end
166
+ T = list()
167
+ while (len(Q) > 0):
168
+ # take last element in list
169
+ q = Q.pop()
170
+ # compute hypervolume contribution of q when added to T
171
+
172
+ if len(T) == 0:
173
+ T_with_q = list()
174
+ T_with_q.append(q)
175
+ hv_contribution = hv_instance.compute(T_with_q)
176
+ else:
177
+ T_with_q = copy.copy(T)
178
+ T_with_q.append(q)
179
+ hv_contribution = hv_instance.compute(T_with_q) - hv_instance.compute(T)
180
+ # queue_index is the index of q
181
+ queue_index = queue_index - 1 # -1 because the counter is always lagging 1 and it was initialized with +1
182
+ # mo_sol_index is the index of q in mo_obj_val
183
+ mo_sol_index = P[sort_order[queue_index]]
184
+ hv_grad[k,mo_sol_index] = hv_contribution
185
+
186
+ ## add q to T and remove points dominated by q
187
+ # initialize T by q in first iteration
188
+ if len(T) == 0:
189
+ T.append(q)
190
+ # T = q
191
+ else:
192
+ ## remove columns in T that are dominated by q
193
+ # loop through T
194
+ i = 0
195
+ while (i < len(T)):
196
+ # remove entry if dominated by q
197
+ if check_weak_domination_in_tuple(q,T[i]):
198
+ del T[i]
199
+ else:
200
+ # if entry not deleted, move to next entry
201
+ i = i + 1
202
+
203
+ # add q to T
204
+ T.append(q)
205
+ return(hv_grad)
206
+
207
+ def check_weak_domination_in_tuple(tuple_A,tuple_B):
208
+ assert len(tuple_A) == len(tuple_B)
209
+ # initialize as True
210
+ A_weakly_dominates_B = True
211
+ for i in range(len(tuple_B)):
212
+ if tuple_A[i] > tuple_B[i]:
213
+ A_weakly_dominates_B = False
214
+ break
215
+ return(A_weakly_dominates_B)
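# Illustrative sketch: hypervolume gradient of a small 2-objective front; the
# functions above and numpy are assumed to be in scope, and the values are placeholders.
mo_obj_val = np.array([[1.0, 2.0, 3.0],
                       [3.0, 2.0, 1.0]])      # columns = solutions (minimization)
ref_point = np.array([4.0, 4.0])
hv_grad = grad_multi_sweep_with_duplicate_handling(mo_obj_val, ref_point)
# hv_grad has the same shape as mo_obj_val; column i holds the per-objective
# hypervolume sensitivities of solution i, which HvMaximization (gradhv.py)
# normalizes into dynamic weights.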
bike_bench_internal/benchmark_models/libmoon/solver/gradient/functions_hv_python3.py ADDED
@@ -0,0 +1,274 @@
1
+ """
2
+ Copyright (C) 2010 Simon Wessing
3
+ TU Dortmund University
4
+
5
+ This program is free software: you can redistribute it and/or modify
6
+ it under the terms of the GNU General Public License as published by
7
+ the Free Software Foundation, either version 3 of the License, or
8
+ (at your option) any later version.
9
+
10
+ This program is distributed in the hope that it will be useful,
11
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ GNU General Public License for more details.
14
+
15
+ You should have received a copy of the GNU General Public License
16
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
17
+
18
+ __author__ = "Simon Wessing"
19
+ """
20
+
21
+
22
+ class HyperVolume:
23
+ """
24
+ Hypervolume computation based on variant 3 of the algorithm in the paper:
25
+ C. M. Fonseca, L. Paquete, and M. Lopez-Ibanez. An improved dimension-sweep
26
+ algorithm for the hypervolume indicator. In IEEE Congress on Evolutionary
27
+ Computation, pages 1157-1163, Vancouver, Canada, July 2006.
28
+ Minimization is implicitly assumed here!
29
+ """
30
+
31
+ def __init__(self, referencePoint):
32
+ """Constructor."""
33
+ self.referencePoint = referencePoint
34
+ self.list = []
35
+
36
+ def compute(self, front):
37
+ """Returns the hypervolume that is dominated by a non-dominated front.
38
+ Before the HV computation, front and reference point are translated, so
39
+ that the reference point is [0, ..., 0].
40
+ """
41
+
42
+ def weaklyDominates(point, other):
43
+ for i in range(len(point)):
44
+ if point[i] > other[i]:
45
+ return False
46
+ return True
47
+
48
+ relevantPoints = []
49
+ referencePoint = self.referencePoint
50
+ dimensions = len(referencePoint)
51
+ for point in front:
52
+ # only consider points that dominate the reference point
53
+ if weaklyDominates(point, referencePoint):
54
+ relevantPoints.append(point)
55
+ if any(referencePoint):
56
+ # shift points so that referencePoint == [0, ..., 0]
57
+ # this way the reference point doesn't have to be explicitly used
58
+ # in the HV computation
59
+ for j in range(len(relevantPoints)):
60
+ relevantPoints[j] = [relevantPoints[j][i] - referencePoint[i] for i in range(dimensions)]
61
+ self.preProcess(relevantPoints)
62
+ bounds = [-1.0e308] * dimensions
63
+ hyperVolume = self.hvRecursive(dimensions - 1, len(relevantPoints), bounds)
64
+ return hyperVolume
65
+
66
+
67
+ def hvRecursive(self, dimIndex, length, bounds):
68
+ """Recursive call to hypervolume calculation.
69
+ In contrast to the paper, the code assumes that the reference point
70
+ is [0, ..., 0]. This allows the avoidance of a few operations.
71
+ """
72
+ hvol = 0.0
73
+ sentinel = self.list.sentinel
74
+ if length == 0:
75
+ return hvol
76
+ elif dimIndex == 0:
77
+ # special case: only one dimension
78
+ # why using hypervolume at all?
79
+ return -sentinel.next[0].cargo[0]
80
+ elif dimIndex == 1:
81
+ # special case: two dimensions, end recursion
82
+ q = sentinel.next[1]
83
+ h = q.cargo[0]
84
+ p = q.next[1]
85
+ while p is not sentinel:
86
+ pCargo = p.cargo
87
+ hvol += h * (q.cargo[1] - pCargo[1])
88
+ if pCargo[0] < h:
89
+ h = pCargo[0]
90
+ q = p
91
+ p = q.next[1]
92
+ hvol += h * q.cargo[1]
93
+ return hvol
94
+ else:
95
+ remove = self.list.remove
96
+ reinsert = self.list.reinsert
97
+ hvRecursive = self.hvRecursive
98
+ p = sentinel
99
+ q = p.prev[dimIndex]
100
+ while q.cargo != None:
101
+ if q.ignore < dimIndex:
102
+ q.ignore = 0
103
+ q = q.prev[dimIndex]
104
+ q = p.prev[dimIndex]
105
+ while length > 1 and (q.cargo[dimIndex] > bounds[dimIndex] or q.prev[dimIndex].cargo[dimIndex] >= bounds[dimIndex]):
106
+ p = q
107
+ remove(p, dimIndex, bounds)
108
+ q = p.prev[dimIndex]
109
+ length -= 1
110
+ qArea = q.area
111
+ qCargo = q.cargo
112
+ qPrevDimIndex = q.prev[dimIndex]
113
+ if length > 1:
114
+ hvol = qPrevDimIndex.volume[dimIndex] + qPrevDimIndex.area[dimIndex] * (qCargo[dimIndex] - qPrevDimIndex.cargo[dimIndex])
115
+ else:
116
+ qArea[0] = 1
117
+ qArea[1:dimIndex+1] = [qArea[i] * -qCargo[i] for i in range(dimIndex)]
118
+ q.volume[dimIndex] = hvol
119
+ if q.ignore >= dimIndex:
120
+ qArea[dimIndex] = qPrevDimIndex.area[dimIndex]
121
+ else:
122
+ qArea[dimIndex] = hvRecursive(dimIndex - 1, length, bounds)
123
+ if qArea[dimIndex] <= qPrevDimIndex.area[dimIndex]:
124
+ q.ignore = dimIndex
125
+ while p is not sentinel:
126
+ pCargoDimIndex = p.cargo[dimIndex]
127
+ hvol += q.area[dimIndex] * (pCargoDimIndex - q.cargo[dimIndex])
128
+ bounds[dimIndex] = pCargoDimIndex
129
+ reinsert(p, dimIndex, bounds)
130
+ length += 1
131
+ q = p
132
+ p = p.next[dimIndex]
133
+ q.volume[dimIndex] = hvol
134
+ if q.ignore >= dimIndex:
135
+ q.area[dimIndex] = q.prev[dimIndex].area[dimIndex]
136
+ else:
137
+ q.area[dimIndex] = hvRecursive(dimIndex - 1, length, bounds)
138
+ if q.area[dimIndex] <= q.prev[dimIndex].area[dimIndex]:
139
+ q.ignore = dimIndex
140
+ hvol -= q.area[dimIndex] * q.cargo[dimIndex]
141
+ return hvol
142
+
143
+
144
+ def preProcess(self, front):
145
+ """Sets up the list data structure needed for calculation."""
146
+ dimensions = len(self.referencePoint)
147
+ nodeList = MultiList(dimensions)
148
+ nodes = [MultiList.Node(dimensions, point) for point in front]
149
+ for i in range(dimensions):
150
+ # sort by dimension
151
+ nodes = sorted(nodes, key=lambda node: node.cargo[i])
152
+ nodeList.extend(nodes, i)
153
+ self.list = nodeList
154
+
155
+
156
+
157
+ class MultiList:
158
+ """A special data structure needed by FonsecaHyperVolume.
159
+
160
+ It consists of several doubly linked lists that share common nodes. So,
161
+ every node has multiple predecessors and successors, one in every list.
162
+ """
163
+
164
+ class Node:
165
+
166
+ def __init__(self, numberLists, cargo=None):
167
+ self.cargo = cargo
168
+ self.next = [None] * numberLists
169
+ self.prev = [None] * numberLists
170
+ self.ignore = 0
171
+ self.area = [0.0] * numberLists
172
+ self.volume = [0.0] * numberLists
173
+
174
+ def __str__(self):
175
+ return str(self.cargo)
176
+
177
+
178
+ def __init__(self, numberLists):
179
+ """Constructor.
180
+
181
+ Builds 'numberLists' doubly linked lists.
182
+ """
183
+ self.numberLists = numberLists
184
+ self.sentinel = MultiList.Node(numberLists)
185
+ self.sentinel.next = [self.sentinel] * numberLists
186
+ self.sentinel.prev = [self.sentinel] * numberLists
187
+
188
+
189
+ def __str__(self):
190
+ strings = []
191
+ for i in range(self.numberLists):
192
+ currentList = []
193
+ node = self.sentinel.next[i]
194
+ while node != self.sentinel:
195
+ currentList.append(str(node))
196
+ node = node.next[i]
197
+ strings.append(str(currentList))
198
+ stringRepr = ""
199
+ for string in strings:
200
+ stringRepr += string + "\n"
201
+ return stringRepr
202
+
203
+
204
+ def __len__(self):
205
+ """Returns the number of lists that are included in this MultiList."""
206
+ return self.numberLists
207
+
208
+
209
+ def getLength(self, i):
210
+ """Returns the length of the i-th list."""
211
+ length = 0
212
+ sentinel = self.sentinel
213
+ node = sentinel.next[i]
214
+ while node != sentinel:
215
+ length += 1
216
+ node = node.next[i]
217
+ return length
218
+
219
+
220
+ def append(self, node, index):
221
+ """Appends a node to the end of the list at the given index."""
222
+ lastButOne = self.sentinel.prev[index]
223
+ node.next[index] = self.sentinel
224
+ node.prev[index] = lastButOne
225
+ # set the last element as the new one
226
+ self.sentinel.prev[index] = node
227
+ lastButOne.next[index] = node
228
+
229
+
230
+ def extend(self, nodes, index):
231
+ """Extends the list at the given index with the nodes."""
232
+ sentinel = self.sentinel
233
+ for node in nodes:
234
+ lastButOne = sentinel.prev[index]
235
+ node.next[index] = sentinel
236
+ node.prev[index] = lastButOne
237
+ # set the last element as the new one
238
+ sentinel.prev[index] = node
239
+ lastButOne.next[index] = node
240
+
241
+
242
+ def remove(self, node, index, bounds):
243
+ """Removes and returns 'node' from all lists in [0, 'index'[."""
244
+ for i in range(index):
245
+ predecessor = node.prev[i]
246
+ successor = node.next[i]
247
+ predecessor.next[i] = successor
248
+ successor.prev[i] = predecessor
249
+ if bounds[i] > node.cargo[i]:
250
+ bounds[i] = node.cargo[i]
251
+ return node
252
+
253
+
254
+ def reinsert(self, node, index, bounds):
255
+ """
256
+ Inserts 'node' at the position it had in all lists in [0, 'index'[
257
+ before it was removed. This method assumes that the next and previous
258
+ nodes of the node that is reinserted are in the list.
259
+ """
260
+ for i in range(index):
261
+ node.prev[i].next[i] = node
262
+ node.next[i].prev[i] = node
263
+ if bounds[i] > node.cargo[i]:
264
+ bounds[i] = node.cargo[i]
265
+
266
+
267
+
268
+ if __name__ == "__main__":
269
+
270
+ # Example:
271
+ referencePoint = [2, 2, 2]
272
+ hv = HyperVolume(referencePoint)
273
+ front = [[1,0,1], [0,1,0]]
274
+ volume = hv.compute(front)
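# Illustrative check (self-contained, assuming the HyperVolume class above is in
# scope): w.r.t. reference point [2, 2, 2] the boxes dominated by [1, 0, 1] and
# [0, 1, 0] have volumes 1*2*1 = 2 and 2*1*2 = 4 and overlap in a unit cube, so
# the hypervolume of the example front should come out as 2 + 4 - 1 = 5.0.
hv_check = HyperVolume([2, 2, 2])
print(hv_check.compute([[1, 0, 1], [0, 1, 0]]))   # expected: 5.0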
bike_bench_internal/benchmark_models/libmoon/solver/gradient/gradhv.py ADDED
@@ -0,0 +1,122 @@
1
+ """
2
+ The class HvMaximization is based on the algorithm described by
3
+ Wang, Hao, et al.
4
+ "Hypervolume indicator gradient ascent multi-objective optimization."
5
+ International conference on evolutionary multi-criterion optimization. Springer, Cham, 2017.
6
+ """
7
+
8
+ import numpy as np
9
+ import torch
10
+
11
+ from .functions_evaluation import fastNonDominatedSort
12
+ from .functions_hv_grad_3d import grad_multi_sweep_with_duplicate_handling
13
+ from .base_solver import GradBaseSolver
14
+ from torch.autograd import Variable
15
+
16
+
17
+ from tqdm import tqdm
18
+ from pymoo.indicators.hv import HV
19
+ from ...util_global.constant import solution_eps, get_hv_ref_dict
20
+
34
+
35
+ class HvMaximization(object):
36
+ """
37
+ Mo optimizer for calculating dynamic weights using higamo style hv maximization
38
+ based on Hao Wang et al.'s HIGA-MO
39
+ uses non-dominated sorting to create multiple fronts, and maximize hypervolume of each
40
+ """
41
+
42
+ def __init__(self, n_mo_sol, n_mo_obj, ref_point, obj_space_normalize=True):
43
+ super(HvMaximization, self).__init__()
44
+ self.name = 'hv_maximization'
45
+ self.ref_point = np.array(ref_point)
46
+ self.n_mo_sol = n_mo_sol
47
+ self.n_mo_obj = n_mo_obj
48
+ self.obj_space_normalize = obj_space_normalize
49
+
50
+ def compute_weights(self, mo_obj_val):
51
+ n_mo_obj = self.n_mo_obj
52
+ n_mo_sol = self.n_mo_sol
53
+
54
+ # non-dom sorting to create multiple fronts
55
+ hv_subfront_indices = fastNonDominatedSort(mo_obj_val)
56
+ dyn_ref_point = 1.1 * np.max(mo_obj_val, axis=1)
57
+ for i_obj in range(0, n_mo_obj):
58
+ dyn_ref_point[i_obj] = np.maximum(self.ref_point[i_obj], dyn_ref_point[i_obj])
59
+ number_of_fronts = np.max(hv_subfront_indices) + 1 # +1 because of 0 indexing
60
+
61
+ obj_space_multifront_hv_gradient = np.zeros((n_mo_obj, n_mo_sol))
62
+ for i_fronts in range(0, number_of_fronts):
63
+ # compute HV gradients for current front
64
+ temp_grad_array = grad_multi_sweep_with_duplicate_handling(mo_obj_val[:, (hv_subfront_indices == i_fronts)],
65
+ dyn_ref_point)
66
+ obj_space_multifront_hv_gradient[:, (hv_subfront_indices == i_fronts)] = temp_grad_array
67
+
68
+ # normalize the hv_gradient in obj space (||dHV/dY|| == 1)
69
+ normalized_obj_space_multifront_hv_gradient = np.zeros((n_mo_obj, n_mo_sol))
70
+ for i_mo_sol in range(0, n_mo_sol):
71
+ w = np.sqrt(np.sum(obj_space_multifront_hv_gradient[:, i_mo_sol] ** 2.0))
72
+ if np.isclose(w, 0):
73
+ w = 1
74
+ if self.obj_space_normalize:
75
+ normalized_obj_space_multifront_hv_gradient[:, i_mo_sol] = obj_space_multifront_hv_gradient[:,
76
+ i_mo_sol] / w
77
+ else:
78
+ normalized_obj_space_multifront_hv_gradient[:, i_mo_sol] = obj_space_multifront_hv_gradient[:, i_mo_sol]
79
+
80
+ dynamic_weights = torch.tensor(normalized_obj_space_multifront_hv_gradient, dtype=torch.float)
81
+ return (dynamic_weights)
82
+
83
+
84
+
85
+ class GradHVSolver(GradBaseSolver):
86
+ def __init__(self, step_size, max_iter, tol):
87
+
88
+ super().__init__(step_size, max_iter, tol)
89
+
90
+ def solve(self, problem, x, prefs, args, ref_point):
91
+ if args.n_obj != 2:
92
+ assert False, 'hvgrad only supports 2 obj problem'
93
+
94
+ hv_maximizer = HvMaximization(args.n_prob, args.n_obj, ref_point)
95
+
96
+
97
+ x = Variable(x, requires_grad=True)
98
+ optimizer = torch.optim.SGD([x,], lr=self.step_size)
99
+ hv_ind = HV(ref_point= ref_point)
100
+ hv_arr = [0] * self.max_iter
101
+ y_arr=[]
102
+ for iter_idx in tqdm(range(self.max_iter)):
103
+ y = problem.evaluate(x)
104
+ y_np = y.detach().numpy()
105
+ y_arr.append(y_np)
106
+ hv_arr[iter_idx] = hv_ind.do(y_np)
107
+ weight = hv_maximizer.compute_weights(y_np.T)
108
+ weight = torch.tensor(weight.T, dtype=torch.float)
109
+ optimizer.zero_grad()
110
+ torch.sum(weight*y).backward()
111
+ optimizer.step()
112
+ if 'lbound' in dir(problem):
113
+ x.data = torch.clamp(x.data, torch.Tensor(problem.lbound) + solution_eps, torch.Tensor(problem.ubound)-solution_eps )
114
+
115
+
116
+ res = {}
117
+ res['x'] = x.detach().numpy()
118
+ res['y'] = y.detach().numpy()
119
+ res['hv_arr'] = hv_arr
120
+ res['y_arr'] = y_arr
121
+
122
+ return res
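# Illustrative sketch: dynamic hypervolume-maximization weights for three
# 2-objective solutions; the classes above and numpy are assumed to be in scope.
mo_obj_val = np.array([[1.0, 2.0, 3.0],
                       [3.0, 2.0, 1.0]])          # (n_obj, n_sol), minimization
hv_opt = HvMaximization(n_mo_sol=3, n_mo_obj=2, ref_point=np.array([4.0, 4.0]))
weights = hv_opt.compute_weights(mo_obj_val)      # torch.FloatTensor, shape (2, 3)
# each column is a (normalized) weight vector; GradHVSolver multiplies it with the
# corresponding objective vector and takes one SGD step on the weighted sum.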
bike_bench_internal/benchmark_models/libmoon/solver/gradient/mgda_core.py ADDED
@@ -0,0 +1,72 @@
1
+ import torch
2
+ from numpy import array
3
+ import numpy as np
4
+ from cvxopt import matrix, solvers
5
+ solvers.options['show_progress'] = False
6
+
7
+ def solve_mgda_analy(grad_1, grad_2, return_coeff = False):
8
+ '''
9
+ Noted that, solve_mgda_analy only support 2-objective case.
10
+ grad_i.shape: (n,).
11
+ This function support grad_i as both Tensor and numpy.
12
+ '''
13
+
14
+ v1v1 = grad_1 @ grad_1
15
+ v2v2 = grad_2 @ grad_2
16
+ v1v2 = grad_1 @ grad_2
17
+
18
+ if v1v2 >= v1v1:
19
+ gamma = 0.999
20
+ elif v1v2 >= v2v2:
21
+ gamma = 0.001
22
+ else:
23
+ gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
24
+
25
+ coeff = array([gamma, 1-gamma])
26
+ gw = coeff[0] * grad_1 + coeff[1] * grad_2
27
+ if return_coeff:
28
+ return gw, coeff
29
+ else:
30
+ return gw
31
+
32
+
33
+ def solve_mgda(G, return_coeff=False):
34
+ '''
35
+ input G: (m,n).
36
+ output gw (n,).
37
+ comments: This function is used to solve the dual MGDA problem. It can handle m>2.
38
+ '''
39
+ if type(G) == torch.Tensor:
40
+ G = G.detach().cpu().numpy().copy()
41
+
42
+
43
+ m = G.shape[0]
44
+ if m == 2:
45
+ return solve_mgda_analy(G[0], G[1], return_coeff=return_coeff)
46
+ else:
47
+ Q = G @ G.T
48
+ Q = matrix(np.float64(Q))
49
+ p = np.zeros(m)
50
+ A = np.ones(m)
51
+
52
+ A = matrix(A, (1, m))
53
+ b = matrix(1.0)
54
+
55
+ G_cvx = -np.eye(m)
56
+ h = [0.0] * m
57
+ h = matrix(h)
58
+
59
+ G_cvx = matrix(G_cvx)
60
+ p = matrix(p)
61
+ sol = solvers.qp(Q, p, G_cvx, h, A, b)
62
+
63
+ res = np.array(sol['x']).squeeze()
64
+ res = res / sum(res) # important
65
+ gw = torch.Tensor( res @ G )
66
+
67
+ if return_coeff:
68
+ return gw, res
69
+ else:
70
+ return gw
71
+
72
+
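# Illustrative sketch: minimum-norm convex combination of three objective
# gradients; solve_mgda above and torch are assumed to be in scope.
G = torch.tensor([[1.0, 0.0, 0.2],
                  [-0.5, 0.8, 0.0],
                  [0.1, -0.3, 0.9]])   # (m=3 objectives, n=3 variables)
gw, coeff = solve_mgda(G, return_coeff=True)
# coeff lies on the simplex (sums to one) and gw = coeff @ G is the common
# descent direction; for m == 2 the closed-form solve_mgda_analy branch is used.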
bike_bench_internal/benchmark_models/libmoon/solver/gradient/mgda_solver.py ADDED
@@ -0,0 +1,71 @@
1
+ import torch.autograd
2
+
3
+ from .mgda_core import solve_mgda
4
+
5
+ from .base_solver import GradBaseSolver
6
+ from torch.autograd import Variable
7
+ from torch.optim import SGD
8
+ from tqdm import tqdm
9
+ from torch import Tensor
10
+ import numpy as np
11
+ from ...util_global.constant import solution_eps, get_hv_ref_dict
12
+ from pymoo.indicators.hv import HV
13
+
14
+
15
+ '''
16
+ MGDA solver, published in:
17
+ 1. Désidéri, Jean-Antoine. "Multiple-gradient descent algorithm (MGDA) for multiobjective optimization."
18
+ 2. Sener, Ozan, and Vladlen Koltun. "Multi-task learning as multi-objective optimization." Advances in neural information processing systems 31 (2018).
19
+ '''
20
+
21
+
22
+ class MGDASolver(GradBaseSolver):
23
+ def __init__(self, step_size, max_iter, tol):
24
+ super().__init__(step_size, max_iter, tol)
25
+
26
+
27
+ def solve(self, problem, x, prefs, args, ref_point):
28
+ x = Variable(x, requires_grad=True)
29
+ optimizer = SGD([x], lr=self.step_size)
30
+
31
+ ind = HV(ref_point=ref_point)
32
+ hv_arr = []
33
+ y_arr = []
34
+
35
+ for i in tqdm(range(self.max_iter)):
36
+ grad_arr = [0] * args.n_prob
37
+ y = problem.evaluate(x)
38
+ y_np = y.detach().numpy()
39
+ y_arr.append(y_np)
40
+ hv_arr.append(ind.do(y_np))
41
+
42
+ for prob_idx in range( args.n_prob ):
43
+ grad_arr[prob_idx] = [0] * args.n_obj
44
+ for obj_idx in range(args.n_obj):
45
+ y[prob_idx][obj_idx].backward(retain_graph=True)
46
+ grad_arr[prob_idx][obj_idx] = x.grad[prob_idx].clone()
47
+ x.grad.zero_()
48
+ grad_arr[prob_idx] = torch.stack(grad_arr[prob_idx])
49
+
50
+ grad_arr = torch.stack(grad_arr)
51
+ gw_arr = [solve_mgda(G, return_coeff=True) for G in grad_arr]
52
+ optimizer.zero_grad()
53
+ weights = Tensor( np.array([gw[1] for gw in gw_arr]) )
54
+ # weights = Tensor( np.array([1.0, 0.0]) )
55
+ torch.sum(weights * y).backward()
56
+ optimizer.step()
57
+
58
+ if 'lbound' in dir(problem):
59
+ x.data = torch.clamp(x.data, torch.Tensor(problem.lbound) + solution_eps, torch.Tensor(problem.ubound) - solution_eps )
60
+
61
+ res = {}
62
+ res['x'] = x.detach().numpy()
63
+ res['y'] = y.detach().numpy()
64
+ res['hv_arr'] = hv_arr
65
+ res['y_arr'] = y_arr
66
+
67
+ return res
68
+
69
+
70
+ if __name__ == '__main__':
71
+ print()
bike_bench_internal/benchmark_models/libmoon/solver/gradient/min_norm_solvers_numpy.py ADDED
@@ -0,0 +1,386 @@
1
+ # This code is from
2
+ # Multi-Task Learning as Multi-Objective Optimization
3
+ # Ozan Sener, Vladlen Koltun
4
+ # Neural Information Processing Systems (NeurIPS) 2018
5
+ # https://github.com/intel-isl/MultiObjectiveOptimization
6
+
7
+ import numpy as np
8
+ import torch
9
+
10
+ class MinNormSolver:
11
+ MAX_ITER = 250
12
+ STOP_CRIT = 1e-5
13
+
14
+ def _min_norm_element_from2(v1v1, v1v2, v2v2):
15
+ """
16
+ Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
17
+ d is the distance (objective) optimized
18
+ v1v1 = <x1,x1>
19
+ v1v2 = <x1,x2>
20
+ v2v2 = <x2,x2>
21
+ """
22
+ if v1v2 >= v1v1:
23
+ # Case: Fig 1, third column
24
+ gamma = 0.999
25
+ cost = v1v1
26
+ return gamma, cost
27
+ if v1v2 >= v2v2:
28
+ # Case: Fig 1, first column
29
+ gamma = 0.001
30
+ cost = v2v2
31
+ return gamma, cost
32
+ # Case: Fig 1, second column
33
+ gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
34
+ cost = v2v2 + gamma * (v1v2 - v2v2)
35
+ return gamma, cost
36
+
37
+ def _min_norm_2d(vecs, dps):
38
+ """
39
+ Find the minimum norm solution as combination of two points
40
+ This is correct only in 2D
41
+ ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j
42
+ """
43
+ dmin = 1e8
44
+ for i in range(len(vecs)):
45
+ for j in range(i + 1, len(vecs)):
46
+ if (i, j) not in dps:
47
+ dps[(i, j)] = 0.0
48
+ for k in range(len(vecs[i])):
49
+ dps[(i, j)] += torch.mul(vecs[i][k], vecs[j][k]).sum().data.cpu()
50
+ dps[(j, i)] = dps[(i, j)]
51
+ if (i, i) not in dps:
52
+ dps[(i, i)] = 0.0
53
+ for k in range(len(vecs[i])):
54
+ dps[(i, i)] += torch.mul(vecs[i][k], vecs[i][k]).sum().data.cpu()
55
+ if (j, j) not in dps:
56
+ dps[(j, j)] = 0.0
57
+ for k in range(len(vecs[i])):
58
+ dps[(j, j)] += torch.mul(vecs[j][k], vecs[j][k]).sum().data.cpu()
59
+ c, d = MinNormSolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)])
60
+ if d < dmin:
61
+ dmin = d
62
+ sol = [(i, j), c, d]
63
+ return sol, dps
64
+
65
+ def _projection2simplex(y):
66
+ """
67
+ Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
68
+ """
69
+ m = len(y)
70
+ sorted_y = np.flip(np.sort(y), axis=0)
71
+ tmpsum = 0.0
72
+ tmax_f = (np.sum(y) - 1.0) / m
73
+ for i in range(m - 1):
74
+ tmpsum += sorted_y[i]
75
+ tmax = (tmpsum - 1) / (i + 1.0)
76
+ if tmax > sorted_y[i + 1]:
77
+ tmax_f = tmax
78
+ break
79
+ return np.maximum(y - tmax_f, np.zeros(y.shape))
80
+
81
+ def _next_point(cur_val, grad, n):
82
+ proj_grad = grad - (np.sum(grad) / n)
83
+ tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
84
+ tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
85
+
86
+ # tm1 = np.array(tm1)
87
+ # tm2 = np.array(tm2)
88
+
89
+ # skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)
90
+ t = 1
91
+ if len(tm1[tm1 > 1e-7]) > 0:
92
+ t = np.min(tm1[tm1 > 1e-7])
93
+ if len(tm2[tm2 > 1e-7]) > 0:
94
+ t = min(t, np.min(tm2[tm2 > 1e-7]))
95
+
96
+ next_point = proj_grad * t + cur_val
97
+ next_point = MinNormSolver._projection2simplex(next_point)
98
+ return next_point
99
+
100
+ def find_min_norm_element(vecs):
101
+ """
102
+ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull.
103
+ as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
104
+ It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j}).
105
+ Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence.
106
+ """
107
+ # Solution lying at the combination of two points
108
+ dps = {} # dps caches the pairwise dot products between the gradient vectors
109
+
110
+ init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
111
+
112
+ n = len(vecs)
113
+ sol_vec = np.zeros(n)
114
+ sol_vec[init_sol[0][0]] = init_sol[1]
115
+ sol_vec[init_sol[0][1]] = 1 - init_sol[1]
116
+
117
+ if n < 3:
118
+ # This is optimal for n=2, so return the solution
119
+ return sol_vec, init_sol[2]
120
+
121
+ iter_count = 0
122
+
123
+ grad_mat = np.zeros((n, n))
124
+ for i in range(n):
125
+ for j in range(n):
126
+ grad_mat[i, j] = dps[(i, j)]
127
+
128
+ while iter_count < MinNormSolver.MAX_ITER:
129
+ grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
130
+ new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
131
+ # Re-compute the inner products for line search
132
+ v1v1 = 0.0
133
+ v1v2 = 0.0
134
+ v2v2 = 0.0
135
+ for i in range(n):
136
+ for j in range(n):
137
+ v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
138
+ v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
139
+ v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
140
+ nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
141
+ new_sol_vec = nc * sol_vec + (1 - nc) * new_point
142
+ change = new_sol_vec - sol_vec
143
+ change = np.array(change)
144
+
145
+ if np.sum( np.abs(change) ) < MinNormSolver.STOP_CRIT:
146
+ return sol_vec, nd
147
+
148
+ sol_vec = new_sol_vec
149
+
150
+ def find_min_norm_element_FW(vecs):
151
+ """
152
+ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
153
+ as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
154
+ It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
155
+ Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence
156
+ """
157
+ # Solution lying at the combination of two points
158
+ dps = {}
159
+ init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
160
+
161
+ n = len(vecs)
162
+ sol_vec = np.zeros(n)
163
+ sol_vec[init_sol[0][0]] = init_sol[1]
164
+ sol_vec[init_sol[0][1]] = 1 - init_sol[1]
165
+
166
+ if n < 3:
167
+ # This is optimal for n=2, so return the solution
168
+ return sol_vec, init_sol[2]
169
+
170
+ iter_count = 0
171
+
172
+ grad_mat = np.zeros((n, n))
173
+ for i in range(n):
174
+ for j in range(n):
175
+ grad_mat[i, j] = dps[(i, j)]
176
+
177
+ while iter_count < MinNormSolver.MAX_ITER:
178
+ t_iter = np.argmin(np.dot(grad_mat, sol_vec))
179
+
180
+ v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
181
+ v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
182
+ v2v2 = grad_mat[t_iter, t_iter]
183
+
184
+ nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
185
+ new_sol_vec = nc * sol_vec
186
+ new_sol_vec[t_iter] += 1 - nc
187
+
188
+ change = new_sol_vec - sol_vec
189
+ if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
190
+ return sol_vec, nd
191
+ sol_vec = new_sol_vec
192
+
193
+
194
+ def gradient_normalizers(grads, losses, normalization_type):
195
+ gn = {}
196
+ if normalization_type == 'l2':
197
+ for t in grads:
198
+ gn[t] = np.sqrt(np.sum([gr.pow(2).sum().data.cpu() for gr in grads[t]]))
199
+ elif normalization_type == 'loss':
200
+ for t in grads:
201
+ gn[t] = losses[t]
202
+ elif normalization_type == 'loss+':
203
+ for t in grads:
204
+ gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().data.cpu() for gr in grads[t]]))
205
+ elif normalization_type == 'none':
206
+ for t in grads:
207
+ gn[t] = 1.0
208
+ else:
209
+ print('ERROR: Invalid Normalization Type')
210
+ return gn
211
+
212
+
213
+ class MinNormSolverNumpy:
214
+ MAX_ITER = 250
215
+ STOP_CRIT = 1e-6
216
+
217
+ def _min_norm_element_from2(v1v1, v1v2, v2v2):
218
+ """
219
+ Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
220
+ d is the distance (objective) optimized
221
+ v1v1 = <x1,x1>
222
+ v1v2 = <x1,x2>
223
+ v2v2 = <x2,x2>
224
+ """
225
+ if v1v2 >= v1v1:
226
+ # Case: Fig 1, third column
227
+ gamma = 0.999
228
+ cost = v1v1
229
+ return gamma, cost
230
+ if v1v2 >= v2v2:
231
+ # Case: Fig 1, first column
232
+ gamma = 0.001
233
+ cost = v2v2
234
+ return gamma, cost
235
+ # Case: Fig 1, second column
236
+ gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
237
+ cost = v2v2 + gamma * (v1v2 - v2v2)
238
+ return gamma, cost
239
+
240
+ def _min_norm_2d(vecs, dps):
241
+ """
242
+ Find the minimum norm solution as combination of two points
243
+ This solution is correct if vectors(gradients) lie in 2D
244
+ ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j
245
+ """
246
+ dmin = 1e8
247
+ for i in range(len(vecs)):
248
+ for j in range(i + 1, len(vecs)):
249
+ if (i, j) not in dps:
250
+ dps[(i, j)] = 0.0
251
+ dps[(i, j)] = np.dot(vecs[i], vecs[j])
252
+ dps[(j, i)] = dps[(i, j)]
253
+ if (i, i) not in dps:
254
+ dps[(i, i)] = 0.0
255
+ dps[(i, i)] = np.dot(vecs[i], vecs[i])
256
+ if (j, j) not in dps:
257
+ dps[(j, j)] = 0.0
258
+ dps[(j, j)] = np.dot(vecs[j], vecs[j])
259
+ c, d = MinNormSolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)])
260
+ if d < dmin:
261
+ dmin = d
262
+ sol = [(i, j), c, d]
263
+ return sol, dps
264
+
265
+ def _projection2simplex(y):
266
+ """
267
+ Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
268
+ """
269
+ m = len(y)
270
+ sorted_y = np.flip(np.sort(y), axis=0)
271
+ tmpsum = 0.0
272
+ tmax_f = (np.sum(y) - 1.0) / m
273
+ for i in range(m - 1):
274
+ tmpsum += sorted_y[i]
275
+ tmax = (tmpsum - 1) / (i + 1.0)
276
+ if tmax > sorted_y[i + 1]:
277
+ tmax_f = tmax
278
+ break
279
+ return np.maximum(y - tmax_f, np.zeros(y.shape))
280
+
281
+ def _next_point(cur_val, grad, n):
282
+ proj_grad = grad - (np.sum(grad) / n)
283
+ tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
284
+ tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
285
+
286
+ skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)
287
+ t = 1
288
+ if len(tm1[tm1 > 1e-7]) > 0:
289
+ t = np.min(tm1[tm1 > 1e-7])
290
+ if len(tm2[tm2 > 1e-7]) > 0:
291
+ t = min(t, np.min(tm2[tm2 > 1e-7]))
292
+
293
+ next_point = proj_grad * t + cur_val
294
+ next_point = MinNormSolver._projection2simplex(next_point)
295
+ return next_point
296
+
297
+ def find_min_norm_element(vecs):
298
+ """
299
+ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
300
+ as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
301
+ It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
302
+ Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
303
+ """
304
+ # Solution lying at the combination of two points
305
+ dps = {}
306
+ init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
307
+
308
+ n = len(vecs)
309
+ sol_vec = np.zeros(n)
310
+ sol_vec[init_sol[0][0]] = init_sol[1]
311
+ sol_vec[init_sol[0][1]] = 1 - init_sol[1]
312
+
313
+ if n < 3:
314
+ # This is optimal for n=2, so return the solution
315
+ return sol_vec, init_sol[2]
316
+
317
+ iter_count = 0
318
+
319
+ grad_mat = np.zeros((n, n))
320
+ for i in range(n):
321
+ for j in range(n):
322
+ grad_mat[i, j] = dps[(i, j)]
323
+
324
+ while iter_count < MinNormSolver.MAX_ITER:
325
+ grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
326
+ new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
327
+ # Re-compute the inner products for line search
328
+ v1v1 = 0.0
329
+ v1v2 = 0.0
330
+ v2v2 = 0.0
331
+ for i in range(n):
332
+ for j in range(n):
333
+ v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
334
+ v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
335
+ v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
336
+ nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
337
+ new_sol_vec = nc * sol_vec + (1 - nc) * new_point
338
+ change = new_sol_vec - sol_vec
339
+ if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
340
+ return sol_vec, nd
341
+ sol_vec = new_sol_vec
342
+ return sol_vec, nd
343
+
344
+ def find_min_norm_element_FW(vecs):
345
+ """
346
+ Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
347
+ as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
348
+ It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
349
+ Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence
350
+ """
351
+ # Solution lying at the combination of two points
352
+ dps = {}
353
+ init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
354
+
355
+ n = len(vecs)
356
+ sol_vec = np.zeros(n)
357
+ sol_vec[init_sol[0][0]] = init_sol[1]
358
+ sol_vec[init_sol[0][1]] = 1 - init_sol[1]
359
+
360
+ if n < 3:
361
+ # This is optimal for n=2, so return the solution
362
+ return sol_vec, init_sol[2]
363
+
364
+ iter_count = 0
365
+
366
+ grad_mat = np.zeros((n, n))
367
+ for i in range(n):
368
+ for j in range(n):
369
+ grad_mat[i, j] = dps[(i, j)]
370
+
371
+ while iter_count < MinNormSolver.MAX_ITER:
372
+ t_iter = np.argmin(np.dot(grad_mat, sol_vec))
373
+
374
+ v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
375
+ v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
376
+ v2v2 = grad_mat[t_iter, t_iter]
377
+
378
+ nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
379
+ new_sol_vec = nc * sol_vec
380
+ new_sol_vec[t_iter] += 1 - nc
381
+
382
+ change = new_sol_vec - sol_vec
383
+ if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
384
+ return sol_vec, nd
385
+ sol_vec = new_sol_vec
386
+ return sol_vec, nd
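# Illustrative sketch for the torch-based solver above: each task's gradients are
# passed as a list of tensors (one entry per parameter group); values are made up.
vecs = [[torch.tensor([1.0, 0.0])],
        [torch.tensor([-0.5, 0.5])]]
sol, cost = MinNormSolver.find_min_norm_element(vecs)
# for two tasks the closed-form branch applies, giving sol approximately [0.4, 0.6]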
bike_bench_internal/benchmark_models/libmoon/solver/gradient/moosvgd.py ADDED
@@ -0,0 +1,101 @@
1
+ from .mgda_core import solve_mgda
2
+ from .base_solver import GradBaseSolver
3
+
4
+ import torch
5
+ import math
6
+ from torch.autograd import Variable
7
+ from torch.optim import SGD
8
+ import sys
9
+ from ...util_global.constant import solution_eps
10
+ from tqdm import tqdm
11
+
12
+ def kernel_functional_rbf(losses):
13
+ '''
14
+ input losses: (n_prob, n_obj)
15
+ output kernel_matrix: (n_prob, n_prob)
16
+ comments: This function is used to compute the kernel matrix for SVGD.
17
+ '''
18
+ n = losses.shape[0]
19
+ # losses shape : (10,) * (3,)
20
+ pairwise_distance = torch.norm(losses[:, None] - losses, dim=2).pow(2)
21
+ h = median(pairwise_distance) / math.log(n)
22
+ A = 5e-6 # Note: this bandwidth parameter is important.
23
+ kernel_matrix = torch.exp(-pairwise_distance / A*h) # 5e-6 for zdt1,2,3, zxy, Dec 5, 2023
24
+ return kernel_matrix
25
+
26
+ def median(tensor):
27
+ """
28
+ torch.median() acts differently from np.median(). We want to simulate numpy implementation.
29
+ """
30
+ tensor = tensor.detach().flatten()
31
+ tensor_max = tensor.max()[None]
32
+ return (torch.cat((tensor, tensor_max)).median() + tensor.median()) / 2.
33
+
34
+ def get_svgd_gradient(G, inputs, losses):
35
+ '''
36
+ :param G.shape: (n_prob, n_obj, n_var)
37
+ :param inputs.shape: (n_prob, n_var)
38
+ :param losses.shape: (n_prob, n_obj)
39
+ :return:
40
+ '''
41
+ n_prob = inputs.size(0)
42
+ # G shape (n_prob, n_obj, n_var)
43
+ g_w = [0] * n_prob
44
+
45
+ for idx in range(n_prob):
46
+ g_w[idx] = torch.Tensor( solve_mgda(G[idx], return_coeff=False) )
47
+
48
+ g_w = torch.stack(g_w) # (n_prob, n_var)
49
+ # See https://github.com/activatedgeek/svgd/issues/1#issuecomment-649235844 for why there is a factor -0.5
50
+
51
+ kernel = kernel_functional_rbf(losses)
52
+ kernel_grad = -0.5 * torch.autograd.grad(kernel.sum(), inputs, allow_unused=True)[0] # (n_prob, n_var)
53
+ gradient = (kernel.mm(g_w) - kernel_grad) / n_prob
54
+
55
+ return gradient
56
+
57
+
58
+
59
+ class MOOSVGDSolver(GradBaseSolver):
60
+ def __init__(self, step_size, max_iter, tol):
61
+ super().__init__(step_size, max_iter, tol)
62
+
63
+ def solve(self, problem, x, prefs, args, ref_point):
64
+ x = Variable(x, requires_grad=True)
65
+ optimizer = SGD([x], lr=self.step_size)
66
+ for i in tqdm(range(self.max_iter)):
67
+ y = problem.evaluate(x)
68
+ grad_arr = [0] * args.n_prob
69
+ for prob_idx in range(args.n_prob):
70
+ grad_arr[prob_idx] = [0] * args.n_obj
71
+ for obj_idx in range(args.n_obj):
72
+ y[prob_idx][obj_idx].backward(retain_graph=True)
73
+ grad_arr[prob_idx][obj_idx] = x.grad[prob_idx].clone()
74
+ x.grad.zero_()
75
+ grad_arr[prob_idx] = torch.stack(grad_arr[prob_idx])
76
+
77
+ grad_arr = torch.stack(grad_arr).detach()
78
+ gw = get_svgd_gradient(grad_arr, x, y)
79
+ optimizer.zero_grad()
80
+ x.grad = gw
81
+ optimizer.step()
82
+
83
+ if 'lbound' in dir(problem):
84
+ x.data = torch.clamp(x.data, torch.Tensor(problem.lbound) + solution_eps, torch.Tensor(problem.ubound) - solution_eps)
85
+
86
+ res={}
87
+ res['x'] = x.detach().numpy()
88
+ res['y'] = y.detach().numpy()
89
+ res['hv_arr'] = [0]
90
+ return res
91
+
92
+
93
+
94
+ if __name__ == '__main__':
95
+ losses = torch.rand(10, 3)
96
+ kernel = kernel_functional_rbf(losses)
97
+
98
+
99
+
100
+
101
+
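A quick, self-contained smoke test for the kernel defined above, along the lines of this file's own __main__ block; the sizes (10 candidates, 3 objectives) are illustrative only, and the calls assume kernel_functional_rbf is in scope (e.g. run from within moosvgd.py):

import torch

losses = torch.rand(10, 3)                       # (n_prob, n_obj), as in the __main__ block above
K = kernel_functional_rbf(losses)
print(K.shape)                                   # torch.Size([10, 10]): one entry per pair of candidates
print(torch.allclose(K, K.t()))                  # True: the pairwise squared-distance matrix is symmetric
print(torch.allclose(K.diag(), torch.ones(10)))  # True: self-distance is zero, so exp(0) = 1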
bike_bench_internal/benchmark_models/libmoon/solver/gradient/pmgda.py ADDED
@@ -0,0 +1,14 @@
+ from .base_solver import GradBaseSolver
+
+
+
+ class PMGDASolver(GradBaseSolver):
+     def __init__(self, step_size, max_iter, tol):
+         print('pmgda solver')
+         print()
+
+         super().__init__(step_size, max_iter, tol)
+
+     def solve(self, problem, x, prefs, args):
+         print()
+
bike_bench_internal/benchmark_models/libmoon/solver/gradient/pmtl.py ADDED
@@ -0,0 +1,147 @@
+ import torch
+
+ from .base_solver import GradBaseSolver
+ from matplotlib import pyplot as plt
+ from .min_norm_solvers_numpy import MinNormSolver
+ import numpy as np
+
+
+
+ from torch.autograd import Variable
+ from tqdm import tqdm
+ from ...util_global.constant import solution_eps
+
+ from .mgda_core import solve_mgda
+
+
+
+
+ def get_d_moomtl(grads):
+     """
+     calculate the gradient direction for MOO-MTL
+     """
+     nobj, dim = grads.shape
+     sol, nd = MinNormSolver.find_min_norm_element(grads)
+     return sol
+
+
+ def get_d_paretomtl(grads, value, weights, i):
+     # calculate the gradient direction for Pareto MTL
+     nobj, dim = grads.shape
+
+     # check active constraints
+     normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
+     normalized_rest_weights = np.delete(weights, (i), axis=0) / np.linalg.norm(np.delete(weights, (i), axis=0), axis=1,
+                                                                                keepdims=True)
+     # shape: (n_prob - 1, 2)
+     w = normalized_rest_weights - normalized_current_weight
+     # solve QP
+     gx = np.dot(w, value / np.linalg.norm(value))
+     idx = gx > 0
+     vec = np.concatenate((grads, np.dot(w[idx], grads)), axis=0)
+     # use MinNormSolver to solve QP
+     # vec.shape: (n_obj + n_active_constraints, n_var)
+
+     # sol, nd = MinNormSolver.find_min_norm_element( vec )
+     _, sol = solve_mgda(torch.Tensor(vec), return_coeff=True)
+
+     # reformulate ParetoMTL as a linear scalarization method, return the weights
+     weight0 = sol[0] + np.sum(np.array([sol[j] * w[idx][j - 2, 0] for j in np.arange(2, 2 + np.sum(idx))]))
+     weight1 = sol[1] + np.sum(np.array([sol[j] * w[idx][j - 2, 1] for j in np.arange(2, 2 + np.sum(idx))]))
+     weight = np.stack([weight0, weight1])
+
+     return weight
+
+
+
+
+ def get_d_paretomtl_init(grads, value, weights, i):
+     # calculate the gradient direction for Pareto MTL initialization
+     nobj, dim = grads.shape
+
+     # check active constraints
+     normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
+     normalized_rest_weights = np.delete(weights, (i), axis=0) / np.linalg.norm(np.delete(weights, (i), axis=0), axis=1,
+                                                                                keepdims=True)
+     w = normalized_rest_weights - normalized_current_weight
+     gx = np.dot(w, value / np.linalg.norm(value))
+     idx = gx > 0
+
+
+     if np.sum(idx) <= 0:
+         return np.zeros(nobj)
+     if np.sum(idx) == 1:
+         sol = np.ones(1)
+     else:
+         vecs = np.dot(w[idx], grads)
+         _, sol = solve_mgda(torch.Tensor(vecs), return_coeff=True)
+         # print()
+
+     # calculate the weights
+     weight0 = np.sum(np.array([sol[j] * w[idx][j, 0] for j in np.arange(0, np.sum(idx))]))
+     weight1 = np.sum(np.array([sol[j] * w[idx][j, 1] for j in np.arange(0, np.sum(idx))]))
+     weight = np.stack([weight0, weight1])
+
+     return weight
+
+
+ def circle_points(r, n):
+     # generate evenly distributed preference vectors
+     circles = []
+     for r, n in zip(r, n):
+         t = np.linspace(0, 0.5 * np.pi, n)
+         x = r * np.cos(t)
+         y = r * np.sin(t)
+         circles.append(np.c_[x, y])
+     return circles
+
+
+
+
+ class PMTLSolver(GradBaseSolver):
+     def __init__(self, step_size, max_iter, tol):
+         super().__init__(step_size, max_iter, tol)
+
+     def solve(self, problem, x, prefs, args, ref_point):
+         if args.n_obj != 2:
+             assert False, 'pmtl only supports 2 obj problem'
+
+         x = Variable(x, requires_grad=True)
+         warmup_iter = self.max_iter // 5
+         optimizer = torch.optim.SGD([x], lr=self.step_size)
+
+         y_arr = []
+         for iter_idx in tqdm(range(self.max_iter)):
+             y = problem.evaluate(x)
+             y_np = y.detach().numpy()
+             y_arr.append(y_np)
+
+             grad_arr = [0] * args.n_prob
+             for prob_idx in range(args.n_prob):
+                 grad_arr[prob_idx] = [0] * args.n_obj
+                 for obj_idx in range(args.n_obj):
+                     y[prob_idx][obj_idx].backward(retain_graph=True)
+                     grad_arr[prob_idx][obj_idx] = x.grad[prob_idx].clone()
+                     x.grad.zero_()
+                 grad_arr[prob_idx] = torch.stack(grad_arr[prob_idx])
+
+             grad_arr = torch.stack(grad_arr)
+             grad_arr_np = grad_arr.detach().numpy()
+             if iter_idx < warmup_iter:
+                 weights = [get_d_paretomtl_init(grad_arr_np[i], y_np[i], prefs, i) for i in range(args.n_prob)]
+             else:
+                 weights = [get_d_paretomtl(grad_arr_np[i], y_np[i], prefs, i) for i in range(args.n_prob)]
+
+             optimizer.zero_grad()
+             torch.sum(torch.tensor(weights) * y).backward()
+             optimizer.step()
+
+             if 'lbound' in dir(problem):
+                 x.data = torch.clamp(x.data, torch.Tensor(problem.lbound) + solution_eps, torch.Tensor(problem.ubound) - solution_eps)
+
+         res = {}
+         res['x'] = x.detach().numpy()
+         res['y'] = y.detach().numpy()
+         res['hv_arr'] = [0]
+         res['y_arr'] = y_arr
+         return res
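The circle_points helper above is a small utility for building evenly spaced two-objective preference vectors (PMTLSolver itself receives prefs from its caller). A minimal usage sketch, assuming it is run where circle_points is defined:

import numpy as np

prefs = circle_points([1.0], [5])[0]      # one (5, 2) array for radius 1.0
print(prefs.shape)                        # (5, 2)
print(np.linalg.norm(prefs, axis=1))      # all ~1.0: the points sit on the unit quarter circle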
bike_bench_internal/benchmark_models/libmoon/solver/gradient/run/__init__.py ADDED
File without changes
bike_bench_internal/benchmark_models/libmoon/solver/gradient/run/run_grad.py ADDED
@@ -0,0 +1,99 @@
+ import numpy as np
+ import os
+ import sys
+ print(f"vscode current run path is {os.getcwd()}")
+ os.chdir(sys.path[0])
+ print("set py path as current path")
+ print(f"vscode current run path is {os.getcwd()}")
+
+ from libmoon.solver.gradient import MGDASolver, GradAggSolver, EPOSolver, MOOSVGDSolver, GradHVSolver, PMTLSolver
+ from libmoon.util_global.weight_factor.funs import uniform_pref
+ from libmoon.util_global.constant import problem_dict
+ from libmoon.visulization.view_res import vis_res, vedio_res
+ import argparse
+ import torch
+ from matplotlib import pyplot as plt
+ import pickle
+ import time
+
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser(description='example script')
+     parser.add_argument('--n-partition', type=int, default=10)
+     parser.add_argument('--agg', type=str, default='tche')  # If the solver is 'agg', choose a specific aggregation method.
+     parser.add_argument('--solver', type=str, default='hvgrad')
+     # ['agg', 'epo', 'moosvgd', 'hvgrad', 'pmtl', 'mgda']
+     parser.add_argument('--problem-name', type=str, default='VLMOP2')
+     parser.add_argument('--iter', type=int, default=2000)
+     parser.add_argument('--step-size', type=float, default=0.1)
+     parser.add_argument('--tol', type=float, default=1e-6)
+     parser.add_argument('--plt-pref-flag', type=str, default='N')
+
+     args = parser.parse_args()
+     problem = problem_dict[args.problem_name]
+     args.n_obj, args.n_var = problem.n_obj, problem.n_var
+     root_name = os.path.dirname(os.path.dirname(__file__))
+
+
+     if args.solver == 'mgda':
+         solver = MGDASolver(args.step_size, args.iter, args.tol)
+     elif args.solver == 'agg':
+         solver = GradAggSolver(args.step_size, args.iter, args.tol)
+     elif args.solver == 'epo':
+         solver = EPOSolver(args.step_size, args.iter, args.tol)
+     elif args.solver == 'moosvgd':
+         solver = MOOSVGDSolver(args.step_size, args.iter, args.tol)
+     elif args.solver == 'hvgrad':
+         solver = GradHVSolver(args.step_size, args.iter, args.tol)
+     elif args.solver == 'pmtl':
+         solver = PMTLSolver(args.step_size, args.iter, args.tol)
+     elif args.solver == 'pmgda':
+         assert False, 'will be implemented soon'
+     else:
+         raise Exception('solver not supported')
+
+     if args.solver == 'agg':
+         args.folder_name = os.path.join(root_name, 'output', args.problem_name, '{}_{}'.format(args.solver, args.agg))
+     else:
+         args.folder_name = os.path.join(root_name, 'output', args.problem_name, args.solver)
+
+     os.makedirs(args.folder_name, exist_ok=True)
+     prefs = uniform_pref(args.n_partition, problem.n_obj, clip_eps=1e-2)
+
+     args.n_prob = len(prefs)
+     if 'lbound' in dir(problem):
+         if args.problem_name == 'VLMOP1':
+             x0 = torch.rand(args.n_prob, problem.n_var) * 2 / np.sqrt(problem.n_var) - 1 / np.sqrt(problem.n_var)
+         else:
+             x0 = torch.rand(args.n_prob, problem.n_var)
+     else:
+         x0 = torch.rand(args.n_prob, problem.n_var) * 20 - 10
+
+
+     ts = time.time()
+     res = solver.solve(problem, x=x0, prefs=prefs, args=args)
+
+     elapsed = time.time() - ts
+     res['elapsed'] = elapsed
+
+
+     use_fig = False
+     if use_fig:
+         vis_res(res, problem, prefs, args)
+         fig_name = os.path.join(args.folder_name, 'res.svg')
+         plt.savefig(fig_name)
+         print('Save fig to %s' % fig_name)
+         plt.show()
+
+
+     use_vedio = True
+     if use_vedio:
+         vedio_res(res, problem, prefs, args)
+
+
+     pickle_name = os.path.join(args.folder_name, 'res.pkl')
+     with open(pickle_name, 'wb') as f:
+         pickle.dump(res, f)
+
+     print('Save pickle to %s' % pickle_name)
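run_grad.py is driven entirely by the argparse flags above. The same flow can also be exercised as a library call; the sketch below mirrors the script for the pmtl solver and is an untested outline (it assumes the libmoon package layout added in this commit, builds the args namespace by hand instead of parsing flags, and passes ref_point=None since PMTLSolver.solve declares that parameter but does not use it):

import argparse
import torch
from libmoon.solver.gradient import PMTLSolver
from libmoon.util_global.constant import problem_dict
from libmoon.util_global.weight_factor.funs import uniform_pref

# Hand-built equivalent of `python run_grad.py --solver pmtl --problem-name VLMOP2 --iter 200`
args = argparse.Namespace(solver='pmtl', problem_name='VLMOP2', iter=200,
                          step_size=0.1, tol=1e-6, n_partition=10)
problem = problem_dict[args.problem_name]
args.n_obj, args.n_var = problem.n_obj, problem.n_var

prefs = uniform_pref(args.n_partition, problem.n_obj, clip_eps=1e-2)
args.n_prob = len(prefs)
x0 = torch.rand(args.n_prob, problem.n_var)   # simple init; the script picks the range from the problem bounds

solver = PMTLSolver(args.step_size, args.iter, args.tol)
res = solver.solve(problem, x=x0, prefs=prefs, args=args, ref_point=None)
print(res['x'].shape, res['y'].shape)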
bike_bench_internal/benchmark_models/libmoon/solver/gradient/utils/__init__.py ADDED
File without changes