reviewOptimiserRuns.py
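"""
Collate the results of ionbench optimiser runs across benchmark problems.

For each benchmark, load the per-run tracking files for every unique approach
(optimiser + modification), record success, cost/gradient evaluation counts and
solve times, compute expected run times (ERT), and write a full results file
and a ranked summary to CSV.
"""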
import ionbench
import numpy as np
import importlib
import csv
import os
import pandas
import re
from ionbench.utils.results import expected_time
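
# Benchmark problems to collate results for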
bms = [ionbench.problems.staircase.HH(), ionbench.problems.staircase.MM(), ionbench.problems.loewe2016.IKr(), ionbench.problems.loewe2016.IKur(), ionbench.problems.moreno2016.INa()]
for bm in bms:
    bm.plotter = False
    # Find benchmark short name
    bmShortName = bm.NAME.split('.')[1].lower()
    # Find out how many runs were attempted
    maxRuns = 0
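    # Load run files in sequence until one is missing; the highest run index reached
    # across all approaches gives the number of runs that were attempted.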
    for app in ionbench.APP_UNIQUE:
        i = 0
        try:
            while True:
                bm.tracker.load(f"{bmShortName}_{app['module']}modNum{app['modNum']}_run{i}.pickle")
                i += 1
        except FileNotFoundError:
            maxRuns = max(maxRuns, i)
    print(f"{maxRuns} runs were attempted.")
    allData = []
    # Loop through all unique approaches
    for app in ionbench.APP_UNIQUE:
        # Print the approach and modification
        print('---------------')
        optimiserName = app['module'].split('.')[-1]
        mod = importlib.import_module(app['module']).get_modification(app['modNum'])
        modName = mod.NAME
        # Output data
        data = {'Optimiser Name': optimiserName, 'Mod Name': modName}
        print(f'Collating results for approach: {optimiserName}, modification: {modName}')
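        # Check that the tracking file for the final run exists; if it does not, results
        # for this approach are incomplete and the entry is left with missing values.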
        try:
            bm.tracker.load(f"{bmShortName}_{app['module']}modNum{app['modNum']}_run{maxRuns-1}.pickle")
        except FileNotFoundError:
            print('Not all tracking files were found. Filling data with nans.')
            # Not all tracking files were found
            allData.append(data)
            continue
        for runNum in range(maxRuns):
            # For each run, load the tracking file and extract the data
            bm.tracker.load(f"{bmShortName}_{app['module']}modNum{app['modNum']}_run{runNum}.pickle")
            # bm.evaluate()
            # Get data at convergence
            i = bm.tracker.when_converged(bm.COST_THRESHOLD)
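            # Split the solve time up to index i into time spent on cost evaluations (a)
            # and time spent on gradient evaluations (b).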
            a, b = bm.tracker.total_solve_time(i)
            data[f'Run {runNum} - Cost Time'] = a
            data[f'Run {runNum} - Grad Time'] = b
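            # when_converged returns None if the cost threshold was never reached;
            # fall back to the final recorded entry in that case.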
            i = -1 if i is None else i
            try:
                data[f'Run {runNum} - Successful'] = bm.tracker.bestCosts[i] < bm.COST_THRESHOLD
            except IndexError:
                data[f'Run {runNum} - Successful'] = False
            try:
                data[f'Run {runNum} - Cost Evals'] = bm.tracker.costSolves[i]
            except IndexError:
                data[f'Run {runNum} - Cost Evals'] = 0
            try:
                data[f'Run {runNum} - Grad Evals'] = bm.tracker.gradSolves[i]
            except IndexError:
                data[f'Run {runNum} - Grad Evals'] = 0
            try:
                data[f'Run {runNum} - Cost'] = bm.tracker.bestCosts[i]
            except IndexError:
                data[f'Run {runNum} - Cost'] = np.inf
            bm.reset()
        # Calculate the success rate
        successOrFail = [data[f'Run {i} - Successful'] for i in range(maxRuns)]
        data['Success Rate'] = np.mean(successOrFail)
        # Tier 1 if at least one run succeeded, otherwise tier 2
        data['Tier'] = 1 if np.any(successOrFail) else 2
        costTime = [data[f'Run {i} - Cost Time'] for i in range(maxRuns)]
        costEvals = [data[f'Run {i} - Cost Evals'] for i in range(maxRuns)]
        gradTime = [data[f'Run {i} - Grad Time'] for i in range(maxRuns)]
        gradEvals = [data[f'Run {i} - Grad Evals'] for i in range(maxRuns)]
        # Expected run time (ERT) to success, in wall time and in cost/gradient evaluations
        data['ERT - Time'] = expected_time(costTime, successOrFail) + expected_time(gradTime, successOrFail)
        data['ERT - Cost Evals'] = expected_time(costEvals, successOrFail)
        data['ERT - Grad Evals'] = expected_time(gradEvals, successOrFail)
        if data['Tier'] == 1:
            print(f'There were successes. Success rate: {data["Success Rate"]}')
        else:
            print('There were no successes.')
        data['Expected Cost'] = np.mean([data[f'Run {i} - Cost'] for i in range(maxRuns)])
        data['Success Count'] = np.sum([data[f'Run {i} - Successful'] for i in range(maxRuns)])
        data['Failure Count'] = maxRuns - data['Success Count']
        data['Average Runtime'] = np.mean([data[f'Run {i} - Cost Time'] + data[f'Run {i} - Grad Time'] for i in range(maxRuns)])
        allData.append(data)
    df = pandas.DataFrame.from_records(allData)
    # Produce summary information
    costEvals = np.sum([df[f'Run {i} - Cost Evals'].sum() for i in range(maxRuns)])
    costTime = np.sum([df[f'Run {i} - Cost Time'].sum() for i in range(maxRuns)])
    gradEvals = np.sum([df[f'Run {i} - Grad Evals'].sum() for i in range(maxRuns)])
    gradTime = np.sum([df[f'Run {i} - Grad Time'].sum() for i in range(maxRuns)])
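    # Relative cost of a gradient evaluation compared to a cost evaluation, estimated
    # from the average solve times and capped at n_parameters()+1 (the number of cost
    # evaluations a one-sided finite-difference gradient would need).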
    gradToCost = (gradTime/gradEvals)/(costTime/costEvals)
    if gradToCost > bm.n_parameters()+1:
        gradToCost = bm.n_parameters()+1
    # Take an explicit copy so summary columns can be added without pandas' SettingWithCopyWarning
    summary = df[['Optimiser Name', 'Mod Name', 'Tier', 'Success Rate', 'Success Count', 'Failure Count', 'Average Runtime']].copy()
    summary['Time Ratio'] = gradToCost
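    # SPSA estimates the gradient from two cost evaluations regardless of the number
    # of parameters, so its time ratio is fixed at 2.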
    summary.loc[summary['Optimiser Name'] == 'SPSA_Spall1998', 'Time Ratio'] = 2
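    # Combine cost and gradient ERTs into a single evaluation count, weighting
    # gradient evaluations by their relative expense.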
    summary['ERT - Evals'] = df['ERT - Cost Evals'] + summary['Time Ratio'] * df['ERT - Grad Evals']
    df['ERT - Evals'] = summary['ERT - Evals']
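    # Rank approaches: successful (tier 1) approaches first, then by ERT in evaluations,
    # with average runtime as a tie-breaker.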
    summary = summary.sort_values(['Tier', 'ERT - Evals', 'Average Runtime'])
    summary.to_csv(f'resultsSummary-{bmShortName}.csv', index=False, na_rep='NA')
    df = df.sort_values(['Tier', 'ERT - Evals', 'Average Runtime'])
    df.to_csv(f'resultsFile-{bmShortName}.csv', index=False, na_rep='NA')
    print(f'Average cost time: {costTime/costEvals}, Average grad time: {gradTime/gradEvals}, Ratio: {gradToCost}')