-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun_pairwise.py
executable file
·314 lines (252 loc) · 9.65 KB
/
run_pairwise.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
#!/usr/bin/env python
#
# BBSA Auto-benchmark
import logging
logformat = '%(asctime)s:%(levelname)s:%(message)s'
loglevel = logging.WARNING
logging.basicConfig(filename='pairlog.log', format=logformat, level=loglevel)
import sys
import json
from urllib2 import urlopen
import beanstalkc
from itertools import product
from utility import statistics
from representations.bit_string import bit_string
from fitness_functions import fitness_nk_landscape as nk
from search_algorithms.mu_lambda_ea import make_EA_solver as make_EA
from search_algorithms.simulated_annealing import make_SA_solver as make_SA
from search_algorithms.climb_hill import make_climb_hill_solver as make_CH
from search_algorithms.random_search import make_random_search_solver as make_RA
from selectors import make_LR_SUS
# --- runtime configuration -------------------------------------------
distributed = False       # set True by the argv dispatch at the bottom of the file
stalk = None              # beanstalk connection; assigned in distributed (coordinator) mode
genome_length = 32        # bits per candidate bit-string solution
ea_mu = 100               # inner EA population size (also size built by population_maker)
ea_lam = 10               # inner EA parent-selection size
inner_runs = 10           # repetitions of each inner solver per landscape
inner_max_evals = 10000   # fitness-evaluation budget per inner run
fit_mu = 30               # outer EA population size (NK landscapes)
fit_lam = 6               # outer EA parent-selection size
outer_max_evals = 500     # outer EA evaluation budget
output_file = "pairwinners.txt"
# a single optional command-line argument overrides the results file name
if len(sys.argv) == 2:
    output_file = sys.argv[1]
    print "using output file", output_file
def initial_fits():
    '''Build the outer EA's starting population: fit_mu random
    NK landscapes over genome_length bits.'''
    population = []
    for _ in xrange(fit_mu):
        population.append(nk(n=genome_length))
    return population
def random_solution_maker():
    '''Create one random bit-string genome (used by the
    single-solution search algorithms).'''
    return bit_string().get_random(genome_length)
def population_maker():
    '''Create a full random population of ea_mu genomes (used by the
    population-based search algorithms).'''
    pop = []
    for _ in xrange(ea_mu):
        pop.append(random_solution_maker())
    return pop
def inner_wrapped_make_EA(evals, fitness):
    '''Adapt make_EA to the common (evals, fitness) maker signature.

    NOTE(review): evals is accepted for signature uniformity with the
    other wrappers but is not forwarded to make_EA -- confirm the EA
    obtains its evaluation budget elsewhere.
    '''
    survival = make_LR_SUS(fitness=fitness, s=2.0, n=ea_mu)
    parents = make_LR_SUS(fitness=fitness, s=2.0, n=ea_lam)
    return make_EA(make_initial_population=population_maker,
                   survival_selector=survival,
                   parent_selector=parents,
                   fitness=fitness,
                   return_best=True)
inner_wrapped_make_EA.name = 'EA'
def inner_wrapped_make_SA(evals, fitness):
    '''Adapt make_SA to the common (evals, fitness) maker signature.'''
    return make_SA(fitness=fitness,
                   initial_solution_maker=random_solution_maker,
                   evals=evals)
inner_wrapped_make_SA.name = 'SA'
def inner_wrapped_make_CH(evals, fitness):
    '''Adapt make_CH (hill climber) to the common (evals, fitness)
    maker signature.'''
    return make_CH(fitness=fitness,
                   initial_solution_maker=random_solution_maker,
                   evals=evals)
inner_wrapped_make_CH.name = 'CH'
def inner_wrapped_make_RA(evals, fitness):
    '''Adapt make_RA (random search) to the common (evals, fitness)
    maker signature.'''
    return make_RA(fitness=fitness,
                   initial_solution_maker=random_solution_maker,
                   evals=evals)
inner_wrapped_make_RA.name = 'RA'
# dispatch table: solver tag -> adapter factory; each key matches the
# factory's .name attribute (used by the client to run queued jobs)
maker_names = {'RA': inner_wrapped_make_RA,
               'CH': inner_wrapped_make_CH,
               'SA': inner_wrapped_make_SA,
               'EA': inner_wrapped_make_EA}
# worst fitness value observed so far; serves as the lower bound when
# normalizing performance scores (reset per fresh landscape in fit_fit)
worst_ever = None
def normalize(s, key=lambda x: x):
    '''Rescale the values in s onto roughly [0, 1], using the global
    worst_ever as the lower bound and max(s) as the upper bound.

    NOTE(review): key is applied only when selecting the max element;
    the subtraction and scaling below use raw values, so a non-identity
    key would give inconsistent results.  Every caller in this file
    uses the identity default.
    NOTE(review): raises ZeroDivisionError when max(s) == worst_ever,
    and TypeError if worst_ever is still None -- confirm callers always
    populate it first.
    '''
    global worst_ever
    maxfit = max(s, key=key)
    # minfit = min(s, key=key)
    minfit = worst_ever
    scale = float(maxfit - minfit)
    result = [(x - minfit) / scale for x in s]
    return result
def normalize2(s, upper, lower):
    '''Linearly rescale each value in s given known bounds, so that
    lower maps to 0.0 and upper maps to 1.0.'''
    span = float(upper - lower)
    rescaled = []
    for value in s:
        rescaled.append((value - lower) / span)
    return rescaled
def get_performance(fitness_function, search_maker,
n=inner_runs, evals=inner_max_evals):
'''run search against fitness_function n times, allowing evals
fitness evaluations per run. return a list corresponding to the
fitness value of the best solution found per run'''
global worst_ever
search = search_maker(evals=evals, fitness=fitness_function)
# results = [search() for _ in xrange(n)]
results = list()
for _ in xrange(n):
results.append(search())
worst = search.func_globals['worst_ever']
print "worst individual:", worst
if worst_ever is None or worst < worst_ever:
print "new worst ever!"
worst_ever = worst
result_fits = [fitness_function(x) for x in results]
print "resulting fits:", result_fits
return statistics(result_fits)
def fit_fit(fitness_function, makers, index):
'''index is the index of the algorithm that is supposed to win'''
global worst_ever
if not hasattr(fitness_function, 'fitness'):
worst_ever = None
performs = [get_performance(fitness_function, sa)['mean']
for sa in makers]
performs = normalize(performs)
print "performs:", performs
diffs = [performs[index] - x
for x in performs[:index] + performs[index + 1:]]
print "diffs:", diffs
fitness_function.fitness = min(diffs)
print "fitness of this NK landscape:", fitness_function.fitness
return fitness_function.fitness
def distributed_fit_fit(fitness_function, makers, index):
    '''Distributed counterpart of fit_fit: publish the landscape, farm
    the inner solver runs out to beanstalk workers, collect their
    best/worst results, and score the landscape by how much
    makers[index] beats the rest.  Caches the score as .fitness.'''
    global stalk
    global worst_ever
    # a landscape without a cached .fitness is fresh: reset the tracker
    if not hasattr(fitness_function, 'fitness'):
        worst_ever = None
    best_ever = None
    # make the landscape fetchable by workers before queueing jobs
    stow_nk_landscape(fitness_function)
    nkID = fitness_function.nkID
    jobID = 0
    # make a job request for each run that needs to happen
    for maker in makers:
        for _ in xrange(inner_runs):
            req = {'jobID': jobID,
                   'nkID': nkID,
                   'bbsa': maker.name}
            stalk.put(json.dumps(req))
            jobID += 1
    results = {name: list() for name in maker_names.keys()}
    # collect exactly one result per queued job (len(makers)*inner_runs)
    for _ in makers:
        for _ in xrange(inner_runs):
            job = stalk.reserve()
            result = json.loads(job.body)
            job.delete()
            results[result['bbsa']].append(result['best'])
            # track global best/worst to use as normalization bounds
            if best_ever is None or result['best'] > best_ever:
                best_ever = result['best']
            if worst_ever is None or result['worst'] < worst_ever:
                worst_ever = result['worst']
    performs = list()
    for name in [x.name for x in makers]:
        performs.append(statistics(results[name])['mean'])
    # keep the un-normalized means around for the debug print below
    kept = performs
    performs = normalize2(performs, best_ever, worst_ever)
    print index, kept, performs
    diffs = [performs[index] - x
             for x in performs[:index] + performs[index + 1:]]
    fitness_function.fitness = min(diffs)
    print "fitness of", fitness_function.nkID, fitness_function.fitness
    return fitness_function.fitness
def do_eet(index, makers):
    '''Run the outer EA to evolve an NK landscape on which
    makers[index] beats the other makers; return the best landscape
    found.  Uses the distributed scorer when distributed mode is on.'''
    global distributed
    scorer = distributed_fit_fit if distributed else fit_fit

    def outer_fit(landscape):
        return scorer(landscape, makers, index)

    def make_selector(size):
        return make_LR_SUS(fitness=outer_fit, s=2.0, n=size)

    def make_state():
        # NOTE(review): 'loser' hard-codes makers[1]; this is only the
        # actual loser when index == 0, which is the only way do_eet
        # is invoked in this file.
        return {'evals': 0,
                'max_evals': outer_max_evals,
                'solverID': makers[index].name,
                'loser': makers[1].name}

    outer_ea = make_EA(make_initial_population=initial_fits,
                       survival_selector=make_selector(fit_mu),
                       parent_selector=make_selector(fit_lam),
                       make_initial_state=make_state,
                       fitness=outer_fit,
                       noise=True, return_best=True)
    return outer_ea()
def for_a_size(x):
    '''Run every ordered pair of distinct solvers at genome length x,
    appending each pair's best evolved NK landscape (and its structure)
    to output_file.

    Fixes: the output file is now managed by a with-block so it is
    closed even if do_eet raises, and the report comprehension no
    longer shadows the parameter x.
    '''
    global genome_length
    genome_length = x
    makers = [inner_wrapped_make_EA,
              inner_wrapped_make_SA,
              inner_wrapped_make_CH,
              inner_wrapped_make_RA]
    with open(output_file, 'a') as f:
        for pair in product(makers, makers):
            if pair[0].name == pair[1].name:
                continue  # a solver never competes against itself
            # index 0: evolve a landscape where pair[0] beats pair[1]
            best = do_eet(0, pair)
            f.write("-----------------------------------------\n")
            f.write("Best NK-Landscape for %s over %s\n" %
                    tuple([m.name for m in pair]))
            f.write("Fitness: %f\n" % best.fitness)
            f.write("%s\n" % best)
            f.write("%s\n" % best.neighborses)
            f.write("%s\n" % best.subfuncs)
            f.flush()  # keep partial results on disk between pairs
def main():
    '''Entry point: run the full pairwise benchmark at the default
    genome length of 32 bits.'''
    for_a_size(32)
def fetch_nk_landscape(nkID):
    '''Download the serialized NK landscape with the given id from the
    coordinator's HTTP share, deserialize it, and tag it with its id.

    Fix: the response handle is now closed even if read() raises.
    NOTE(review): loads() deserializes data fetched over plain HTTP --
    if it is pickle-based this trusts the coordinator host completely.
    '''
    template = 'http://r10mannr4.device.mst.edu:8080/bbsa/%i'
    f = urlopen(template % nkID)
    try:
        pickled = f.read()
    finally:
        f.close()
    landscape = nk()
    landscape.loads(pickled)
    landscape.nkID = nkID
    return landscape
def stow_nk_landscape(landscape):
    '''Publish a landscape to the nginx web root, keyed by its nkID,
    so workers can fetch it (see fetch_nk_landscape).

    Fix: a with-block guarantees the file is closed/flushed even if
    dumps() or the write raises.
    '''
    template = "/usr/share/nginx/www/bbsa/%i"
    with open(template % landscape.nkID, 'w') as f:
        f.write(landscape.dumps())
def client():
    '''Worker loop: pull solver-run job requests off the beanstalk
    queue, execute them locally, and publish the results.  Runs
    forever; never returns.'''
    print "entering client mode!"
    # local connection -- deliberately distinct from the module-level
    # `stalk` used by the coordinator
    stalk = beanstalkc.Connection(host="r10mannr4.device.mst.edu")
    stalk.watch('bbsa-job-requests')
    stalk.use('bbsa-job-results')
    landscapes = dict()  # cache: nkID -> fetched landscape
    while True:
        job = stalk.reserve()  # waits for the next queued job
        data = json.loads(job.body)
        print "processing job", data['jobID']
        # fetch each landscape from the coordinator at most once
        if data['nkID'] not in landscapes:
            landscapes[data['nkID']] = fetch_nk_landscape(data['nkID'])
        fitness = landscapes[data['nkID']]
        search = maker_names[data['bbsa']](evals=inner_max_evals,
                                           fitness=fitness)
        data['best'] = fitness(search())
        # the solver module's globals record the worst individual seen
        data['worst'] = search.func_globals['worst_ever']
        print data
        stalk.put(json.dumps(data))
        job.delete()
# --- entry-point dispatch on argument count --------------------------
# 5 command-line args: run as a worker processing queued jobs (the
# mnemonic comments appear to be arbitrary count reminders)
if len(sys.argv) == 5:  # all cows eat grass
    client()
# 6 command-line args: run as the distributed coordinator
if len(sys.argv) == 6:  # good boys do fine always
    distributed = True
    stalk = beanstalkc.Connection(host="r10mannr4.device.mst.edu")
    stalk.watch('bbsa-job-requests')
    # drain stale job requests left over from a previous run
    while stalk.stats_tube('bbsa-job-requests')['current-jobs-ready'] > 0:
        job = stalk.reserve()
        job.delete()
    stalk.ignore('bbsa-job-requests')
    stalk.watch('bbsa-job-results')
    # drain stale results as well
    while stalk.stats_tube('bbsa-job-results')['current-jobs-ready'] > 0:
        job = stalk.reserve()
        job.delete()
    stalk.use('bbsa-job-requests')
    # NOTE(review): when run as a script with 6 args, main() executes
    # here and then again under the __main__ guard below -- confirm the
    # double run is intended
    main()
if __name__ == "__main__":
    main()
# import cProfile
# cProfile.run('main()', 'profiling.prof')