#return outputval
        return tuple


#define the search space.
objective = obj_func('./all-cnn_bi_msphere.py')

real_space = ContinuousSpace([0.0, 4.0], 'real_space') * 5
integer_space = OrdinalSpace([0, 4], 'integer_space') * 5
discrete_space = NominalSpace(['0', '1', '2', '3', '4'], 'discrete_space') * 5

search_space = real_space * integer_space * discrete_space
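# the product combines the subspaces into one 15-dimensional mixed search space (5 continuous, 5 integer, 5 categorical variables)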

print('starting program...')
#available_gpus = gp.getAvailable(limit=2)
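# query the ids of up to 5 GPUs that are currently available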
available_gpus = gp.getAvailable(limit=5)
#try:
#available_gpus.remove(0)#CHRIS gpus 0 and 5 are different gpu types on duranium; since they are faster, timing will be unreliable, so remove them from the list
#except:
#pass
#try:
#available_gpus.remove(5)
#except:
#pass
print(available_gpus)

n_job = max(min(5, len(available_gpus)), 1)
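# i.e. run one evaluation job per available GPU, at most 5 and at least 1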

# use random forest as the surrogate model
#CHRIS two surrogate models are needed
model = RandomForest(levels=search_space.levels, n_estimators=100)
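# search_space.levels presumably tells the random forest which variables are categorical and which values they can take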
# Example 2
    def _eval_gpu(self, x, gpu=0, runs=1):
        """
        evaluate one solution
        """
        with open(self.save_name + '_thread_log.json', 'a') as outfile:
            outfile.write('thread ' + str(gpu) + ': step 3 gpu 1\n')
        # TODO: sometimes the obj_func takes a dictionary as input...
        time_, loss_, n_eval = x.time, x.loss, x.n_eval
        with open(self.save_name + '_thread_log.json', 'a') as outfile:
            outfile.write('thread ' + str(gpu) + ': step 3 gpu 2\n')
        # try:
            # ans = [self.obj_func(x.tolist()) for i in range(runs)]
        # except:
        #ans = [self.obj_func(x.to_dict(), gpu_no=gpu) for i in range(runs)]
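        # start on the GPU assigned to this thread; if the evaluation fails, search for another free GPU below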
        gpu_patch = gpu
        while True:
            with open(self.save_name + '_thread_log.json', 'a') as outfile:
                outfile.write('thread ' + str(gpu) + ': step 3 gpu 3\n')
            ans = self.obj_func(x.to_dict(), gpu_no=gpu_patch, eval_epochs=self.eval_epochs, save_name=self.save_name, data_augmentation=self.data_augmentation, use_validation=self.use_validation)
            with open(self.save_name + '_thread_log.json', 'a') as outfile:
                outfile.write('thread ' + str(gpu) + ': step 3 gpu 4\n')
            print("n_left,max_iter:")
            print(self.n_left,self.max_iter)
            print('_eval_gpu():')
            print(ans)
            time_ans, loss_ans, success = ans[0], ans[1], ans[2]
            with open(self.save_name + '_thread_log.json', 'a') as outfile:
                outfile.write('thread ' + str(gpu) + ': step 3 gpu 5\n')
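            # leave the retry loop once the objective function reports a successful run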
            if success:
                with open(self.save_name + '_thread_log.json', 'a') as outfile:
                    outfile.write('thread ' + str(gpu) + ': step 3 gpu 6a\n')
                break
            else:
                while True:
                    with open(self.save_name + '_thread_log.json', 'a') as outfile:
                        outfile.write('thread ' + str(gpu) + ': step 3 gpu 6b\n')
                    print('gpu ' + str(gpu_patch) + ' failed to give answer, searching for new gpu')
                    available_gpus_patch = gp.getAvailable(limit=5)
                    with open(self.save_name + '_thread_log.json', 'a') as outfile:
                        outfile.write('thread ' + str(gpu) + ': step 3 gpu 6b2\n')
                    # drop any GPU ids that were explicitly marked to be ignored
                    for ignored in self.ignore_gpu:
                        try:
                            available_gpus_patch.remove(ignored)
                        except ValueError:
                            pass
                    with open(self.save_name + '_thread_log.json', 'a') as outfile:
                        outfile.write('thread ' + str(gpu) + ': step 3 gpu 6b3\n')
                    if len(available_gpus_patch) > 0:
                        with open(self.save_name + '_thread_log.json', 'a') as outfile:
                            outfile.write('thread ' + str(gpu) + ': step 3 gpu 6b4a\n')
                        gpu_patch = available_gpus_patch[0]
                        break
                    else:
                        with open(self.save_name + '_thread_log.json', 'a') as outfile:
                            outfile.write('thread ' + str(gpu) + ': step 3 gpu 6b4b\n')
                        print('no gpus available, waiting 60 seconds')
                        time.sleep(60)

        with open(self.save_name + '_thread_log.json', 'a') as outfile:
            outfile.write('thread ' + str(gpu) + ': step 3 gpu 7\n')
        #TODO_CHRIS make this work when runs != 1
        #time_ans = []
        #loss_ans = []
        #for i in range(len(ans)):
        #    time_ans.append(ans[i][0])
        #    loss_ans.append(ans[i][1])
        
        #TODO_CHRIS apply S-metric to all solutions to get fitness
        #so here take average of loss and time
        time_loc = np.sum(time_ans)
        loss_loc = np.sum(loss_ans)
        
        #fitness = np.sum(ans)#CHRIS removed, because fitness will be determined later

        x.n_eval += runs
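        # fold the new measurement into the running averages of time and loss for this candidate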
        x.time = time_loc / runs if time_ is None else (time_ * n_eval + time_loc) / x.n_eval
        x.loss = loss_loc / runs if loss_ is None else (loss_ * n_eval + loss_loc) / x.n_eval

        #self.eval_count += runs#CHRIS no double counting
        self.eval_hist_loss += [loss_ans] #CHRIS added time and loss history
        self.eval_hist_time += [time_ans]
        self.eval_hist_id += [x.index] * runs

        with open(self.save_name + '_thread_log.json', 'a') as outfile:
            outfile.write('thread ' + str(gpu) + ': step 3 gpu 8\n')
        
        return x, runs, time_loc, loss_loc, [x.index] * runs