print("error in receiving answer from gpu " + str(gpu_no)) success = False try: tuple = (float(tuple_str1),float(tuple_str2),success) except: tuple = (0.0,0.0,False) #return outputval return tuple #define the search space. objective = obj_func('./all-cnn_bi.py') activation_fun = ["softmax"] activation_fun_conv = ["elu","relu","tanh","sigmoid","selu"] filters = OrdinalSpace([10, 600], 'filters') * 7 kernel_size = OrdinalSpace([1, 6], 'k') * 7 strides = OrdinalSpace([1, 5], 's') * 3 stack_sizes = OrdinalSpace([1, 5], 'stack') * 3 #TODO_CHRIS these changes are just for cigar test function #filters = OrdinalSpace([0, 5], 'filters') * 7 #kernel_size = OrdinalSpace([0, 5], 'k') * 7 #strides = OrdinalSpace([0, 5], 's') * 3 #stack_sizes = OrdinalSpace([0, 5], 'stack') * 3 #TODO_CHRIS these changes are just for cigar test function activation = NominalSpace(activation_fun_conv, "activation") # activation function activation_dense = NominalSpace(activation_fun, "activ_dense") # activation function for dense layer step = NominalSpace([True, False], "step") # step global_pooling = NominalSpace([True, False], "global_pooling") # global_pooling
print("error in receiving answer from gpu " + str(gpu_no)) success = False try: tuple = (float(tuple_str1),float(tuple_str2),success) except: tuple = (0.0,0.0,False) #return outputval return tuple for it in range(10): np.random.seed(it) #define the search space. objective = obj_func('./all-cnn_bi_mbarrier.py') real_space = ContinuousSpace([0.0, 4.0],'real_space') * 5 integer_space = OrdinalSpace([0,4],'integer_space') * 5 discrete_space = NominalSpace(['0','1','2','3','4'],'discrete_space') * 5 search_space = real_space * integer_space * discrete_space print('starting program...') #available_gpus = gp.getAvailable(limit=2) available_gpus = gp.getAvailable(limit=5) #try: #available_gpus.remove(0)#CHRIS gpu 0 and 5 are differen gpu types on duranium since they are faster, timing will be unreliable, so remove them from list #except: #pass #try: #available_gpus.remove(5) #except:
def test_skippy():
    from mipego.mipego import Solution  #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model

    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

    filters = OrdinalSpace([10, 100], 'filters') * 14  #TODO [10, 100] should be [10, 600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4], 'stack') * 7  #TODO [0, 4] should be [0, 7]

    activation = NominalSpace(activation_fun_conv, "activation")  # activation function
    activation_dense = NominalSpace(activation_fun, "activ_dense")  # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling

    #skippy parameters
    skints = OrdinalSpace([0, 2**50 - 1], 'skint') * 3  #CHRIS TODO tweak this
    skst = OrdinalSpace([2, 10], 'skst') * 3
    dense_size = OrdinalSpace([0, 1200], 'dense_size') * 2
    no_pooling = NominalSpace([True, False], "no_pooling")
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 8  # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')  # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')  # l2_regularizer

    search_space = (stack_sizes * strides * filters * kernel_size * activation
                    * activation_dense * drop_out * lr_rate * l2_regularizer
                    * step * global_pooling * skints * skst * dense_size * no_pooling)

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #ResNet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0 = 2
    s_1 = 2
    s_2 = 1
    s_3 = 2
    s_4 = 1
    s_5 = 2
    s_6 = 1
    filters_0 = 64 * 2
    filters_1 = 64 * 2
    filters_2 = 64 * 2
    filters_3 = 64 * 2
    filters_4 = 128 * 2
    filters_5 = 128 * 2
    filters_6 = 128 * 2
    filters_7 = 128 * 2
    filters_8 = 256 * 2
    filters_9 = 256 * 2
    filters_10 = 256 * 2
    filters_11 = 256 * 2
    filters_12 = 512 * 2
    filters_13 = 512 * 2
    k_0 = 7
    k_1 = 3
    k_2 = 3
    k_3 = 3
    k_4 = 3
    k_5 = 3
    k_6 = 3
    k_7 = 3
    k_8 = 3
    k_9 = 3
    k_10 = 3
    k_11 = 3
    k_12 = 3
    k_13 = 3
    activation = 'relu'
    activ_dense = 'softmax'
    dropout_0 = 0.001
    dropout_1 = 0.001
    dropout_2 = 0.001
    dropout_3 = 0.001
    dropout_4 = 0.001
    dropout_5 = 0.001
    dropout_6 = 0.001
    dropout_7 = 0.001
    lr = 0.1
    l2 = 0.0001
    step = True
    global_pooling = True

    #skippy parameters
    #build an alternating skip-connection bit pattern over the stack sizes
    om_en_om = 1
    ranges = [stack_6, stack_5, stack_4, stack_3, stack_2, stack_1, stack_0]
    for w in range(len(ranges)):  #TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w] // 2):
            om_en_om = om_en_om << 2
            om_en_om += 1
        om_en_om = om_en_om << 1
    skint_0 = inv_gray(om_en_om)  #3826103921638#2**30-1
    skint_1 = 0  #19283461627361826#2**30-1
    skint_2 = 0  #473829102637452916#2**30-1
    skst_0 = 2
    skst_1 = 0
    skst_2 = 0
    dense_size_0 = 1000 * 2
    dense_size_1 = 0
    no_pooling = False
    #skippy parameters

    #assembling parameters
    samples = [[
        stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6,
        s_0, s_1, s_2, s_3, s_4, s_5, s_6,
        filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6,
        filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,
        k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13,
        activation, activ_dense,
        dropout_0, dropout_1, dropout_2, dropout_3, dropout_4, dropout_5, dropout_6, dropout_7,
        lr, l2, step, global_pooling,
        skint_0, skint_1, skint_2, skst_0, skst_1, skst_2,
        dense_size_0, dense_size_1, no_pooling
    ]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
    vla = {
        's_0': 3, 'l2': 4.4274387289657325e-05, 'filters_7': 423, 'dense_size_1': 992,
        'filters_12': 295, 'stack_0': 0, 'filters_2': 53, 'global_pooling': True,
        'dropout_6': 0.5606577615096975, 'filters_13': 115, 'filters_4': 396, 'stack_4': 0,
        'k_9': 6, 'activation': 'tanh', 'dropout_1': 0.07267176147234225, 'filters_5': 405,
        'filters_1': 250, 'k_7': 7, 'filters_3': 408, 'stack_2': 0, 'no_pooling': False,
        'dropout_7': 0.12689965102483852, 's_2': 4, 'filters_8': 455,
        'dropout_4': 0.8991002969243431, 'k_11': 5, 'skst_0': 7, 'k_4': 3,
        'dropout_3': 0.5966691482903116, 'step': False, 'dense_size_0': 583, 'stack_1': 1,
        'k_0': 3, 'skint_1': 505527202345094, 'k_1': 5, 'k_8': 1, 'stack_6': 0,
        'lr': 0.6919959357016345, 'activ_dense': 'softmax', 'filters_6': 305, 's_1': 3,
        'filters_9': 226, 's_4': 2, 'stack_3': 1, 'skst_1': 5, 'skst_2': 6,
        'dropout_2': 0.039087410518674766, 'k_12': 4, 'k_3': 6,
        'dropout_5': 0.46057411289276423, 'skint_0': 957176709324259, 'k_5': 4, 'k_2': 3,
        's_3': 1, 'filters_0': 195, 'k_6': 1, 'k_13': 2, 'skint_2': 1098454353499063,
        'filters_11': 107, 'filters_10': 257, 'k_10': 4, 'stack_5': 0, 's_6': 1, 's_5': 2,
        'dropout_0': 0.6293343140822664
    }

    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]

    test = True
    if test:
        #model = CNN_conf(X[0].to_dict(),test=test)
        model = CNN_conf(vla, test=test)
        plot_model(model, to_file='model_skippy_test.png', show_shapes=True, show_layer_names=True)
        model.summary()
        print(model.count_params())
        #rough float32 footprint: parameter count * 4 bytes * 2 copies
        print(str(model.count_params() * 4 * 2 / 1024 / 1024 / 1024) + ' GiB')
    else:
        timer, loss = CNN_conf(X[0].to_dict(), test=test)
        print('timer, loss:')
        print(timer, loss)
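
#inv_gray() is called above but not defined in this excerpt; the sketch below
#assumes it is a standard inverse Gray-code (Gray-to-binary) conversion.
def inv_gray(g):
    #gray = binary ^ (binary >> 1), so XOR-ing in every right-shifted copy
    #of g recovers the original binary integer
    b = 0
    while g:
        b ^= g
        g >>= 1
    return b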
def obj_func(x, gpu_no=None):
    global eval_n
    print("Eval # " + str(eval_n) + " (Gpu " + str(gpu_no) + ")")
    eval_n += 1
    x_r, x_i, x_d = np.array([x['C_0'], x['C_1']]), x['I'], x['N']
    if x_d == 'OK':
        tmp = 0
    else:
        tmp = 1
    return np.sum(x_r**2.) + abs(x_i - 10) / 123. + tmp * 2.


C = ContinuousSpace([-5, 5], 'C') * 2
I = OrdinalSpace([-100, 100], 'I')
N = NominalSpace(['OK', 'A', 'B', 'C', 'D', 'E'], 'N')
search_space = C * I * N

model = RandomForest(levels=search_space.levels)
# model = RrandomForest(levels=search_space.levels, seed=1, max_features='sqrt')

opt = mipego(
    search_space, obj_func, model,
    max_iter=n_step,
    random_seed=None,
    n_init_sample=n_init_sample,
    minimize=True,
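
#A quick hand check of the toy objective above, assuming eval_n has been
#initialized earlier in this script (e.g. eval_n = 0):
#    obj_func({'C_0': 1.0, 'C_1': -2.0, 'I': 7, 'N': 'OK'})
#    -> 1.0 + 4.0 + |7 - 10| / 123 + 0 ~= 5.0244
#The nominal penalty only triggers for x['N'] != 'OK', adding a flat 2.0.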
def test_skippy():
    from mipego.mipego import Solution  #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model

    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

    filters = OrdinalSpace([10, 100], 'filters') * 14  #TODO [10, 100] should be [10, 600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4], 'stack') * 7  #TODO [0, 4] should be [0, 7]

    activation = NominalSpace(activation_fun_conv, "activation")  # activation function
    activation_dense = NominalSpace(activation_fun, "activ_dense")  # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling

    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0, 2000], 'dense_size') * 2
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10  # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')  # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')  # l2_regularizer

    search_space = (stack_sizes * strides * filters * kernel_size * activation
                    * activation_dense * drop_out * lr_rate * l2_regularizer
                    * step * global_pooling * skstart * skstep * max_pooling * dense_size)

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #ResNet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0 = 2  #1#2
    s_1 = 2
    s_2 = 1  #1
    s_3 = 2
    s_4 = 1
    s_5 = 2
    s_6 = 1
    filters_0 = 64
    filters_1 = 64
    filters_2 = 64
    filters_3 = 64
    filters_4 = 128
    filters_5 = 128
    filters_6 = 128
    filters_7 = 128
    filters_8 = 256
    filters_9 = 256
    filters_10 = 256
    filters_11 = 256
    filters_12 = 512
    filters_13 = 512
    k_0 = 7
    k_1 = 1
    k_2 = 3
    k_3 = 1
    k_4 = 3
    k_5 = 1
    k_6 = 3
    k_7 = 1
    k_8 = 3
    k_9 = 1
    k_10 = 3
    k_11 = 1
    k_12 = 3
    k_13 = 1
    activation = 'relu'
    activ_dense = 'softmax'
    dropout_0 = 0.001
    dropout_1 = 0.001
    dropout_2 = 0.001
    dropout_3 = 0.001
    dropout_4 = 0.001
    dropout_5 = 0.001
    dropout_6 = 0.001
    dropout_7 = 0.001
    dropout_8 = 0.001
    dropout_9 = 0.001
    lr = 0.01
    l2 = 0.0001
    step = False  #True
    global_pooling = True

    #skippy parameters
    om_en_om = 1
    ranges = [stack_6, stack_5, stack_4, stack_3, stack_2, stack_1, stack_0]
    for w in range(len(ranges)):  #TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w] // 2):
            om_en_om = om_en_om << 2
            om_en_om += 1
        om_en_om = om_en_om << 1
    skstart_0 = 1  #inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1  #19283461627361826#2**30-1
    skstart_2 = 1  #473829102637452916#2**30-1
    skstart_3 = 1  #473829102637452916#2**30-1
    skstart_4 = 1  #473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters

    #assembling parameters
    samples = [[
        stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6,
        s_0, s_1, s_2, s_3, s_4, s_5, s_6,
        filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6,
        filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,
        k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13,
        activation, activ_dense,
        dropout_0, dropout_1, dropout_2, dropout_3, dropout_4,
        dropout_5, dropout_6, dropout_7, dropout_8, dropout_9,
        lr, l2, step, global_pooling,
        skstart_0, skstart_1, skstart_2, skstart_3, skstart_4,
        skstep_0, skstep_1, skstep_2, skstep_3, skstep_4,
        max_pooling, dense_size_0, dense_size_1
    ]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
    vla = {
        's_2': 7, 'lr': 0.005478541674651396, 'skstep_2': 4, 'dropout_8': 0.5440199827441856,
        'k_12': 15, 'activ_dense': 'softmax', 'stack_4': 3, 'k_5': 2,
        'dropout_4': 0.24617655948523018, 's_3': 6, 'k_11': 13, 'filters_10': 84,
        'dropout_0': 0.0639815161048702, 'k_7': 13, 'filters_9': 178, 'k_1': 13,
        'dropout_6': 0.1752239013431692, 'filters_7': 353, 'skstep_4': 6, 'skstart_2': 0,
        'stack_0': 0, 'stack_5': 1, 's_5': 2, 'k_13': 6, 'filters_2': 110, 'filters_0': 248,
        'skstart_1': 5, 'filters_6': 341, 'filters_8': 165, 'skstart_4': 2,
        'l2': 0.0012874308061650037, 's_0': 9, 'global_pooling': False, 'stack_6': 1,
        's_1': 2, 'skstep_0': 4, 'dropout_3': 0.495646008202597, 'skstart_0': 3, 'k_6': 2,
        'filters_1': 61, 'dropout_2': 0.028121315386701783, 'stack_3': 2, 'filters_3': 299,
        'stack_1': 3, 'max_pooling': True, 'filters_4': 259, 'filters_11': 207, 'k_3': 15,
        'k_0': 15, 'dense_size_0': 1400, 'k_4': 10, 's_6': 5,
        'dropout_9': 0.004273458743956573, 'skstep_3': 6, 'filters_5': 16, 's_4': 2,
        'dropout_1': 0.42526328646019135, 'dense_size_1': 2990, 'k_10': 9, 'k_2': 4,
        'skstep_1': 6, 'dropout_5': 0.3927105783290164, 'filters_12': 283,
        'dropout_7': 0.01357058138235737, 'activation': 'selu', 'filters_13': 228,
        'step': False, 'k_8': 2, 'k_9': 2, 'skstart_3': 1, 'stack_2': 3
    }

    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]

    test = False
    if test:
        #model = CNN_conf(X[0].to_dict(),test=test)
        model = CNN_conf(vla, test=test)
        plot_model(model, to_file='model_skippy_test.png', show_shapes=True, show_layer_names=True)
        model.summary()
        print(model.count_params())
        #rough float32 footprint: parameter count * 4 bytes * 2 copies
        print(str(model.count_params() * 4 * 2 / 1024 / 1024 / 1024) + ' GiB')
    else:
        #timer, loss = CNN_conf(X[0].to_dict(),test=test,epochs=2000,verbose=1)
        timer, loss = CNN_conf(vla, test=test, epochs=2000, verbose=1)
        print('timer, loss:')
        print(timer, loss)
def test_skippy():
    from mipego.mipego import Solution  #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model

    #start each run with an empty training-history log
    with open('skippy_test_train_hist' + '_eval_train_hist.json', 'w') as f:
        f.write('')

    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

    filters = OrdinalSpace([10, 100], 'filters') * 14  #TODO [10, 100] should be [10, 600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4], 'stack') * 7  #TODO [0, 4] should be [0, 7]

    activation = NominalSpace(activation_fun_conv, "activation")  # activation function
    activation_dense = NominalSpace(activation_fun, "activ_dense")  # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling

    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0, 2000], 'dense_size') * 2
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10  # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')  # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')  # l2_regularizer

    search_space = (stack_sizes * strides * filters * kernel_size * activation
                    * activation_dense * drop_out * lr_rate * l2_regularizer
                    * step * global_pooling * skstart * skstep * max_pooling * dense_size)

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #ResNet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0 = 2  #1#2
    s_1 = 2
    s_2 = 1  #1
    s_3 = 2
    s_4 = 1
    s_5 = 2
    s_6 = 1
    filters_0 = 64
    filters_1 = 64
    filters_2 = 64
    filters_3 = 64
    filters_4 = 128
    filters_5 = 128
    filters_6 = 128
    filters_7 = 128
    filters_8 = 256
    filters_9 = 256
    filters_10 = 256
    filters_11 = 256
    filters_12 = 512
    filters_13 = 512
    k_0 = 7
    k_1 = 1
    k_2 = 3
    k_3 = 1
    k_4 = 3
    k_5 = 1
    k_6 = 3
    k_7 = 1
    k_8 = 3
    k_9 = 1
    k_10 = 3
    k_11 = 1
    k_12 = 3
    k_13 = 1
    activation = 'relu'
    activ_dense = 'softmax'
    dropout_0 = 0.001
    dropout_1 = 0.001
    dropout_2 = 0.001
    dropout_3 = 0.001
    dropout_4 = 0.001
    dropout_5 = 0.001
    dropout_6 = 0.001
    dropout_7 = 0.001
    dropout_8 = 0.001
    dropout_9 = 0.001
    lr = 0.01
    l2 = 0.0001
    step = False  #True
    global_pooling = True

    #skippy parameters
    om_en_om = 1
    ranges = [stack_6, stack_5, stack_4, stack_3, stack_2, stack_1, stack_0]
    for w in range(len(ranges)):  #TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w] // 2):
            om_en_om = om_en_om << 2
            om_en_om += 1
        om_en_om = om_en_om << 1
    skstart_0 = 1  #inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1  #19283461627361826#2**30-1
    skstart_2 = 1  #473829102637452916#2**30-1
    skstart_3 = 1  #473829102637452916#2**30-1
    skstart_4 = 1  #473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters

    #assembling parameters
    samples = [[
        stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6,
        s_0, s_1, s_2, s_3, s_4, s_5, s_6,
        filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6,
        filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,
        k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13,
        activation, activ_dense,
        dropout_0, dropout_1, dropout_2, dropout_3, dropout_4,
        dropout_5, dropout_6, dropout_7, dropout_8, dropout_9,
        lr, l2, step, global_pooling,
        skstart_0, skstart_1, skstart_2, skstart_3, skstart_4,
        skstep_0, skstep_1, skstep_2, skstep_3, skstep_4,
        max_pooling, dense_size_0, dense_size_1
    ]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    dropout_mult = 1.0
    lr_mult = 1.0
    X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
    vla = {
        's_4': 3, 'k_12': 1, 'k_13': 13, 'k_4': 3, 'filters_9': 273, 'stack_2': 5,
        'skstep_1': 8, 'stack_4': 2, 's_2': 7, 'filters_8': 463, 's_6': 5,
        'dropout_7': 0.14258839346689015 * dropout_mult, 's_5': 8,
        'dropout_3': 0.4887239563686235 * dropout_mult, 'k_0': 3, 'filters_13': 506,
        'dropout_1': 0.02305687664777915 * dropout_mult, 'stack_5': 6, 'skstart_4': 5,
        'dropout_4': 0.2198815770696341 * dropout_mult, 'filters_12': 368, 'k_9': 13,
        'dense_size_0': 915, 'max_pooling': True, 'k_8': 1, 'skstart_1': 4, 'k_1': 3,
        's_1': 6, 'filters_6': 476, 'dropout_9': 0.237736517209488 * dropout_mult,
        'k_3': 2, 'skstart_2': 0, 's_3': 4, 'step': True, 'filters_1': 251, 'stack_3': 7,
        'dropout_6': 0.009317366697570491 * dropout_mult, 'filters_5': 199, 'k_10': 10,
        'skstart_0': 2, 'filters_4': 239, 'filters_0': 266, 'dense_size_1': 2114,
        'lr': 0.0097450688503161 * lr_mult, 'skstep_4': 9,
        'dropout_8': 0.06911053842571835 * dropout_mult, 'filters_2': 397,
        'filters_3': 341, 'filters_10': 409, 's_0': 3, 'activation': 'elu', 'k_7': 9,
        'stack_6': 2, 'skstart_3': 4, 'stack_0': 4, 'k_11': 6, 'k_2': 3,
        'l2': 0.0005256770455060354, 'skstep_0': 7, 'skstep_2': 6,
        'dropout_2': 0.12188479132476926 * dropout_mult, 'k_5': 13,
        'global_pooling': True, 'skstep_3': 2, 'filters_11': 59,
        'dropout_0': 0.0010461409934142763, 'k_6': 4, 'stack_1': 0, 'filters_7': 394,
        'dropout_5': 0.3355844862089496 * dropout_mult, 'activ_dense': 'softmax'
    }
    #'droput_0': 0.0010461409934142763

    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]

    test = True
    if test:
        model = CNN_conf(X[0].to_dict(), test=test)
        #model = CNN_conf(vla,test=test)
        plot_model(model, to_file='model_skippy_test.png', show_shapes=True, show_layer_names=True)
        model.summary()
        print(model.count_params())
        #rough float32 footprint: parameter count * 4 bytes * 2 copies
        print(str(model.count_params() * 4 * 2 / 1024 / 1024 / 1024) + ' GiB')
    else:
        #timer, loss = CNN_conf(X[0].to_dict(),test=test,epochs=2000,verbose=1)
        timer, loss = CNN_conf(vla, test=test, epochs=200, verbose=1)
        #timer, loss = CNN_conf(vla,test=test,epochs=200,verbose=1,data_augmentation=True,use_validation=True)  #TODO use this for data augmentation and make sure the val set is used for val accuracy, not the test set
        print('timer, loss:')
        print(timer, loss)
                       math.log(0.05))  #CHRIS half the accuracy of random guessing
        result = (float(tuple_str1), float(tuple_str2), success)
        #return outputval
        with open(self.save_name + '_thread_log.json', 'a') as outfile:
            outfile.write('thread ' + str(gpu_no) + ': step 3 gpu 3 obj_func 9\n')
        return result


#define the search space.
save_name = 'data_skippy_cifar10_big_one_tweaked'
objective = obj_func('./all_cnn_bi_skippy.py', save_name=save_name)
activation_fun = ["softmax"]
activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

filters = OrdinalSpace([10, 600], 'filters') * 14
kernel_size = OrdinalSpace([1, 16], 'k') * 14  #CHRIS tweaked
strides = OrdinalSpace([1, 10], 's') * 7  #CHRIS tweaked
stack_sizes = OrdinalSpace([0, 7], 'stack') * 7  #[0,2] should be [0,7]

activation = NominalSpace(activation_fun_conv, "activation")  # activation function
activation_dense = NominalSpace(activation_fun, "activ_dense")  # activation function for dense layer
step = NominalSpace([True, False], "step")  # step
global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling#CHRIS TODO removed False

#skippy parameters
skstart = OrdinalSpace([0, 7], 'skstart') * 5
skstep = OrdinalSpace(
                       -1 * math.log(0.05))  #CHRIS half the accuracy of random guessing
        result = (float(tuple_str1), float(tuple_str2), success)
        #return outputval
        with open(self.save_name + '_thread_log.json', 'a') as outfile:
            outfile.write('thread ' + str(gpu_no) + ': step 3 gpu 3 obj_func 9\n')
        return result


#define the search space.
save_name = '../../../data/s0315435/data_skippy_cifar10_better_data_augmentation_train_tweak_big_one_restarted1'
objective = obj_func('./all_cnn_bi_skippy_aug_tr_tw.py', save_name=save_name)

lr_rate = ContinuousSpace([1e-4, 2 * 0.003521543292982737], 'lr')
drop = ContinuousSpace([0.1, 0.95], 'drop')
epochs_drop = OrdinalSpace([1, 40], 'epochs_drop')
momentum = ContinuousSpace([0.8, 0.99], 'momentum')
optimizer = NominalSpace(
    ["SGD", "RMSprop", "Adagrad", "Adadelta", "Adam", "Adamax", "Nadam"], 'optimizer')
rho = ContinuousSpace([0.8, 0.99], 'rho')
search_space = lr_rate * drop * epochs_drop * momentum * optimizer * rho

print('starting program...')
#available_gpus = gp.getAvailable(limit=2)
gpu_limit = 16
available_gpus = gp.getAvailable(limit=gpu_limit)
ignore_gpu = []
if len(sys.argv) > 1:
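
#The 'lr', 'drop', and 'epochs_drop' variables above suggest a step-decay
#learning-rate schedule; a minimal sketch of that assumed schedule
#(step_decay_sketch is hypothetical, not part of the tuned script):
def step_decay_sketch(epoch, lr=0.0035, drop=0.5, epochs_drop=10):
    #multiply the base rate by `drop` once every `epochs_drop` epochs
    return lr * math.pow(drop, math.floor(epoch / epochs_drop))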
#import our package, the surrogate model and the search space classes
from mipego import ParallelBO
from mipego.Surrogate import RandomForest
from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace

# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target

# First we need to define the search space: it consists of one continuous
# variable, one ordinal (integer) variable, and two categorical (nominal)
# variables.
Cvar = ContinuousSpace([1.0, 20.0], 'C')  # one continuous variable with label 'C'
degree = OrdinalSpace([2, 6], 'degree')
gamma = NominalSpace(['scale', 'auto'], 'gamma')
kernel = NominalSpace(['linear', 'poly', 'rbf', 'sigmoid'], 'kernel')

#the complete search space is just the sum of the parameter spaces
search_space = Cvar + gamma + degree + kernel

#now we define the objective function (training and evaluating the model)
def train_model(c):
    #define the model: we will use a Support Vector Classifier
    svm = SVC(kernel=c['kernel'], gamma=c['gamma'], C=c['C'], degree=c['degree'])
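
#train_model() is cut off above; a self-contained sketch of a plausible
#completion, assuming the objective is 1 minus cross-validated accuracy
#(names ending in _sketch are hypothetical, not the author's code):
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

def train_model_sketch(c):
    svm = SVC(kernel=c['kernel'], gamma=c['gamma'], C=c['C'], degree=c['degree'])
    scores = cross_val_score(svm, X_iris, y_iris, cv=5)  #5-fold CV accuracy
    return 1.0 - scores.mean()  #the optimizer minimizes, so return an error rate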
def test_skippy():
    from mipego.mipego import Solution  #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model

    #start each run with an empty training-history log
    with open('skippy_test_train_hist' + '_eval_train_hist.json', 'w') as f:
        f.write('')

    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

    filters = OrdinalSpace([10, 100], 'filters') * 14  #TODO [10, 100] should be [10, 600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4], 'stack') * 7  #TODO [0, 4] should be [0, 7]

    activation = NominalSpace(activation_fun_conv, "activation")  # activation function
    activation_dense = NominalSpace(activation_fun, "activ_dense")  # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling

    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0, 2000], 'dense_size') * 2
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10  # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')  # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')  # l2_regularizer

    search_space = (stack_sizes * strides * filters * kernel_size * activation
                    * activation_dense * drop_out * lr_rate * l2_regularizer
                    * step * global_pooling * skstart * skstep * max_pooling * dense_size)

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #ResNet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0 = 2  #1#2
    s_1 = 2
    s_2 = 1  #1
    s_3 = 2
    s_4 = 1
    s_5 = 2
    s_6 = 1
    filters_0 = 64
    filters_1 = 64
    filters_2 = 64
    filters_3 = 64
    filters_4 = 128
    filters_5 = 128
    filters_6 = 128
    filters_7 = 128
    filters_8 = 256
    filters_9 = 256
    filters_10 = 256
    filters_11 = 256
    filters_12 = 512
    filters_13 = 512
    k_0 = 7
    k_1 = 1
    k_2 = 3
    k_3 = 1
    k_4 = 3
    k_5 = 1
    k_6 = 3
    k_7 = 1
    k_8 = 3
    k_9 = 1
    k_10 = 3
    k_11 = 1
    k_12 = 3
    k_13 = 1
    activation = 'relu'
    activ_dense = 'softmax'
    dropout_0 = 0.001
    dropout_1 = 0.001
    dropout_2 = 0.001
    dropout_3 = 0.001
    dropout_4 = 0.001
    dropout_5 = 0.001
    dropout_6 = 0.001
    dropout_7 = 0.001
    dropout_8 = 0.001
    dropout_9 = 0.001
    lr = 0.0097450688503161  #0.01
    l2 = 0.0005256770455060354  #0.0001
    step = True
    global_pooling = True

    #skippy parameters
    om_en_om = 1
    ranges = [stack_6, stack_5, stack_4, stack_3, stack_2, stack_1, stack_0]
    for w in range(len(ranges)):  #TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w] // 2):
            om_en_om = om_en_om << 2
            om_en_om += 1
        om_en_om = om_en_om << 1
    skstart_0 = 1  #inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1  #19283461627361826#2**30-1
    skstart_2 = 1  #473829102637452916#2**30-1
    skstart_3 = 1  #473829102637452916#2**30-1
    skstart_4 = 1  #473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters

    #assembling parameters
    samples = [[
        stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6,
        s_0, s_1, s_2, s_3, s_4, s_5, s_6,
        filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6,
        filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,
        k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13,
        activation, activ_dense,
        dropout_0, dropout_1, dropout_2, dropout_3, dropout_4,
        dropout_5, dropout_6, dropout_7, dropout_8, dropout_9,
        lr, l2, step, global_pooling,
        skstart_0, skstart_1, skstart_2, skstart_3, skstart_4,
        skstep_0, skstep_1, skstep_2, skstep_3, skstep_4,
        max_pooling, dense_size_0, dense_size_1
    ]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
    vla = {
        'stack_4': 5, 'k_9': 7, 'filters_8': 271, 'lr': 0.004160465185980011,
        'filters_11': 502, 'skstep_2': 7, 'filters_4': 137,
        'dropout_6': 0.8661297289998243, 'dropout_1': 0.4391661461074476, 'k_13': 1,
        'stack_5': 0, 'dense_size_0': 447, 'filters_5': 595, 'k_2': 2, 's_4': 3,
        'dropout_0': 0.2844338183563386, 'filters_0': 80, 'filters_7': 573, 's_2': 3,
        'dropout_3': 0.02013305610010194, 'k_3': 6, 'dropout_5': 0.5555388378722113,
        'k_5': 4, 'skstart_2': 6, 'dropout_4': 0.43662540748596945,
        'dropout_2': 0.11810545604797541, 'stack_3': 5, 'skstep_1': 1,
        'dense_size_1': 1411, 'k_11': 6, 'filters_1': 583,
        'dropout_9': 0.651891663204505, 'skstep_4': 2, 'dropout_7': 0.25151371341217515,
        'skstep_0': 3, 'skstart_0': 6, 'k_8': 1, 's_5': 4, 'filters_3': 452,
        'skstep_3': 3, 'max_pooling': False, 'filters_2': 11, 'global_pooling': False,
        'l2': 0.00036819381287011194, 'dropout_8': 0.4454841154679102, 'skstart_3': 4,
        'skstart_4': 6, 'stack_6': 0, 'k_1': 2, 'stack_0': 2, 'skstart_1': 6, 's_6': 3,
        'k_4': 4, 'filters_6': 240, 'filters_12': 43, 'filters_10': 220, 'step': True,
        'stack_2': 3, 'k_6': 5, 'k_7': 2, 'stack_1': 3, 's_0': 3, 'k_12': 1,
        'filters_13': 383, 'k_10': 2, 's_3': 2, 'k_0': 4, 's_1': 4,
        'activ_dense': 'softmax', 'filters_9': 373, 'activation': 'tanh'
    }

    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]

    test = True
    if test:
        model = CNN_conf(X[0].to_dict(), test=test)
        #model = CNN_conf(vla,test=test)
        plot_model(model, to_file='model_skippy_test.png', show_shapes=True, show_layer_names=True)
        model.summary()
        print(model.count_params())
        #rough float32 footprint: parameter count * 4 bytes * 2 copies
        print(str(model.count_params() * 4 * 2 / 1024 / 1024 / 1024) + ' GiB')
    else:
        timer, loss = CNN_conf(X[0].to_dict(), test=test, epochs=200, verbose=1)
        #timer, loss = CNN_conf(vla,test=test,epochs=2000,verbose=1)
        print('timer, loss:')
        print(timer, loss)
                       math.log(0.05))  #CHRIS half the accuracy of random guessing
        result = (float(tuple_str1), float(tuple_str2), success)
        #return outputval
        with open(self.save_name + '_thread_log.json', 'a') as outfile:
            outfile.write('thread ' + str(gpu_no) + ': step 3 gpu 3 obj_func 9\n')
        return result


#define the search space.
save_name = 'data_skippy_cifar10_lots_of_points'
objective = obj_func('./all_cnn_bi_skippy.py', save_name=save_name)
activation_fun = ["softmax"]
activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

filters = OrdinalSpace([10, 600], 'filters') * 14  #128 should be 600
kernel_size = OrdinalSpace([1, 8], 'k') * 14
strides = OrdinalSpace([1, 5], 's') * 7
stack_sizes = OrdinalSpace([0, 7], 'stack') * 7  #[0,2] should be [0,7]

activation = NominalSpace(activation_fun_conv, "activation")  # activation function
activation_dense = NominalSpace(activation_fun, "activ_dense")  # activation function for dense layer
step = NominalSpace([True, False], "step")  # step
global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling#CHRIS TODO removed False

#skippy parameters
skstart = OrdinalSpace([0, 7], 'skstart') * 5
skstep = OrdinalSpace(
                       math.log(0.05))  #CHRIS half the accuracy of random guessing
        result = (float(tuple_str1), float(tuple_str2), success)
        #return outputval
        with open(self.save_name + '_thread_log.json', 'a') as outfile:
            outfile.write('thread ' + str(gpu_no) + ': step 3 gpu 3 obj_func 9\n')
        return result


#define the search space.
save_name = '../../../data/s0315435/data_skippy_cifar10_better_data_augmentation_big_one_restarted1'
objective = obj_func('./all_cnn_bi_skippy_aug.py', save_name=save_name)
activation_fun = ["softmax"]
activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

filters = OrdinalSpace([10, 600], 'filters') * 14
kernel_size = OrdinalSpace([1, 16], 'k') * 14  #CHRIS tweaked
strides = OrdinalSpace([1, 4], 's') * 7  #CHRIS tweaked TODO maybe limit to max of 3, because now the image is reduced too soon (used to be max 10) CHRIS tweaked again CHRIS tweaked a third time
stack_sizes = OrdinalSpace([0, 7], 'stack') * 7  #[0,2] should be [0,7]

activation = NominalSpace(activation_fun_conv, "activation")  # activation function
activation_dense = NominalSpace(activation_fun, "activ_dense")  # activation function for dense layer
step = NominalSpace([True, False], "step")  # step
global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling#CHRIS TODO removed False

#skippy parameters
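
#A quick check of why the stride cap above matters (see the CHRIS comment):
#on 32x32 CIFAR-10 inputs every strided stack divides the spatial size, so
#a few large strides exhaust the image before the deeper stacks see anything.
size = 32
for s in (4, 4, 4):           #three stacks at the maximum allowed stride
    size = max(1, size // s)  #32 -> 8 -> 2 -> 1
print(size)                   #1: nothing left for the remaining four stacks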
def test_skippy():
    from mipego.mipego import Solution  #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model

    #start each run with an empty training-history log
    with open('skippy_test_train_hist_aug' + '_eval_train_hist.json', 'w') as f:
        f.write('')

    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

    filters = OrdinalSpace([10, 100], 'filters') * 14  #TODO [10, 100] should be [10, 600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4], 'stack') * 7  #TODO [0, 4] should be [0, 7]

    activation = NominalSpace(activation_fun_conv, "activation")  # activation function
    activation_dense = NominalSpace(activation_fun, "activ_dense")  # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling

    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0, 2000], 'dense_size') * 2
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10  # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')  # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')  # l2_regularizer

    search_space = (stack_sizes * strides * filters * kernel_size * activation
                    * activation_dense * drop_out * lr_rate * l2_regularizer
                    * step * global_pooling * skstart * skstep * max_pooling * dense_size)

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #ResNet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0 = 2  #1#2
    s_1 = 2
    s_2 = 1  #1
    s_3 = 2
    s_4 = 1
    s_5 = 2
    s_6 = 1
    filters_0 = 64
    filters_1 = 64
    filters_2 = 64
    filters_3 = 64
    filters_4 = 128
    filters_5 = 128
    filters_6 = 128
    filters_7 = 128
    filters_8 = 256
    filters_9 = 256
    filters_10 = 256
    filters_11 = 256
    filters_12 = 512
    filters_13 = 512
    k_0 = 7
    k_1 = 1
    k_2 = 3
    k_3 = 1
    k_4 = 3
    k_5 = 1
    k_6 = 3
    k_7 = 1
    k_8 = 3
    k_9 = 1
    k_10 = 3
    k_11 = 1
    k_12 = 3
    k_13 = 1
    activation = 'relu'
    activ_dense = 'softmax'
    dropout_0 = 0.001
    dropout_1 = 0.001
    dropout_2 = 0.001
    dropout_3 = 0.001
    dropout_4 = 0.001
    dropout_5 = 0.001
    dropout_6 = 0.001
    dropout_7 = 0.001
    dropout_8 = 0.001
    dropout_9 = 0.001
    lr = 0.01
    l2 = 0.0001
    step = False  #True
    global_pooling = True

    #skippy parameters
    om_en_om = 1
    ranges = [stack_6, stack_5, stack_4, stack_3, stack_2, stack_1, stack_0]
    for w in range(len(ranges)):  #TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w] // 2):
            om_en_om = om_en_om << 2
            om_en_om += 1
        om_en_om = om_en_om << 1
    skstart_0 = 1  #inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1  #19283461627361826#2**30-1
    skstart_2 = 1  #473829102637452916#2**30-1
    skstart_3 = 1  #473829102637452916#2**30-1
    skstart_4 = 1  #473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters

    #assembling parameters
    samples = [[
        stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6,
        s_0, s_1, s_2, s_3, s_4, s_5, s_6,
        filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6,
        filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,
        k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13,
        activation, activ_dense,
        dropout_0, dropout_1, dropout_2, dropout_3, dropout_4,
        dropout_5, dropout_6, dropout_7, dropout_8, dropout_9,
        lr, l2, step, global_pooling,
        skstart_0, skstart_1, skstart_2, skstart_3, skstart_4,
        skstep_0, skstep_1, skstep_2, skstep_3, skstep_4,
        max_pooling, dense_size_0, dense_size_1
    ]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    dropout_mult = 1.0
    lr_mult = 1.0
    X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
    vla = {
        'k_6': 8, 'dropout_3': 0.11567654065541401, 'stack_2': 2, 'skstep_3': 2,
        'skstart_2': 6, 'k_11': 9, 'zca_whitening': False, 'k_1': 11, 's_4': 3,
        'filters_4': 191, 'k_7': 9, 'dropout_6': 0.19656398582242512,
        'fill_mode': 'nearest', 'filters_0': 246, 'lr': 0.003521543292982737,
        'skstart_4': 1, 's_3': 3, 'height_shift_range': 0.5512549395731117,
        'dropout_9': 0.282536761776477, 'dense_size_0': 1344, 'filters_11': 507,
        's_0': 2, 'dropout_4': 0.025329970168830974, 'filters_10': 305,
        'filters_12': 474, 'dropout_8': 0.10220859898802954,
        'samplewise_std_normalization': False, 'cval': 0.24779638415638786,
        'step': False, 'skstep_0': 2, 'skstart_3': 3,
        'featurewise_std_normalization': False, 's_5': 3, 'skstep_1': 8, 'k_4': 14,
        'stack_0': 3, 'max_pooling': True, 'dropout_0': 0.005004155479145995,
        'batch_size_sp': 75, 'skstart_1': 0, 'skstep_2': 2, 'filters_6': 535,
        'k_12': 4, 'stack_5': 0, 'horizontal_flip': True, 'filters_2': 397,
        'stack_4': 5, 'l2': 0.00043366714416766863, 'skstart_0': 6, 'filters_7': 202,
        'filters_13': 350, 'k_2': 4, 'k_3': 4, 's_2': 3, 's_6': 3,
        'rotation_range': 31, 'shear_range': 4.413108635288765, 'filters_5': 109,
        's_1': 1, 'k_8': 9, 'k_9': 5, 'channel_shift_range': 0.002134671459292783,
        'samplewise_center': False, 'k_0': 2, 'dropout_5': 0.09773198911653828,
        'vertical_flip': False, 'k_5': 10, 'zoom_range': 0.02446592218470434,
        'width_shift_range': 0.11326574574565945, 'stack_6': 0, 'k_10': 10,
        'dropout_2': 0.3496803660826153, 'activation': 'selu', 'stack_3': 1,
        'k_13': 4, 'zca_epsilon': 1.2393513955305375e-06, 'filters_3': 473,
        'dense_size_1': 1216, 'stack_1': 1, 'dropout_1': 0.16597601970646955,
        'filters_8': 353, 'dropout_7': 0.2567508735733037, 'featurewise_center': False,
        'filters_9': 339, 'global_pooling': True, 'skstep_4': 1,
        'activ_dense': 'softmax', 'filters_1': 120
    }

    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]

    test = False
    if test:
        #model = CNN_conf(X[0].to_dict(),test=test)
        model = CNN_conf(vla, test=test)
        plot_model(model, to_file='model_skippy_test.png', show_shapes=True, show_layer_names=True)
        model.summary()
        print(model.count_params())
        #rough float32 footprint: parameter count * 4 bytes * 2 copies
        print(str(model.count_params() * 4 * 2 / 1024 / 1024 / 1024) + ' GiB')
    else:
        #timer, loss = CNN_conf(X[0].to_dict(),test=test,epochs=2000,verbose=1)
        #timer, loss = CNN_conf(vla,test=test,epochs=200,verbose=1)
        timer, loss = CNN_conf(
            vla, test=test, epochs=200, verbose=1,
            data_augmentation=True, use_validation=True, test_on_validation=True
        )  #TODO use this for data augmentation and make sure the val set is used for val accuracy, not the test set
        print('timer, loss:')
        print(timer, loss)
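
#The size printout above estimates float32 memory as parameter count * 4
#bytes * 2 (assumed: weights plus one gradient/optimizer copy). A worked
#number with a hypothetical ResNet-34-scale parameter count:
params = 21_000_000
print(params * 4 * 2 / 1024 / 1024 / 1024, 'GiB')  #~0.16 GiB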