Ejemplo n.º 1
0
# Unpack the persisted optimizer history: parallel arrays describing each
# evaluated configuration (config vector, fitness, wall time, loss,
# evaluation count, index, and variable names).
conf_array = data[0]
fit_array = data[1]
time_array = data[2]
loss_array = data[3]
n_eval_array = data[4]
index_array = data[5]
name_array = data[6]

# Older dumps carry only 7 entries; newer ones also store the R^2 history.
all_r2 = None
if len(data) > 7:
    all_r2 = data[7]

#print(data)
# Rebuild one Solution object per recorded configuration.
solutions = []
for i in range(len(conf_array)):
    sol = Solution(conf_array[i])
    sol.fit = fit_array[i]
    sol.time = time_array[i]
    sol.loss = loss_array[i]
    sol.n_eval = n_eval_array[i]
    sol.index = index_array[i]
    sol.var_name = name_array[i]
    solutions.append(sol)

print("len(solutions): " + str(len(solutions)))

pauser = 0.008

time = [sol.time for sol in solutions]
loss = [sol.loss for sol in solutions]

#print('time:')
def test_skippy():
    """Smoke-test the 'skippy' CNN search space.

    Builds the mip-ego search space, assembles a hand-crafted
    ResNet-34-like configuration positionally (the sample order must match
    the search-space product order), and either plots/summarizes the model
    (test=True) or trains it (test=False).

    NOTE(review): CNN_conf is not defined or imported in this function --
    assumed to come from the surrounding module; confirm before running.
    """
    from mipego.mipego import Solution  #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model

    # Truncate the evaluation/training-history log so this run starts clean.
    with open('skippy_test_train_hist' + '_eval_train_hist.json', 'w') as f:
        f.write('')
    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

    filters = OrdinalSpace([10, 100],
                           'filters') * 14  #TODO [0,100] should be [0,600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4],
                               'stack') * 7  #TODO [0,4] should be [0,7]

    activation = NominalSpace(activation_fun_conv,
                              "activation")  # activation function
    activation_dense = NominalSpace(
        activation_fun, "activ_dense")  # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False],
                                  "global_pooling")  # global_pooling

    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0, 2000], 'dense_size') * 2
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10  # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')  # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')  # l2_regularizer

    # The product order here fixes the positional order of the samples below.
    search_space = stack_sizes * strides * filters * kernel_size * activation * activation_dense * drop_out * lr_rate * l2_regularizer * step * global_pooling * skstart * skstep * max_pooling * dense_size

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #RESnet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0 = 2  #1#2
    s_1 = 2
    s_2 = 1  #1
    s_3 = 2
    s_4 = 1
    s_5 = 2
    s_6 = 1
    filters_0 = 64
    filters_1 = 64
    filters_2 = 64
    filters_3 = 64
    filters_4 = 128
    filters_5 = 128
    filters_6 = 128
    filters_7 = 128
    filters_8 = 256
    filters_9 = 256
    filters_10 = 256
    filters_11 = 256
    filters_12 = 512
    filters_13 = 512
    k_0 = 7
    k_1 = 1
    k_2 = 3
    k_3 = 1
    k_4 = 3
    k_5 = 1
    k_6 = 3
    k_7 = 1
    k_8 = 3
    k_9 = 1
    k_10 = 3
    k_11 = 1
    k_12 = 3
    k_13 = 1
    activation = 'relu'
    activ_dense = 'softmax'
    dropout_0 = 0.001
    dropout_1 = 0.001
    dropout_2 = 0.001
    dropout_3 = 0.001
    dropout_4 = 0.001
    dropout_5 = 0.001
    dropout_6 = 0.001
    dropout_7 = 0.001
    dropout_8 = 0.001
    dropout_9 = 0.001
    lr = 0.01
    l2 = 0.0001
    step = False  #True
    global_pooling = True

    #skippy parameters
    # om_en_om is computed but unused in this variant: the skstart_* values
    # below are hard-coded to 1 (the inv_gray(om_en_om) call is commented out).
    om_en_om = 1
    ranges = [stack_6, stack_5, stack_4, stack_3, stack_2, stack_1, stack_0]
    for w in range(len(ranges)):  #TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w] // 2):
            om_en_om = om_en_om << 2
            om_en_om += 1
    om_en_om = om_en_om << 1
    skstart_0 = 1  #inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1  #19283461627361826#2**30-1
    skstart_2 = 1  #473829102637452916#2**30-1
    skstart_3 = 1  #473829102637452916#2**30-1
    skstart_4 = 1  #473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters

    #assembling parameters
    samples = [[
        stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6, s_0,
        s_1, s_2, s_3, s_4, s_5, s_6, filters_0, filters_1, filters_2,
        filters_3, filters_4, filters_5, filters_6, filters_7, filters_8,
        filters_9, filters_10, filters_11, filters_12, filters_13, k_0, k_1,
        k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13,
        activation, activ_dense, dropout_0, dropout_1, dropout_2, dropout_3,
        dropout_4, dropout_5, dropout_6, dropout_7, dropout_8, dropout_9, lr,
        l2, step, global_pooling, skstart_0, skstart_1, skstart_2, skstart_3,
        skstart_4, skstep_0, skstep_1, skstep_2, skstep_3, skstep_4,
        max_pooling, dense_size_0, dense_size_1
    ]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    dropout_mult = 1.0
    lr_mult = 1.0
    X = [
        Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)
    ]
    # Hand-recorded configuration kept for manual experiments; it is only
    # consumed by the (commented/alternate) CNN_conf calls below.
    vla = {
        's_4': 3,
        'k_12': 1,
        'k_13': 13,
        'k_4': 3,
        'filters_9': 273,
        'stack_2': 5,
        'skstep_1': 8,
        'stack_4': 2,
        's_2': 7,
        'filters_8': 463,
        's_6': 5,
        'dropout_7': 0.14258839346689015 * dropout_mult,
        's_5': 8,
        'dropout_3': 0.4887239563686235 * dropout_mult,
        'k_0': 3,
        'filters_13': 506,
        'dropout_1': 0.02305687664777915 * dropout_mult,
        'stack_5': 6,
        'skstart_4': 5,
        'dropout_4': 0.2198815770696341 * dropout_mult,
        'filters_12': 368,
        'k_9': 13,
        'dense_size_0': 915,
        'max_pooling': True,
        'k_8': 1,
        'skstart_1': 4,
        'k_1': 3,
        's_1': 6,
        'filters_6': 476,
        'dropout_9': 0.237736517209488 * dropout_mult,
        'k_3': 2,
        'skstart_2': 0,
        's_3': 4,
        'step': True,
        'filters_1': 251,
        'stack_3': 7,
        'dropout_6': 0.009317366697570491 * dropout_mult,
        'filters_5': 199,
        'k_10': 10,
        'skstart_0': 2,
        'filters_4': 239,
        'filters_0': 266,
        'dense_size_1': 2114,
        'lr': 0.0097450688503161 * lr_mult,
        'skstep_4': 9,
        'dropout_8': 0.06911053842571835 * dropout_mult,
        'filters_2': 397,
        'filters_3': 341,
        'filters_10': 409,
        's_0': 3,
        'activation': 'elu',
        'k_7': 9,
        'stack_6': 2,
        'skstart_3': 4,
        'stack_0': 4,
        'k_11': 6,
        'k_2': 3,
        'l2': 0.0005256770455060354,
        'skstep_0': 7,
        'skstep_2': 6,
        'dropout_2': 0.12188479132476926 * dropout_mult,
        'k_5': 13,
        'global_pooling': True,
        'skstep_3': 2,
        'filters_11': 59,
        'dropout_0': 0.0010461409934142763,
        'k_6': 4,
        'stack_1': 0,
        'filters_7': 394,
        'dropout_5': 0.3355844862089496 * dropout_mult,
        'activ_dense': 'softmax'
    }  #'droput_0': 0.0010461409934142763
    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]
    test = True
    if test:
        # Build only (no training): plot and summarize the resulting model.
        model = CNN_conf(X[0].to_dict(), test=test)
        #model = CNN_conf(vla,test=test)
        plot_model(model,
                   to_file='model_skippy_test.png',
                   show_shapes=True,
                   show_layer_names=True)
        model.summary()
        print(model.count_params())
        print(str(model.count_params() * 4 * 2 / 1024 / 1024 / 1024) + ' Gb')
    else:
        #timer, loss = CNN_conf(X[0].to_dict(),test=test,epochs= 2000,verbose=1)
        timer, loss = CNN_conf(vla, test=test, epochs=200, verbose=1)
        #timer, loss = CNN_conf(vla,test=test,epochs= 200,verbose=1,data_augmentation=True,use_validation=True) #TODO use this for data augmentation and make sure the val set is used for val accuracy, not the test set
        print('timer, loss:')
        print(timer, loss)
Ejemplo n.º 3
0
def test_skippy():
    """Smoke-test the 'skippy' CNN search space (skints/skst variant).

    Builds the mip-ego search space, assembles a hand-crafted
    ResNet-34-like configuration positionally (the sample order must match
    the search-space product order), and either plots/summarizes the model
    (test=True) or trains it (test=False).

    NOTE(review): CNN_conf and inv_gray are not defined or imported in this
    function -- assumed to come from the surrounding module; confirm before
    running.
    """
    from mipego.mipego import Solution  #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model
    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

    filters = OrdinalSpace([10, 100],
                           'filters') * 14  #TODO [0,100] should be [0,600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4],
                               'stack') * 7  #TODO [0,4] should be [0,7]

    activation = NominalSpace(activation_fun_conv,
                              "activation")  # activation function
    activation_dense = NominalSpace(
        activation_fun, "activ_dense")  # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False],
                                  "global_pooling")  # global_pooling

    #skippy parameters
    skints = OrdinalSpace([0, 2**50 - 1], 'skint') * 3  #CHRIS TODO tweak this
    skst = OrdinalSpace([2, 10], 'skst') * 3
    dense_size = OrdinalSpace([0, 1200], 'dense_size') * 2
    no_pooling = NominalSpace([True, False], "no_pooling")
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 8  # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')  # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')  # l2_regularizer

    # The product order here fixes the positional order of the samples below.
    search_space = stack_sizes * strides * filters * kernel_size * activation * activation_dense * drop_out * lr_rate * l2_regularizer * step * global_pooling * skints * skst * dense_size * no_pooling

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #RESnet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0 = 2
    s_1 = 2
    s_2 = 1
    s_3 = 2
    s_4 = 1
    s_5 = 2
    s_6 = 1
    filters_0 = 64 * 2
    filters_1 = 64 * 2
    filters_2 = 64 * 2
    filters_3 = 64 * 2
    filters_4 = 128 * 2
    filters_5 = 128 * 2
    filters_6 = 128 * 2
    filters_7 = 128 * 2
    filters_8 = 256 * 2
    filters_9 = 256 * 2
    filters_10 = 256 * 2
    filters_11 = 256 * 2
    filters_12 = 512 * 2
    filters_13 = 512 * 2
    k_0 = 7
    k_1 = 3
    k_2 = 3
    k_3 = 3
    k_4 = 3
    k_5 = 3
    k_6 = 3
    k_7 = 3
    k_8 = 3
    k_9 = 3
    k_10 = 3
    k_11 = 3
    k_12 = 3
    k_13 = 3
    activation = 'relu'
    activ_dense = 'softmax'
    dropout_0 = 0.001
    dropout_1 = 0.001
    dropout_2 = 0.001
    dropout_3 = 0.001
    dropout_4 = 0.001
    dropout_5 = 0.001
    dropout_6 = 0.001
    dropout_7 = 0.001
    lr = 0.1
    l2 = 0.0001
    step = True
    global_pooling = True

    #skippy parameters
    # Build an alternating bit pattern from the stack sizes; it is decoded
    # into skint_0 via inv_gray below (presumably a skip-connection encoding
    # -- TODO confirm against the CNN_conf implementation).
    om_en_om = 1
    ranges = [stack_6, stack_5, stack_4, stack_3, stack_2, stack_1, stack_0]
    for w in range(len(ranges)):  #TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w] // 2):
            om_en_om = om_en_om << 2
            om_en_om += 1
    om_en_om = om_en_om << 1
    # NOTE(review): inv_gray is not imported here; assumed defined in the
    # surrounding module.
    skint_0 = inv_gray(om_en_om)  #3826103921638#2**30-1
    skint_1 = 0  #19283461627361826#2**30-1
    skint_2 = 0  #473829102637452916#2**30-1
    skst_0 = 2
    skst_1 = 0
    skst_2 = 0
    dense_size_0 = 1000 * 2
    dense_size_1 = 0
    no_pooling = False
    #skippy parameters

    #assembling parameters
    samples = [[
        stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6, s_0,
        s_1, s_2, s_3, s_4, s_5, s_6, filters_0, filters_1, filters_2,
        filters_3, filters_4, filters_5, filters_6, filters_7, filters_8,
        filters_9, filters_10, filters_11, filters_12, filters_13, k_0, k_1,
        k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13,
        activation, activ_dense, dropout_0, dropout_1, dropout_2, dropout_3,
        dropout_4, dropout_5, dropout_6, dropout_7, lr, l2, step,
        global_pooling, skint_0, skint_1, skint_2, skst_0, skst_1, skst_2,
        dense_size_0, dense_size_1, no_pooling
    ]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    X = [
        Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)
    ]
    # Hand-recorded configuration; consumed by the CNN_conf call below when
    # test=True.
    vla = {
        's_0': 3,
        'l2': 4.4274387289657325e-05,
        'filters_7': 423,
        'dense_size_1': 992,
        'filters_12': 295,
        'stack_0': 0,
        'filters_2': 53,
        'global_pooling': True,
        'dropout_6': 0.5606577615096975,
        'filters_13': 115,
        'filters_4': 396,
        'stack_4': 0,
        'k_9': 6,
        'activation': 'tanh',
        'dropout_1': 0.07267176147234225,
        'filters_5': 405,
        'filters_1': 250,
        'k_7': 7,
        'filters_3': 408,
        'stack_2': 0,
        'no_pooling': False,
        'dropout_7': 0.12689965102483852,
        's_2': 4,
        'filters_8': 455,
        'dropout_4': 0.8991002969243431,
        'k_11': 5,
        'skst_0': 7,
        'k_4': 3,
        'dropout_3': 0.5966691482903116,
        'step': False,
        'dense_size_0': 583,
        'stack_1': 1,
        'k_0': 3,
        'skint_1': 505527202345094,
        'k_1': 5,
        'k_8': 1,
        'stack_6': 0,
        'lr': 0.6919959357016345,
        'activ_dense': 'softmax',
        'filters_6': 305,
        's_1': 3,
        'filters_9': 226,
        's_4': 2,
        'stack_3': 1,
        'skst_1': 5,
        'skst_2': 6,
        'dropout_2': 0.039087410518674766,
        'k_12': 4,
        'k_3': 6,
        'dropout_5': 0.46057411289276423,
        'skint_0': 957176709324259,
        'k_5': 4,
        'k_2': 3,
        's_3': 1,
        'filters_0': 195,
        'k_6': 1,
        'k_13': 2,
        'skint_2': 1098454353499063,
        'filters_11': 107,
        'filters_10': 257,
        'k_10': 4,
        'stack_5': 0,
        's_6': 1,
        's_5': 2,
        'dropout_0': 0.6293343140822664
    }
    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]
    test = True
    if test:
        # Build only (no training): plot and summarize the resulting model.
        #model = CNN_conf(X[0].to_dict(),test=test)
        model = CNN_conf(vla, test=test)
        plot_model(model,
                   to_file='model_skippy_test.png',
                   show_shapes=True,
                   show_layer_names=True)
        model.summary()
        print(model.count_params())
        print(str(model.count_params() * 4 * 2 / 1024 / 1024 / 1024) + ' Gb')
    else:
        timer, loss = CNN_conf(X[0].to_dict(), test=test)
        print('timer, loss:')
        print(timer, loss)
Ejemplo n.º 4
0
def test_skippy():
    """Smoke-test the 'skippy' CNN search space (training variant).

    Builds the mip-ego search space, assembles a hand-crafted
    ResNet-34-like configuration positionally (the sample order must match
    the search-space product order), and -- since test=False below -- trains
    the hand-recorded 'vla' configuration for 2000 epochs.

    NOTE(review): CNN_conf is not defined or imported in this function --
    assumed to come from the surrounding module; confirm before running.
    """
    from mipego.mipego import Solution #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model
    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu","relu","tanh","sigmoid","selu"]

    filters = OrdinalSpace([10, 100], 'filters') * 14 #TODO [0,100] should be [0,600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4], 'stack') * 7 #TODO [0,4] should be [0,7]

    activation = NominalSpace(activation_fun_conv, "activation")  # activation function
    activation_dense = NominalSpace(activation_fun, "activ_dense") # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling

    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0,2000],'dense_size')*2
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10        # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')        # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')# l2_regularizer

    # The product order here fixes the positional order of the samples below.
    search_space =  stack_sizes * strides * filters *  kernel_size * activation * activation_dense * drop_out * lr_rate * l2_regularizer * step * global_pooling * skstart * skstep * max_pooling * dense_size

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #RESnet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0=2#1#2
    s_1=2
    s_2=1#1
    s_3=2
    s_4=1
    s_5=2
    s_6=1
    filters_0=64
    filters_1=64
    filters_2=64
    filters_3=64
    filters_4=128
    filters_5=128
    filters_6=128
    filters_7=128
    filters_8=256
    filters_9=256
    filters_10=256
    filters_11=256
    filters_12=512
    filters_13=512
    k_0=7
    k_1=1
    k_2=3
    k_3=1
    k_4=3
    k_5=1
    k_6=3
    k_7=1
    k_8=3
    k_9=1
    k_10=3
    k_11=1
    k_12=3
    k_13=1
    activation='relu'
    activ_dense='softmax'
    dropout_0=0.001
    dropout_1=0.001
    dropout_2=0.001
    dropout_3=0.001
    dropout_4=0.001
    dropout_5=0.001
    dropout_6=0.001
    dropout_7=0.001
    dropout_8=0.001
    dropout_9=0.001
    lr=0.01
    l2=0.0001
    step=False#True
    global_pooling=True

    #skippy parameters
    # om_en_om is computed but unused in this variant: the skstart_* values
    # below are hard-coded to 1 (the inv_gray(om_en_om) call is commented out).
    om_en_om = 1
    ranges = [stack_6,stack_5,stack_4,stack_3,stack_2,stack_1,stack_0]
    for w in range(len(ranges)):#TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w]//2):
            om_en_om = om_en_om << 2
            om_en_om += 1
    om_en_om = om_en_om << 1
    skstart_0 = 1#inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1#19283461627361826#2**30-1
    skstart_2 = 1#473829102637452916#2**30-1
    skstart_3 = 1#473829102637452916#2**30-1
    skstart_4 = 1#473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters

    #assembling parameters
    samples = [[stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6, s_0, s_1, s_2, s_3, s_4, s_5, s_6, filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6, filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13, activation, activ_dense, dropout_0, dropout_1, dropout_2, dropout_3, dropout_4, dropout_5, dropout_6, dropout_7, dropout_8, dropout_9, lr, l2, step, global_pooling, skstart_0, skstart_1, skstart_2, skstart_3, skstart_4, skstep_0, skstep_1, skstep_2, skstep_3, skstep_4, max_pooling, dense_size_0, dense_size_1]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
    # Hand-recorded configuration; trained by the else-branch below.
    vla = {'s_2': 7, 'lr': 0.005478541674651396, 'skstep_2': 4, 'dropout_8': 0.5440199827441856, 'k_12': 15, 'activ_dense': 'softmax', 'stack_4': 3, 'k_5': 2, 'dropout_4': 0.24617655948523018, 's_3': 6, 'k_11': 13, 'filters_10': 84, 'dropout_0': 0.0639815161048702, 'k_7': 13, 'filters_9': 178, 'k_1': 13, 'dropout_6': 0.1752239013431692, 'filters_7': 353, 'skstep_4': 6, 'skstart_2': 0, 'stack_0': 0, 'stack_5': 1, 's_5': 2, 'k_13': 6, 'filters_2': 110, 'filters_0': 248, 'skstart_1': 5, 'filters_6': 341, 'filters_8': 165, 'skstart_4': 2, 'l2': 0.0012874308061650037, 's_0': 9, 'global_pooling': False, 'stack_6': 1, 's_1': 2, 'skstep_0': 4, 'dropout_3': 0.495646008202597, 'skstart_0': 3, 'k_6': 2, 'filters_1': 61, 'dropout_2': 0.028121315386701783, 'stack_3': 2, 'filters_3': 299, 'stack_1': 3, 'max_pooling': True, 'filters_4': 259, 'filters_11': 207, 'k_3': 15, 'k_0': 15, 'dense_size_0': 1400, 'k_4': 10, 's_6': 5, 'dropout_9': 0.004273458743956573, 'skstep_3': 6, 'filters_5': 16, 's_4': 2, 'dropout_1': 0.42526328646019135, 'dense_size_1': 2990, 'k_10': 9, 'k_2': 4, 'skstep_1': 6, 'dropout_5': 0.3927105783290164, 'filters_12': 283, 'dropout_7': 0.01357058138235737, 'activation': 'selu', 'filters_13': 228, 'step': False, 'k_8': 2, 'k_9': 2, 'skstart_3': 1, 'stack_2': 3}
    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]
    test = False
    if test:
        #model = CNN_conf(X[0].to_dict(),test=test)
        model = CNN_conf(vla,test=test)
        plot_model(model, to_file='model_skippy_test.png',show_shapes=True,show_layer_names=True)
        model.summary()
        print(model.count_params())
        print(str(model.count_params() * 4 * 2 / 1024/1024/1024) + ' Gb')
    else:
        #timer, loss = CNN_conf(X[0].to_dict(),test=test,epochs= 2000,verbose=1)
        timer, loss = CNN_conf(vla,test=test,epochs= 2000,verbose=1)
        print('timer, loss:')
        print(timer, loss)
Ejemplo n.º 5
0
    all_loss_r2 = data[8]

    surr_time_fit_hist = data[9]
    surr_time_mies_hist = data[10]
    surr_loss_fit_hist = data[11]
    surr_loss_mies_hist = data[12]
    time_between_gpu_hist = data[13]

    solutions = []
    for i in range(len(conf_array)):
        conf_x = [conf_array[i][j] for j in name_array[i]]
        solutions.append(
            Solution(x=conf_x,
                     fitness=fit_array[i],
                     n_eval=n_eval_array[i],
                     index=index_array[i],
                     var_name=name_array[i],
                     loss=loss_array[i],
                     time=time_array[i]))

    opt.data = solutions

    for i in range(len(opt.data)):
        opt.data[i].fitness = fit_array[i]
        opt.data[i].time = time_array[i]
        opt.data[i].loss = loss_array[i]
        opt.data[i].n_eval = n_eval_array[i]
        opt.data[i].index = index_array[i]
        opt.data[i].var_name = name_array[i]

    opt.all_time_r2 = all_time_r2
Ejemplo n.º 6
0
def test_skippy():
    """Smoke-test the 'skippy' CNN search space (augmented-history variant).

    Truncates the augmented training-history log, builds the mip-ego search
    space, assembles a hand-crafted ResNet-34-like configuration
    positionally (the sample order must match the search-space product
    order), and plots/summarizes the model (test=True below).

    NOTE(review): CNN_conf is not defined or imported in this function --
    assumed to come from the surrounding module; confirm before running.
    """
    from mipego.mipego import Solution #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model

    # Truncate the evaluation/training-history log so this run starts clean.
    with open('skippy_test_train_hist' + '_eval_train_hist.json', 'w') as f:
        f.write('')
    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu","relu","tanh","sigmoid","selu"]

    filters = OrdinalSpace([10, 100], 'filters') * 14 #TODO [0,100] should be [0,600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4], 'stack') * 7 #TODO [0,4] should be [0,7]

    activation = NominalSpace(activation_fun_conv, "activation")  # activation function
    activation_dense = NominalSpace(activation_fun, "activ_dense") # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False], "global_pooling")  # global_pooling

    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0,2000],'dense_size')*2
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10        # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')        # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')# l2_regularizer

    # The product order here fixes the positional order of the samples below.
    search_space =  stack_sizes * strides * filters *  kernel_size * activation * activation_dense * drop_out * lr_rate * l2_regularizer * step * global_pooling * skstart * skstep * max_pooling * dense_size

    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #RESnet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0=2#1#2
    s_1=2
    s_2=1#1
    s_3=2
    s_4=1
    s_5=2
    s_6=1
    filters_0=64
    filters_1=64
    filters_2=64
    filters_3=64
    filters_4=128
    filters_5=128
    filters_6=128
    filters_7=128
    filters_8=256
    filters_9=256
    filters_10=256
    filters_11=256
    filters_12=512
    filters_13=512
    k_0=7
    k_1=1
    k_2=3
    k_3=1
    k_4=3
    k_5=1
    k_6=3
    k_7=1
    k_8=3
    k_9=1
    k_10=3
    k_11=1
    k_12=3
    k_13=1
    activation='relu'
    activ_dense='softmax'
    dropout_0=0.001
    dropout_1=0.001
    dropout_2=0.001
    dropout_3=0.001
    dropout_4=0.001
    dropout_5=0.001
    dropout_6=0.001
    dropout_7=0.001
    dropout_8=0.001
    dropout_9=0.001
    lr=0.0097450688503161#0.01
    l2=0.0005256770455060354#0.0001
    step=True
    global_pooling=True

    #skippy parameters
    # om_en_om is computed but unused in this variant: the skstart_* values
    # below are hard-coded to 1 (the inv_gray(om_en_om) call is commented out).
    om_en_om = 1
    ranges = [stack_6,stack_5,stack_4,stack_3,stack_2,stack_1,stack_0]
    for w in range(len(ranges)):#TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w]//2):
            om_en_om = om_en_om << 2
            om_en_om += 1
    om_en_om = om_en_om << 1
    skstart_0 = 1#inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1#19283461627361826#2**30-1
    skstart_2 = 1#473829102637452916#2**30-1
    skstart_3 = 1#473829102637452916#2**30-1
    skstart_4 = 1#473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters

    #assembling parameters
    samples = [[stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6, s_0, s_1, s_2, s_3, s_4, s_5, s_6, filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6, filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13, activation, activ_dense, dropout_0, dropout_1, dropout_2, dropout_3, dropout_4, dropout_5, dropout_6, dropout_7, dropout_8, dropout_9, lr, l2, step, global_pooling, skstart_0, skstart_1, skstart_2, skstart_3, skstart_4, skstep_0, skstep_1, skstep_2, skstep_3, skstep_4, max_pooling, dense_size_0, dense_size_1]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
    # Hand-recorded configuration kept for manual experiments; only referenced
    # by the commented-out CNN_conf calls below.
    vla = {'stack_4': 5, 'k_9': 7, 'filters_8': 271, 'lr': 0.004160465185980011, 'filters_11': 502, 'skstep_2': 7, 'filters_4': 137, 'dropout_6': 0.8661297289998243, 'dropout_1': 0.4391661461074476, 'k_13': 1, 'stack_5': 0, 'dense_size_0': 447, 'filters_5': 595, 'k_2': 2, 's_4': 3, 'dropout_0': 0.2844338183563386, 'filters_0': 80, 'filters_7': 573, 's_2': 3, 'dropout_3': 0.02013305610010194, 'k_3': 6, 'dropout_5': 0.5555388378722113, 'k_5': 4, 'skstart_2': 6, 'dropout_4': 0.43662540748596945, 'dropout_2': 0.11810545604797541, 'stack_3': 5, 'skstep_1': 1, 'dense_size_1': 1411, 'k_11': 6, 'filters_1': 583, 'dropout_9': 0.651891663204505, 'skstep_4': 2, 'dropout_7': 0.25151371341217515, 'skstep_0': 3, 'skstart_0': 6, 'k_8': 1, 's_5': 4, 'filters_3': 452, 'skstep_3': 3, 'max_pooling': False, 'filters_2': 11, 'global_pooling': False, 'l2': 0.00036819381287011194, 'dropout_8': 0.4454841154679102, 'skstart_3': 4, 'skstart_4': 6, 'stack_6': 0, 'k_1': 2, 'stack_0': 2, 'skstart_1': 6, 's_6': 3, 'k_4': 4, 'filters_6': 240, 'filters_12': 43, 'filters_10': 220, 'step': True, 'stack_2': 3, 'k_6': 5, 'k_7': 2, 'stack_1': 3, 's_0': 3, 'k_12': 1, 'filters_13': 383, 'k_10': 2, 's_3': 2, 'k_0': 4, 's_1': 4, 'activ_dense': 'softmax', 'filters_9': 373, 'activation': 'tanh'}
    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]
    test = True
    if test:
        # Build only (no training): plot and summarize the resulting model.
        model = CNN_conf(X[0].to_dict(),test=test)
        #model = CNN_conf(vla,test=test)
        plot_model(model, to_file='model_skippy_test.png',show_shapes=True,show_layer_names=True)
        model.summary()
        print(model.count_params())
        print(str(model.count_params() * 4 * 2 / 1024/1024/1024) + ' Gb')
    else:
        timer, loss = CNN_conf(X[0].to_dict(),test=test,epochs= 200,verbose=1)
        #timer, loss = CNN_conf(vla,test=test,epochs= 2000,verbose=1)
        print('timer, loss:')
        print(timer, loss)
Ejemplo n.º 7
0
        p = partition_par(par, lo, hi)
        quicksort_par(par, lo, p - 1)
        quicksort_par(par, p + 1, hi)


def partition_par(par, lo, hi):
    """Lomuto partition of ``par[lo:hi+1]`` keyed on the ``loss`` attribute.

    ``par[hi].loss`` is used as the pivot value. After the call, every
    element with a strictly smaller loss precedes the pivot and all other
    elements follow it; used by the in-place quicksort above.

    :param par: list of objects exposing a numeric ``loss`` attribute
    :param lo: first index of the slice to partition (inclusive)
    :param hi: last index of the slice; ``par[hi]`` supplies the pivot
    :return: final index of the pivot element
    """
    pivot = par[hi].loss
    i = lo
    for j in range(lo, hi):
        if par[j].loss < pivot:
            # Tuple swap replaces the old temporary variable, which
            # shadowed the builtin `help`.
            par[i], par[j] = par[j], par[i]
            i += 1
    # Move the pivot into its final sorted position.
    par[i], par[hi] = par[hi], par[i]
    return i


# Sanity check: build n dummy solutions with distinct losses, shuffle them,
# then confirm quicksort_par reorders them by ascending loss.
n = 100

solutions = []
for idx in range(n):
    sol = Solution([1])
    sol.loss = idx
    solutions.append(sol)

random.shuffle(solutions)
print([x.loss for x in solutions])
quicksort_par(solutions, 0, len(solutions) - 1)
print([x.loss for x in solutions])
def test_skippy() -> None:
    """Manually exercise CNN_conf with a "skippy" CNN configuration.

    Builds the mipego search space for the skip-connection CNN, samples one
    configuration, assembles a second hand-written ResNet-34-like
    configuration (`samples` / `vla`), and then either builds and plots the
    model (test=True branch) or trains it with data augmentation
    (test=False branch — the one taken here).

    NOTE(review): relies on `CNN_conf` being defined elsewhere in this file.
    """
    from mipego.mipego import Solution  #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model

    # Truncate (reset) the training-history file for this augmented run.
    with open('skippy_test_train_hist_aug' + '_eval_train_hist.json',
              'w') as f:
        f.write('')
    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu", "relu", "tanh", "sigmoid", "selu"]

    # `* k` replicates a subspace k times, yielding suffixed variables
    # (e.g. filters_0 .. filters_13).
    filters = OrdinalSpace([10, 100],
                           'filters') * 14  #TODO [0,100] should be [0,600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4],
                               'stack') * 7  #TODO [0,4] should be [0,7]

    activation = NominalSpace(activation_fun_conv,
                              "activation")  # activation function
    activation_dense = NominalSpace(
        activation_fun, "activ_dense")  # activation function for dense layer
    step = NominalSpace([True, False], "step")  # step
    global_pooling = NominalSpace([True, False],
                                  "global_pooling")  # global_pooling

    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0, 2000], 'dense_size') * 2
    #skippy parameters

    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10  # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')  # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')  # l2_regularizer

    # Cartesian product of all subspaces; the multiplication order fixes the
    # variable ordering used when assembling `samples` below.
    search_space = stack_sizes * strides * filters * kernel_size * activation * activation_dense * drop_out * lr_rate * l2_regularizer * step * global_pooling * skstart * skstep * max_pooling * dense_size

    # Draw a single random configuration (only printed; the hand-built
    # `samples` list overwrites it further down).
    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)

    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]

    #test parameters
    #original parameters
    #RESnet-34-like
    # Hand-written configuration approximating a ResNet-34 layout:
    # 7 stacks with classic 64/128/256/512 filter progression and
    # alternating 3x3 / 1x1 kernels.
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0 = 2  #1#2
    s_1 = 2
    s_2 = 1  #1
    s_3 = 2
    s_4 = 1
    s_5 = 2
    s_6 = 1
    filters_0 = 64
    filters_1 = 64
    filters_2 = 64
    filters_3 = 64
    filters_4 = 128
    filters_5 = 128
    filters_6 = 128
    filters_7 = 128
    filters_8 = 256
    filters_9 = 256
    filters_10 = 256
    filters_11 = 256
    filters_12 = 512
    filters_13 = 512
    k_0 = 7
    k_1 = 1
    k_2 = 3
    k_3 = 1
    k_4 = 3
    k_5 = 1
    k_6 = 3
    k_7 = 1
    k_8 = 3
    k_9 = 1
    k_10 = 3
    k_11 = 1
    k_12 = 3
    k_13 = 1
    activation = 'relu'
    activ_dense = 'softmax'
    dropout_0 = 0.001
    dropout_1 = 0.001
    dropout_2 = 0.001
    dropout_3 = 0.001
    dropout_4 = 0.001
    dropout_5 = 0.001
    dropout_6 = 0.001
    dropout_7 = 0.001
    dropout_8 = 0.001
    dropout_9 = 0.001
    lr = 0.01
    l2 = 0.0001
    step = False  #True
    global_pooling = True

    #skippy parameters
    # Build a bitmask from the stack sizes; presumably encodes which layers
    # receive skip connections — TODO confirm against CNN_conf's decoding.
    om_en_om = 1
    ranges = [stack_6, stack_5, stack_4, stack_3, stack_2, stack_1, stack_0]
    for w in range(len(ranges)):  #TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w] // 2):
            om_en_om = om_en_om << 2
            om_en_om += 1
    om_en_om = om_en_om << 1
    # NOTE(review): om_en_om is computed but unused — skstart_* are hard-coded.
    skstart_0 = 1  #inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1  #19283461627361826#2**30-1
    skstart_2 = 1  #473829102637452916#2**30-1
    skstart_3 = 1  #473829102637452916#2**30-1
    skstart_4 = 1  #473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters

    #assembling parameters
    # Order must match `var_names` from the search-space product above.
    samples = [[
        stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6, s_0,
        s_1, s_2, s_3, s_4, s_5, s_6, filters_0, filters_1, filters_2,
        filters_3, filters_4, filters_5, filters_6, filters_7, filters_8,
        filters_9, filters_10, filters_11, filters_12, filters_13, k_0, k_1,
        k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13,
        activation, activ_dense, dropout_0, dropout_1, dropout_2, dropout_3,
        dropout_4, dropout_5, dropout_6, dropout_7, dropout_8, dropout_9, lr,
        l2, step, global_pooling, skstart_0, skstart_1, skstart_2, skstart_3,
        skstart_4, skstep_0, skstep_1, skstep_2, skstep_3, skstep_4,
        max_pooling, dense_size_0, dense_size_1
    ]]

    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']

    # NOTE(review): dropout_mult and lr_mult are never used below.
    dropout_mult = 1.0
    lr_mult = 1.0
    X = [
        Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)
    ]
    # Fixed configuration dict passed to CNN_conf below; also carries
    # data-augmentation keys (rotation_range, zoom_range, flips, …) —
    # presumably consumed when data_augmentation=True. TODO confirm.
    vla = {
        'k_6': 8,
        'dropout_3': 0.11567654065541401,
        'stack_2': 2,
        'skstep_3': 2,
        'skstart_2': 6,
        'k_11': 9,
        'zca_whitening': False,
        'k_1': 11,
        's_4': 3,
        'filters_4': 191,
        'k_7': 9,
        'dropout_6': 0.19656398582242512,
        'fill_mode': 'nearest',
        'filters_0': 246,
        'lr': 0.003521543292982737,
        'skstart_4': 1,
        's_3': 3,
        'height_shift_range': 0.5512549395731117,
        'dropout_9': 0.282536761776477,
        'dense_size_0': 1344,
        'filters_11': 507,
        's_0': 2,
        'dropout_4': 0.025329970168830974,
        'filters_10': 305,
        'filters_12': 474,
        'dropout_8': 0.10220859898802954,
        'samplewise_std_normalization': False,
        'cval': 0.24779638415638786,
        'step': False,
        'skstep_0': 2,
        'skstart_3': 3,
        'featurewise_std_normalization': False,
        's_5': 3,
        'skstep_1': 8,
        'k_4': 14,
        'stack_0': 3,
        'max_pooling': True,
        'dropout_0': 0.005004155479145995,
        'batch_size_sp': 75,
        'skstart_1': 0,
        'skstep_2': 2,
        'filters_6': 535,
        'k_12': 4,
        'stack_5': 0,
        'horizontal_flip': True,
        'filters_2': 397,
        'stack_4': 5,
        'l2': 0.00043366714416766863,
        'skstart_0': 6,
        'filters_7': 202,
        'filters_13': 350,
        'k_2': 4,
        'k_3': 4,
        's_2': 3,
        's_6': 3,
        'rotation_range': 31,
        'shear_range': 4.413108635288765,
        'filters_5': 109,
        's_1': 1,
        'k_8': 9,
        'k_9': 5,
        'channel_shift_range': 0.002134671459292783,
        'samplewise_center': False,
        'k_0': 2,
        'dropout_5': 0.09773198911653828,
        'vertical_flip': False,
        'k_5': 10,
        'zoom_range': 0.02446592218470434,
        'width_shift_range': 0.11326574574565945,
        'stack_6': 0,
        'k_10': 10,
        'dropout_2': 0.3496803660826153,
        'activation': 'selu',
        'stack_3': 1,
        'k_13': 4,
        'zca_epsilon': 1.2393513955305375e-06,
        'filters_3': 473,
        'dense_size_1': 1216,
        'stack_1': 1,
        'dropout_1': 0.16597601970646955,
        'filters_8': 353,
        'dropout_7': 0.2567508735733037,
        'featurewise_center': False,
        'filters_9': 339,
        'global_pooling': True,
        'skstep_4': 1,
        'activ_dense': 'softmax',
        'filters_1': 120
    }
    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]
    # test=True would only build and plot the model; test=False (current)
    # runs a full training evaluation.
    test = False
    if test:
        #model = CNN_conf(X[0].to_dict(),test=test)
        model = CNN_conf(vla, test=test)
        plot_model(model,
                   to_file='model_skippy_test.png',
                   show_shapes=True,
                   show_layer_names=True)
        model.summary()
        print(model.count_params())
        # Rough memory estimate: params * 4 bytes (float32) * 2 (weights +
        # gradients), converted to GiB.
        print(str(model.count_params() * 4 * 2 / 1024 / 1024 / 1024) + ' Gb')
    else:
        #timer, loss = CNN_conf(X[0].to_dict(),test=test,epochs= 2000,verbose=1)
        #timer, loss = CNN_conf(vla,test=test,epochs= 200,verbose=1)
        timer, loss = CNN_conf(
            vla,
            test=test,
            epochs=200,
            verbose=1,
            data_augmentation=True,
            use_validation=True,
            test_on_validation=True
        )  #TODO use this for data augmentation and make sure the val set is used for val accuracy, not the test set
        print('timer, loss:')
        print(timer, loss)