Example #1
def train_simple_recurrent_model(multi_class_model,
                                 model_name,
                                 already_trained_model=None):
    x_train, y_train = prepare_sequential_data()
    loaded_multi_class_model = keras.models.load_model(multi_class_model)
    x_train, y_train = construct_probabilities_sequences(
        x_train, y_train, loaded_multi_class_model)
    print(x_train.shape, y_train.shape)
    if not already_trained_model:
        model = StandardModel(classes=6)
        model = model.build_simple_recurrent_model()
        model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
        model.fit(x_train, y_train, epochs=5, batch_size=1)
        model.save(model_name)
    else:
        if os.path.exists(already_trained_model):
            model = keras.models.load_model(already_trained_model)
            model.compile(Adamax(),
                          loss='binary_crossentropy',
                          metrics=['acc'])
            model.fit(x_train, y_train, epochs=5, batch_size=1)
            model.save(model_name)
        else:
            print_error("Provided model file doesn't exist! Exiting...")
            sys.exit(1)
Example #2
def train_binary_model(base_model, model_name, already_trained_model=None):
    x_train, y_train, x_test, y_test = prepare_data()
    if not already_trained_model:
        model = StandardModel(base_model, (512, 512, 3),
                              classes=2,
                              use_softmax=True)
        model = model.build_model()
        model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
        model.fit_generator(DataGenerator(x_train,
                                          labels=y_train,
                                          n_classes=2,
                                          batch_size=8),
                            epochs=1)
        model.save(model_name)
    else:
        if os.path.exists(already_trained_model):
            model = keras.models.load_model(already_trained_model)
            model.compile(Adamax(),
                          loss='binary_crossentropy',
                          metrics=['acc'])
            model.fit_generator(DataGenerator(x_train,
                                              labels=y_train,
                                              n_classes=2,
                                              batch_size=8),
                                epochs=1)
            model.save(model_name)
        else:
            print_error("Provided model file doesn't exist! Exiting...")
            sys.exit(1)
Example #3
def define_learning_rate_and_optimizer(optimizer, learning_rate):
    if learning_rate > 0:
        if optimizer == 'Adam':
            return Adam(lr=learning_rate), learning_rate
        elif optimizer == 'Adamax':
            return Adamax(lr=learning_rate), learning_rate
        elif optimizer == 'Adadelta':
            return Adadelta(lr=learning_rate), learning_rate
        elif optimizer == 'Adagrad':
            return Adagrad(lr=learning_rate), learning_rate
        elif optimizer == 'Nadam':
            return Nadam(lr=learning_rate), learning_rate
        elif optimizer == 'RMSprop':
            return RMSprop(lr=learning_rate), learning_rate
        elif optimizer == 'SGD':
            return SGD(lr=learning_rate), learning_rate

    # Fall back to each optimizer's default learning rate
    if optimizer == 'Adam':
        return Adam(), 1e-3
    elif optimizer == 'Adamax':
        return Adamax(), 2e-3
    elif optimizer == 'Adadelta':
        return Adadelta(), 1.0
    elif optimizer == 'Adagrad':
        return Adagrad(), 1e-2
    elif optimizer == 'Nadam':
        return Nadam(), 2e-3
    elif optimizer == 'RMSprop':
        return RMSprop(), 1e-3
    elif optimizer == 'SGD':
        return SGD(), 1e-2
    else:
        raise ValueError('Unexpected optimizer value: ' + str(optimizer))
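A minimal usage sketch of the function above; the one-layer Sequential model here is an illustrative placeholder, not part of the original code:

from keras.models import Sequential
from keras.layers import Dense

# Passing a non-positive learning rate falls through to the Keras defaults,
# so the function also reports which rate was actually used.
optimizer, lr = define_learning_rate_and_optimizer('Adamax', 0)
print(lr)  # 2e-3

model = Sequential([Dense(10, input_dim=4, activation='softmax')])
model.compile(optimizer=optimizer, loss='categorical_crossentropy',
              metrics=['acc'])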
Example #4
def train_recurrent_multi_class_model(base_model,
                                      model_name,
                                      already_trained_model=None):
    x_train, y_train = prepare_sequential_data()
    if not already_trained_model:
        model = StandardModel(base_model, (512, 512, 3),
                              classes=5,
                              use_softmax=False,
                              pooling_method=None)
        model = model.build_model()
        model.compile(Adamax(), loss='binary_crossentropy', metrics=['acc'])
        model.fit_generator(LSTMDataGenerator(x_train, labels=y_train),
                            epochs=3)
        model.save(model_name)
    else:
        if os.path.exists(already_trained_model):
            model = keras.models.load_model(already_trained_model)
            model.compile(Adamax(),
                          loss='binary_crossentropy',
                          metrics=['acc'])
            model.fit_generator(LSTMDataGenerator(x_train, labels=y_train),
                                epochs=3)
            model.save(model_name)
        else:
            print_error("Provided model file doesn't exist! Exiting...")
            sys.exit(1)
Example #5
 def run(self):
     X_transformed = Pipeline((tolower, splitter, joiner)).transform(X)
     model = Sequential()
     model.add(
         Convolution2D(32, (len(loader.ALPHABET), 4),
                       input_shape=(len(loader.ALPHABET), loader.MAX_LENGTH,
                                    1),
                       padding='valid',
                       activation='relu'))
     model.add(MaxPooling2D(pool_size=(1, 3)))
     model.add(
         Convolution2D(32, (1, 3), padding='valid', activation='relu'))
     model.add(MaxPooling2D(pool_size=(1, 3)))
     model.add(Flatten())
     model.add(Dropout(0.7))
     model.add(Dense(256, activation='relu', kernel_constraint=maxnorm(3)))
     model.add(Dense(loader.NB_CLASSES, activation='softmax'))
     model.compile(loss='categorical_crossentropy',
                   optimizer=Adamax(),
                   metrics=['accuracy'])
     print(model.summary())
     X_train, X_test, Y_train, Y_test = train_test_split(X_transformed, y)
     model.fit_generator(CharTableSource().get_generator(X_train,
                                                         Y_train)(),
                         steps_per_epoch=16,
                         validation_steps=16,
                         epochs=1,
                         verbose=1,
                         validation_data=CharTableSource().get_generator(
                             X_test, Y_test)())
     score = model.evaluate_generator(CharTableSource().get_generator(
         X_test, Y_test)(),
                                      steps=32)
     print('Test accuracy:', score[1])
     return model
Example #6
def df_accuracy(exp_type, num_classes, num_epochs, seq_length, VERBOSE, X_tr,
                Y_tr, X_vl, Y_vl, X_te, Y_te):
    OPTIMIZER = Adamax(lr=0.002,
                       beta_1=0.9,
                       beta_2=0.999,
                       epsilon=1e-08,
                       decay=0.0)  # Optimizer
    input_shape = (seq_length, 1)
    # Building and training model

    #print ("Building and training DF model")
    if exp_type == 'RT':
        model = DFNetRT.build(input_shape=input_shape, classes=num_classes)
    else:
        model = DFNet.build(input_shape=input_shape, classes=num_classes)
    #print(model.summary())
    model.compile(loss="categorical_crossentropy",
                  optimizer=OPTIMIZER,
                  metrics=["accuracy"])
    #print ("Model compiled")

    # Start training
    history = model.fit(X_tr,
                        Y_tr,
                        batch_size=128,
                        epochs=num_epochs,
                        verbose=VERBOSE,
                        validation_data=(X_vl, Y_vl))

    # Start evaluating model with testing data
    score_test = model.evaluate(X_te, Y_te, verbose=0)
    print("Testing accuracy:", score_test[1])
    return score_test[1]
Example #7
def create_model_test():

    model = Sequential()

    model.add(
        Dense(280,
              input_dim=16,
              init='normal',
              activation='relu',
              W_regularizer=l1l2(l1=5e-06, l2=5e-06),
              activity_regularizer=l1l2(l1=0, l2=1e-5))
    )  #W_regularizer=l1(0.000001), activity_regularizer=activity_l1(0.000001)))
    model.add(Dropout(0.25))
    model.add(
        Dense(370, activation='relu', activity_regularizer=l1l2(l1=0,
                                                                l2=5e-5)))
    model.add(Dropout(0.5))
    model.add(Dense(120, activation='relu', W_regularizer=l1l2(l1=0,
                                                               l2=5e-06)))
    model.add(Dropout(0.55))

    model.add(Dense(1))

    model.add(Activation('sigmoid'))

    admax = Adamax(lr=0.002,
                   beta_1=0.9,
                   beta_2=0.999,
                   epsilon=1e-08,
                   decay=0.0)  #decay ? 0.002

    model.compile(optimizer=admax,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])  # Gradient descent
    return model
Example #8
def build_multitask_bin_seg_cat_network(nb_classes,
                                        char_inputs, char_encoded,
                                        word_inputs, word_encoded,
                                        gaze_inputs, gaze_encoded):
    network = concatenate([gaze_encoded, char_encoded, word_encoded], name='concat_layer')
    network = Dense(100, activation='relu', name='common_dense_layer')(network)

    bin_output = Dense(1, activation='sigmoid', name='bin_output')(network)
    seg_output = Dense(3, activation='softmax', name='seg_output')(network)
    cat_output = Dense(nb_classes, activation='softmax', name='cat_output')(network)

    network_inputs  = gaze_inputs + char_inputs + word_inputs
    network_outputs = [bin_output, seg_output, cat_output]

    model = Model(inputs=network_inputs, outputs=network_outputs, name='ne_model')

    adamax = Adamax(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(optimizer=adamax,
                  loss={'bin_output': 'binary_crossentropy',
                        'seg_output': 'categorical_crossentropy',
                        'cat_output': 'categorical_crossentropy'},
                  loss_weights={'bin_output': 1.,
                                'seg_output': 1.,
                                'cat_output': 1.},
                  metrics={'bin_output': [utils.fbeta_score, 'accuracy'],
                           'seg_output': [utils.fbeta_score, 'accuracy'],
                           'cat_output': [utils.fbeta_score, 'accuracy']})
    return model
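For context, a hedged sketch of how a model compiled with named outputs like this is typically trained; the input list and target arrays below are hypothetical placeholders:

# Hypothetical call: Keras matches the dict keys to the output-layer names
# ('bin_output', 'seg_output', 'cat_output') declared in compile() above.
model.fit(network_input_arrays,            # one array per input tensor
          {'bin_output': y_bin,            # shape (n, 1), binary labels
           'seg_output': y_seg,            # shape (n, 3), one-hot
           'cat_output': y_cat},           # shape (n, nb_classes), one-hot
          epochs=10,
          batch_size=32)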
Example #9
def baseline_cnn(embedding_matrix, num_tokens, embedding_dim, dropout_rate,
                 regularization_rate, num_classes):
    img_input = Input(shape=(2048, ))
    text_input = Input(shape=(None, ))

    vgg_model = img_model(img_input, regularization_rate)
    lstm_model = text_model(embedding_matrix, num_tokens, embedding_dim,
                            text_input, dropout_rate, regularization_rate)

    print("Merging final model...")
    # Keras 2: multiply() replaces the removed merge(..., mode='mul')
    fc_model = multiply([vgg_model, lstm_model])
    fc_model = Dropout(dropout_rate)(fc_model)
    fc_model = Dense(1000,
                     activation='tanh',
                     kernel_constraint=maxnorm(3),
                     kernel_initializer=glorot_normal(),
                     kernel_regularizer=l2(regularization_rate))(fc_model)
    fc_model = Dropout(dropout_rate)(fc_model)
    fc_model = Dense(num_classes,
                     activation='softmax',
                     kernel_constraint=maxnorm(3),
                     kernel_initializer=glorot_normal(),
                     kernel_regularizer=l2(regularization_rate))(fc_model)

    model = Model(inputs=[img_input, text_input], outputs=fc_model)
    opt = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print(model.summary())
    plot_model(model, to_file='model_plots/model_baseline_cnn.png')
    return model
Example #10
def get_optimizer(optimizer):
    """
    Creates optimizer function from name

    Arguments:
        optimizer -- optimizer name.

    Returns:
        optimizer -- optimizer function

    Raises:
        Exception -- not valid optimizer
    """
    if optimizer == 'SGD':
        return SGD()
    elif optimizer == 'RMSprop':
        return RMSprop()
    elif optimizer == 'Adagrad':
        return Adagrad()
    elif optimizer == 'Adadelta':
        return Adadelta()
    elif optimizer == 'Adam':
        return Adam()
    elif optimizer == 'Adamax':
        return Adamax()
    elif optimizer == 'Nadam':
        return Nadam()
    else:
        raise ValueError('Unexpected optimizer value: ' + str(optimizer))
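A possible call site for get_optimizer; the one-layer model is a made-up minimal example:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_dim=8, activation='sigmoid')])
model.compile(optimizer=get_optimizer('Adamax'),
              loss='binary_crossentropy',
              metrics=['accuracy'])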
Example #11
def create_nn():
    if os.path.exists('race-car.h5'):
        return load_model('race-car.h5')

    model = Sequential()
    model.add(Dense(512, init='lecun_uniform',
                    input_shape=(vector_size, )))  # 7x7 + 3.  or 14x14 + 3
    model.add(Activation('relu'))
    #model.add(Dropout(0.25))

    #   model.add(Dense(32, init='lecun_uniform'))
    #   model.add(Activation('relu'))
    #   model.add(Dropout(0.3))

    model.add(Dense(11, init='lecun_uniform'))
    model.add(Activation(
        'linear'))  #linear output so we can have range of real-valued outputs

    #     rms = RMSprop(lr=0.005)
    #     sgd = SGD(lr=0.1, decay=0.0, momentum=0.0, nesterov=False)
    #     try "adam"
    #     adam = Adam(lr=0.0005)
    adamax = Adamax()  #Adamax(lr=0.001)
    model.compile(loss='mse', optimizer=adamax)
    model.summary()

    return model
Example #12
def get_optimizer(config):
    if config.OPTIMIZER == 'SGD':
        return SGD(lr=config.LEARNING_RATE,
                   momentum=config.LEARNING_MOMENTUM,
                   clipnorm=config.GRADIENT_CLIP_NORM,
                   nesterov=config.NESTEROV)
    elif config.OPTIMIZER == 'RMSprop':
        return RMSprop(lr=config.LEARNING_RATE,
                       clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adagrad':
        return Adagrad(lr=config.LEARNING_RATE,
                       clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adadelta':
        return Adadelta(lr=config.LEARNING_RATE,
                        clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adam':
        return Adam(lr=config.LEARNING_RATE,
                    clipnorm=config.GRADIENT_CLIP_NORM,
                    amsgrad=config.AMSGRAD)
    elif config.OPTIMIZER == 'Adamax':
        return Adamax(lr=config.LEARNING_RATE,
                      clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Nadam':
        return Nadam(lr=config.LEARNING_RATE,
                     clipnorm=config.GRADIENT_CLIP_NORM)
    else:
        raise Exception('Unrecognized optimizer: {}'.format(config.OPTIMIZER))
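The config argument only needs the attributes the function reads; a minimal stand-in, with all values illustrative:

from types import SimpleNamespace

# Only the attributes accessed above are required; the values are examples.
config = SimpleNamespace(OPTIMIZER='Adamax',
                         LEARNING_RATE=0.002,
                         GRADIENT_CLIP_NORM=5.0,
                         LEARNING_MOMENTUM=0.9,
                         NESTEROV=True,
                         AMSGRAD=False)
opt = get_optimizer(config)  # -> Adamax(lr=0.002, clipnorm=5.0)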
Example #13
def SelectOptimizer(OptimizerName='Adam', LearningRate=0.001, Decay=0.001):

    Optimizer = OptimizerName

    if Optimizer == 'SGD':
        from keras.optimizers import SGD
        return SGD(lr=LearningRate, decay=Decay)

    elif Optimizer == 'RMSprop':
        from keras.optimizers import RMSprop
        return RMSprop(lr=LearningRate, decay=Decay)

    elif Optimizer == 'Adagrad':
        from keras.optimizers import Adagrad
        return Adagrad(lr=LearningRate, decay=Decay)

    elif Optimizer == 'Adadelta':
        from keras.optimizers import Adadelta
        return Adadelta(lr=LearningRate, decay=Decay)

    elif Optimizer == 'Adam':
        from keras.optimizers import Adam
        return Adam(lr=LearningRate, decay=Decay)

    elif Optimizer == 'Adamax':
        from keras.optimizers import Adamax
        return Adamax(lr=LearningRate, decay=Decay)

    elif Optimizer == 'Nadam':
        from keras.optimizers import Nadam
        return Nadam(lr=LearningRate, schedule_decay=Decay)

    else:
        raise ValueError('Unrecognized optimizer: {}'.format(Optimizer))
Example #14
 def run(self):
     X_transformed = Pipeline((tolower, splitter)).transform(X)
     model = Sequential()
     model.add(
         Convolution1D(filters=32,
                       kernel_size=3,
                       padding='same',
                       activation='relu',
                       input_shape=(loader.NB_WORDS * W2V_SIZE, 1)))
     model.add(MaxPooling1D(pool_size=2))
     model.add(LSTM(100))
     model.add(Dropout(0.7))
     model.add(Dense(256, activation='relu', kernel_constraint=maxnorm(3)))
     model.add(Dense(loader.NB_CLASSES, activation='softmax'))
     model.compile(loss='categorical_crossentropy',
                   optimizer=Adamax(),
                   metrics=['accuracy'])
     print(model.summary())
     X_train, X_test, y_train, y_test = Word2VecSeqSource().get_train_test(
         X_transformed, y)
     model.fit(X_train,
               y_train,
               validation_data=(X_test, y_test),
               epochs=1,
               batch_size=64)
     score = model.evaluate(X_test, y_test, verbose=0)
     print('Test accuracy:', score[1])
     return model
Example #15
def model_fit():

    epochs = 30

    val_generator, train_generator = Data_augmentation()
    checkpoint = create_checkpoint()
    model = building_model()

    earlystop = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=3,
                              verbose=1,
                              restore_best_weights=True)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  patience=3,
                                  verbose=1,
                                  min_delta=0.0001)

    callbacks = [checkpoint, reduce_lr]  # note: earlystop is defined above but not registered
    model.compile(loss="categorical_crossentropy",
                  optimizer=Adamax(lr=0.0001),
                  metrics=['accuracy'])

    with tf.device("/device:GPU:0"):
        history = model.fit_generator(train_generator,
                                      steps_per_epoch=520,
                                      epochs=epochs,
                                      callbacks=callbacks,
                                      validation_data=val_generator,
                                      validation_steps=len(val_generator) //
                                      BATCH_SIZE)

    return history
Example #16
def create_model(w1l1, w1l2, w2l1, w2l2, w3l1, w3l2):
    # create model
    #L=WinnerTakeAll1D_GaborMellis(spatial=1, OneOnX=WTAX)
    model = Sequential()

    model.add(
        Dense(100,
              input_dim=95,
              init='normal',
              activation='relu',
              W_regularizer=l1l2(l1=0.0005, l2=0.0001))
    )  # ,W_regularizer=l1l2(l1=9E-7, l2=5e-07))) #W_regularizer=l1(0.000001), activity_regularizer=activity_l1(0.000001)))
    #model.add(L)
    model.add(Dropout(0.1))
    model.add(Dense(540, activation='relu', W_regularizer=l1l2(l1=w1l1, l2=0)))
    #model.add(L)
    model.add(Dropout(0.3))
    model.add(Dense(310, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(440, activation='relu', W_regularizer=l1l2(l1=0, l2=w3l2)))
    model.add(Dropout(0.4))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    admax = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0)
    model.compile(optimizer=admax,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])  # Gradient descent
    return model
Example #17
def assemble_network_xu_mod(n):
    print("Assembling start")

    input_value = Input(shape=(1, 128, 128), dtype="float32")
    x = type_1_layer(input_value, 64)
    x = type_1_layer(x, 16)

    x = type_2_layer(x, 16)
    x = type_2_layer(x, 16)
    x = type_2_layer(x, 16)
    x = type_2_layer(x, 16)
    x = type_2_layer(x, 16)

    x = type_3_layer(x, 16)
    x = type_3_layer(x, 64)
    x = type_3_layer(x, 128)

    x = type_4_layer(x, 256)

    x = Dense(256, activation='relu')(x)
    x = Dense(2)(x)
    out = Activation('softmax')(x)
    network = Model(inputs=input_value, outputs=out)
    opt = Adamax(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None)
    network.compile(
        loss='binary_crossentropy',
        optimizer=opt,
        metrics=['accuracy'],
    )
    print("Assembly done")
    return network
Example #18
    def main(optimizer):

        if optimizer == 'adadelta':
            optimizer = Adadelta(
                lr=1.0,  # 1.0
                rho=0.95,  # 0.95
                epsilon=1e-08,  # 1e-08
                decay=0.0)  # 0.0

        elif optimizer == 'adagrad':
            optimizer = Adagrad(
                lr=0.01,  # 0.01
                epsilon=1e-08,  # 1e-08
                decay=0.0)  # 0.0

        elif optimizer == 'adam':
            optimizer = Adam(
                lr=0.001,  # 0.001
                beta_1=0.9,  # 0.9
                beta_2=0.999,  # 0.999
                epsilon=1e-08,  # 1e-08
                decay=0.0)  # 0.0

        elif optimizer == 'adamax':
            optimizer = Adamax(
                lr=0.001,  # 0.001
                beta_1=0.9,  # 0.9
                beta_2=0.999,  # 0.999
                epsilon=1e-08,  # 1e-08
                decay=0.0)  # 0.0

        elif optimizer == 'nadam':
            optimizer = Nadam(
                lr=0.001,  # 0.001
                beta_1=0.9,  # 0.9
                beta_2=0.999,  # 0.999
                epsilon=1e-08,  # 1e-08
                schedule_decay=0.004)  # 0.004

        elif optimizer == 'rmsprop':
            optimizer = RMSprop(
                lr=0.001,  # 0.001
                rho=0.9,  # 0.9
                epsilon=1e-08,  # 1e-08
                decay=0.0)  # 0.0

        elif optimizer == 'sgd':
            optimizer = SGD(
                lr=0.01,  # 0.01
                momentum=0.0,  # 0.0
                decay=0.0,  # 0.0
                nesterov=False)  # False

        else:
            raise ValueError(
                '[!] "{}" is not a valid optimizer; valid choices: '
                'adadelta, adagrad, adam, adamax, nadam, rmsprop, '
                'sgd'.format(optimizer))

        return optimizer
Example #19
 def __init__(self, x_train, y_train, x_test, y_test, batch_size, epochs,
              dropout, lr, name):
     self.x_train = x_train
     self.x_test = x_test
     self.y_train = y_train
     self.y_test = y_test
     self.name = name
     self.lr = lr
     self.batch_size = batch_size
     self.epochs = epochs
     self.class_weight = compute_class_weight(class_weight='balanced',
                                              classes=np.arange(10),
                                              y=y_train.argmax(axis=1))
     model = Sequential()
     model.add(
         Bidirectional(LSTM(300, return_sequences=True),
                       input_shape=(n_steps, dim_input)))
     model.add(AttentionWithContext())
     model.add(Addition())
     model.add(Dense(300))
     model.add(LeakyReLU())
     model.add(Dropout(dropout))
     model.add(Dense(300))
     model.add(LeakyReLU())
     model.add(Dropout(dropout))
     model.add(Dense(10, activation='softmax'))
     # Lower learning rate to prevent divergence
     adamax = Adamax(self.lr)
     model.compile(adamax, 'categorical_crossentropy', metrics=['accuracy'])
     self.model = model
Example #20
def build_model(mode):
    model = Sequential()
    if mode == 'shallow':
        model = Sequential()
        model.add(Conv2D(64, (3, 3), input_shape=(48, 48, 1),
                         activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(128, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(256, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2)))
        model.add(Dropout(0.3))

        model.add(Flatten())
        model.add(Dense(units=1500, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(units=7, activation='softmax'))

        opt = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                     decay=0.0)
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
    model.summary()
    return model
Example #21
    def train_network(self):
        epoch = 300
        batch_size = 69
        learning_rate = math.exp(-6)
        input_dim = 3

        # NEURAL NET --------------------------------------------------------------------

        model = Sequential()
        model.add(Dense(96, input_dim=input_dim, activation='relu'))
        model.add(Dense(96, activation='relu'))
        model.add(Dense(96, activation='relu'))
        model.add(Dropout(0.4))
        model.add(Dense(96, activation='relu'))
        model.add(Dense(96, activation='relu'))
        model.add(Dense(96, activation='relu'))
        model.add(Dropout(0.4))

        # model.add(SimpleRNN(450, input_shape=(3,1), activation='relu'))
        # model.add(Dense(280, activation='relu'))
        # model.add(Dense(280, activation='relu'))
        # model.add(Dropout(0.45))
        # model.add(Dense(180, activation='relu'))
        # model.add(Dense(180, activation='relu'))
        # model.add(Dropout(0.45))
        model.add(Dense(96, activation='relu'))
        model.add(Dense(2, activation='softmax'))

        opt = Adamax(lr=learning_rate)
        model.compile(loss="sparse_categorical_crossentropy",
                      optimizer=opt,
                      metrics=['accuracy'])

        # FIT AND EVALUATE --------------------------------------------------------------
        # NOTE: 'i' is assumed to be defined in the enclosing scope.
        tensorboard = TensorBoard(log_dir="logs/{}".format(time()) + "-->" +
                                  str(i))

        lgen = lg()

        train_data = np.loadtxt("Training/train_data.txt", delimiter=" ")
        label_train = lgen.generate("ACC_Yauto/LOG_ACCELEROMETRO.txt",
                                    "ACC_Nauto/LOG_ACCELEROMETRO.txt")

        test_data = np.loadtxt("Test/test_data.txt", delimiter=" ")
        label_test = lgen.generate("ACC_Yauto_test/Test_Auto.txt",
                                   "ACC_Nauto_test/Test_AutoN.txt")

        # train_data = np.expand_dims(train_data, axis=2)
        # test_data = np.expand_dims(test_data, axis=2)

        model.fit(train_data,
                  label_train,
                  epochs=epoch,
                  batch_size=batch_size,
                  callbacks=[tensorboard])
        res = model.evaluate(test_data, label_test)
        print("\n\nRESULT: %s: %.2f%%" %
              (model.metrics_names[1], res[1] * 100))

        model.save('ANN_inference.h5')
Example #22
def select(optimizer, initial_lr):

    if optimizer == 'SGD':
        return SGD(lr=initial_lr, decay=1e-6, momentum=0.9, nesterov=True)
    elif optimizer == 'RMSprop':
        return RMSprop(lr=initial_lr, rho=0.9, epsilon=None, decay=0.0)
    elif optimizer == 'Adagrad':
        return Adagrad(lr=initial_lr, epsilon=None, decay=0.0)
    elif optimizer == 'Adadelta':
        return Adadelta(lr=initial_lr, rho=0.95, epsilon=None, decay=0.0)
    elif optimizer == 'Adam':
        return Adam(lr=initial_lr,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=None,
                    decay=0.0,
                    amsgrad=False)
    elif optimizer == 'Adamax':
        return Adamax(lr=initial_lr,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=None,
                      decay=0.0)
    elif optimizer == 'Nadam':
        return Nadam(lr=initial_lr,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=None,
                     schedule_decay=0.004)
    else:
        raise ValueError('Unrecognized optimizer: {}'.format(optimizer))
Example #23
def get_network():
    input_value = Input(shape=(1, 128, 128))
    x = first_type_layer(input_value, 64)
    x = first_type_layer(x, 16)

    x = second_type_layer(x, 16)
    x = second_type_layer(x, 16)
    x = second_type_layer(x, 16)
    x = second_type_layer(x, 16)
    x = second_type_layer(x, 16)

    x = third_type_layer(x, 16)
    x = third_type_layer(x, 64)
    x = third_type_layer(x, 128)
    x = third_type_layer(x, 256)
    x = fourth_type_layer(x, 512)
    x = Dense(512, activation='relu')(x)

    output = Dense(2, activation='sigmoid')(x)
    # print("SUMMARY")
    network = Model(inputs=input_value, outputs=output)
    # network.summary()
    # print("SUMMARY")
    # opt = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.001)
    opt = Adamax(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
    network.compile(optimizer=opt,
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
    print('Network Compiled')
    return network
Example #24
def quadlayer(input_size, rate=0.1):
    model = Sequential()
    model.add(
        Dense(units=1000,
              activation='relu',
              input_dim=input_size,
              kernel_initializer='random_uniform'))
    model.add(Dropout(rate))
    model.add(
        Dense(units=500,
              activation='relu',
              kernel_initializer='random_uniform'))
    model.add(Dropout(rate))
    model.add(
        Dense(units=100,
              activation='relu',
              kernel_initializer='random_uniform'))
    model.add(Dropout(rate))
    model.add(
        Dense(units=10, activation='relu',
              kernel_initializer='random_uniform'))
    model.add(Dropout(rate))
    model.add(
        Dense(units=1,
              activation='sigmoid',
              kernel_initializer='random_uniform'))
    model.compile(loss='binary_crossentropy',
                  optimizer=Adamax(),
                  metrics=['accuracy'])
    return model
Example #25
def train_mlp1(x_train, y_train, x_test, y_test, input_dim, num_classes=24):
    """

    :param x_train:
    :param y_train:
    :param x_test:
    :param y_test:
    :param input_dim:
    :param num_classes:
    :return:
    """
    model = Sequential()
    model.add(Dense(512, input_dim=input_dim))
    model.add(Activation('relu'))   # An "activation" is just a non-linear function applied to the output of the layer
                                    # above. Here, with a "rectified linear unit", we clamp all values below 0 to 0.
    model.add(Dropout(0.1))        # Dropout helps protect the model from memorizing or "overfitting" the training data
    model.add(Dense(1024))
    model.add(Activation('relu'))

    model.add(Dropout(0.1))
    model.add(Dense(386))
    model.add(Activation('relu'))

    model.add(Dropout(0.1))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))  # This special "softmax" activation, among other things,
                                      # ensures the output is a valid probability distribution:
                                      # all its values are non-negative and sum to 1.

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adamax(lr=0.001, beta_1=0.9, beta_2=0.999,
                                   epsilon=1e-08, decay=1e-5),
                  metrics=["accuracy"])
    model.fit(x_train, y_train,
              batch_size=40, nb_epoch=16, verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=1)
    return score[1]
Example #26
    def __init__(self, data, name="", batch_size=args.batch_size,
                 lr=args.lr, epochs=args.epochs, dropout=args.dropout):
        vectors = np.stack(data.iloc[:, 1].values)
        labels = data.iloc[:, 0].values
        positive_idxs = np.where(labels == 1)[0]
        negative_idxs = np.where(labels == 0)[0]
        undersampled_negative_idxs = np.random.choice(negative_idxs,
                                                      len(positive_idxs),
                                                      replace=False)
        resampled_idxs = np.concatenate(
            [positive_idxs, undersampled_negative_idxs])

        x_train, x_test, y_train, y_test = train_test_split(
            vectors[resampled_idxs],
            labels[resampled_idxs],
            test_size=0.2,
            stratify=labels[resampled_idxs])
        self.x_train = x_train
        self.x_test = x_test
        self.y_train = to_categorical(y_train)
        self.y_test = to_categorical(y_test)
        self.name = name
        self.batch_size = batch_size
        self.epochs = epochs
        self.class_weight = compute_class_weight(class_weight='balanced',
                                                 classes=[0, 1],
                                                 y=labels)
        model = Sequential()
        model.add(LSTM(300, input_shape=(vectors.shape[1], vectors.shape[2])))
        model.add(Dense(300))
        model.add(LeakyReLU())
        model.add(Dropout(dropout))
        model.add(Dense(300))
        model.add(LeakyReLU())
        model.add(Dropout(dropout))
        model.add(Dense(2, activation='softmax'))
        # Lower learning rate to prevent divergence
        adamax = Adamax(lr)
        model.compile(adamax, 'categorical_crossentropy', metrics=['accuracy'])
        self.model = model
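A hedged sketch of the input this constructor expects: a DataFrame whose column 0 holds 0/1 labels and whose column 1 holds equal-shaped 2-D sequence arrays. The class name, shapes, and hyperparameter values below are illustrative:

import numpy as np
import pandas as pd

# Toy data: label in column 0, a (timesteps, features) array in column 1.
rows = [(np.random.randint(2), np.random.rand(50, 100)) for _ in range(200)]
data = pd.DataFrame(rows)
clf = SequenceClassifier(data, name='demo', batch_size=32, lr=0.002,
                         epochs=10, dropout=0.5)  # hypothetical class name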
Example #27
def build_model(model_fn, model_file=None):
    if model_file is not None:
        model = load_model(model_file)
        print(model.get_config())
        return model

    model = model_fn()

    optimizers = []
    optimizers.append(SGD(lr=.1, momentum=0.1, decay=0.0))
    optimizers.append(RMSprop(lr=0.001, rho=0.9, epsilon=1e-06))
    optimizers.append(Adagrad(lr=0.01, epsilon=1e-06))
    optimizers.append(Adadelta(lr=1.0, rho=0.95, epsilon=1e-06))
    #this is the optimizer that is used - Adam
    #you can change the lr parameter
    #initial: 2
    lr = 0.0001 / 2
    log("Learning rate for Adam: {}".format(lr))
    optimizers.append(Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08))
    optimizers.append(Adamax(lr=0.002, beta_1=0.9, beta_2=0.999,
                             epsilon=1e-08))

    model.compile(loss='mean_squared_error', optimizer=optimizers[4])

    return model
Example #28
    def learn_std(self, train_data, train_target, drp_rate=0.1, epochs=150,
                  batch_size=5, num_class=2):
        # Trying to get consistent results, but this is only a first step:
        # Keras does not currently allow every seed to be fixed, so the
        # weights are initialized differently and the results can differ
        # on each run.
        numpy.random.seed(0)

        self.model = Sequential()

        num_features = train_data.shape[1]
        # First dense layer, with sigmoid activation
        self.model.add(Dense(num_features + 1, input_dim=num_features))
        self.model.add(Activation("sigmoid"))

        # Final output layer with softmax
        self.model.add(Dense(units=num_class))
        self.model.add(Activation('softmax'))

        print(np.unique(train_target))
        print(train_target[0])
        y_ints = [y.argmax() for y in train_target]
        cw = class_weight.compute_class_weight('balanced', np.unique(y_ints),
                                               y_ints)
        print(cw)

        earlystop = EarlyStopping(monitor='val_loss', min_delta=0.00001,
                                  patience=955, verbose=1, mode='auto')
        callbacks_list = [earlystop]
        callbacks_list = []  # early stopping disabled
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adamax(),
                           metrics=['accuracy'])
        self.model.fit(train_data, train_target, batch_size=batch_size,
                       validation_split=0.2, epochs=epochs,
                       callbacks=callbacks_list, class_weight=cw)
Example #29
def create_model():

    model = Sequential()
    # 30 input features.
    # 'L' is a custom layer instance (e.g. WinnerTakeAll1D_GaborMellis)
    # assumed to be defined at module level.
    model.add(Dense(285, input_dim=30, init='normal', activation='relu',
                    trainable=True, W_regularizer=l1l2(l1=9E-7, l2=5e-07)))
    model.add(Dropout(0.35))
    model.add(L)
    model.add(Dense(360, activation='relu'))
    model.add(Dropout(0.35))
    model.add(L)
    model.add(Dense(270, activation='relu'))
    model.add(Dropout(0.35))
    model.add(L)

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    #model.load_weights('weights_boosting.h5')

    admax = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                   decay=0.0)
    callbacks = [
        ModelCheckpoint(
            "/home/admin-7036/Documents/Projet python/bosongit/weigh.hdf",
            monitor='val_loss', save_best_only=True, verbose=0),
        ReduceLROnPlateau(monitor='val_loss', factor=0.97, patience=1,
                          min_lr=0.00001)
    ]  # note: these callbacks must be passed to fit(); compile() ignores them

    # A single sigmoid output calls for plain 'accuracy';
    # 'sparse_categorical_accuracy' would always compare against index 0.
    model.compile(optimizer=admax, loss='binary_crossentropy',
                  metrics=['accuracy'])  # Gradient descent

    print(model.summary())  # Prints the network details

    return model
Example #30
def create_cnn_vectorization():
    # if os.path.exists('race-car_larger2.h5'):
    #     print("Model is loaded")
    #     return load_model('race-car_larger2.h5')

    model = Sequential()

    # input_shape is only needed on the first layer
    model.add(Conv2D(filters=16, kernel_size=8, strides=(4, 4),
                     input_shape=(84, 84, 4)))
    model.add(Activation('relu'))
    model.add(Conv2D(filters=32, kernel_size=4))
    model.add(Activation('relu'))
    model.add(Flatten())

    model.add(Dense(256, init='lecun_uniform'))
    model.add(Activation('relu'))

    model.add(Dense(128, init='lecun_uniform'))
    model.add(Activation('relu'))

    model.add(Dense(11, init='lecun_uniform'))
    model.add(Activation('linear'))

    model.compile(loss='mse', optimizer=Adamax(lr=0.001))  # lr=0.001

    return model
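A quick smoke test of the network above, assuming it is used DQN-style; the random frame stack stands in for real preprocessed game frames:

import numpy as np

model = create_cnn_vectorization()
frames = np.random.rand(1, 84, 84, 4).astype('float32')
q_values = model.predict(frames)
print(q_values.shape)  # (1, 11): presumably one value per discrete action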