Example #1
def build_model():
    # expected input data shape: (batch_size, timesteps, data_dim)
    model = Sequential()

    model.add(
        LSTM(90,
             return_sequences=True,
             input_shape=(timesteps, data_dim),
             dropout=0.12))  # returns a sequence of vectors of dimension 90

    model.add(LSTM(64, return_sequences=True, dropout=0.12))

    model.add(LSTM(32, return_sequences=True, dropout=0.12))

    model.add(LSTM(20, dropout=0.12))  # returns a single vector of dimension 20

    model.add(Dense(num_classes, activation='softmax'))

    optimizer = adagrad(lr=0.001)

    start = time.time()
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    print("> Compilation Time : ", time.time() - start)
    return model
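A minimal usage sketch for the builder above, assuming the module-level globals it reads (timesteps, data_dim, num_classes) and the usual Keras 2.x imports; the shapes and data here are illustrative, not from the original:

import time
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense
from keras.optimizers import adagrad

timesteps, data_dim, num_classes = 8, 16, 10  # illustrative values for the assumed globals

model = build_model()
x = np.random.random((32, timesteps, data_dim))                    # (batch_size, timesteps, data_dim)
y = np.eye(num_classes)[np.random.randint(num_classes, size=32)]  # one-hot labels
model.fit(x, y, batch_size=32, epochs=1)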
Example #2
def build_model(image_features, caption_features, embedding_matrix):
    image_dense = Dense(WORD_DIM, name="image_dense")(image_features)
    image_output = BatchNormalization()(image_dense)

    cap_embed = Embedding(input_dim=embedding_matrix.shape[0],
                          output_dim=WORD_DIM,
                          weights=[embedding_matrix],
                          input_length=MAX_SEQUENCE_LENGTH,
                          trainable=False,
                          name="caption_embedding")(caption_features)

    #flat = Flatten()(cap_embed)
    lstm_out = LSTM(100)(cap_embed)
    caption_output = Dense(WORD_DIM, name="lstm_dense")(lstm_out)
    caption_output = BatchNormalization()(caption_output)
    output = Dot(axes=-1, normalize=True)([image_output, caption_output])
    #concated = concatenate([image_output, caption_output], axis=-1)

    if args.optimizer == 'rmsprop':
        opt = optimizers.rmsprop(lr=float(args.learning_rate))
    elif args.optimizer == 'adam':
        opt = optimizers.adam(lr=float(args.learning_rate))
    elif args.optimizer == 'adagrad':
        opt = optimizers.adagrad(lr=float(args.learning_rate))
    else:
        raise ValueError("unsupported optimizer: %s" % args.optimizer)

    mymodel = Model(inputs=[image_features, caption_features], outputs=output)
    mymodel.compile(optimizer=opt,
                    loss=mean_squared_error,
                    metrics=['accuracy'])
    return mymodel
Example #3
def create_model(input_size, output_size, n_layers, n_neurons,
                 activation_function, learning_rate, dropout_rate, optimizer):
    model = models.Sequential()
    model.add(
        layers.Dense(n_neurons,
                     input_shape=(input_size, ),
                     name='new_androdet_dense_1'))
    for _ in range(n_layers):
        if dropout_rate != 0.0:
            model.add(layers.Dropout(dropout_rate, noise_shape=None,
                                     seed=None))
        model.add(layers.Dense(n_neurons, activation=activation_function))
    model.add(layers.Dense(output_size, activation="sigmoid"))
    #model.summary()
    if optimizer == 'rmsprop':
        opt = optimizers.rmsprop(lr=learning_rate)
    elif optimizer == 'adam':
        opt = optimizers.adam(lr=learning_rate)
    elif optimizer == 'sgd':
        opt = optimizers.sgd(lr=learning_rate)
    elif optimizer == 'adagrad':
        opt = optimizers.adagrad(lr=learning_rate)
    elif optimizer == 'adadelta':
        opt = optimizers.adadelta(lr=learning_rate)
    elif optimizer == 'adamax':
        opt = optimizers.adamax(lr=learning_rate)
    elif optimizer == 'nadam':
        opt = optimizers.nadam(lr=learning_rate)
    else:
        raise ValueError("unsupported optimizer: %s" % optimizer)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=["mean_squared_error"])
    return model
Example #4
    def __init__(self, n_input):
        # model
        self.model = Sequential()
        #input
        self.model.add(Dense(32, input_dim=n_input, kernel_initializer='uniform', activation='softplus'))
        #hidden layers
        self.model.add(Dropout(rate=0.2))
        self.model.add(Dense(64, kernel_initializer='he_uniform', activation='softplus'))
        self.model.add(Dropout(rate=0.2))
        self.model.add(Dense(128, kernel_initializer='he_uniform', activation='softplus'))
        self.model.add(Dropout(rate=0.2))
        self.model.add(Dense(256, kernel_initializer='he_uniform', activation='softplus'))
        self.model.add(Dropout(rate=0.2))
        self.model.add(Dense(128, kernel_initializer='he_uniform', activation='softplus'))
        self.model.add(Dropout(rate=0.2))
        self.model.add(Dense(64, kernel_initializer='he_uniform', activation='softplus'))
        self.model.add(Dropout(rate=0.2))
        self.model.add(Dense(32, kernel_initializer='he_uniform', activation='softplus'))
        self.model.add(Dropout(rate=0.2))
        self.model.add(Dense(1, kernel_initializer='he_uniform', activation='sigmoid'))
        #compile
        self.model.compile(optimizer=adagrad(lr=0.05), loss='binary_crossentropy')

        #title
        self.title = 'optimizer: adagrad , lr = 0.05, loss = binary crossentropy'

        #training
        self.history = None

        #name
        self.model_name = "BCM1"
Example #5
 def make_model(self):
     # design network
     model = Sequential()
     if len(self.num_neurons) == 1:
         model.add(
             LSTM(self.num_neurons[0],
                  input_shape=(None, self.train_X.shape[2]),
                  return_sequences=False,
                  activation=self.act_func))
     else:
         model.add(
             LSTM(self.num_neurons[0],
                  input_shape=(None, self.train_X.shape[2]),
                  return_sequences=True,
                  activation=self.act_func))
         for layer in self.num_neurons[1:len(self.num_neurons) - 1]:
             model.add(
                 LSTM(layer,
                      return_sequences=True,
                      activation=self.act_func))
         model.add(
             LSTM(self.num_neurons[-1],
                  return_sequences=False,
                  activation=self.act_func))
     model.add(Dense(self.num_regions, activation='linear'))
     optimizer = optimizers.adagrad()
     model.compile(loss=self.loss, optimizer=optimizer)
     self.model = model
Example #6
def create_and_train_model(splitted_data, embedding_layer):
    question_train = splitted_data["train"][0]
    answer_train = splitted_data["train"][1]
    label_train = splitted_data["train"][2]

    question_test = splitted_data["dev"][0]
    answer_test = splitted_data["dev"][1]
    label_test = splitted_data["dev"][2]

    model = build_model(embedding_layer)

    model.compile(
        loss='binary_crossentropy',  #loss = 'categorical_crossentropy', #
        #optimizer=optimizers.adagrad(lr=0.01, epsilon=1e-08),
        optimizer=optimizers.adagrad(lr=0.0001),
        metrics=['accuracy'])
    #metrics=[precision_threshold(0.5), recall_threshold(0.5)])
    #class_mode='binary')

    model.fit([question_train, answer_train],
              label_train,
              batch_size=128,
              epochs=30,
              validation_split=0.1,
              verbose=2
              #show_accuracy=True
              )
    #validation_data=([question_test, answer_test], label_test))
    #model_json = model.to_json()
    #with open("squad_model.json", "w") as json_file:
    #    json_file.write(model_json)
    model.save_weights("simple_squad_model_2.h5")

    label_predicted = model.predict([question_test, answer_test])
    print("predicted: ", label_predicted[:100])
    print("actual: ", label_test[:100])

    score = model.evaluate([question_test, answer_test], label_test, verbose=1)

    print("keras score: ", score)

    label_binary = []
    threshold = 0.5
    for row in label_predicted:
        for num in row:
            if num > threshold:
                label_binary.append(1)
            else:
                label_binary.append(0)

    label_binary = np.asarray(label_binary)
    print(label_binary[:100])
    precision = precision_score(label_test, label_binary)
    recall = recall_score(label_test, label_binary)
    f1 = f1_score(label_test, label_binary)

    print("precision: ", precision)
    print("recall: ", recall)
    print("f1: ", f1)
Example #7
def adagrad(lr=0.01, epsilon=None, decay=0.0):
    """
    Adagrad optimizer
    :param lr: >=0, initial learning rate
    :param epsilon: >=0, If None, defaults to K.epsilon()
    :param decay: learning rate decay over each update
    """
    return optimizers.adagrad(lr=lr, epsilon=epsilon, decay=decay)
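A hedged call sketch for this wrapper; 'model' stands for any already-built Keras model and is an assumption, not part of the original:

opt = adagrad(lr=0.01, epsilon=1e-8, decay=1e-6)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])  # 'model' is assumed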
Example #8
def Main():

    """Main Loop"""
    start = time.time()



    averagedResult = {}
    averagedResult["SGD"]=train(SGD(),"SGD",0, [sample],trainAveragingIterations)
    averagedResult["Momentum"]=train(SGD(momentum=0.9),"Momentum", 0,[sample],trainAveragingIterations)
    averagedResult["Search-then-Converge"]=train(SGD(), "STC",0, [sample,stc],trainAveragingIterations)
    averagedResult["Cyclical Learning Rate"]=train(SGD(), "CLR",0,[sample,clr],trainAveragingIterations)
    averagedResult["ADAGRAD"]=train(adagrad(),"Adagrad",0,[sample],trainAveragingIterations)
    averagedResult["RMSProp"]=train(RMSprop(),"RMSProp",0,[sample],trainAveragingIterations)
    averagedResult["Adam"]=train(adam(), "Adam",0,[sample],trainAveragingIterations)
    print("Time")
    print(time.time()-start)


    iterations = [graphSamplePeriod*(i+1) for i in range(int(trainingSize/graphSamplePeriod))]

    for i in averagedResult:
        accuracy = averagedResult[i]['training_accuracy']

        plt.plot(iterations, accuracy*100, label=i)
        plt.xlabel("Iteration")
        plt.ylabel("Training Accuracy (%)")
        plt.legend()
    plt.show()
    for i in averagedResult:
        loss = averagedResult[i]['training_loss']

        plt.plot(iterations, loss, label=i)
        plt.xlabel("Iteration")
        plt.ylabel("Training Loss")
        plt.legend()
    plt.show()

    iterations = [tableSamplePeriod*(i+1) for i in range(int(trainingSize/tableSamplePeriod))]
    for i in averagedResult:
        test_accuracy = averagedResult[i]['test_accuracy']

        plt.plot(iterations, test_accuracy * 100, label=i)
        plt.xlabel("Iteration")
        plt.ylabel("Test Accuracy (%)")
        plt.legend()
    plt.show()
    for i in averagedResult:
        loss = averagedResult[i]['test_loss']

        plt.plot(iterations, loss, label=i)
        plt.xlabel("Iteration")
        plt.ylabel("Test Loss")
        plt.legend()
    plt.show()
    for i in averagedResult:
        print("{0} test accuracy: {1} test loss: {2}".format(i,averagedResult[i]["test_accuracy"],averagedResult[i]["test_loss"]))
Example #9
def add_head_to_model(trained_model, head_name, gate_name, num_output_neurons):
    # attach a new classifier head to the output of the layer named gate_name
    bottleneck = trained_model.get_layer(name=gate_name).output
    output = get_classifier(output_neurons=num_output_neurons)(bottleneck)
    model = Model(inputs=trained_model.input, outputs=output, name=head_name)
    optimiser = adagrad(lr=0.02)  #sgd(momentum=0.9, nesterov=True)

    model.compile(optimizer=optimiser, loss=binary_crossentropy)
    return model
Example #10
    def _build_net(self) -> keras.models.Model:
        input_tensor = keras.layers.Input(shape=self.inputshape())
        hidden_tensor = keras.layers.Dense(800, activation=relu)(input_tensor)

        pitch_tensor = keras.layers.Dense(127, activation=softmax)(hidden_tensor)
        tsbq_tensor = keras.layers.Dense(self.maxtsbq + 1, activation=softmax)(hidden_tensor)
        dq_tensor = keras.layers.Dense(self.maxdq + 1, activation=softmax)(hidden_tensor)

        model = keras.models.Model(inputs=input_tensor, outputs=[pitch_tensor, tsbq_tensor, dq_tensor])
        model.compile(optimizer=adagrad(), loss=categorical_crossentropy)

        self.epochs = 20
        self.outfuns = sampler(.3), weighted_nlargest(2), weighted_nlargest(2)
        return model
Example #11
def create_model(layers_and_filters,
                 kernels,
                 activation,
                 input_shape,
                 dropout_rate,
                 optimizer,
                 learning_rate,
                 output_size=1):
    model = models.Sequential()
    i = 0
    for filters in layers_and_filters:
        model.add(
            layers.Conv2D(filters,
                          kernel_size=kernels[i],
                          strides=kernels[i],
                          activation=activation,
                          input_shape=input_shape))
        i += 1
        if i < len(layers_and_filters):
            model.add(layers.MaxPooling2D(pool_size=(2, 2)))
            model.add(layers.BatchNormalization())

    if dropout_rate != 0:
        model.add(layers.Dropout(dropout_rate))
    model.add(layers.Flatten())
    model.add(layers.Dense(output_size, activation='sigmoid'))

    if optimizer == 'rmsprop':
        opt = optimizers.rmsprop(lr=learning_rate)
    elif optimizer == 'adam':
        opt = optimizers.adam(lr=learning_rate)
    elif optimizer == 'sgd':
        opt = optimizers.sgd(lr=learning_rate)
    elif optimizer == 'adagrad':
        opt = optimizers.adagrad(lr=learning_rate)
    elif optimizer == 'adadelta':
        opt = optimizers.adadelta(lr=learning_rate)
    elif optimizer == 'adamax':
        opt = optimizers.adamax(lr=learning_rate)
    elif optimizer == 'nadam':
        opt = optimizers.nadam(lr=learning_rate)
    else:
        raise ValueError("unsupported optimizer: %s" % optimizer)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=["mean_squared_error"])

    #model.summary()
    return model
Example #12
def build_and_fit_autoencoder(x_train, layers, opt, learn_rate):
    input_dim = len(x_train[0])
    input_data = Input(shape=(input_dim, ))
    # define encoding dimensions as input_dim reduced by half per layer
    encoded_layer_sizes = [
        int(input_dim / (2**i)) for i in range(0, layers + 1)
    ]

    # encoder layers
    encoded = Dense(encoded_layer_sizes[1],
                    activation='relu',
                    kernel_initializer='random_normal')(
                        input_data)  # encoded layer
    for i in range(1, layers):  # we already have initial encoder layer
        encoded = Dense(encoded_layer_sizes[i + 1],
                        activation='relu',
                        kernel_initializer='random_normal')(
                            encoded)  # encoded layer

    # decoder layers
    decoded = Dense(encoded_layer_sizes[layers - 1],
                    activation='relu',
                    kernel_initializer='random_normal')(
                        encoded)  # decoded layer
    for i in range(1, layers):  # we already have initial decoder layer
        decoded = Dense(encoded_layer_sizes[layers - (i + 1)],
                        activation='relu',
                        kernel_initializer='random_normal')(
                            decoded)  # decoded layer

    # build, compile, and fit the model
    autoencoder = Model(inputs=input_data, outputs=decoded)
    #opt = optimizers.adam(lr=0.001)
    if opt == 'adam':
        opt = optimizers.adam(lr=learn_rate / 10.0)  # adam needs slower rate
    elif opt == 'sgd':
        opt = optimizers.sgd(lr=learn_rate)
    else:
        opt = optimizers.adagrad(lr=learn_rate)
    autoencoder.compile(optimizer=opt, loss='mean_squared_error')
    autoencoder.fit(x_train,
                    x_train,
                    epochs=10,
                    batch_size=32,
                    shuffle=True,
                    verbose=0)
    return autoencoder
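A short usage sketch under synthetic data; the only requirement is that the feature count matches len(x_train[0]) (the array and sizes below are assumptions):

import numpy as np

x_train = np.random.random((256, 64)).astype('float32')  # 256 samples, 64 features
ae = build_and_fit_autoencoder(x_train, layers=2, opt='adagrad', learn_rate=0.01)
reconstructions = ae.predict(x_train[:5])  # decoder output has the same 64-dim shape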
Example #13
def build_model(image_features, caption_features, embedding_matrix):

    #conv1_out = Conv1D(10, kernel_size=(2), strides=(1), padding='valid', dilation_rate=(1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)(image_features)
    image_dense = Dense(WORD_DIM, name="image_dense")(image_features)
    image_output = BatchNormalization()(image_dense)
    selu1 = Activation('selu')(image_output)
    selu2 = Activation('selu')(selu1)
    selu3 = Activation('selu')(selu2)
    selu4 = Activation('selu')(selu3)
    selu5 = Activation('selu')(selu4)
    selu6 = Activation('selu')(selu5)
    selu7 = Activation('selu')(selu6)
    selu8 = Activation('selu')(selu7)
    selu9 = Activation('selu')(selu8)
    selu10 = Activation('selu')(selu9)



    cap_embed = Embedding(
        input_dim=embedding_matrix.shape[0],
        output_dim=WORD_DIM,
        weights=[embedding_matrix],
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False,
        name="caption_embedding"
        )(caption_features)

    #flat = Flatten()(cap_embed)
    lstm_out = LSTM(100)(cap_embed)
    caption_output = Dense(WORD_DIM, name="lstm_dense")(lstm_out)
    caption_output = BatchNormalization()(caption_output)
    output = Dot(axes=-1, normalize=True)([selu10, caption_output])
    #concated = concatenate([image_output, caption_output], axis=-1)

    if args.optimizer == 'rmsprop':
        opt = optimizers.rmsprop(lr=float(args.learning_rate))
    elif args.optimizer == 'adam':
        opt = optimizers.adam(lr=float(args.learning_rate))
    elif args.optimizer == 'adagrad':
        opt = optimizers.adagrad(lr=float(args.learning_rate))
    else:
        raise ValueError("unsupported optimizer: %s" % args.optimizer)


    mymodel = Model(inputs=[image_features, caption_features], outputs=output)
    mymodel.compile(optimizer=opt, loss=mean_squared_error, metrics=['accuracy'])
    return mymodel
Example #14
def build_and_fit_model(train_X,
                        test_X,
                        train_y,
                        test_y,
                        numlayers=5,
                        dropout=0.25,
                        opt='adam',
                        learn_rate=0.001):
    model = Sequential()
    # define initial layer size and uniform scaling-down factor per layer
    layersize, layer_scale = len(train_X[0]), 1.0 / float(numlayers + 1)

    # input layer, then scaled down fully connected layers, then output layer
    model.add(
        Dense(layersize,
              input_dim=layersize,
              kernel_initializer='normal',
              activation='relu'))
    for i in range(numlayers):
        this_layersize = layersize - int(layersize * (layer_scale * (i + 1)))
        model.add(
            Dense(this_layersize,
                  kernel_initializer='normal',
                  activation='tanh'))
        model.add(Dropout(dropout))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))

    #opt = optimizers.adam(lr=0.0001)
    if opt == 'adam':
        opt = optimizers.adam(lr=learn_rate / 10.0)  # adam needs slower rate
    elif opt == 'sgd':
        opt = optimizers.sgd(lr=learn_rate)
    else:
        opt = optimizers.adagrad(lr=learn_rate)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    model.fit(train_X, train_y, epochs=50, verbose=0)
    ypred = np.array([i[0] for i in model.predict(test_X, batch_size=32)])
    metrics = gen_eval_metrics(test_y, ypred)
    accuracy = metrics[0]
    print('Fold accuracy: ' + str(accuracy))
    #score = model.evaluate(test_X, test_y, batch_size=32)
    return metrics
Example #15
    def _build_net(self) -> keras.models.Model:
        input_tensor = keras.layers.Input(shape=self.inputshape())
        hidden_tensor = keras.layers.Dense(800, activation=relu)(input_tensor)

        pitch_tensor = keras.layers.Dense(127,
                                          activation=softmax)(hidden_tensor)
        tsbq_tensor = keras.layers.Dense(self.maxtsbq + 1,
                                         activation=softmax)(hidden_tensor)
        dq_tensor = keras.layers.Dense(self.maxdq + 1,
                                       activation=softmax)(hidden_tensor)

        model = keras.models.Model(
            inputs=input_tensor,
            outputs=[pitch_tensor, tsbq_tensor, dq_tensor])
        model.compile(optimizer=adagrad(), loss=categorical_crossentropy)

        self.epochs = 20
        self.outfuns = sampler(.3), weighted_nlargest(2), weighted_nlargest(2)
        return model
Example #16
    def resetModel(self, model):
        with self.Trace["model/reset/optimizer"]:
            if self.OpType == "SGD":
                optimizer = optimizers.SGD(**self.OptimizerParams)
            elif self.OpType == "adadelta":
                optimizer = optimizers.adadelta(**self.OptimizerParams)
            elif self.OpType == "adagrad":
                optimizer = optimizers.adagrad(**self.OptimizerParams)
            else:
                raise ValueError("Unknown optimizer type %s" % (self.OpType, ))
        #self.Job.message("========= optimizer:%s, %s\n   mbsize=%d, iterations=%d" % (optimizer, optimizer_params, self.MBSize, self.Iterations))

        with self.Trace["model/reset/compile"]:
            model.compile(optimizer=optimizer,
                          loss=self.Loss,
                          metrics=[self.Metric])
        with self.Trace["model/reset/set_weights"]:
            model.set_weights(self.Weights0)

        return model
Example #17
def build_lstm_model(win_size_timesteps, data_dim, num_classes, layers_dict,
                     lr):
    # expected input data shape: (batch_size, timesteps, data_dim)

    model = Sequential()
    activation = None  # set when a 'dense' entry appears in layers_dict

    for layer in layers_dict:
        if layer['layer'] == 'input':
            model.add(
                LSTM(layer['units'],
                     return_sequences=True,
                     input_shape=(win_size_timesteps, data_dim),
                     dropout=layer['dropout']))
        elif layer['layer'] == 'last':
            model.add(LSTM(layer['units'], dropout=layer['dropout'])
                      )  # returns a single vector of dimension layer['units']
        elif layer['layer'] == 'dense':
            activation = layer['activation']
            model.add(Dense(num_classes, activation=activation))
        else:
            model.add(
                LSTM(layer['units'],
                     return_sequences=True,
                     dropout=layer['dropout']))

    #model.add(Dense(num_classes, activation='softmax'))

    optimizer = adagrad(lr)

    if activation == 'softmax':
        loss = 'categorical_crossentropy'
    elif activation == 'sigmoid':
        loss = 'binary_crossentropy'
    else:
        raise ValueError("the final dense activation must be 'softmax' or 'sigmoid'")

    model.compile(
        loss=loss,
        optimizer=optimizer,
        metrics=['accuracy']  #, metrics.categorical_accuracy]
    )

    return model
Example #18
def buildMICCAIModel(inputSz):
    regulPen = l2(0.001)
    #regulPen = l1_l2(l1=0.01, l2=0.01)  # used 0.01/0.01 until 22.5.2017

    ## Architecture
    model = Sequential()

    model.add(Dense(50, input_dim=inputSz,
                    kernel_initializer=initializers.he_normal(seed=None),
                    kernel_regularizer=regulPen))
    # alternatives tried here (commented out in the original): LeakyReLU(alpha=0.3),
    # PReLU, Dropout(0.5), and up to three more identical Dense(50) blocks
    model.add(Activation('relu'))
    model.add(Dense(50, kernel_initializer=initializers.he_normal(seed=None),
                    kernel_regularizer=regulPen))
    model.add(Activation('relu'))
    model.add(Dense(50, kernel_initializer=initializers.he_normal(seed=None),
                    kernel_regularizer=regulPen))
    model.add(Activation('relu'))

    model.add(Dense(5, kernel_initializer=initializers.he_normal(seed=None),
                    kernel_regularizer=regulPen))
    ada = adagrad(lr=0.001)
    #sgd = SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=False)  # unused alternative
    #earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')

    model.compile(loss='mse', optimizer=ada)
    return model
Example #19
def modelArchitecture():
    print("**")
    regularizer_weight = 0.001
    #model_resnet=ResNet50(include_top=False,weights="imagenet", input_shape=(224,224,3))
    #model_resnet=VGG16(include_top=False, input_shape=(224,224,3))
    #x=model_resnet.output

    model_resnet = resnet50.ResNet50(include_top=False,
                                     weights="imagenet",
                                     input_shape=(224, 224, 3))
    model = Sequential()
    model.add(model_resnet)
    model.add(Flatten())

    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(2, activation='sigmoid'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.adagrad(lr=0.001),
                  metrics=['acc'])
    print("\n\t model summary=", model.summary())
    return model
Example #20
def GetModel(mode='create', filename='none', X=None, Y=None):
    model = None

    if (mode == 'create'):
        model = CreateModel(X=X, Y=Y)
        print("Neural net created...")

    if (mode == 'load_W'):
        model = CreateModel(X=X, Y=Y)
        model.load_weights(filename)
        print("Neural net loaded...")

    if (mode == 'load_model'):
        model = keras_file_manager.LoadFromJSon(filename)
        print("Neural net loaded...")

    # note: passing the string 'adadelta' uses that optimizer's defaults;
    # build optimizers.adagrad()/adadelta() instances instead to customize them
    model.compile(loss='binary_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    return model
Example #21
def create_model(optimizer='Adagrad',
                 kernel_initializer='he_normal',
                 activationXlast='relu',
                 dropout_rate=0.3,
                 weight_constraint=0,
                 init_neurons=180,
                 hidden_neurons=400,
                 lr=0.01,
                 decay=0.0):
    model = Sequential()
    model.add(
        Dense(init_neurons,
              kernel_initializer=kernel_initializer,
              input_dim=102,
              activation=activationXlast))  # ,
    # kernel_constraint=maxnorm(weight_constraint)))
    model.add(Dropout(dropout_rate))
    model.add(
        Dense(hidden_neurons,
              kernel_initializer=kernel_initializer,
              activation=activationXlast))
    model.add(
        Dense(hidden_neurons,
              kernel_initializer=kernel_initializer,
              activation=activationXlast))
    model.add(
        Dense(hidden_neurons,
              kernel_initializer=kernel_initializer,
              activation=activationXlast))
    model.add(
        Dense(1, kernel_initializer=kernel_initializer, activation='linear'))
    if optimizer == 'Adagrad':
        optimizer = optimizers.adagrad(lr=lr, decay=decay)
    model.compile(loss='mean_squared_logarithmic_error',
                  optimizer=optimizer,
                  metrics=['mean_squared_logarithmic_error'])
    return model
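The default-heavy signature suggests this builder is meant for scikit-learn-style hyperparameter search; a hedged sketch assuming the old keras.wrappers API and data with the hard-coded 102 input features:

from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV

estimator = KerasRegressor(build_fn=create_model, epochs=10, batch_size=32, verbose=0)
param_grid = {'lr': [0.01, 0.05], 'dropout_rate': [0.2, 0.3]}
grid = GridSearchCV(estimator, param_grid, cv=3)
# grid.fit(X, y)  # X: shape (n_samples, 102); y: continuous targets (both assumed)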
Example #22
def initialize_optimizer(optimizer_name: str, learning_rate: float, beta1: float, beta2: float,
                         lr_decay: float, rho: float, fuzz: float, momentum: float) \
        -> Union[adam, rmsprop, sgd, adagrad, adadelta, adamax]:
    """
    Initializes an optimizer based on the user's choices.

    :param optimizer_name: the optimizer's name.
        Can be one of 'adam', 'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adamax'.
    :param learning_rate: the optimizer's learning_rate
    :param beta1: the optimizer's beta1
    :param beta2: the optimizer's beta2
    :param lr_decay: the optimizer's lr_decay
    :param rho: the optimizer's rho
    :param fuzz: the optimizer's fuzz
    :param momentum: the optimizer's momentum
    :return: the optimizer.
    """
    if optimizer_name == 'adam':
        return adam(lr=learning_rate,
                    beta_1=beta1,
                    beta_2=beta2,
                    decay=lr_decay)
    elif optimizer_name == 'rmsprop':
        return rmsprop(lr=learning_rate, rho=rho, epsilon=fuzz)
    elif optimizer_name == 'sgd':
        return sgd(lr=learning_rate, momentum=momentum, decay=lr_decay)
    elif optimizer_name == 'adagrad':
        return adagrad(lr=learning_rate, decay=lr_decay)
    elif optimizer_name == 'adadelta':
        return adadelta(lr=learning_rate, rho=rho, decay=lr_decay)
    elif optimizer_name == 'adamax':
        return adamax(lr=learning_rate,
                      beta_1=beta1,
                      beta_2=beta2,
                      decay=lr_decay)
    else:
        raise ValueError('An unexpected optimizer name has been encountered.')
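A hedged usage example of the helper above; the argument values are illustrative:

opt = initialize_optimizer('adagrad', learning_rate=0.01, beta1=0.9, beta2=0.999,
                           lr_decay=1e-6, rho=0.9, fuzz=1e-8, momentum=0.0)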
Example #23
               activation='linear'))
    model.add(Flatten())

    model.add(Dense(output_dim=win_size * num_filters, activation="relu"))
    model.add(Dense(output_dim=win_size, activation="relu"))
    model.add(Dense(output_dim=win_size * num_filters, activation="relu"))

    model.add(Reshape(target_shape=(win_size, num_filters)))
    model.add(
        Conv1D(nb_filter=1,
               filter_length=filter_len,
               border_mode='same',
               activation='relu'))

    model.compile(loss='mean_squared_error',
                  optimizer=opt.adagrad(lr=0.01, epsilon=1e-8, decay=0.0),
                  metrics=['accuracy'])
    keras_plot(model,
               to_file='kettle_model.png',
               show_shapes=True,
               show_layer_names=True)
    model.fit(X_train,
              Y_train,
              batch_size=100,
              nb_epoch=10,
              verbose=1,
              callbacks=[])

    model.save(model_filename)
else:
    model = load_model(model_filename)
Example #24
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# 4. Configure the model training process

optimizer_list = []
# plain SGD
optimizer_list.append(['SGD', optimizers.SGD()])
# momentum
optimizer_list.append(['Momentum', optimizers.SGD(momentum=0.9)])
# NAG
optimizer_list.append((['NAG', optimizers.SGD(momentum=0.9, nesterov=True)]))
# Adagrad
optimizer_list.append(['Adagrad', optimizers.adagrad()])
# RMSProp
optimizer_list.append(['RMSProp', optimizers.rmsprop()])
# AdaDelta
optimizer_list.append(['AdaDelta', optimizers.adadelta()])
# Adam
optimizer_list.append(['Adam', optimizers.adam()])
# Nadam
optimizer_list.append(['Nadam', optimizers.nadam()])

score_list = []
opt_name_list = []
for optimizer_element in optimizer_list:
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer_element[1],
                  metrics=['accuracy'])
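The snippet is cut off here; the unused score_list and opt_name_list suggest the loop goes on to train and score the model for each optimizer. A hedged completion sketch (X and Y are assumed arrays of shape (n, 8) and binary labels; note that reusing one model without re-initializing weights lets earlier optimizers influence later runs):

    model.fit(X, Y, epochs=50, batch_size=64, verbose=0)  # X, Y are assumptions
    loss, acc = model.evaluate(X, Y, verbose=0)
    opt_name_list.append(optimizer_element[0])
    score_list.append(acc)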
Example #25
def trainAEClassifier(windowSz, pData):
    #############
    ### CONSTANTS
    #############
    hiddenSz = 40
    noEpochs = 1000
    #batchSz = 65536
    batchSz = 128000

    #############
    ### READ DATA
    #############
    print('-> reading data')
    f = h5py.File(pData, "r")
    bg = np.array(f["seqBackground"].value)
    fg = np.array(f["seqSpuelung"].value)
    f.close()

    fg = np.swapaxes(fg, 0, 1)
    bg = np.swapaxes(bg, 0, 1)

    print("fg.shape: " + str(fg.shape))
    print("bg.shape: " + str(bg.shape))

    x_fg = ThermographicDataHandler.slidingWindowPartitioning(fg, windowSz, 150)
    x_bg = ThermographicDataHandler.slidingWindowPartitioning(bg, windowSz, 150)

    sz1 = x_fg.shape
    sz2 = x_bg.shape
    x_fg = np.reshape(x_fg, (sz1[0] * sz1[1], sz1[2]))
    x_bg = np.reshape(x_bg, (sz2[0] * sz2[1], sz2[2]))
    ## normalize data
    #x_fg = ThermographicDataHandler.centerData(x_fg_raw)
    #x_bg = ThermographicDataHandler.centerData(x_bg_raw)

    ###################
    ### PARTITION DATA
    ###################
    # foreground samples
    (nX1, nT1) = x_fg.shape
    noFgSamples = nX1
    y_fg = np.ones(nX1)
    noFgSamplesTraining = int(np.round(0.8 * nX1))
    # background samples
    (nX2, nT2) = x_bg.shape
    y_bg = 0 * np.ones(nX2)
    noBgSamplesTraining = int(np.round(0.8 * nX2))
    # create training and testing sets
    X_train = np.concatenate((x_fg[0:noFgSamplesTraining, :], x_bg[0:noBgSamplesTraining, :]), axis=0)
    Y_train = np.concatenate((y_fg[0:noFgSamplesTraining], y_bg[0:noBgSamplesTraining]), axis=0)
    X_test = np.concatenate((x_fg[noFgSamplesTraining:, :], x_bg[noBgSamplesTraining:, :]), axis=0)
    Y_test = np.concatenate((y_fg[noFgSamplesTraining:], y_bg[noBgSamplesTraining:]), axis=0)

    #############
    ### Deep Parameter Approximation
    #############
    X_train = DPA.predict(X_train)
    X_test = DPA.predict(X_test)


    #############
    ### Train DT Classifier
    #############
    print('-> train DT classifier')
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(X_train, Y_train)
    joblib.dump(clf, pClassifierDT)
    score_pred = clf.score(X_train, Y_train)
    score_test = clf.score(X_test, Y_test)
    print(str(score_pred) + " vs " + str(score_test))
    cvres = cross_val_score(clf, X_test, Y_test, cv=10)
    print(str(cvres))

    #############
    ### Train NN Classifier
    #############
    ada = adagrad(lr=0.001)
    print('-> train NN classifier')
    model = Sequential()
    model.add(Dense(50, input_dim=5, activation='linear'))
    model.add(Dropout(0.05))
    model.add(Dense(10, activation='relu'))
    model.add(Dropout(0.05))
    model.add(Dense(50, activation='relu'))
    model.add(Dropout(0.05))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer=ada, metrics=['accuracy'],
                  loss='binary_crossentropy' )
    model.fit(X_train, Y_train, batch_size=batchSz, nb_epoch=noEpochs,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    model.save_weights(pClassifierWeights, overwrite=True)
    json_rep = model.to_json()
    with open(pClassifierConfig, "w") as f:  # json.dump needs a text-mode file
        json.dump(json_rep, f)
Example #26
    def build_model(self):
        """ build VAE model
        """

        input_dim = (self.image_size, self.image_size, self.image_channel)

        #   encoder architecture

        x = Input(shape=input_dim)

        conv_1 = Conv2D(self.image_channel,
                        kernel_size=self.num_conv,
                        padding='same',
                        activation='relu')(x)

        conv_2 = Conv2D(self.nfilters,
                        kernel_size=self.num_conv,
                        padding='same',
                        activation='relu',
                        strides=(2, 2))(conv_1)

        conv_3 = Conv2D(self.nfilters,
                        kernel_size=self.num_conv,
                        padding='same',
                        activation='relu',
                        strides=1)(conv_2)

        conv_4 = Conv2D(self.nfilters,
                        kernel_size=self.num_conv,
                        padding='same',
                        activation='relu',
                        strides=1)(conv_3)

        flat = Flatten()(conv_4)
        hidden = Dense(self.inter_dim, activation='relu')(flat)

        #   reparameterization trick

        z_mean = Dense(self.latent_dim)(hidden)
        z_log_var = Dense(self.latent_dim)(hidden)

        z = Lambda(self.sampling)([z_mean, z_log_var])

        #   decoder architecture

        output_dim = (self.batch_size, self.image_size // 2,
                      self.image_size // 2, self.nfilters)

        #   instantiate rather than pass through for later reuse

        decoder_hid = Dense(self.inter_dim, activation='relu')

        decoder_upsample = Dense(self.nfilters * self.image_size // 2 *
                                 self.image_size // 2,
                                 activation='relu')

        decoder_reshape = Reshape(output_dim[1:])

        decoder_deconv_1 = Conv2DTranspose(self.nfilters,
                                           kernel_size=self.num_conv,
                                           padding='same',
                                           strides=1,
                                           activation='relu')

        decoder_deconv_2 = Conv2DTranspose(self.nfilters,
                                           kernel_size=self.num_conv,
                                           padding='same',
                                           strides=1,
                                           activation='relu')

        decoder_deconv_3_upsamp = Conv2DTranspose(self.nfilters,
                                                  kernel_size=self.num_conv,
                                                  strides=(2, 2),
                                                  padding='valid',
                                                  activation='relu')

        decoder_mean_squash = Conv2D(self.image_channel,
                                     kernel_size=2,
                                     padding='valid',
                                     activation='sigmoid')

        hid_decoded = decoder_hid(z)
        up_decoded = decoder_upsample(hid_decoded)
        reshape_decoded = decoder_reshape(up_decoded)
        deconv_1_decoded = decoder_deconv_1(reshape_decoded)
        deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
        x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
        x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)

        #   need to keep generator model separate so new inputs can be used

        decoder_input = Input(shape=(self.latent_dim, ))
        _hid_decoded = decoder_hid(decoder_input)
        _up_decoded = decoder_upsample(_hid_decoded)
        _reshape_decoded = decoder_reshape(_up_decoded)
        _deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
        _deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
        _x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
        _x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)

        #   instantiate VAE models

        self.vae = Model(x, x_decoded_mean_squash)
        self.encoder = Model(x, z_mean)
        self.decoder = Model(decoder_input, _x_decoded_mean_squash)

        #   VAE loss terms w/ KL divergence

        def vae_loss(x, x_decoded_mean_squash):
            xent_loss = self.image_size * self.image_size * metrics.binary_crossentropy(
                K.flatten(x), K.flatten(x_decoded_mean_squash))
            kl_loss = -0.5 * K.sum(
                1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
            vae_loss = K.mean(xent_loss + kl_loss)
            return vae_loss

        opt = optimizers.adagrad(lr=self.learn_rate)

        self.vae.compile(optimizer=opt, loss=vae_loss)

        self.vae.summary()
Example #27
# Optimiser reference settings (each call constructs an optimizer; assign the result to use it)
opt.SGD(
        lr = 0.001, 
        decay = 0.1, 
        momentum = 0.9, 
        nesterov = True
        )
opt.rmsprop(
        lr=0.001, 
        rho=0.9,
        epsilon=1e-08, 
        decay=0.0
        )
opt.adagrad(
        lr=0.01, 
        epsilon=1e-08, 
        decay=0.0
        )

opt.adadelta(
        lr=1.0, 
        rho=0.95, 
        epsilon=1e-08, 
        decay=0.0
        )

opt.adam(
        lr=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-08,
        decay=0.0
        )
Example #28
def create_and_train_model(splitted_data, embedding_layer):
    train = splitted_data["train"]
    test = splitted_data["test"]
    question_train = train[0]
    answer_train = train[1]
    label_train = train[2]

    question_test = test[0]
    answer_test = test[1]
    label_test = test[2]

    model = build_model(embedding_layer)
    #model = bidirectional_attention.BidirectionalAttentionFlow(params.Params).model()
    model.load_weights("simple_squad_model_1.h5", by_name=True)
    model.compile(
        loss='binary_crossentropy',  #loss = 'categorical_crossentropy', #
        #optimizer=optimizers.adagrad(lr=0.01, epsilon=1e-08),
        optimizer=optimizers.adagrad(lr=0.0001),
        metrics=['accuracy'])
    #metrics=[precision_threshold(0.5), recall_threshold(0.5)])
    #class_mode='binary')

    model.fit([question_train, answer_train],
              label_train,
              batch_size=128,
              epochs=20,
              validation_split=0.1,
              verbose=2,
              callbacks=[EarlyStopping(monitor='val_loss', patience=10)]

              #show_accuracy=True
              )
    #validation_data=([question_test, answer_test], label_test))

    label_predicted = []
    divide_list = lambda lst, sz: [
        lst[i:i + sz] for i in range(0, len(lst), sz)
    ]
    #divided_label_test = divide_list(label_test,10)
    reciprocal_ranks = []

    label_index_begin = 0
    list_size = 0
    for i in range(0, len(question_test)):
        q = question_test[i]
        a = answer_test[i]
        current_predicted = model.predict([q, a])
        binary_predicted = [0] * len(current_predicted)
        highest_index = np.argmax(current_predicted)
        #highest = current_predicted[i]
        #for num in range(0, len(current_predicted)):
        #    if current_predicted[num] > highest:
        #        highest = current_predicted[num]
        #        highest_index = num
        binary_predicted[highest_index] = 1
        label_predicted = label_predicted + binary_predicted

        #get the rank here
        current_answer_list_length = len(a)
        label_index_end = label_index_begin + current_answer_list_length
        current_label_list = label_test[label_index_begin:label_index_end]

        index_of_1 = np.where(current_label_list == 1)[0][0]  #e.g. 2
        #value_of_the_item_thats_supposed_to_be_highest = current_predicted[index_of_1]
        sorted_by_index = np.argsort(
            current_predicted.flatten())  #e.g. [0,2,1]
        actual_rank = np.where(sorted_by_index == index_of_1)[0][0] + 1

        reciprocal_ranks = reciprocal_ranks + [1 / actual_rank]
        #print("recip rank", reciprocal_ranks[i])
        list_size = list_size + 1

    input_size = len(question_test)
    #reciprocal_ranks = np.array(reciprocal_ranks)
    sum_of_ranks = np.sum(reciprocal_ranks)
    mrr = 1 / input_size * sum_of_ranks

    label_predicted = np.asarray(label_predicted)
    #label_predicted = model.predict([question_test, answer_test])
    print("predicted: ", label_predicted[:100])
    print("actual: ", label_test[:100])

    precision = precision_score(label_test, label_predicted)
    recall = recall_score(label_test, label_predicted)
    f1 = f1_score(label_test, label_predicted)

    print("precision: ", precision)
    print("recall: ", recall)
    print("f1: ", f1)
    print("mrr: ", mrr)
Example #29
                                           smooth)


# Dice loss computed as -dice co-efficient
def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)


# Combined loss of weighted multi-class logistic loss and dice loss
def customized_loss(y_true, y_pred):
    return (1 * K.categorical_crossentropy(y_true, y_pred)) + (
        0.5 * dice_coef_loss(y_true, y_pred))


# Using the Adagrad optimiser with a learning rate of 0.005
# (Adagrad takes no momentum/nesterov arguments, so none are passed)
optimiser = optimizers.adagrad(lr=0.005)
# optimiser = 'Adam'

# Compiling the model
model.compile(optimizer=optimiser,
              loss=customized_loss,
              metrics=['accuracy', dice_coef],
              sample_weight_mode='temporal')

# Defining Callback functions which will be called by model during runtime when specified condition is satisfied
lr_reducer = ReduceLROnPlateau(factor=0.5,
                               cooldown=0,
                               patience=6,
                               min_lr=0.5e-6)
csv_logger = CSVLogger('exp1.csv')
model_checkpoint = ModelCheckpoint("exp1.hdf5",
Example #30
 def create_optimizer_instance(self, **d):
     return optimizers.adagrad(**d)
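A hedged call sketch; the keyword arguments are simply forwarded to optimizers.adagrad, and the instance name is an assumption:

opt = factory.create_optimizer_instance(lr=0.01, epsilon=1e-8, decay=0.0)  # 'factory' is an instance of the owning class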
Example #31
def create_model(activation, optimizer, learning_rate, output_size,
                 merged_layers):

    original_new_androdet_model = models.load_model(
        "../new_androdet/model_trained.k")
    original_cnn_model = models.load_model("../cnn/model_trained.k")
    original_dnn_model = models.load_model("../bow/model_trained.k")

    new_androdet_model = models.Sequential()
    cnn_model = models.Sequential()
    dnn_model = models.Sequential()

    for layer in original_new_androdet_model.layers[:-1]:
        layer.name = 'new_androdet_' + layer.name
        layer.trainable = False
        new_androdet_model.add(layer)

    for layer in original_cnn_model.layers[:-1]:
        layer.name = 'cnn_' + layer.name
        layer.trainable = False
        cnn_model.add(layer)

    for layer in original_dnn_model.layers[:-1]:
        layer.name = 'dnn_' + layer.name
        layer.trainable = False
        dnn_model.add(layer)

    entropy_input_layer = layers.Input(shape=(1, ), name='entropy_input')

    merge_layer = layers.concatenate([
        cnn_model.layers[-1].get_output_at(-1),
        dnn_model.layers[-1].get_output_at(-1), entropy_input_layer
    ])

    for (i, n_neurons) in enumerate(merged_layers):
        merge_layer = layers.Dense(n_neurons,
                                   activation=activation,
                                   name='dense{}'.format(i))(merge_layer)

    output_trivial = layers.concatenate(
        [merge_layer, new_androdet_model.layers[-1].get_output_at(-1)])
    output_trivial = layers.Dense(1, activation='sigmoid')(output_trivial)

    output_rest = layers.Dense(output_size - 1,
                               activation='sigmoid')(merge_layer)

    output_all = layers.concatenate([output_trivial, output_rest])

    model = models.Model(inputs=[
        new_androdet_model.layers[0].get_input_at(-1),
        cnn_model.layers[0].get_input_at(-1),
        dnn_model.layers[0].get_input_at(-1), entropy_input_layer
    ],
                         outputs=output_all)

    if optimizer == 'rmsprop':
        opt = optimizers.rmsprop(lr=learning_rate)
    elif optimizer == 'adam':
        opt = optimizers.adam(lr=learning_rate)
    elif optimizer == 'sgd':
        opt = optimizers.sgd(lr=learning_rate)
    elif optimizer == 'adagrad':
        opt = optimizers.adagrad(lr=learning_rate)
    elif optimizer == 'adadelta':
        opt = optimizers.adadelta(lr=learning_rate)
    elif optimizer == 'adamax':
        opt = optimizers.adamax(lr=learning_rate)
    elif optimizer == 'nadam':
        opt = optimizers.nadam(lr=learning_rate)
    else:
        raise ValueError("unsupported optimizer: %s" % optimizer)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=["mean_squared_error"])
    model.summary()

    return model
Example #32
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(
    layers.Dense(100,
                 activation='relu',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=l2(0.01)))
model.add(
    layers.Dense(1,
                 activation='sigmoid',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=l2(0.01)))
model.compile(
    loss='binary_crossentropy',
    #optimizer=optimizers.Nadam(lr=0.005),
    optimizer=optimizers.adagrad(),
    metrics=['acc'])
model.summary()

checkpointer = keras.callbacks.ModelCheckpoint(
    filepath='d:/data/optflow-{epoch}-{val_acc}.hdf5',
    verbose=1,
    save_best_only=True)
#estopper = keras.callbacks.EarlyStopping(monitor='val_acc', patience=30, baseline=0.8) # restore_best_weights=True,
hist = model.fit_generator(
    train_flow | infshuffle()
    | pp.as_batch(feature_field_name='optflow', label_field_name='class_id'),
    steps_per_epoch=100,
    validation_data=test_flow | infshuffle()
    | pp.as_batch(feature_field_name='optflow', label_field_name='class_id'),
    use_multiprocessing=False,  # has to be false on windows..
Example #33
    train_path = 'D:/University_Work/Spring 2019/566/train_data/'
    eval_path = 'D:/University_Work/Spring 2019/566/eval_data/'
    small_eval = 'D:/University_Work/Spring 2019/566/small_eval/'
    temp_path = 'D:/University_Work/Spring 2019/566/temp0/'
    total = 2711000
    # check('D:/University_Work/Spring 2019/566/final/train_data/')
    # a = np.asarray([[1,2],[3,4]])
    # (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
    # a = len(os.listdir(small_eval))
    train_gen = gen(batch_size, train_path)
    eval_gen = gen(batch_size, eval_path)
    test_gen = gen(6, temp_path)
    # (x_test, y_test) =
    #
    opt = optimizers.adagrad(lr=1e-3)
    #
    # print('Build model...')
    # model = Sequential()
    # # model.add(Conv1D(32, kernel_size=(20), activation='relu', input_shape=(200, 1)))
    # # model.add(MaxPooling1D(pool_size=(10), strides=(2)))
    # # model.add(Input(shape=(200, 1), name='read'))
    # model.add(Embedding(input_len, 32, input_length=200, input_shape=(input_len, )))
    # model.add(BatchNormalization())
    # model.add(Bidirectional(GRU(900, return_sequences=False)))
    # # model.add(Dense(256, activation='relu'))
    # model.add(Dense(1, activation='sigmoid'))
    #
    # # try using different optimizers and different optimizer configs
    # model.compile(loss='binary_crossentropy',
    #               optimizer=opt,