Code example #1
File: lstm.py Project: sumesh1/dl-time-series
# Assumed imports for this snippet (Keras 2.x with the TensorFlow backend):
from keras import optimizers
from keras.layers import (Activation, BatchNormalization, Bidirectional,
                          Conv3D, Cropping3D, CuDNNLSTM, Dense, Dropout,
                          Flatten, Input, Reshape, concatenate)
from keras.models import Model


def new_instance(input_shape, learning_rate):

    x, y, time, spectral = input_shape

    inputLayer = Input(shape=input_shape)

    # CNN branch: 3-D convolutions over the (x, y, time, spectral) cube
    cnn_model = Conv3D(256, kernel_size=(3, 3, 5), padding='same')(inputLayer)
    cnn_model = Conv3D(256, kernel_size=(3, 3, 1), padding='valid')(cnn_model)
    cnn_model = BatchNormalization()(cnn_model)
    cnn_model = Activation('relu')(cnn_model)
    cnn_model = Flatten()(cnn_model)

    # LSTM branch: crop to the centre pixel, then model the time dimension
    lstm_model = Cropping3D(cropping=(1, 1, 0))(inputLayer)
    lstm_model = Reshape(target_shape=(time, spectral))(lstm_model)
    lstm_model = BatchNormalization()(lstm_model)
    lstm_model = Bidirectional(CuDNNLSTM(256,
                                         return_sequences=True))(lstm_model)
    lstm_model = Flatten()(lstm_model)

    # Merge the two branches and classify
    conc_model = concatenate([lstm_model, cnn_model])
    conc_model = Dense(256, activation='relu')(conc_model)
    conc_model = Dropout(0.3)(conc_model)
    conc_model = Dense(64, activation='relu')(conc_model)
    conc_model = Dense(2, activation='sigmoid')(conc_model)

    conc_model = Model(inputLayer, conc_model)
    optimizer = optimizers.Nadam(lr=learning_rate)
    conc_model.compile(loss='binary_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])

    return conc_model
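
A hedged instantiation of this hybrid CNN/LSTM (the shape below is hypothetical): the two spatial dimensions must be 3x3 so that Cropping3D((1, 1, 0)) reduces them to the single centre pixel expected by the Reshape, and CuDNNLSTM requires a GPU.

model = new_instance(input_shape=(3, 3, 10, 6), learning_rate=1e-3)  # 3x3 patch, 10 time steps, 6 bands
model.summary()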
Code example #2
def get_rank_net(input_shape=32):
    # Two inputs: one vector regarded as more relevant, the other as less relevant
    input_relative = Input(shape=(input_shape, ), dtype='float32')
    input_unrelative = Input(shape=(input_shape, ), dtype='float32')

    # A scoring model shared by the two input vectors
    input_layer = Input(shape=(input_shape, ), dtype='float32')
    model = Dense(
        256,
        activation='relu',
    )(input_layer)
    model = Dense(128, activation='relu')(model)
    model = Dense(64, activation='relu')(model)
    model = Dense(32, activation='relu')(model)
    model = Dense(1)(model)
    score_model = Model(input_layer, model, name='score_model')

    # Score each input with the shared model
    out_relative = score_model(input_relative)
    out_unrelative = score_model(input_unrelative)

    # Difference of the two scores
    subtract = Subtract()([out_relative, out_unrelative])

    # Probability that the first input is more relevant than the second
    final_out = Activation('sigmoid')(subtract)

    # Assemble and compile the ranking model
    model = Model(inputs=[input_relative, input_unrelative], outputs=final_out)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])

    return model
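
Since the pairwise model always receives the more relevant vector first, every training target is 1. A minimal usage sketch with random data (the array names and sizes here are hypothetical):

import numpy as np

rank_net = get_rank_net(input_shape=32)
x_more = np.random.rand(1000, 32)   # vectors judged more relevant
x_less = np.random.rand(1000, 32)   # vectors judged less relevant
y = np.ones((1000, 1))              # the first input is more relevant by construction
rank_net.fit([x_more, x_less], y, epochs=5, batch_size=64)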
Code example #3
def build():
    # Load the MobileNet architecture from Keras;
    # include_top=False drops the ImageNet classifier so we can attach our own head
    base_model = keras.applications.mobilenet.MobileNet(include_top=False,
                                                        input_shape=(224, 224,
                                                                     3))
    model = base_model.output

    print("good loading of mobilent")

    # Add a custom head for finer-grained analysis
    model = GlobalAveragePooling2D()(model)
    model = Dense(1024, activation='relu')(model)
    model = Dense(1024, activation="relu")(model)
    model = Dense(512, activation="relu")(model)

    predictions = Dense(10, activation='softmax')(model)
    model = Model(base_model.input, predictions)

    #do not train the first 20 layers of the model
    for layer in model.layers[:20]:
        layer.trainable = False
    for layer in model.layers[20:]:
        layer.trainable = True

    model.summary()

    print("good adding")

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
Code example #4
def base_model():
    input_img = Input(shape=(83,))
    model = Dense(64, activation='relu')(input_img)
    model = Dense(32, activation='relu')(model)
    model = Dense(16, activation='relu')(model)
    model = Dense(1, activation='relu')(model)
    model = Model(input_img, model)
    # keras_loss is assumed to be an alias for keras.losses
    model.compile(loss=keras_loss.mse, optimizer='adam', metrics=['accuracy'])
    return model
Code example #5
def build_model():
  inputs = Input([13, ], name='data')

  model = Dense(2, activation='relu', name='fc1')(inputs)
  outputs = Dense(1, activation='linear', name='fc3')(model)

  model = Model(inputs=[inputs], outputs=outputs)
  model.compile(optimizer=Adam(lr=4e-5),  # Adam, a gradient-descent variant
                loss='mse',               # MSE = mean squared error
                metrics=['mae'])
  return model
Code example #6
File: MLPGan.py Project: jogonba2/DCGAN
def gan_model(latent_dim, discriminator):
	gan_inputs = Input(shape=(latent_dim,))
	gan = Dense(128)(gan_inputs)
	gan = lrelu()(gan)
	gan = Dense(128)(gan)
	gan = lrelu()(gan)
	gan_outputs = Dense(x_mnist.shape[1], activation="sigmoid")(gan)
	# Freeze the discriminator so only the generator half trains through this stacked model
	discriminator.trainable = False
	gan = discriminator(gan_outputs)
	gan = Model(inputs=gan_inputs, outputs=gan)
	gan.compile(loss='binary_crossentropy', optimizer="adam")
	return gan, gan_inputs, gan_outputs
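
The stacked model above is one half of a typical GAN loop: the discriminator, compiled on its own before being frozen, trains on real and generated batches, while the combined model updates only the generator. A minimal sketch under the snippet's assumptions (a separately compiled discriminator and the x_mnist array; the batch and latent sizes are hypothetical):

import numpy as np
from keras.models import Model

gan, gan_inputs, gan_outputs = gan_model(latent_dim=100, discriminator=discriminator)
generator = Model(gan_inputs, gan_outputs)  # view the generator half as its own model

batch_size = 64
for step in range(1000):
    noise = np.random.normal(size=(batch_size, 100))
    fake = generator.predict(noise)
    real = x_mnist[np.random.randint(0, x_mnist.shape[0], batch_size)]
    # The discriminator was compiled before being frozen, so these updates still apply
    discriminator.train_on_batch(real, np.ones((batch_size, 1)))
    discriminator.train_on_batch(fake, np.zeros((batch_size, 1)))
    # The stacked model only updates the generator (discriminator frozen at its compile time)
    gan.train_on_batch(noise, np.ones((batch_size, 1)))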
Code example #7
def create_unconditional_discriminator(**kwargs):
    #
    #  Parse settings
    #
    dropout = kwargs.get("dropout", -1.)
    leaky_relu = kwargs.get("leaky_relu", 0.2)
    num_categories = kwargs.get("num_categories")
    num_conditions = kwargs.get("num_conditions")
    num_observables = kwargs.get("num_observables")
    use_batch_norm = kwargs.get("batch_norm", False)
    verbose = kwargs.get("verbose", True)
    mid_layers = kwargs.get("mid_layers", (10, ))

    use_dropout = False
    if dropout > 0: use_dropout = True
    #
    #  Print stage
    #
    if verbose:
        print(
            f"Creating discriminator with {num_observables} observables and {num_conditions} conditions"
        )
    #
    #  Create input layers
    #
    data_input = Input((num_observables, ))
    #
    #  Create initially separate layers for the condition and data
    #
    discriminator = data_input
    for layer_size in mid_layers:
        discriminator = Dense(layer_size)(discriminator)
        discriminator = LeakyReLU(leaky_relu)(discriminator)
        if use_batch_norm: discriminator = BatchNormalization()(discriminator)
        if use_dropout: discriminator = Dropout(dropout)(discriminator)
    #
    #  Compile discriminator model
    #
    discriminator = Dense(num_categories, activation="sigmoid")(discriminator)
    discriminator = Model(name="Discriminator",
                          inputs=[data_input],
                          outputs=[discriminator])
    if num_categories == 1:
        discriminator.compile(loss="binary_crossentropy", optimizer=Adam())
    else:
        discriminator.compile(loss="categorical_crossentropy",
                              optimizer=Adam())
    if verbose: discriminator.summary()
    #
    #  return discriminator
    #
    return discriminator
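
An illustrative call to this factory (all of the settings below are made up for the example):

disc = create_unconditional_discriminator(num_observables=8,
                                          num_conditions=0,
                                          num_categories=1,
                                          mid_layers=(64, 32),
                                          dropout=0.3,
                                          batch_norm=True)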
Code example #8
def get_model():

    inp = Input(shape=(TF_IDF_FEATURES, ))
    model = Dense(1024, activation='relu')(inp)
    model = Dropout(0.8)(model)
    model = Dense(NUM_CLASS, activation="softmax")(model)
    model = Model(inputs=inp, outputs=model)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
Code example #9
def siamese_cnn_lstm(max_sent_len, wordvec_dim, gru_output_dim, output_dim):
    #input_layers = []
    gru_f, gru_b, gru_sent_fwd, gru_sent_bwd = [], [], [], []
    input_layers = [
        Input(shape=(max_sent_len, wordvec_dim)) for i in range(2)
    ]
    #input_2 = Input(shape = (max_sent_len, wordvec_dim))
    siamese_sub_parts = []
    for i in range(2):
        gru_sent_fwd.append(
            GRU(gru_output_dim,
                return_sequences=False,
                go_backwards=False,
                activation='tanh',
                recurrent_activation='hard_sigmoid',
                input_shape=(max_sent_len, wordvec_dim)))
        gru_sent_bwd.append(
            GRU(gru_output_dim,
                return_sequences=False,
                go_backwards=True,
                activation='tanh',
                recurrent_activation='hard_sigmoid',
                input_shape=(max_sent_len, wordvec_dim)))
        gru_f.append(gru_sent_fwd[i](input_layers[i]))
        gru_b.append(gru_sent_bwd[i](input_layers[i]))
        gru_f[i] = BatchNormalization()(gru_f[i])
        gru_f[i] = Activation('softmax')(gru_f[i])
        gru_b[i] = BatchNormalization()(gru_b[i])
        gru_b[i] = Activation('softmax')(gru_b[i])
        gru_merge = concatenate([gru_f[i], gru_b[i]], axis=-1)
        gru_merge = Dense(20)(gru_merge)
        #gru_merge = Dropout(0.25)(gru_merge)
        #gru_merge = Dense(100)(gru_merge)
        siamese_sub_parts.append(gru_merge)

    #distance = Lambda(euclidean_distance, output_shape = eucl_dist_output_shape)(siamese_sub_parts)
    #model = Dense(output_dim)(distance)

    model = concatenate(siamese_sub_parts, axis=-1)
    model = Dense(output_dim)(model)

    sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)  # defined but unused; compile() below uses 'adagrad'
    model = BatchNormalization()(model)
    out_layer = Activation('softmax')(model)
    model = Model(inputs=input_layers, outputs=out_layer)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adagrad',
                  metrics=['accuracy'])

    return model
Code example #10
def EnhancedHybridResSppNet(class_num, enhanced_class_num):
    _input = Input(shape=(None, None, 3))
    model = _input
    model = ZeroPadding2D((3, 3))(model)
    model = Conv2D(64, (7, 7), strides=(2, 2))(model)
    model = BatchNormalization(axis=3)(model)
    model = Activation('relu')(model)
    model = MaxPooling2D((3, 3), strides=(2, 2))(model)

    model = conv_block(model,
                       3, [64, 64, 256],
                       stage=2,
                       block='a',
                       strides=(1, 1))
    model = identity_block(model, 3, [64, 64, 256], stage=2, block='b')
    model = identity_block(model, 3, [64, 64, 256], stage=2, block='c')

    model = conv_block(model, 3, [128, 128, 512], stage=3, block='a')
    model = identity_block(model, 3, [128, 128, 512], stage=3, block='b')
    model = identity_block(model, 3, [128, 128, 512], stage=3, block='c')
    model = identity_block(model, 3, [128, 128, 512], stage=3, block='d')

    model = MaxPooling2D((2, 2))(model)

    model = SpatialPyramidPooling([1, 2, 4])(model)

    model1 = Dense(units=class_num)(model)
    model1 = Activation(activation="softmax")(model1)
    model1 = Model(_input, model1)
    model1.compile(loss="categorical_crossentropy",
                   optimizer=RMSprop(lr=1e-4, decay=1e-6),
                   metrics=['accuracy'])

    model2 = Dense(units=enhanced_class_num)(model)
    model2 = Activation(activation="softmax")(model2)
    model2 = Model(_input, model2)
    model2.compile(loss="categorical_crossentropy",
                   optimizer=RMSprop(lr=1e-4, decay=1e-6),
                   metrics=['accuracy'])

    input2 = Input(shape=(100, ))

    model3 = Concatenate()([input2, model])  # Concatenate is a layer: instantiate it, then call it on the list
    model3 = Dense(units=class_num)(model3)
    model3 = Activation(activation="softmax")(model3)
    model3 = Model(inputs=[_input, input2], outputs=model3)
    model3.compile(loss="categorical_crossentropy",
                   optimizer=RMSprop(lr=1e-4, decay=1e-6),
                   metrics=['accuracy'])

    return model1, model2, model3
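
Note that the three returned models share one convolutional trunk (and the conv_block, identity_block, and SpatialPyramidPooling helpers defined elsewhere in the source project), so fitting any one of them also updates the features the others see; only the Dense heads differ. An illustrative call (the class counts are hypothetical):

model1, model2, model3 = EnhancedHybridResSppNet(class_num=10, enhanced_class_num=20)
# model3 additionally expects a 100-dimensional auxiliary vector per sample:
# model3.fit([images, aux_vectors], labels, ...)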
Code example #11
    def train(self):
        batch_size = 32
        mobilenet = MobileNet(weights='imagenet', include_top=False)
        new_model = mobilenet.output
        new_model = GlobalAveragePooling2D()(new_model)
        new_model = Dense(128, activation='relu',
                          name='relu_layer_3')(new_model)
        new_model = Dropout(0.25)(new_model)
        predictions = Dense(2, activation='softmax', name='output')(new_model)
        new_model = Model(inputs=mobilenet.input, outputs=predictions)
        # Freeze all pre-trained MobileNet layers (the first 87); train only the new head
        for layer in new_model.layers[:87]:
            layer.trainable = False
        for layer in new_model.layers[87:]:
            layer.trainable = True

        logdir = "logs/scalars/train/" + datetime.now().strftime(
            "%Y%m%d-%H%M%S")
        tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

        train_data_gen = ImageDataGenerator(validation_split=0.2)
        train_gen = train_data_gen.flow_from_directory(
            './train',
            target_size=(224, 224),
            color_mode='rgb',
            batch_size=32,
            class_mode='categorical',
            shuffle=True,
            subset='training')
        validation_gen = train_data_gen.flow_from_directory(
            './train',
            target_size=(224, 224),
            color_mode='rgb',
            batch_size=32,
            class_mode='categorical',
            shuffle=True,
            subset='validation')

        new_model.compile(optimizer='Adam',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print(new_model.summary())
        new_model.fit_generator(
            generator=train_gen,
            steps_per_epoch=train_gen.samples // batch_size,
            validation_data=validation_gen,
            validation_steps=validation_gen.samples // batch_size,
            epochs=10,
            callbacks=[tensorboard_callback])
        new_model.save_weights('my_weights')
        new_model.save('my_model')
Code example #12
def counter_model(x_train_all, x_val_all, y_train_genotype, y_val_genotype,
                  results_path):

    res_model = ResNet50(weights='imagenet',
                         include_top=False,
                         input_shape=(321, 321, 3))
    model_res = res_model.output
    model_flat = Flatten(name='flatten')(model_res)
    model = Dense(1024,
                  activation='relu',
                  activity_regularizer=regularizers.l2(0.10))(model_flat)
    model_genotype = Dense(5, activation='softmax')(model)

    # A second, standalone CNN branch; note it is built on its own Input and
    # is not part of the model compiled below
    input = Input(shape=(128, 128, 3), name='input')
    model = Conv2D(32, 5, strides=(1, 1), padding="same",
                   activation="relu")(input)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = Conv2D(64, 5, strides=(1, 1), padding="same",
                   activation="relu")(model)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = Conv2D(64, 5, strides=(1, 1), padding="same",
                   activation="relu")(model)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = Conv2D(64, 5, strides=(1, 1), padding="same",
                   activation="relu")(model)
    model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model)
    model = Dense(4096, activation='relu')(model)
    model = Dropout(0.5)(model)
    model = Dense(4096, activation='relu')(model)
    model = Dropout(0.5)(model)
    model_genotype_cnn = Dense(5, activation='softmax')(model)  # head of the unused CNN branch

    epoch = 50
    csv_logger = keras.callbacks.CSVLogger('training.log', separator=',')
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.03,
                               mode='min',
                               patience=8)

    # Build the trainable model from the ResNet branch; model_genotype must come
    # from res_model's graph, otherwise Keras raises a disconnected-graph error
    model = Model(inputs=res_model.input, outputs=model_genotype)
    model.compile(optimizer=Adam(lr=0.0001),
                  loss='categorical_crossentropy',
                  metrics=[metrics.categorical_accuracy])
    fitted_model = model.fit(x_train_all,
                             y_train_genotype,
                             epochs=epoch,
                             validation_data=(x_val_all, y_val_genotype),
                             batch_size=16,
                             callbacks=[csv_logger, early_stop])  # early_stop was defined but never passed

    return model
Code example #13
def create_simple_model(**kwargs):
    #
    #  Parse settings
    #
    dropout = kwargs.get("dropout", -1.)
    leaky_relu = kwargs.get("leaky_relu", 0.2)
    sigmoid = kwargs.get("sigmoid", False)
    name = kwargs.get("name", "model")
    num_outputs = kwargs.get("num_outputs")
    num_observables = kwargs.get("num_observables")
    use_batch_norm = kwargs.get("batch_norm", False)
    verbose = kwargs.get("verbose", True)

    use_dropout = False
    if dropout > 0: use_dropout = True

    layers = kwargs.get("data_layers", (10, ))
    #
    #  Print stage
    #
    if verbose: print(f"Creating model with {num_observables} observables")
    #
    #  Create input layers
    #
    data_input = Input((num_observables, ))
    #
    #  Create initially separate layers for the condition and data
    #
    model = data_input
    for layer_size in layers:
        if sigmoid:
            model = Dense(layer_size, activation="sigmoid")(model)
        else:
            model = Dense(layer_size)(model)
            model = LeakyReLU(leaky_relu)(model)
        if use_batch_norm: model = BatchNormalization()(model)
        if use_dropout: model = Dropout(dropout)(model)

    #
    #  Compile model
    #
    model = Dense(num_outputs, activation="linear")(model)
    model = Model(name=name, inputs=[data_input], outputs=[model])
    model.compile(loss="mse", optimizer=Adam())
    if verbose: model.summary()
    #
    #  return model
    #
    return model
Code example #14
def get_model_1():
    inputs = Input(shape=(6912, ))
    model = Dense(3951, activation='relu')(inputs)
    model = Dropout(0.5)(model)
    model = Dense(1024, activation='relu')(model)
    model = Dropout(0.3)(model)
    model = Dense(512, activation='relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(64, activation='relu')(model)
    model = Dropout(0.2)(model)
    predictions = Dense(4, activation='softmax')(model)
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Code example #15
def build_model():
  # Input layer
  inputs = Input([4, ], name='data')
  # Hidden layers
  model = Dense(6, activation='relu', name='fc1')(inputs)
  model = Dense(6, activation='relu', name='fc2')(model)
  # Output layer
  outputs = Dense(3, activation='softmax', name='fc3')(model)

  # Define the model
  model = Model(inputs=[inputs], outputs=outputs)

  model.compile(optimizer='adam',  # alternatively: Adam(lr=1e-5)
                loss='categorical_crossentropy',
                metrics=['accuracy'])
  return model
Code example #16
def build_model():
    # Input layer
    inputs = Input([2], name='data')
    # Hidden layers
    model = Dense(128, activation='relu')(inputs)
    # Output layer
    outputs = Dense(2, activation='softmax', name='fc3')(model)

    # Define the model
    model = Model(inputs=[inputs], outputs=outputs)

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Code example #17
def cnn_rnn():
    cnn_input_shape = (128, 1)
    cnn_inp = Input(shape=cnn_input_shape, dtype='float32', name='cnn')
    # Two stacked convolution + pooling blocks
    c = Conv1D(nb_filters, kernel_size=kernel_size, padding='same',
               strides=1)(cnn_inp)
    c = MaxPooling1D()(c)
    c = Conv1D(nb_filters, kernel_size=kernel_size, padding='same',
               strides=1)(c)
    c = MaxPooling1D()(c)
    c = Flatten()(c)
    c = Dense(32,
              activation='relu',
              kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(c)
    c = Dense(32,
              activation='relu',
              kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(c)
    rnn_inp = Input(shape=(128, ))
    r = Embedding(257, 16, input_length=128)(rnn_inp)
    r = SimpleRNN(128, return_sequences=True)(r)
    r = SimpleRNN(128)(r)
    # r=LSTM(128,return_sequences=True)(r)
    # r=LSTM(128)(r)
    r = Dense(32,
              activation='relu',
              kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(r)
    # r=Dense(32,activation='relu', kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(r)
    cr_inp = concatenate([c, r])
    # Attention mechanism: learn softmax weights over the concatenated features
    attention_probs = Dense(64, activation='softmax',
                            name="attention_probs")(cr_inp)
    cr_inp = Multiply()([cr_inp, attention_probs])
    # Concatenate the wide and deep features; the wide features connect directly to the output nodes
    cr = Dense(256, activation='relu')(cr_inp)
    cr = Dense(128, activation='relu')(cr)
    cr_out = Dense(2, activation='softmax', name='cnn_rnn')(cr)
    # Model entry and exit points
    cr = Model(inputs=[cnn_inp, rnn_inp], outputs=cr_out)
    cr.compile(optimizer=Adam(lr=0.0001),
               loss="categorical_crossentropy",
               metrics=["accuracy"])
    # The wide-and-deep model is trained on the input data that follows
    print(cr.summary())
    return cr
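
Because the network has two Input layers, fit expects one array per input. A hedged sketch with random data (the shapes follow the snippet; nb_filters, kernel_size, and the names below are assumptions):

import numpy as np

cr = cnn_rnn()
x_cnn = np.random.rand(256, 128, 1)            # raw signal for the Conv1D branch
x_rnn = np.random.randint(0, 257, (256, 128))  # token ids for the Embedding/RNN branch
y = np.eye(2)[np.random.randint(0, 2, 256)]    # one-hot labels
cr.fit([x_cnn, x_rnn], y, epochs=3, batch_size=32)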
Code example #18
def build_model():
    # Input layer
    inputs = Input([13], name='data')
    # Hidden layers
    model = Dense(32, activation='relu', name='fc1')(inputs)
    model = Dense(32, activation='relu', name='fc2')(model)
    # Output layer
    outputs = Dense(1, activation='linear', name='fc3')(model)

    # Define the model
    model = Model(inputs=[inputs], outputs=outputs)

    model.compile(
        optimizer=Adam(lr=4e-5),  # Adam, a gradient-descent variant
        loss='mse',               # MSE = mean squared error
        metrics=['mae'])          # MAE = mean absolute error
    return model
Code example #19
def build_model():
  inputs = Input([13, ], name='data')

  # Notice how regularization can be applied per layer rather than to the entire network
  model = Dense(512, activation='relu', name='fc1')(inputs)
  model = Dense(512, activation='relu', name='fc2')(model)
  model = Dropout(0.1)(model)
  model = Dense(512, activation='relu', name='fc3')(model)
  model = Dense(512, activation='relu', name='fc4')(model)
  model = Dropout(0.1)(model)
  model = Dense(512, activation='relu', name='fc5')(model)
  model = Dropout(0.1)(model)
  model = Dense(512, activation='relu', name='fc6',
                kernel_regularizer=keras.regularizers.l1_l2(l1=0.5, l2=0.01))(model)
  model = Dense(512, activation='relu', name='fc7',
                kernel_regularizer=keras.regularizers.l1_l2(l1=0.05, l2=0.01))(model)

  outputs = Dense(1, activation='linear', name='fc8')(model)

  model = Model(inputs=[inputs], outputs=outputs)
  model.compile(optimizer=Adam(lr=4e-5),  # Adam, a gradient-descent variant
                loss='mse',               # MSE = mean squared error
                metrics=['mae'])
  return model
Code example #20
def define_GCP_model(num_classes):
    # NOTE: shape (1, 250, 250) and Concatenate(axis=1) assume a channels-first
    # image_data_format; with Keras' default channels-last this would be
    # (250, 250, 1) and axis=-1
    input = Input(shape=(1, 250, 250))
    # Replicate the single grey channel three times to satisfy InceptionV3's RGB input
    concat_input = Concatenate(axis=1)([input, input, input])
    inception_net = InceptionV3(input_tensor=concat_input,
                                weights='imagenet',
                                classes=num_classes,
                                include_top=False)
    layers = inception_net.layers
    # freeze the first 150 pre-trained layers
    for layer in layers[0:150]:
        layer.trainable = False
    dropout = .50
    #add additional layers
    model = inception_net.output
    model = GlobalAveragePooling2D()(model)
    model = Dense(100, kernel_regularizer=regularizers.l2(0.1))(model)
    model = BatchNormalization()(model)
    model = Activation(activation='relu')(model)
    model = Dropout(dropout, seed=7)(model)

    model = Dense(100, kernel_regularizer=regularizers.l2(0.1))(model)
    model = BatchNormalization()(model)
    model = Activation(activation='relu')(model)
    model = Dropout(dropout, seed=7)(model)

    model = Dense(100, kernel_regularizer=regularizers.l2(0.1))(model)
    model = BatchNormalization()(model)
    model = Activation(activation='relu')(model)
    model = Dropout(dropout, seed=7)(model)

    preds = Dense(num_classes, activation='softmax')(model)
    model = Model(inputs=input, outputs=preds)
    sgd = optimizers.SGD(lr=0.0025, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    #model.summary()
    return model
Code example #21
def get_model_2():
    """This is regularised"""
    inputs = Input(shape=(6912, ))
    model = Dense(3951, activation='relu')(inputs)
    model = Dropout(0.5)(model)
    model = Dense(1024,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(0.01))(model)
    model = Dropout(0.3)(model)
    model = Dense(512,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(0.01))(model)
    model = Dropout(0.3)(model)
    model = Dense(64,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(0.01))(model)
    model = Dropout(0.3)(model)
    predictions = Dense(4, activation='softmax')(model)
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Code example #22
def make_neural_network(input_dim):
    input_layer = Input(shape=(input_dim, ))
    # input_dim is redundant here; the shape is already set by input_layer
    neural_network = Dense(64, activation="relu")(input_layer)
    neural_network = BatchNormalization()(neural_network)

    neural_network = Dense(64, activation="relu")(neural_network)
    neural_network = Dropout(.2)(neural_network)
    neural_network = BatchNormalization()(neural_network)

    neural_network = Dense(512, activation="relu")(neural_network)
    neural_network = Dropout(.2)(neural_network)
    neural_network = BatchNormalization()(neural_network)

    neural_network = Dense(512, activation="relu")(neural_network)
    neural_network = Dropout(.2)(neural_network)
    neural_network = BatchNormalization()(neural_network)

    neural_network = Dense(1024, activation="relu")(neural_network)
    neural_network = Dropout(.2)(neural_network)
    neural_network = BatchNormalization()(neural_network)

    output_layer = Dense(1, activation="sigmoid")(neural_network)

    neural_network = Model(inputs=input_layer, outputs=output_layer)

    optimizer = optimizers.Adam(lr=0.003,
                                beta_1=0.9,
                                beta_2=0.999,
                                epsilon=None,
                                decay=0.0,
                                amsgrad=False)

    neural_network.compile(loss="binary_crossentropy",
                           optimizer=optimizer,
                           metrics=["accuracy"])
    print(neural_network.summary())
    return neural_network
Code example #23
def Fit_transfer_learning_AECNN(model_latent, model_feature, gen_train,
                                gen_valid):

    combined = Concatenate()([model_latent.output, model_feature.output])
    model = Dense(2048, activation="relu")(combined)
    model = Dense(1024, activation="relu")(model)
    model = Dropout(0.5)(model)
    out = Dense(7, activation="softmax")(model)

    model = Model(inputs=[model_latent.input, model_feature.input],
                  outputs=out)
    model.summary()

    model.compile(loss="binary_crossentropy",
                  metrics=['acc'],
                  optimizer=Adam(1e-5))

    earlyStop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                 patience=3,
                                                 verbose=1)
    #checkpoint = tf.keras.callbacks.ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [earlyStop]

    H = model.fit_generator(gen_train,
                            steps_per_epoch=100,
                            epochs=100,
                            validation_data=gen_valid,
                            validation_steps=50,
                            shuffle=False,
                            callbacks=callbacks_list)  # callbacks_list was built but never passed
    #plot_model(model, show_shapes=True, show_layer_names=True)
    """
    loss, acc = model.evaluate_generator(generator=datagen.flow([vect1_train, vect2_train], [vect1_valid, vect2_valid]))
    """
    #loss, acc = model.predict([vect1_valid, vect2_valid])
    print("Accuracy: ", H.history)

    return model
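
fit_generator above expects gen_train and gen_valid to yield ([latent_batch, feature_batch], label_batch) tuples, one array per model input. A hedged sketch of a compatible generator (the name and shapes are hypothetical):

import numpy as np

def make_pair_generator(x_latent, x_feature, y, batch_size=32):
    # Yield one batch per model input plus the matching labels, forever
    n = len(y)
    while True:
        idx = np.random.randint(0, n, batch_size)
        yield [x_latent[idx], x_feature[idx]], y[idx]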
Code example #24
# The start of this snippet is truncated in the source; a training-set
# augmenter presumably began here (train_datagen is an assumed name, and only
# fill_mode survives from the original call)
train_datagen = ImageDataGenerator(fill_mode='nearest')

test_datagen = ImageDataGenerator(rescale=1. / 255, )

# flow_from_directory must be called on a generator instance, not the class
training_set = train_datagen.flow_from_directory(directory='train',
                                                 target_size=[224, 224],
                                                 class_mode='categorical',
                                                 batch_size=32)

testing_set = test_datagen.flow_from_directory(directory='test',
                                               target_size=[224, 224],
                                               class_mode='categorical',
                                               batch_size=32)

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.SGD(lr=0.0001, momentum=0.9),  # lr takes a float, not a list
              metrics=['accuracy'])

# model.fit returns a History object
vgg19_history = model.fit(training_set,
                          validation_data=testing_set,
                          epochs=30,
                          verbose=1,
                          steps_per_epoch=len(training_set),
                          validation_steps=len(testing_set))

plt.plot(vgg19_history.history['accuracy'])
plt.plot(vgg19_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['Accuracy', 'Validation Accuracy'])  # only the two plotted series
Code example #25
File: main.py Project: BeamScout/beamscout
adam = Adam(lr=0.0002,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-8,
            decay=0.0,
            amsgrad=False)
callbacks_list = [
    callbacks.TensorBoard(log_dir='./logs',
                          write_graph=True,
                          write_images=False)
]
# TensorFlow 1.x tracing options for profiling the model run
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
model.compile(loss='mse',
              optimizer=adam,
              options=run_options,
              run_metadata=run_metadata)

tick = time.time()
for i in range(0, 10):
    #for a in [0,45]:

    a = random.randint(0, int(len(data_list_all_) / 512 - 1))
    data_list_all = data_list_all_[a * 512:a * 512 + 512]

    # Unpack (data, label) pairs; 'pair' avoids shadowing the loop index i
    data_list_all_data = np.array([pair[0]
                                   for pair in data_list_all]).astype(np.float64)
    data_list_all_label = np.array([pair[1]
                                    for pair in data_list_all]).astype(np.float64)

    data_list = data_list_[a * 384:a * 384 + 384]
Code example #26
# The start of this snippet is truncated in the source; the backbone was
# presumably constructed along these lines (VGG16 is an assumption consistent
# with the 'block5_pool' layer used below)
vgg = VGG16(include_top=False,
            weights='imagenet')

partial_vgg = vgg.get_layer('block5_pool').output
model = GlobalMaxPooling2D()(partial_vgg)
model = Dense(len(classes), activation='sigmoid')(model)  # NB: sigmoid with the categorical_crossentropy below is unusual; softmax is the standard pairing

model = Model(inputs=[vgg.input], outputs=model)

for l in model.layers[:-4]:
    try:
        l.trainable = False
    except Exception:
        pass

model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])

check = ModelCheckpoint("weights.{epoch:02d}-{val_loss:.5f}.hdf5",
                        monitor='val_loss',
                        verbose=0,
                        save_best_only=True,
                        save_weights_only=False,
                        mode='auto')
early = EarlyStopping(monitor='val_loss', patience=20, verbose=0, mode='auto')

print(model.summary())

training_generator = ImageDataGenerator(featurewise_center=False,
                                        samplewise_center=False,
                                        featurewise_std_normalization=False,
Code example #27
File: spring_model.py Project: rainiethai/mlp
# model_input is defined earlier in the source file (truncated away here)
model = Dense(500, kernel_initializer='normal', activation='relu')(model_input)
model = Dense(400, kernel_initializer='normal', activation='relu')(model)
model = Dense(300, kernel_initializer='normal', activation='relu')(model)
model = Dense(200, kernel_initializer='normal', activation='relu')(model)
model = Dense(100, kernel_initializer='normal', activation='relu')(model)
output_reshape = Dense(6, kernel_initializer='normal', activation='linear', name='output')(model)

model = Model(inputs=model_input, outputs=output_reshape)

model.summary()
# Open the file
with open(os.path.join(run_name, 'report.txt'), 'w') as fh:
    # Pass the file handle in as a lambda function to make it callable
    model.summary(print_fn=lambda x: fh.write(x + '\n'))

model.compile(loss='mse', optimizer='adam', metrics=['mae', 'mse'])

"""**Training the Model**"""

# Load the best saved weights if training was already done, then continue training
if use_bestWeight:
    filename = '13_val_mae:0.0553.hdf5'
    model = load_model(os.path.join(run_name, filename))
    model.fit_generator(generator=training_generator, validation_data=validation_generator, use_multiprocessing=True, workers=2, epochs=40 , verbose=1, callbacks=[callback, reduce_lr, tensorboard, early_stop])

"""# **Prediction**

## **Running Prediction**
"""

model.evaluate_generator(testing_generator, workers=2, use_multiprocessing=True, verbose=1, callbacks=None)
Code example #28
kernel_size = 8

dnn_inp = Input(shape=(18, ))
d = Dense(32, activation='relu', kernel_regularizer=l1_l2(l1=0.01,
                                                          l2=0.01))(dnn_inp)
# Chain from d (not dnn_inp), so the first Dense layer is actually used
d = Dense(32, activation='relu', kernel_regularizer=l1_l2(l1=0.01,
                                                          l2=0.01))(d)
d = Dense(32, activation='relu')(d)
d_out = Dense(config.HTTPS_CONFIG["num_class"],
              activation='softmax',
              name='dnn_cnn_rnn')(d)

# Model entry and exit points
d = Model(inputs=[dnn_inp], outputs=d_out)
d.compile(optimizer=Adam(lr=0.01),
          loss="categorical_crossentropy",
          metrics=["accuracy"])
# Train the wide-and-deep model on the data below
print(d.summary())

X_tr = [X_train_dnn]
Y_tr = y_train_dnn
# Test set
X_te = [X_test_dnn]
Y_te = y_test_dnn
d.fit(X_tr, Y_tr, epochs=100, batch_size=128)

results = d.evaluate(X_te, Y_te)
print("\n", results)
predicts = d.predict(X_te)
y_pre = [np.argmax(i) for i in predicts]
Code example #29
output3 = Dense(31)(model1)
output3_1 = Dense(3)(output3)
output4 = Dense(32)(model1)
output4_1 = Dense(3)(output4)

model1 = Model(inputs=[input1, input2], outputs=[output3_1, output4_1])

model1.summary()

# 3. Training
# Note: 'acc' is a classification metric, so it can report 1.0 even when error remains
model1.compile(loss='mse', optimizer='adam', metrics=['mse'])
model1.fit([x1_train, x2_train],
           [y1_train, y2_train],
           epochs=100, batch_size=1, validation_split=0.25, verbose=1)

# With epochs=2000 the loss decreased and then rose again partway through. Why? Overfitting.

x_test = [x1_test, x2_test]
y_test = [y1_test, y2_test]

# 4. Evaluation and prediction
loss_tot, loss1, mse1, loss2, mse2 = model1.evaluate(x_test, y_test, batch_size=1)
Code example #30
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Conv2D, Flatten, Input
from keras.constraints import unit_norm
from keras.optimizers import Adam

input_shape = 70*13
print(input_shape)

"""MultiLayer Perceprton implementation"""

model_dense_input = Input(shape=(input_shape,))
# input_dim is redundant in the functional API; the shape comes from the Input layer
model_dense = Dense(units=128, activation='relu',
                    kernel_constraint=unit_norm())(model_dense_input)
model_dense = Dropout(0.5)(model_dense)
model_dense = Dense(units=128, activation='relu')(model_dense)
model_dense = Dense(units=64, activation='relu')(model_dense)
model_dense = Dense(units=10, activation='softmax')(model_dense)
model_dense = Model(inputs=model_dense_input, outputs=model_dense)

model_dense.summary()

adam = Adam(lr=0.001)
model_dense.compile(loss='categorical_crossentropy',
                    optimizer=adam, metrics=['accuracy'])
model_dense.save('model_dense_untrained.h5')
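
To exercise the saved MLP, it could be trained on 70*13-dimensional feature vectors with one-hot labels; a hedged sketch with random data (the sample count and saved filename are hypothetical):

import numpy as np

x = np.random.rand(512, input_shape).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, 512)]  # one-hot targets for the 10-way softmax
model_dense.fit(x, y, epochs=5, batch_size=32, validation_split=0.1)
model_dense.save('model_dense_trained.h5')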