# Imports assumed for this snippet (standalone Keras; use tensorflow.keras if preferred):
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation

y = np.array(labels)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(y)

onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
df_y = onehot_encoder.fit_transform(integer_encoded)

df_x = np.array(df_x)
df_y = np.array(df_y)

x_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size=0.25, random_state=4)

#***TO DO: add more layers (conv2d, maxpooling and dense, dropout), test different activations, test different optimizers, change stride length
#Create 2D CNN model architecture (use channels for depth)
model = Sequential()
model.add(Conv2D(filters=8, kernel_size=(8, 8), activation='relu', strides=(1,1), input_shape=(155, 240, 240), padding="same"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=16, kernel_size=(8, 8), activation='relu', strides=(1,1)))
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.summary()

model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5, verbose=1)
model.save('mri_model.h5')
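
# Quick sanity check (sketch, not from the original source): reload the saved
# model and evaluate it on the held-out split created above; assumes x_test and
# y_test are still in scope.
from keras.models import load_model

reloaded = load_model('mri_model.h5')
test_loss, test_acc = reloaded.evaluate(x_test, y_test, verbose=0)
print('test loss: %.4f, test accuracy: %.4f' % (test_loss, test_acc))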
Example #2
# Imports assumed for this snippet (GP and its Model wrapper are taken from the keras-gp / kgp package):
from keras.layers import Input, Conv1D, MaxPooling1D, GRU, Dense, Flatten
from kgp.layers import GP
from kgp.models import Model


def create_model(horizon=1,
                 nb_train_samples=512,
                 batch_size=32,
                 feature_count=11):

    x = Input(shape=(6, feature_count), name="input_layer")
    conv = Conv1D(kernel_size=3, filters=5, activation='relu',
                  dilation_rate=1)(x)
    conv2 = Conv1D(5,
                   kernel_size=3,
                   padding='causal',
                   strides=1,
                   activation='relu',
                   dilation_rate=2)(conv)
    conv3 = Conv1D(5,
                   kernel_size=3,
                   padding='causal',
                   strides=1,
                   activation='relu',
                   dilation_rate=4)(conv2)
    mp = MaxPooling1D(pool_size=2)(conv3)
    # conv2 = Conv1D(filters=5, kernel_size=3, activation='relu')(mp)
    # mp = MaxPooling1D(pool_size=2)(conv2)

    lstm1 = GRU(16, return_sequences=True)(mp)
    lstm2 = GRU(32, return_sequences=True)(lstm1)

    shared_dense = Dense(64, name="shared_layer")(lstm2)
    shared_dense = Flatten()(shared_dense)
    sub1 = Dense(16, name="task1")(shared_dense)
    # sub2 = Dense(16, name="task2")(shared_dense)
    # sub3 = Dense(16, name="task3")(shared_dense)

    # sub1 = GRU(units=16, name="task1")(shared_dense)
    # sub2 = GRU(units=16, name="task2")(shared_dense)
    # sub3 = GRU(units=16, name="task3")(shared_dense)

    # out1_gp = Dense(1, name="out1_gp")(sub1)
    out1 = Dense(1, name="out1")(sub1)
    # out2 = Dense(1, name="out2")(sub2)
    # out3 = Dense(1, name="out3")(sub3)
    # Gaussian process settings
    gp_hypers = {'lik': -2.0, 'cov': [[-0.7], [0.0]]}
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {
            'cg_maxit': 500,
            'cg_tol': 1e-4
        },
        'grid_kwargs': {
            'eq': 1,
            'k': 1e2
        },
        'update_grid': True,
    }
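    # NOTE: gp_params above is defined but never used; only gp_hypers,
    # batch_size and nb_train_samples are passed to the GP layer below.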
    gp1 = GP(gp_hypers,
             batch_size=batch_size,
             nb_train_samples=nb_train_samples)
    # gp2 = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)
    # gp3 = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)

    outputs = [gp1(out1)]

    model = Model(inputs=x, outputs=outputs)

    model.compile(optimizer='adam', loss='mse', metrics=['mae', 'mape', 'mse'])
    # Callbacks
    # callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    model.summary()

    return model
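
# Rough check (sketch, not from the original source): combined receptive field of
# the three stacked kernel-size-3 Conv1D layers with dilation rates 1, 2 and 4.
# Each layer adds (kernel_size - 1) * dilation_rate time steps to the field.
def _dilated_receptive_field(kernel_size=3, dilations=(1, 2, 4)):
    return 1 + sum((kernel_size - 1) * d for d in dilations)

# 1 + 2*1 + 2*2 + 2*4 = 15 steps, wider than the 6-step input window,
# so the deepest convolution already sees the whole sequence.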
Example #3
# Imports assumed for this snippet (KerasModel is taken to be keras' Model, aliased in the original project):
from keras.layers import (Input, Conv1D, MaxPooling1D, GRU, Dense, Reshape,
                          Concatenate, Flatten)
from keras.models import Model as KerasModel


def create_model_mtl_mtv_exchange_rate(horizon=1,
                                       nb_train_samples=512,
                                       batch_size=32,
                                       feature_count=6,
                                       lag_time=6,
                                       aux_feature_count=8):

    x = Input(shape=(lag_time, feature_count), name="input_layer")
    conv = Conv1D(filters=5, kernel_size=1, activation='relu')(x)
    # conv = Conv1D(filters=5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=2)(x)
    conv = Conv1D(filters=5,
                  kernel_size=3,
                  padding='causal',
                  strides=1,
                  activation='relu',
                  dilation_rate=4)(conv)

    mp = MaxPooling1D(pool_size=1)(conv)
    # conv2 = Conv1D(filters=5, kernel_size=3, activation='relu')(mp)
    # mp = MaxPooling1D(pool_size=2)(conv2)

    lstm1 = GRU(8, return_sequences=True)(mp)
    lstm2 = GRU(16, return_sequences=True)(lstm1)
    # lstm2 = GRU(16, return_sequences=True)(mp)

    shared_dense = Dense(32, name="shared_layer")(lstm2)

    ## sub1 is the main task; its GRU units must equal lag_time * aux_feature_count
    ## so that its output can be reshaped to (lag_time, aux_feature_count) below
    sub1 = GRU(units=(lag_time * aux_feature_count),
               name="task1")(shared_dense)
    sub2 = GRU(units=16, name="task2")(shared_dense)
    sub3 = GRU(units=16, name="task3")(shared_dense)
    sub4 = GRU(units=16, name="task4")(shared_dense)
    sub5 = GRU(units=16, name="task5")(shared_dense)

    sub1 = Reshape((lag_time, aux_feature_count))(sub1)
    auxiliary_input = Input(shape=(lag_time, aux_feature_count),
                            name='aux_input')

    concate = Concatenate(axis=-1)([sub1, auxiliary_input])
    # out1_gp = Dense(1, name="out1_gp")(sub1)
    out1 = Dense(8, name="spec_out1")(concate)
    out1 = Flatten()(out1)
    out1 = Dense(1, name="out1")(out1)

    out2 = Dense(8, name="spec_out2")(sub2)
    out2 = Dense(1, name="out2")(out2)

    out3 = Dense(1, name="spec_out3")(sub3)
    out3 = Dense(1, name="out3")(out3)

    out4 = Dense(1, name="spec_out4")(sub4)
    out4 = Dense(1, name="out4")(out4)

    out5 = Dense(1, name="spec_out5")(sub5)
    out5 = Dense(1, name="out5")(out5)

    outputs = [out1, out2, out3, out4, out5]
    # outputs = [out1, out2, out3, out4]

    model = KerasModel(inputs=[x, auxiliary_input], outputs=outputs)

    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=['mae', 'mape', 'mse'],
                  loss_weights=[1, 0.00, 0.00, 0.00, 0.00])
    # Callbacks
    # callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    model.summary()

    return model
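
# Usage sketch (assumed shapes, random data): the model takes two inputs of shape
# (lag_time, feature_count) and (lag_time, aux_feature_count) and returns five
# outputs, of which only out1 contributes to the loss (see loss_weights above).
import numpy as np

mtl_model = create_model_mtl_mtv_exchange_rate()
n = 64
x_main = np.random.rand(n, 6, 6).astype('float32')   # (samples, lag_time, feature_count)
x_aux = np.random.rand(n, 6, 8).astype('float32')    # (samples, lag_time, aux_feature_count)
targets = [np.random.rand(n, 1).astype('float32') for _ in range(5)]
mtl_model.fit([x_main, x_aux], targets, batch_size=32, epochs=1, verbose=0)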
Example #4
# NOTE: the opening of this ImageDataGenerator call was cut off in the snippet;
# only the last two augmentation arguments survive.
trainAug = ImageDataGenerator(horizontal_flip=True,
                              fill_mode="nearest")

valAug = ImageDataGenerator()

mean = np.array([123.68, 116.779, 103.939], dtype="float32")
trainAug.mean = mean
valAug.mean = mean

baseModel = ResNet50(weights="imagenet",
                     include_top=False,
                     input_tensor=Input(shape=(224, 224, 3)))

headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(512, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(len(lb.classes_), activation="softmax")(headModel)

model = Model(inputs=baseModel.input, outputs=headModel)

for layer in baseModel.layers:
    layer.trainable = False

print("[INFO] compiling model...")
opt = SGD(lr=1e-4, momentum=0.9, decay=1e-4 / args["epochs"])
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])
Example #5
    print('Discriminator Model')

    d_input_img = Input(shape=X_train.shape[1:])
    """
    d_input_label2 = Dense(28*28,activation='relu')(d_input_label)
    d_input_label2 = Reshape(X_train.shape[1:])(d_input_label2)
    d_input = concatenate([d_input_img, d_input_label2])
    """

    discriminator_ = Conv2D(64, (5, 5), padding='same',
                            strides=(2, 2))(d_input_img)  #initialization
    discriminator_ = LeakyReLU(0.2)(discriminator_)
    discriminator_ = Conv2D(128, (5, 5), padding='same',
                            strides=(2, 2))(discriminator_)
    discriminator_ = LeakyReLU(0.2)(discriminator_)
    discriminator_ = Flatten()(discriminator_)

    d_input_label = Input(shape=[10])
    discriminator_ = concatenate([discriminator_, d_input_label])
    discriminator_ = Dense(1)(discriminator_)
    discriminator_ = Activation('sigmoid')(discriminator_)
    discriminator = Model(inputs=[d_input_img, d_input_label],
                          outputs=discriminator_)
    discriminator.compile(loss='binary_crossentropy',
                          optimizer=adam,
                          metrics=['accuracy'])

    discriminator.trainable = False

    print('CGAN model')
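    # Sketch of the step this print announces (assumes a `generator` model that maps
    # [noise, label] -> image was built earlier in this function; names are hypothetical):
    # noise = Input(shape=(latent_dim,))
    # label = Input(shape=[10])
    # fake_img = generator([noise, label])
    # validity = discriminator([fake_img, label])   # discriminator is frozen here
    # cgan = Model(inputs=[noise, label], outputs=validity)
    # cgan.compile(loss='binary_crossentropy', optimizer=adam)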
Example #6
print('Y_train shape:', Y_train.shape)

X_train, Y_train = shuffle(X_train, Y_train)

image_input = Input(shape=(224, 224, 3))
model = VGG16(include_top=False,
              weights='imagenet',
              input_tensor=image_input,
              pooling=None,
              classes=nb_classes)

for layer in model.layers[:9]:
    layer.trainable = False

output = model.output
output = Flatten()(output)
output = Dense(1024, activation="relu")(output)
predictions = Dense(10, activation="softmax")(output)

model_final = Model(inputs=model.input, outputs=predictions)
#keras.utils.multi_gpu_model(model, gpus=2, cpu_merge=False, cpu_relocation=False)
#opt = keras.optimizers.Adamax(lr=0.001, beta_1=0.9, beta_2=0.99, decay=1e-6)# best one
opt = optimizers.SGD(lr=0.0001, momentum=0.9)

model_final.compile(loss='categorical_crossentropy',
                    optimizer=opt,
                    metrics=['accuracy'])


def train():
    model_final.fit(X_train, Y_train, batch_size=32, epochs=20, shuffle=True)
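
# Usage sketch (hypothetical names): X_test / Y_test are assumed to exist alongside
# X_train / Y_train above, and the file name is illustrative only.
train()
loss, acc = model_final.evaluate(X_test, Y_test, verbose=0)
print('validation accuracy: %.4f' % acc)
model_final.save('vgg16_finetuned.h5')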
Example #7
    # Two more convolutional layers with kernel size (3,3) and filter 64
    x = Conv2D(filters=64,
               kernel_size=(3, 3),
               padding='valid',
               activation='elu',
               kernel_regularizer=l2(0.001))(x)
    x = Conv2D(filters=64,
               kernel_size=(3, 3),
               padding='valid',
               activation='elu',
               kernel_regularizer=l2(0.001))(x)
    x = Dropout(0.5)(x)

    # Add a flatten layer
    x = Flatten()(x)

    # -- Start of the comma.ai model
    # three convolutional layers (note that this branch also starts with the normalised image)
    y = Conv2D(filters=16,
               kernel_size=(8, 8),
               strides=(4, 4),
               padding='same',
               activation='elu')(nimg)
    y = Conv2D(filters=32,
               kernel_size=(5, 5),
               strides=(2, 2),
               padding='same',
               activation='elu')(y)
    y = Conv2D(filters=64,
               kernel_size=(5, 5),
               strides=(2, 2),
               padding='same',
               activation='elu')(y)  # closing arguments assumed, following the two comma.ai-style layers above

# Imports assumed for this snippet (standalone Keras; use tensorflow.keras if preferred):
from keras.models import Sequential
from keras.layers import (ZeroPadding2D, Conv2D, MaxPooling2D, Dropout,
                          Flatten, Dense, Activation)


def custom_model_hand():
    '''
    USER CODE STARTS HERE
    '''
    image_model = Sequential()
    image_model.add(ZeroPadding2D((2, 2), input_shape=(50, 50, 1)))

    #54x54 fed in due to zero padding
    image_model.add(Conv2D(8, (5, 5), activation='relu', name='conv1_1'))
    image_model.add(ZeroPadding2D((2, 2)))
    image_model.add(Conv2D(8, (5, 5), activation='relu', name='conv1_2'))

    image_model.add(MaxPooling2D((2, 2),
                                 strides=(2, 2)))  #convert 50x50 to 25x25

    #25x25 fed in
    image_model.add(ZeroPadding2D((2, 2)))
    image_model.add(Conv2D(16, (5, 5), activation='relu', name='conv2_1'))
    image_model.add(ZeroPadding2D((2, 2)))
    image_model.add(Conv2D(16, (5, 5), activation='relu', name='conv2_2'))

    image_model.add(MaxPooling2D((5, 5),
                                 strides=(5, 5)))  #convert 25x25 to 5x5

    #5x5 fed in
    image_model.add(ZeroPadding2D((2, 2)))
    image_model.add(Conv2D(40, (5, 5), activation='relu', name='conv3_1'))
    image_model.add(ZeroPadding2D((2, 2)))
    image_model.add(Conv2D(32, (5, 5), activation='relu', name='conv3_2'))

    image_model.add(Dropout(0.2))

    image_model.add(Flatten())

    image_model.add(Dense(512))
    image_model.add(Activation('tanh'))
    image_model.add(Dropout(0.2))

    image_model.add(Dense(512))
    image_model.add(Activation('tanh'))
    image_model.add(Dropout(0.15))

    image_model.add(Dense(512))
    image_model.add(Activation('tanh'))
    image_model.add(Dropout(0.1))

    image_model.add(Dense(512))
    image_model.add(Activation('tanh'))

    image_model.add(Dense(512))
    image_model.add(Activation('tanh'))

    image_model.add(Dense(512))
    image_model.add(Activation('tanh'))

    image_model.add(Dense(512))
    image_model.add(Activation('tanh'))

    image_model.add(Dense(512))
    image_model.add(Activation('tanh'))

    image_model.add(Dense(5))
    image_model.add(Activation('sigmoid'))

    return image_model
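
# Usage sketch: the network expects 50x50 single-channel images and emits five
# independent sigmoid outputs, so a multi-label loss such as binary_crossentropy
# is a natural (assumed) choice; optimizer is likewise an assumption.
hand_model = custom_model_hand()
hand_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
hand_model.summary()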