Example #1
import numpy as np
import keras
from keras import backend as K
from keras import regularizers
from keras.applications.resnet50 import ResNet50
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Flatten
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator


def counter_model_augmentation(results_path, data, missing_labels):
    # Unpack train/test images and their leaf-count labels
    x_train, x_test, y_train_count, y_test_count = data

    mask_value = -1

    TYC = len(y_train_count)
    # Hide a random fraction of the training labels; sampling without
    # replacement masks exactly int(TYC * missing_labels) entries
    idx_mask = np.random.choice(TYC, size=int(TYC * missing_labels), replace=False)
    y_train_count[idx_mask] = mask_value
    where_miss = np.where(y_train_count == mask_value)
    np.savetxt(results_path+'/missing_labels.csv', where_miss[0], delimiter=',')
    np.savetxt(results_path+'/train_labels.csv', y_train_count, delimiter=',')
    print('Missing Labels  ', where_miss[0])

    def MSE_masked_loss(y_true, y_pred):
        # Zero out the squared error wherever the label equals mask_value,
        # so examples with missing labels contribute nothing to the loss
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        return K.mean(K.square(y_pred * mask - y_true * mask), axis=-1)

    def mse_discrete_accuracy(y_true, y_pred):
        # MSE after rounding predictions to the nearest integer count
        return K.mean(K.square(K.round(y_pred) - y_true), axis=-1)

    x_aug = ImageDataGenerator(
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
        )

    x_aug.fit(x_train)

    # ResNet50 backbone (ImageNet weights) with a small regression head
    res_model = ResNet50(weights='imagenet', include_top=False, input_shape=(317, 309, 3))
    model = res_model.output
    model = Flatten(name='flatten')(model)
    model = Dense(1536, activation='relu', name='count_dense1')(model)
    model = Dense(512, activation='relu', activity_regularizer=regularizers.l2(0.04), name='count_dense2')(model)
    leaf_pred = Dense(1, name='count')(model)

    epoch = 100
    steps = int(len(x_train)/3)
    csv_logger = keras.callbacks.CSVLogger(results_path+'/training.log', separator=',')
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.05, mode='min', patience=12)
    checkpoint = ModelCheckpoint(results_path+'/checkpoint.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')

    model = Model(inputs=res_model.input, outputs=leaf_pred)
    model.compile(optimizer=Adam(lr=0.0001), loss=MSE_masked_loss,
                  metrics={'count': mse_discrete_accuracy})

    fitted_model = model.fit_generator(x_aug.flow(x_train, y_train_count, batch_size=6),
                                       steps_per_epoch=steps,
                                       epochs=epoch,
                                       validation_data=(x_test, y_test_count),
                                       callbacks=[csv_logger, checkpoint, early_stop])

    model.save(results_path+'/the_model.h5')

    return model
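
Because the model above is compiled with a custom loss and metric, reloading the saved HDF5 file later requires passing both functions in explicitly. A minimal sketch, assuming MSE_masked_loss and mse_discrete_accuracy are in scope where the file is reloaded:

from keras.models import load_model

# Custom losses/metrics cannot be deserialized from the file by name alone
model = load_model(results_path + '/the_model.h5',
                   custom_objects={'MSE_masked_loss': MSE_masked_loss,
                                   'mse_discrete_accuracy': mse_discrete_accuracy})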
Example #2
model = Model(
    base_model.input, model
)  # Take the base model's input and attach the new classifier layers on top

# Freeze the first 16 layers so their weights are not updated during training
for layer in model.layers[:16]:
    layer.trainable = False

# Set the loss and optimizer (compile must come after changing trainable flags)
model.compile(loss='categorical_crossentropy', optimizer='adam')
print("Model Created")

tfBoard = TensorBoard(log_dir="./logs")

X, y = load_data_full("./data", numClasses)
#Data augmentation to get more photos from existing photos
datagen = ImageDataGenerator(rotation_range=50,
                             horizontal_flip=True,
                             shear_range=0.2,
                             fill_mode='nearest')
datagen.fit(X)

print("Starting Training")
model.fit_generator(datagen.flow(X, y, batch_size=3),
                    steps_per_epoch=len(X) // 3,
                    epochs=20,
                    callbacks=[tfBoard])
print("Saving Model")
model.save("model.h5")
Example #3
# (The generator's Input layer and first blocks precede this excerpt)
generator = UpSampling2D(3)(generator)
generator = Conv2D(30, 5, padding="same", activation="relu")(generator)
generator = Dropout(.25)(generator)
generator = UpSampling2D(2)(generator)
generator = Conv2D(20, 7, padding="same", activation="relu")(generator)
generator = Dropout(.25)(generator)
generator = Conv2D(4, 4, padding="same", activation="sigmoid")(generator)
generator = Model(inputLayer, generator)
generator.compile(loss='binary_crossentropy', optimizer="adam")

print(generator.summary())
print("generator constructed...")
generator.save("generator")
print("generator saved.")

inputLayer = Input(shape=(96, 96, 4))

discriminator = Conv2D(8, 5, padding="same", activation="relu")(inputLayer)
discriminator = Flatten()(discriminator)
discriminator = Dense(32, activation="relu")(discriminator)
discriminator = Dropout(.25)(discriminator)
discriminator = Dense(1, activation='sigmoid')(discriminator)

discriminator = Model(inputLayer, discriminator)
discriminator.compile(loss='binary_crossentropy', optimizer="adam")

print(discriminator.summary())
print("discriminator constructed...")
discriminator.save("discriminator")
print("discriminator saved")
Example #4
output_label = Dense(1, activation='sigmoid')(model)
model = Model(inputs=[question1, question2], outputs=output_label)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

callbacks = [
    ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_acc', save_best_only=True)
]
history = model.fit([q1_trainset, q2_trainset],
                    Y_train,
                    epochs=NB_EPOCHS,
                    validation_split=VALIDATION_SPLIT,
                    verbose=2,
                    batch_size=BATCH_SIZE,
                    callbacks=callbacks)

import matplotlib.pyplot as plt

# Plot training vs. validation accuracy curves
plt.figure()
plt.plot(history.history['acc'], label='train')
plt.plot(history.history['val_acc'], label='validation')
plt.legend()

# Reload the best weights written by ModelCheckpoint before evaluating;
# saving afterwards keeps the full model with those best weights
model.load_weights(MODEL_WEIGHTS_FILE)
model.save(MODEL_WEIGHTS_FILE)
loss, accuracy = model.evaluate([q1_testset, q2_testset], Y_test, verbose=0)

print('loss = {0:.4f}, accuracy = {1:.4f}'.format(loss, accuracy))
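
Note that MODEL_WEIGHTS_FILE actually holds a full model (architecture, weights, and optimizer state), since both ModelCheckpoint's default mode and model.save write the complete model. For a weights-only file, a sketch (the filename here is illustrative):

# save_weights writes a smaller, weights-only HDF5 file; reloading it
# requires a model with the same architecture already built
model.save_weights('question_pairs_weights.h5')
model.load_weights('question_pairs_weights.h5')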
Example #5
for epoch in range(0, epochs):

    data_batch, label_batch = dt.get_batch(images, labels, batch_size)
    for ind in range(0, len(data_batch)):
        rotation = angles[dt.np.random.randint(len(angles))]
        data_batch[ind] = dt.rotate_image(data_batch[ind],
                                          rotation,
                                          reshape_bool=False)
        label_batch[ind] = dt.rotate_label(label_batch[ind], rotation)
    model.train_on_batch(data_batch, label_batch)
    print('Epoch: %d' % epoch)
    # Show sample predictions ten times over the course of training
    if (epoch + 1) % max(epochs // 10, 1) == 0:
        #tb,tl = dt.get_batch(test_images, test_labels, 8)
        results = model.predict(validate_data[:8])
        dt.create_image_display(validate_data[:8], results, None, False, 2, 4)
        #accuracy = model.test_on_batch(test_images, test_labels, batch_size=batch_size)
        #print('Training Progress: Trained on %d images with Accuracy %d' % (epoch*batch_size, accuracy[0]))

model.save('modelD')

#==========================================
# Validate Model
#==========================================

a, b = dt.get_batch(images, labels, 8)
c, d = dt.get_batch(dt.rotate_images(a, 180, False), dt.rotate_labels(b, 180),
                    None, True)
results = model.predict(a)
results2 = model.predict(c)
dt.create_image_display(a, results, b, False, 2, 4)
dt.create_image_display(c, results2, d, False, 2, 4)
Example #6
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Conv2D, Flatten
from keras.optimizers import Adam

"""implementing Convolutional Neural Network"""

model_conv_input = Input(shape=(90, 13, 1))
model_conv = Conv2D(filters=20, kernel_size=(10,10), strides=(6,6),
                    padding='same', activation='sigmoid')(model_conv_input)
# model_conv = Conv2D(filters=16, kernel_size=(5,5), strides=(4,4),
#                     padding='same', activation='sigmoid')(model_conv)
model_conv = Flatten()(model_conv)
model_conv = Dense(units=128, activation='relu')(model_conv)
model_conv = Dense(units=64, activation='relu')(model_conv)
model_conv = Dropout(0.3)(model_conv)
model_conv = Dense(units=10, activation='softmax')(model_conv)
model_conv = Model(inputs=model_conv_input, outputs=model_conv)

model_conv.summary()

adam = Adam(lr=0.001)
model_conv.compile(loss='categorical_crossentropy',
                   optimizer=adam, metrics=['accuracy'])

model_conv.save('model_conv_untrained.h5')
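
Saving the compiled but untrained model captures its random initialization, so repeated experiments can start from identical weights. Reloading it is a one-liner:

from keras.models import load_model

# Restores the architecture, initial weights, and compiled optimizer state
model_conv = load_model('model_conv_untrained.h5')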
Example #7
def counter_model(x_train_all, x_val_all, y_train_all, y_val_all):
    # NOTE: results_path is expected to be defined in the enclosing scope
    x_aug = ImageDataGenerator(
        rotation_range=180,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
    )

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    x_aug.fit(x_train_all)

    res_model = ResNet50(weights='imagenet',
                         include_top=False,
                         input_shape=(320, 320, 3))
    model = res_model.output
    model = Flatten(name='flatten')(model)
    model = Dense(1024, activation='relu')(model)
    model = Dense(512,
                  activation='relu',
                  activity_regularizer=regularizers.l2(0.02))(model)
    leaf_pred = Dense(1)(model)

    eps = 50
    csv_logger = keras.callbacks.CSVLogger(results_path + '/training.log',
                                           separator=',')
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.03,
                               mode='min',
                               patience=8)

    model = Model(inputs=res_model.input, outputs=leaf_pred)

    model.compile(optimizer=Adam(lr=0.0001), loss='mse')
    #fitted_model = model.fit(x_train_all, y_train_all, epochs=eps, batch_size=16, validation_split=0.1, callbacks= [csv_logger])
    fitted_model = model.fit_generator(x_aug.flow(x_train_all,
                                                  y_train_all,
                                                  batch_size=6),
                                       steps_per_epoch=812,
                                       epochs=eps,
                                       validation_data=(x_val_all, y_val_all),
                                       callbacks=[csv_logger, early_stop])
    ## Saving model parameters
    # Persist the architecture as JSON next to the full model snapshot
    # (the .json filename here is illustrative)
    model_json = model.to_json()
    with open(results_path + '/the_model.json', 'w') as f:
        f.write(model_json)
    model.save(results_path + '/the_model.h5')

    # #Plotting Loss
    # plt.title('Leaf Counting Loss')
    # plt.plot(range(1,eps+1), fitted_model.history['loss'], label='Training', color='k')
    # plt.plot(range(1,eps+1), fitted_model.history['val_loss'], label='Validation', color='r')
    # plt.xticks(range(1,eps+1))
    # plt.xlabel('Epochs')
    # plt.ylabel('Loss')
    # plt.legend(loc='best')
    # plt.savefig(results_path+'/counter_network_train.png')

    return model
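
A minimal usage sketch (the array names are assumptions, and results_path must be defined before the call, since the function reads it from the enclosing scope):

# Train the counter, then round its regression output to integer leaf counts
counter = counter_model(x_train_all, x_val_all, y_train_all, y_val_all)
predicted_counts = counter.predict(x_val_all).round().astype(int)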