Пример #1
0
def counter_model_augmentation(results_path, data, missing_labels):
    """Train a ResNet50-based leaf-count regressor with augmentation,
    masking a fraction of the training labels to simulate missing counts.

    Parameters
    ----------
    results_path : str
        Directory where CSVs, logs, the checkpoint and the final model are written.
    data : sequence
        (x_train, x_test, y_train_count, y_test_count).
    missing_labels : float
        Fraction in [0, 1] of training labels to replace with the mask value.

    Returns
    -------
    The trained Keras model (also saved to results_path/the_model.h5).
    """
    x_train = data[0]
    x_test = data[1]
    y_train_count = data[2]
    y_test_count = data[3]

    # Sentinel marking a "missing" label; the masked loss ignores these entries.
    mask_value = -1

    TYC = len(y_train_count)
    # Draw indices WITHOUT replacement so exactly the requested fraction of
    # labels is masked (np.random.randint can repeat indices and mask fewer).
    idx_mask = np.random.choice(TYC, size=int(TYC * missing_labels),
                                replace=False)
    y_train_count[idx_mask] = mask_value
    where_miss = np.where(y_train_count == mask_value)
    np.savetxt(results_path+'/missing_labels.csv', where_miss[0], delimiter=',')
    np.savetxt(results_path+'/train_labels.csv', y_train_count, delimiter=',')
    print('Missing Labels  ', where_miss[0])

    def MSE_masked_loss(y_true, y_pred):
        # Zero both tensors wherever the label equals the mask sentinel so
        # masked samples contribute nothing to the squared error.
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        return K.mean(K.square(y_pred*mask - y_true*mask), axis=-1)

    def mse_discrete_accuracy(y_true, y_pred):
        # MSE after rounding predictions to the nearest integer leaf count.
        return K.mean(K.square(K.round(y_pred) - y_true), axis=-1)

    # Augmentation: small shifts plus both flips (no rotation in this variant).
    x_aug = ImageDataGenerator(
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
        )
    x_aug.fit(x_train)

    # ResNet50 backbone with a small regression head on top.
    res_model = ResNet50(weights='imagenet', include_top=False, input_shape=(317,309, 3))
    model = res_model.output
    model = Flatten(name='flatten')(model)
    model = Dense(1536, activation='relu', name='count_dense1')(model)
    model = Dense(512, activation='relu', activity_regularizer=regularizers.l2(0.04), name='count_dense2')(model)
    leaf_pred = Dense(1, name='count')(model)

    epoch = 100
    steps = int(len(x_train)/3)
    csv_logger = keras.callbacks.CSVLogger(results_path+'/training.log', separator=',')
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.05, mode='min', patience=12)
    checkpoint = ModelCheckpoint(results_path+'/checkpoint.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')

    model = Model(inputs = res_model.input, outputs = leaf_pred)
    model.compile(optimizer=Adam(lr=0.0001), loss= MSE_masked_loss, metrics={'count': mse_discrete_accuracy})

    model.fit_generator(x_aug.flow(x_train, y_train_count, batch_size=6), steps_per_epoch=steps,
                        epochs=epoch, validation_data=(x_test, y_test_count), callbacks= [csv_logger, checkpoint, early_stop])

    model.save(results_path+'/the_model.h5')

    return model
def counter_model_augmentation(x_train_all, x_val_all, y_train_all, y_val_all):
    """Train a ResNet50-based leaf-count regressor on augmented images.

    The backbone is ImageNet-pretrained ResNet50 (top removed); a small
    dense head regresses a single count. Training logs go to 'training.log'
    and the fitted model is returned.
    """
    # Heavy augmentation: full rotation range, small shifts, both flips.
    augmenter = ImageDataGenerator(
        rotation_range=180,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
    )
    augmenter.fit(x_train_all)

    # Backbone + regression head.
    backbone = ResNet50(weights='imagenet',
                        include_top=False,
                        input_shape=(320, 320, 3))
    head = Flatten(name='flatten')(backbone.output)
    head = Dense(1024, activation='relu')(head)
    head = Dense(512,
                 activation='relu',
                 activity_regularizer=regularizers.l2(0.2))(head)
    leaf_pred = Dense(1)(head)

    model = Model(inputs=backbone.input, outputs=leaf_pred)
    model.compile(optimizer=Adam(lr=0.0001), loss='mse')

    # Early stopping on validation loss; history logged to CSV.
    training_callbacks = [
        keras.callbacks.CSVLogger('training.log', separator=','),
        EarlyStopping(monitor='val_loss',
                      min_delta=0.03,
                      mode='min',
                      patience=8),
    ]

    model.fit_generator(augmenter.flow(x_train_all,
                                       y_train_all,
                                       batch_size=6),
                        steps_per_epoch=len(x_train_all) * 2,
                        epochs=50,
                        validation_data=(x_val_all, y_val_all),
                        callbacks=training_callbacks)

    return model
    '''
Пример #3
0
from keras.preprocessing.image import ImageDataGenerator

# Rescale pixel values from [0, 255] to [0, 1].
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20

train_generator = datagen.flow_from_directory(
    train_directory,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)  # was shuffle="True": a non-empty string, truthy by accident

test_datagen = ImageDataGenerator(rescale = 1./255)
test_set = test_datagen.flow_from_directory(test_directory,
                                            target_size = (224, 224),
                                            batch_size = batch_size,
                                            shuffle = False,
                                            class_mode = 'categorical')

validation_generator = test_datagen.flow_from_directory(validation_directory,
                                                        target_size = (224,224),
                                                        batch_size = batch_size,
                                                        shuffle = False,
                                                        class_mode = "categorical")


# Keras 2 API: samples_per_epoch/nb_val_samples were Keras 1 names counting
# SAMPLES; steps_per_epoch/validation_steps count BATCHES, so divide by
# batch_size to keep the same ~1000 train / ~100 validation samples per epoch.
model.fit_generator(
    train_generator,
    steps_per_epoch = 1000 // batch_size,
    epochs = 5,
    validation_data = validation_generator,
    validation_steps = 100 // batch_size)
Пример #4
0
    model)  #This is for the final layer of size number of classes

model = Model(
    base_model.input, model
)  # Full model: the base model's input feeding the custom head built above

# Freeze the first 16 layers so their pretrained weights are not updated
for i in model.layers[:16]:
    i.trainable = False

# Compile with fixed hyperparameters (no metrics are tracked here)
model.compile(loss='categorical_crossentropy', optimizer='adam')
print("Model Created")

# TensorBoard callback writing training logs under ./logs
tfBoard = TensorBoard(log_dir="./logs")

X, y = load_data_full("./data", numClasses)
# Data augmentation to get more photos from the existing photos
datagen = ImageDataGenerator(rotation_range=50,
                             horizontal_flip=True,
                             shear_range=0.2,
                             fill_mode='nearest')
datagen.fit(X)

print("Starting Training")
# steps_per_epoch = len(X)/3 with batch_size=3 is roughly one full pass over
# X per epoch. NOTE(review): len(X) / 3 is a float; Keras accepts it but an
# integer division would be cleaner — confirm before changing.
model.fit_generator(datagen.flow(X, y, batch_size=3),
                    steps_per_epoch=len(X) / 3,
                    epochs=20,
                    callbacks=[tfBoard])
print("Saving Model")
model.save("model.h5")
Пример #5
0
def vgg16(classes, epochs, steps_per_epoch, validation_steps, input_shape):
    """Train a VGG16 transfer-learning classifier.

    Parameters
    ----------
    classes : int
        Number of output units; 1 selects sigmoid + binary crossentropy,
        anything else softmax + categorical crossentropy.
    epochs, steps_per_epoch, validation_steps : int
        Passed straight through to fit_generator.
    input_shape : tuple
        Spatial size (H, W); the channel dimension (3,) is appended here.

    Side effects: per-epoch checkpoints under ../weights/, a CSV training
    log under ../logs/. Returns None.
    """
    # Load the train/validation batch generators
    train_batches, valid_batches = load_data(input_shape)

    input_shape += (3, )

    model_vgg = keras.applications.vgg16.VGG16(include_top=False,
                                               weights='imagenet',
                                               input_shape=input_shape)

    # Freeze the convolutional base so only the new head is trained
    for layer in model_vgg.layers:
        layer.trainable = False
    model = Flatten(name='flatten')(model_vgg.output)  # drop the original FC top
    model = Dense(256, activation='relu', name='fc1')(model)
    model = Dense(256, activation='relu', name='fc2')(model)
    model = Dropout(0.5)(model)

    # Pick the head/loss per task; the optimizer and compile call are shared.
    if classes == 1:
        print("二元分类")
        activation, loss = 'sigmoid', 'binary_crossentropy'
    else:
        print("多分类")
        activation, loss = 'softmax', 'categorical_crossentropy'
    model = Dense(classes, activation=activation)(model)  # final prediction layer
    model = Model(inputs=model_vgg.input, outputs=model, name='vgg16')
    ada = Adam(lr=0.0001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.0,
               amsgrad=False)
    model.compile(loss=loss, optimizer=ada, metrics=['accuracy'])

    # Save weights every epoch (save_best_only=False keeps them all)
    out_dir = "../weights/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    filepath = "../weights/vgg16_{epoch:04d}.h5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=0,
                                 save_best_only=False,
                                 mode='max')
    # Halve the learning rate when val_loss plateaus
    lr_reduce = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=5,
                                  verbose=1,
                                  min_lr=0.00000001,
                                  mode="min")
    # Stop early once val_loss stops improving
    earlystopping = EarlyStopping(monitor='val_loss',
                                  patience=15,
                                  verbose=1,
                                  mode='min')
    # CSV log of the per-epoch training history
    log_dir = "../logs/"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logfile = "../logs/vgg16.csv"
    log = keras.callbacks.CSVLogger(logfile, separator=',', append=False)

    callbacks_list = [checkpoint, lr_reduce, log, earlystopping]
    model.fit_generator(train_batches,
                        steps_per_epoch=steps_per_epoch,
                        validation_data=valid_batches,
                        validation_steps=validation_steps,
                        epochs=epochs,
                        verbose=2,
                        callbacks=callbacks_list,
                        workers=16,
                        max_queue_size=20)
Пример #6
0
# Two-class softmax head on top of the (frozen) Inception feature extractor.
model = model_inception(Input_layer)

model = Flatten()(model)
#model = Dense(32)(model)
model = Dense(2)(model)
model = Activation('softmax')(model)
model = Model(Input_layer, model)
# Freeze the pretrained base; only the new head is trained.
for layer in model_inception.layers:
    layer.trainable = False
early_stop = EarlyStopping(monitor='val_loss', patience=3, verbose=1)
# NOTE(review): checkpoint filename says "MobileNet" but the base is an
# Inception model — confirm the intended name.
best_model = ModelCheckpoint('MobileNetModel_4.h5',
                             verbose=1,
                             save_best_only=True)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.summary()
# Pin training to the XLA GPU device; TensorBoard logs + checkpointing +
# early stopping as callbacks.
with tensorflow.device('/device:XLA_GPU:0'):
    train_log = model.fit_generator(
        train_generator,
        validation_data=val_generator,
        steps_per_epoch=train_generator.n / batch_size,
        validation_steps=val_generator.n / batch_size,
        epochs=EPOCH,
        callbacks=[
            TensorBoard(log_dir='mytensorboard2'), best_model, early_stop
        ])
loss, acc = model.evaluate_generator(test_generator, steps=32)
print('Test result:loss:%f,acc:%f' % (loss, acc))
A = model.predict_generator(test_generator, steps=16)
Пример #7
0
# Rescale pixel values from [0, 255] to [0, 1] for the test stream.
test_datagen = ImageDataGenerator(rescale=1. / 255)

training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')

test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')

from keras import callbacks
from keras import metrics

# (Removed: `metrics = ['accuracy', 'val_acc']` — it shadowed the `metrics`
# module imported above and was never used; compile() passes its own list.)

# Stop after 6 epochs without val_loss improvement; checkpoint on val_loss.
es = callbacks.EarlyStopping(monitor='val_loss', patience=6)
mc = callbacks.ModelCheckpoint('best_model-1.h5', monitor='val_loss')
callback_list = [es, mc]
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# NOTE(review): steps_per_epoch=8000 with batch_size=32 draws 256k images per
# epoch; if the dataset holds 8000 images this should likely be 8000 // 32.
model.fit_generator(training_set,
                    steps_per_epoch=8000,
                    epochs=2,
                    validation_data=test_set,
                    validation_steps=2000,
                    verbose=1,
                    callbacks=callback_list)
# Dense head: 64-unit ReLU layer with L2 activity regularization, then a
# single sigmoid unit for binary (junk / no-junk) classification.
model = Dense(64, activation = 'relu', kernel_initializer = "he_normal",
        activity_regularizer = l2(0.001))(model)

model = Dense(1, activation = 'sigmoid',  kernel_initializer = "he_normal")(model)
model = Model(input_image, model)
model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics = ['accuracy'])  

## we will have to train in batches because the whole thing won't fit in memory
datagen = ImageDataGenerator(featurewise_center=True, rescale = 1./255 )
# 256x256 grayscale tiles, binary labels from the directory structure
training = datagen.flow_from_directory("sampled_tiles/junk_no_junk/training/",
        class_mode="binary", batch_size=32, target_size=(256, 256), color_mode = "grayscale" )

valid = datagen.flow_from_directory("sampled_tiles/junk_no_junk/validation", 
        class_mode="binary", batch_size=32, target_size=(256, 256), color_mode = "grayscale")

history = model.fit_generator(training, epochs = 15, validation_data = valid,
        validation_steps = 800)

# Persist the architecture (weights are not saved here)
with open("models/junk_no_junk_10_29_256px_1024_56k_subset.json", 'w') as out:
    out.write(model.to_json())

# Pull one validation batch and audit predictions visually: save each tile
# with its true class and predicted score embedded in the filename.
next_batch = valid.next()
b1 = next_batch[0]
b1_pred = model.predict_on_batch(b1)
for q, i in enumerate(b1):  # enumerate replaces the manual q counter
    # Undo the 1/255 rescale and drop the channel axis for an 8-bit image
    img = np.uint8(np.multiply(np.asarray(i), 255))
    img = img.reshape(256, 256)
    Image.fromarray(np.asarray(img)).save("junk_test/test_" + str(q) + "_class_" + 
                        str(next_batch[1][q]) + "_label_" + str(b1_pred[q][0]) +".png")
Пример #9
0
def counter_model(x_train_all, x_val_all, y_train_all, y_val_all,
                  results_path='.', steps_per_epoch=812):
    """Train a ResNet50-based leaf-count regressor with augmentation.

    Parameters
    ----------
    x_train_all, y_train_all
        Training images (expected 320x320x3) and their leaf counts.
    x_val_all, y_val_all
        Validation images and counts (used as-is, no augmentation).
    results_path : str, optional
        Directory for training.log and the_model.h5. The original code read
        an undefined free variable ``results_path`` (NameError unless a
        module-level global existed); it is now an explicit parameter.
    steps_per_epoch : int, optional
        Augmented batches drawn per epoch (previously hard-coded to 812).

    Returns
    -------
    The trained Keras model (also saved to results_path/the_model.h5).
    """
    # Heavy augmentation: full rotation range, small shifts, both flips.
    x_aug = ImageDataGenerator(
        rotation_range=180,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=True,
    )

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    x_aug.fit(x_train_all)

    # ResNet50 backbone with a small regression head on top.
    res_model = ResNet50(weights='imagenet',
                         include_top=False,
                         input_shape=(320, 320, 3))
    model = res_model.output
    model = Flatten(name='flatten')(model)
    model = Dense(1024, activation='relu')(model)
    model = Dense(512,
                  activation='relu',
                  activity_regularizer=regularizers.l2(0.02))(model)
    leaf_pred = Dense(1)(model)

    eps = 50
    csv_logger = keras.callbacks.CSVLogger(results_path + '/training.log',
                                           separator=',')
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.03,
                               mode='min',
                               patience=8)

    model = Model(inputs=res_model.input, outputs=leaf_pred)

    model.compile(optimizer=Adam(lr=0.0001), loss='mse')
    model.fit_generator(x_aug.flow(x_train_all,
                                   y_train_all,
                                   batch_size=6),
                        steps_per_epoch=steps_per_epoch,
                        epochs=eps,
                        validation_data=(x_val_all, y_val_all),
                        callbacks=[csv_logger, early_stop])

    model.save(results_path + '/the_model.h5')

    return model
# This generator reads pictures found in subfolders of 'data/train' and
# indefinitely yields batches of augmented image data.
train_generator = train_datagen.flow(x_train, y_train, batch_size=batch_size)

# A similar generator for the validation data.
validation_generator = test_datagen.flow(x_val, y_val, batch_size=batch_size)

#callbacks_list = [keras.callbacks.EarlyStopping(monitor='val_acc', patience=5, verbose=0)]
#callbacks_list = [keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0)]

# ~2000 training / ~800 validation samples per epoch, expressed in batches.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=2000 // batch_size,
    epochs=15,
    validation_data=validation_generator,
    validation_steps=800 // batch_size,
    #callbacks=callbacks_list
    #callbacks=[learning_rate_reduction]
)

# Learning curves: loss...
plt.figure(1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.title('Learning curve for the training')

# ...and accuracy.
# NOTE(review): these keys are 'acc'/'val_acc' (Keras 1.x / early 2.x); newer
# Keras records 'accuracy'/'val_accuracy' — confirm the version in use.
plt.figure(2)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['acc', 'val_acc'], loc='upper right')