Example #1
    model_path = os.path.join(save_dir, model_name)  # assumed -- the excerpt begins mid-function
    weight_path = os.path.join(save_dir, weight_name)

    # x_train, y_train, x_test, y_test = image_crop.read_data()
    # x_train_resized = image_crop.resize_imgs(x_train)
    # y = to_categorical(y_train, num_classes=34)

    # Note: `pooling` only takes effect when include_top=False, so it is
    # ignored here with include_top=True.
    model = MobileNet(include_top=True,
                      weights=None,
                      classes=8,
                      pooling='max',
                      input_shape=(150, 150, 3))
    # Save a checkpoint after every epoch, tagged with epoch number and loss.
    checkpoint = ModelCheckpoint(
        filepath=os.path.join(save_dir,
                              'MobileNet_weight.{epoch:02d}-{loss:.2f}.hdf5'),
        verbose=1)

    opt = RMSprop(lr=2e-5)
    model.compile(optimizer=opt,
                  loss=losses.categorical_crossentropy,
                  metrics=[metrics.categorical_accuracy])
    # model.fit(x_train_resized, y, epochs=10, batch_size=36, callbacks=[checkpoint])
    model.fit_generator(train_generator,
                        steps_per_epoch=300,
                        epochs=10,
                        validation_data=validation_generator,
                        validation_steps=100,
                        callbacks=[checkpoint])

    model.save(model_path)
    model.save_weights(weight_path)
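
This excerpt assumes `train_generator` and `validation_generator` are defined outside the excerpt. A minimal sketch of how they might be built with Keras' `ImageDataGenerator.flow_from_directory` (the directory paths and batch size are assumptions, not part of the original):

from keras.preprocessing.image import ImageDataGenerator

# Hypothetical layout: one subdirectory per class under data/train and data/val.
train_datagen = ImageDataGenerator(rescale=1. / 255)
val_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    'data/train',               # hypothetical path
    target_size=(150, 150),     # matches the input_shape above
    batch_size=32,
    class_mode='categorical')
validation_generator = val_datagen.flow_from_directory(
    'data/val',                 # hypothetical path
    target_size=(150, 150),
    batch_size=32,
    class_mode='categorical')
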
Example #2
callbacks = [
    ReduceLROnPlateau(monitor='val_categorical_accuracy', factor=0.5, patience=5, mode='max', cooldown=3, verbose=1),
    ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_categorical_accuracy', save_best_only=True, verbose=1),
]

model = MobileNet(input_shape=(size, size, 1), alpha=1., weights=None, classes=NCATS)
model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy',
              metrics=[categorical_crossentropy, categorical_accuracy, top_3_accuracy])
print(model.summary())

model.fit_generator(train_datagen,
                    steps_per_epoch=STEPS,
                    # initial_epoch=initial_epoch,
                    epochs=EPOCHS,
                    verbose=1,
                    validation_data=(x_valid, y_valid),
                    callbacks=callbacks)


model.load_weights(MODEL_WEIGHTS_FILE)

valid_predictions = model.predict(x_valid, batch_size=128, verbose=1)
map3 = mapk(valid_df[['y']].values, preds2catids(valid_predictions).values)
print('Map3: {:.3f}'.format(map3))

test = pd.read_csv(os.path.join(INPUT_DIR, 'test_simplified.csv'))
test.head()
x_test = df_to_image_array_xd(test, size)
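
`top_3_accuracy` and `preds2catids` are competition helpers not shown in this excerpt. A plausible sketch of both, built on Keras' top-k metric and NumPy (the originals may differ in detail):

import numpy as np
import pandas as pd
from keras.metrics import top_k_categorical_accuracy

def top_3_accuracy(y_true, y_pred):
    # Fraction of samples whose true class is among the three highest-scoring predictions.
    return top_k_categorical_accuracy(y_true, y_pred, k=3)

def preds2catids(predictions):
    # Top-3 class ids per sample, highest probability first.
    return pd.DataFrame(np.argsort(-predictions, axis=1)[:, :3], columns=['a', 'b', 'c'])
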
Example #3
model = MobileNet(  # assumed opening -- the excerpt begins mid-call
                  classes=num_classes,
                  weights=None)
model.compile(optimizer='nadam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

# model callbacks
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss',
                              factor=0.1,
                              patience=int(patience / 2),
                              verbose=1)
csv_logger = CSVLogger(log_file_path, append=False)
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=False)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

# training model
model.fit_generator(image_generator.flow(mode='train'),
                    steps_per_epoch=int(len(train_keys) / batch_size),
                    epochs=num_epochs,
                    verbose=1,
                    callbacks=callbacks,
                    validation_data=image_generator.flow(mode='val'),
                    validation_steps=int(len(val_keys) / batch_size))
Example #4
callbacks = [earlystop, checkpoint]

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.0001),
              metrics=['accuracy'])

nb_train_samples = 800
nb_validation_samples = 150

epochs = 10
batch_size = 16

history = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    callbacks=callbacks,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)


from keras.models import load_model

classifier = load_model('face_recog.h5')
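
With the classifier restored, single-image inference might look like the following sketch (the file name, 224x224 input size, and 1/255 rescaling are assumptions):

import cv2
import numpy as np

img = cv2.imread('test_face.jpg')        # hypothetical test image
img = cv2.resize(img, (224, 224))        # assumed training resolution
img = img.astype('float32') / 255.0      # assumed preprocessing
probs = classifier.predict(np.expand_dims(img, axis=0))
print('Predicted class:', np.argmax(probs[0]))
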
Example #5
    # callbacks
    log_file_path = base_path + dataset_name + '_emotion_training.log'
    csv_logger = CSVLogger(log_file_path, append=False)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss',
                                  factor=0.1,
                                  patience=int(patience / 4),
                                  verbose=1)
    trained_models_path = base_path + dataset_name + '_mobilenet'
    model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names,
                                       'val_loss',
                                       verbose=1,
                                       save_best_only=True)
    callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

    # loading dataset
    data_loader = DataManager(dataset_name, image_size=input_shape[:2])
    faces, emotions = data_loader.get_data()
    faces = preprocess_input(faces)
    num_samples, num_classes = emotions.shape
    train_data, val_data = split_data(faces, emotions, validation_split)
    train_faces, train_emotions = train_data
    model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                            batch_size),
                        steps_per_epoch=len(train_faces) // batch_size,
                        epochs=num_epochs,
                        verbose=1,
                        callbacks=callbacks,
                        validation_data=val_data)
Example #6
from keras.applications import MobileNet
model = MobileNet(weights='../input/mobilenet-h5/mobilenet_1_0_128_tf.h5')
model.summary()
type(model)

from keras.layers import Dense
from keras.models import Model
from keras.optimizers import Adam

# Take the output six layers from the end and attach a new 29-way softmax head.
x = model.layers[-6].output
predictions = Dense(29, activation='softmax')(x)

model = Model(inputs=model.input, outputs=predictions)
model.summary()

# Freeze all but the last 23 layers for fine-tuning.
for layer in model.layers[:-23]:
    layer.trainable = False

model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_batches, steps_per_epoch=3627,
                    validation_data=valid_batches, validation_steps=723,
                    epochs=10, verbose=2)

model.save('sign_language.h5')

# model built and saved up to here

from keras.models import load_model

class SignLanguageModel:

    Signs = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
             "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
             "del", "nothing", "space"]

    def __init__(self):
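        # Minimal sketch only -- the original body is truncated in the source.
        # Assumes the 'sign_language.h5' file saved above.
        self.model = load_model('sign_language.h5')

    def predict_sign(self, img_batch):
        # Hypothetical helper: img_batch is a preprocessed array of shape
        # (1, height, width, 3) matching the network input; returns the
        # letter with the highest softmax probability.
        probs = self.model.predict(img_batch)
        return self.Signs[int(probs[0].argmax())]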
Example #7
ckpt = ModelCheckpoint(  # assignment name recovered from the callbacks list below
    filepath='weights_keras/{epoch:02d}-{val_loss:.2f}.hdf5',
    monitor='val_acc',
    save_best_only=True)
# histogram_freq=0: weight histograms require validation data as arrays, not a generator.
tb = TensorBoard(log_dir='./logs_keras', histogram_freq=0, batch_size=128)

from keras.applications import MobileNetV2
from keras.applications import MobileNet

k_model = MobileNet(input_shape=(224, 224, 3),
                    alpha=1.0,
                    depth_multiplier=1,
                    dropout=1e-3,
                    include_top=True,
                    weights=None,
                    input_tensor=None,
                    pooling=None,
                    classes=200)

k_model.compile(optimizer='adam',
                metrics=['accuracy'],
                loss='categorical_crossentropy')

k_model.summary()

k_model.fit_generator(train_generator,
                      validation_data=val_generator,
                      epochs=35,
                      callbacks=[ckpt, tb],
                      workers=6,
                      use_multiprocessing=True)
Example #8
    axs[1].grid()
    axs[1].legend(loc=0)
    fig.savefig(address_hist, dpi=300)
    plt.show()

img_gen = Generate_Data(train_num=128 * 4)
length = get_char_length()

input_shape = (IMAGE_HEIGHT, IMAGE_WIDTH, CHANNEL)
model = MobileNet(input_shape=input_shape, alpha=1., weights=None, classes=CHAR_NUM * length)

# parallel_model = multi_gpu_model(model, 4)
adam = keras.optimizers.Adam(lr=0.005, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='categorical_crossentropy', optimizer=adam,
              metrics=[accuracy, categorical_accuracy, categorical_crossentropy])
cbk = MyCbk(model)
tensorboard = TensorBoard(log_dir=address_tensorboard, histogram_freq=0)
eva = Evaluate(img_gen.test())
reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=5,
                              mode='max', cooldown=3, verbose=1)

hists = []
hist = model.fit_generator(generator=img_gen.next_train(),
                           steps_per_epoch=600,
                           epochs=16,
                           validation_data=img_gen.next_val(),
                           validation_steps=1,
                           verbose=1,
                           callbacks=[cbk, eva, reduce_lr, tensorboard])
hists.append(hist)
draw_hist(hists)
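
`MyCbk`, `Evaluate`, and `Generate_Data` are project-specific and not shown. Given the commented-out `multi_gpu_model` line, `MyCbk` is most likely the usual "save the template model" callback from the Keras multi-GPU FAQ; a sketch under that assumption (the output file name is hypothetical):

import keras

class MyCbk(keras.callbacks.Callback):
    # Saves the single-GPU template model after each epoch, so the weights
    # stay loadable even if training runs on a multi_gpu_model wrapper.
    def __init__(self, model_to_save):
        super(MyCbk, self).__init__()
        self.model_to_save = model_to_save

    def on_epoch_end(self, epoch, logs=None):
        self.model_to_save.save('model_at_epoch_%d.h5' % epoch)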

Example #9
train_datagen_1 = ImageDataGenerator(  # assumed opening -- the excerpt begins mid-call
                                   zoom_range=0.2,
                                   horizontal_flip = True)

validation_datagen_1 = ImageDataGenerator()

history_2 = pre_trained_model_1.fit_generator(
    train_datagen_1.flow(train_x, train_y, batch_size=32),
    steps_per_epoch=len(train_x) // 32,
    epochs=10,
    validation_data=validation_datagen_1.flow(val_x, val_y, batch_size=32),
    validation_steps=len(val_x) // 32)

# Try MobileNet (weights=None here, so it trains from scratch rather than transfer learning)
from keras.applications import MobileNet
from keras.applications.mobilenet import preprocess_input
model_3 = MobileNet(input_shape=(100, 100, 3), alpha=1., weights=None, classes=5005)

model_3.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])

from keras.preprocessing.image import ImageDataGenerator  # keep keras (not tf.keras) consistent with the model above
train_datagen_2 = ImageDataGenerator(rotation_range=40,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     shear_range=0.2,
                                     zoom_range=0.2,
                                     horizontal_flip=True)

validation_datagen_2 = ImageDataGenerator()

history_3 = model_3.fit_generator(train_datagen_2.flow(X, yy, batch_size=32),
                                  steps_per_epoch=len(X) // 32,
                                  epochs=10)
Example #10
callbacks = [
    ReduceLROnPlateau(monitor='val_categorical_accuracy', factor=0.5, patience=1, mode='max', cooldown=3, verbose=1),
    ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_categorical_accuracy', save_best_only=True, verbose=1),
]

#model = model_ResNet50(NCATS)
model = MobileNet(input_shape=(size, size, 3), alpha=1., weights=None, classes=NCATS)
model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy',
              metrics=[categorical_crossentropy, categorical_accuracy, top_3_accuracy])
print(model.summary())

model.fit_generator(train_datagen,
                    steps_per_epoch=STEPS,
                    # initial_epoch=initial_epoch,
                    epochs=EPOCHS,
                    verbose=1,
                    validation_data=valid_datagen,  # (x_valid, y_valid)
                    validation_steps=valSTEPS,
                    max_q_size=10,
                    callbacks=callbacks)


model.load_weights(MODEL_WEIGHTS_FILE)

k = 100

test = pd.read_csv(os.path.join(INPUT_DIR, 'test_simplified.csv'))
test1 = test[:10000]
x_test = df_to_image_array_xd(test1, size=size)
test_predictions1 = model.predict(x_test, batch_size=128, verbose=1)
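
The snippet above only scores the first 10,000 test rows. A sketch of covering the whole file in chunks, reusing only names already defined here:

import numpy as np

chunk_size = 10000
chunk_preds = []
for start in range(0, len(test), chunk_size):
    x_chunk = df_to_image_array_xd(test[start:start + chunk_size], size=size)
    chunk_preds.append(model.predict(x_chunk, batch_size=128, verbose=0))
test_predictions = np.concatenate(chunk_preds, axis=0)
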
Example #11
if optimizer_name == 'sgd':  # assumed condition -- the excerpt begins mid-branch
    optim = SGD(lr=learning_rate, momentum=0.9)
else:
    optim = Adam(lr=learning_rate)

model.compile(
    optimizer=optim,
    loss='categorical_crossentropy',
    metrics=[categorical_crossentropy, categorical_accuracy, top_3_accuracy])
print(model.summary())

model.fit_generator(
    generator=train_datagen,
    steps_per_epoch=math.ceil(n_samples / batch_size),
    epochs=nb_epoch,
    callbacks=callbacks,
    validation_data=(x_valid, y_valid),
    # validation_data=val_gen,
    # validation_steps=math.ceil(2468645 / batch_size),
    max_q_size=10,
    workers=4,
    verbose=VERBOSEFLAG)

test_df = pd.read_csv("../input/test_simplified.csv")

n_samples = test_df.shape[0]
pick_per_epoch = math.ceil(n_samples / batch_size)
pick_order = np.arange(test_df.shape[0])

all_preds = []

for i in trange(pick_per_epoch):
Example #12
if __name__ == '__main__':
    input_shape = (299, 299, 3)
    # hdf = "m1.h5"
    hdf = "temporal_vg_smoke_v1.h5"

    # 20 input channels: the temporal stream stacks optical-flow frames,
    # hence the channel count differs from the (299, 299, 3) spatial shape above.
    m = MobileNet(input_shape=(299, 299, 20), weights=None, classes=2)
    # load_model(hdf)
    m.load_weights(hdf)
    m.compile("adam", categorical_crossentropy, metrics=["accuracy"])
    # plot_model(m, show_shapes=True)

    m.summary()

    # data_dir = "/blender/storage/datasets/smoking/gifs/"
    data_dir = "/blender/storage/datasets/vg_smoke"

    train_seq = SmokeGifSequence(data_dir, neg_txt='negatives.txt', pos_txt='positives.txt',
                                 input_shape_hwc=input_shape,
                                 only_temporal=True)
    # val_seq = SmokeGifSequence(data_dir, neg_txt='validate_neg.txt', pos_txt='validate_pos.txt',
    #                            input_shape_hwc=input_shape,
    #                            only_temporal=True)

    log_dir = os.path.join("./logs", os.path.basename(hdf))
    m.fit_generator(train_seq, len(train_seq), epochs=20,
                    use_multiprocessing=True, workers=5,
                    # validation_data=val_seq, validation_steps=len(val_seq),
                    verbose=1, callbacks=[TensorBoard(log_dir), ModelCheckpoint(hdf)],
                    )
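
`SmokeGifSequence` is project code; `fit_generator` accepts any `keras.utils.Sequence`, which only has to implement `__len__` and `__getitem__`. A minimal stand-in illustrating that contract (all names, shapes, and sizes here are assumptions):

import numpy as np
from keras.utils import Sequence

class RandomGifSequence(Sequence):
    # Hypothetical stand-in: yields random batches shaped like the model input.
    def __init__(self, n_batches=10, batch_size=8, shape=(299, 299, 20), n_classes=2):
        self.n_batches, self.batch_size = n_batches, batch_size
        self.shape, self.n_classes = shape, n_classes

    def __len__(self):
        return self.n_batches  # batches per epoch

    def __getitem__(self, idx):
        x = np.random.rand(self.batch_size, *self.shape).astype('float32')
        y = np.eye(self.n_classes)[np.random.randint(self.n_classes, size=self.batch_size)]
        return x, y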
Example #13
    datagen = ImageDataGenerator(  # assumed opening -- earlier augmentation args are truncated
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False)  # don't flip vertically

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        epochs=epochs,
                        validation_data=(x_test, y_test),
                        workers=4)

# Save model and weights
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
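
The saved model can later be restored and re-scored; a short sketch reusing `model_path`, `x_test`, and `y_test` from above:

from keras.models import load_model

restored = load_model(model_path)
scores = restored.evaluate(x_test, y_test, verbose=1)
print('Restored test accuracy:', scores[1])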