import time

from keras import callbacks
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, Conv2DTranspose, BatchNormalization, LeakyReLU
from keras.models import Model
from keras.optimizers import Adadelta, SGD

# NumKernels, FilterSizes, SubpixelConv2D, ssim, ssim_metric, PSNR, MyCallback and
# image_gen are assumed to be defined elsewhere in this module.

def MakeConvNet(Size, batch_size, epochs, optimizer, learning_rate, train_list, validation_list):
    input_img = Input(shape=Size)
    model = input_img
    Channels = Size[2]  # number of input channels (1 for a grayscale input image)

    for i in range(len(NumKernels)-1): #number of layers
        NumKernel=NumKernels[i]
        FilterSize = FilterSizes[i]
        print(i)

        model = Conv2DTranspose(NumKernel, (FilterSize, FilterSize), padding='same', kernel_initializer='he_normal', use_bias=False)(model)
        print(model.shape)
        model = BatchNormalization()(model)
        print(model.shape)
        # model = Activation('relu')(model)
        model = LeakyReLU()(model)
        print(model.shape)

    model = Conv2DTranspose(4, (3, 3), padding='same', kernel_initializer='he_normal', use_bias=False)(model)  # 4 feature maps -> one 2x-upscaled channel after the sub-pixel layer
    print(model.shape)
    model = BatchNormalization()(model)
    print(model.shape)
    # model = Activation('relu')(model)
    model = SubpixelConv2D(2)(model)
    model = LeakyReLU()(model)
    print(model.shape)
    # print(input_img.shape)
    model = Model(input_img, model)

    # optimizer == 0 selects Adadelta, anything else selects SGD
    adadelta = Adadelta()
    sgd = SGD(lr=learning_rate, momentum=0.9, decay=1e-4, nesterov=False, clipnorm=1)
    if optimizer == 0:
        model.compile(adadelta, loss='mean_absolute_error', metrics=[ssim, ssim_metric, PSNR])
    else:
        model.compile(sgd, loss='mean_absolute_error', metrics=[ssim, ssim_metric, PSNR])

    model.summary()

    mycallback = MyCallback(model)
    timestamp = time.strftime("%m%d-%H%M", time.localtime(time.time()))

    csv_logger = callbacks.CSVLogger('data/callbacks/training_{}.log'.format(timestamp))
    filepath="./checkpoints/weights-{epoch:03d}-{PSNR:.2f}-{ssim:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='PSNR', verbose=1, mode='max')
    callbacks_list = [mycallback, checkpoint, csv_logger]
    
    with open('./model/subcnn_architecture_{}.json'.format(timestamp), 'w') as f:
        f.write(model.to_json())

    history = model.fit_generator(image_gen(train_list, batch_size=batch_size), 
                        steps_per_epoch=(3600)*len(train_list) // batch_size,
                        validation_data=image_gen(validation_list,batch_size=batch_size),
                        validation_steps=(3600)*len(validation_list) // batch_size,
                        epochs=epochs,
                        workers=1024,  # caution: many workers with a plain Python generator can duplicate batches; a keras.utils.Sequence is safer
                        callbacks=callbacks_list,
                        verbose=1)

    print("Done training!!!")
Example 2
    seed = 42
   )

sgd = optimizers.SGD(lr = 0.01, momentum = 0.9,clipnorm = 1.0)
adam = optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999,clipnorm = 1.0)
angdiffmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

checkpoint1 = ModelCheckpoint('modelangdiff.h5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.7, min_delta = 0.0005,
                              patience=20, min_lr=0.0001, verbose = 1)
callbacks_list = [checkpoint1,reduce_lr]

H = angdiffmodel.fit_generator(
    training_set_angle_diff,
    steps_per_epoch=87, #No of train images / batch size
    #steps_per_epoch=1,
    epochs=1000,
    validation_data = val_set_angle_diff,
    validation_steps = 87, #No of validation images / batch size
    callbacks=callbacks_list)

!cp /content/modelangdiff_9226.h5 '/content/drive/My Drive/UT-MHAD_models/metalearner-models/fulltrainingdata'
angdiffmodel = load_model('/content/modelangdiff_9226.h5')

from sklearn.metrics import classification_report, confusion_matrix

# Confusion matrix and classification report
Y_pred = angdiffmodel.predict_generator(val_set_angle_diff)
y_pred = np.argmax(Y_pred, axis=1)

matrix = confusion_matrix(val_set_angle_diff.classes, y_pred)
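
# Note: val_set_angle_diff.classes only lines up with y_pred if the validation generator
# was created with shuffle=False. Under that assumption, a per-class report can be added
# (class_indices is standard on a Keras DirectoryIterator):
idx_to_class = {v: k for k, v in val_set_angle_diff.class_indices.items()}
target_names = [idx_to_class[i] for i in range(len(idx_to_class))]
print(classification_report(val_set_angle_diff.classes, y_pred, target_names=target_names))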
Example 3
#Image generator
from keras.preprocessing.image import ImageDataGenerator
img_generator = ImageDataGenerator(rotation_range = 20, width_shift_range = 0.2, height_shift_range = 0.2, zoom_range = 0.3)

# Callback functions
from keras.callbacks import EarlyStopping, ReduceLROnPlateau

lr_adjust = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=4, verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
earlystopping = EarlyStopping(monitor='val_acc', patience = 4, verbose = 1, mode='auto')

epochs = 1
batch_size = 64

Mobilenet.fit_generator(img_generator.flow(X_train, Y_train, batch_size=batch_size),
                        steps_per_epoch=len(X_train) // batch_size,  # one pass over the training data per epoch
                        validation_data=(X_test, Y_test),
                        epochs=epochs,
                        callbacks=[lr_adjust, earlystopping])

"""最後輸出"""

df_testing = df_testing.drop('id', axis = 1)
df_testing = shape_CNN(df_testing)
result_final = Mobilenet.predict(df_testing)
sample_submission = pd.read_csv('sample_submission.csv', header = 0,encoding = 'unicode_escape')

for i in range(result_final.shape[0]):
  sample_submission.iloc[i,1] = int(np.argmax(result_final[i]))
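
# Equivalently, the loop above can be written as one vectorized assignment (assuming the
# predicted label lives in the second column of sample_submission):
sample_submission.iloc[:, 1] = result_final.argmax(axis=1)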

from google.colab import files
sample_submission.to_csv('submit.csv', index=False)
files.download('submit.csv')  # download the submission from the Colab runtime
Example 4
#model=Dropout(0.5)(model)
model = Dense(1024, activation='relu')(model)
#model=Dropout(0.5)(model)
output = Dense(classes, activation='softmax')(model)
model = Model(inputs=input, outputs=output)

sgd = SGD(lr=0.1, decay=0.0005, momentum=0.9, nesterov=False)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
tensorboard = TensorBoard(log_dir=r'E:\sansu\python\tensorboard')
#es=EarlyStopping(monitor='val_acc', patience=2, verbose=0,min_delta=0.002,mode='max')

model.fit_generator(train_data,
                    steps_per_epoch=(train_data.samples // train_batch_size),
                    epochs=10,
                    validation_data=test_data,
                    validation_steps=(test_data.samples // test_batch_size),
                    callbacks=[tensorboard])
#model.save(r'G:\lijiawei\alexnet_gai.h5')

# img=cv2.imread(r'G:\lijiawei\datasets\hebing\test\boring\6996.png')
# img=cv2.resize(img,(227,227))
# img=np.expand_dims(img,axis=0)
#
# def print_feature(i):
#     layer_model = Model(inputs=model.input,outputs=model.layers[i].output)
#     layer_output = layer_model.predict(img)
#     print(layer_output.shape)
#     shape=layer_output.shape[1]
#     plt.figure(i)
#     for i in range(49):
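
# The commented-out print_feature helper above is cut off. A minimal sketch of the same
# idea (names assumed; needs matplotlib and an `img` batch prepared as in the commented
# lines) could look like this:
import matplotlib.pyplot as plt

def show_feature_maps(layer_index, img, n_maps=49):
    # Sub-model that outputs the activations of the requested layer
    layer_model = Model(inputs=model.input, outputs=model.layers[layer_index].output)
    layer_output = layer_model.predict(img)
    print(layer_output.shape)
    plt.figure(figsize=(10, 10))
    for i in range(min(n_maps, layer_output.shape[-1])):
        plt.subplot(7, 7, i + 1)
        plt.imshow(layer_output[0, :, :, i], cmap='viridis')
        plt.axis('off')
    plt.show()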
Example 5
reducelr = ReduceLROnPlateau(monitor='val_loss',
                             factor=0.5,
                             patience=6,
                             mode='min',
                             cooldown=0,
                             min_lr=0.5e-6)
callbacks = [checkpoint, earlystopping, reducelr]

is_load_model = True
if is_load_model:
    if os.path.exists(checkpoint_name):
        model = load_model(checkpoint_name)
model.fit_generator(load_trainset(),
                    validation_data=(test_set, test_labels),
                    steps_per_epoch=steps_per_epoch,
                    class_weight=ratio_dic,
                    epochs=200,
                    verbose=1,
                    shuffle=True,
                    callbacks=callbacks)


def metrics_all_attribute(y_test=test_labels):
    pred = model.predict(x=test_set)
    pred = pred.round().astype('int')
    y_test = y_test.astype('int')
    TP = np.bitwise_and(y_test, pred).sum()      # true positives
    FN_FP = np.bitwise_xor(y_test, pred).sum()   # false negatives + false positives
    TP_FP = pred.sum()                           # all predicted positives
    TP_FN = y_test.sum()                         # all actual positives
    precision = TP / TP_FP
    recall = TP / TP_FN
    accuracy = TP / (TP + FN_FP)                 # multi-label (Jaccard) accuracy
    return accuracy, precision, recall
Example 6

checkpoint1 = ModelCheckpoint('modelang.h5',
                              monitor='val_accuracy',
                              verbose=1,
                              save_best_only=True,
                              mode='max')
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.7,
                              min_delta=0.0005,
                              patience=20,
                              min_lr=0.0001,
                              verbose=1)
callbacks_list = [checkpoint1, reduce_lr]

H = angmodel.fit_generator(training_set_ang,
                           steps_per_epoch=87,
                           epochs=1000,
                           validation_data=val_set_ang,
                           validation_steps=87,
                           callbacks=callbacks_list)

Y_pred = angmodel.predict_generator(val_set_ang)
y_pred = np.argmax(Y_pred, axis=1)

mylabels = []

actions = ['a1', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16', 'a17', 'a18', 'a19',
           'a2', 'a20', 'a21', 'a22', 'a23', 'a24', 'a25', 'a26', 'a27', 'a3', 'a4', 'a5',
           'a6', 'a7', 'a8', 'a9']

mapping = {
    'a1': 'Swipe\n Left',
    'a2': 'Swipe\n Right',
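
# The mapping dict above is cut off. Assuming it maps each action code ('a1', 'a2', ...) to
# a readable name, the predicted indices can be turned into labels for plots or reports
# (sketch; val_set_ang is assumed to be a Keras DirectoryIterator):
idx_to_code = {v: k for k, v in val_set_ang.class_indices.items()}
mylabels = [mapping.get(idx_to_code[i], idx_to_code[i]) for i in range(len(idx_to_code))]
y_pred_names = [mylabels[i] for i in y_pred]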
Example 7
                                                  factor=0.5,
                                                  min_lr=0.00001)
learning_rate_reduction_consonant = ReduceLROnPlateau(
    monitor='dense_5_accuracy',
    patience=3,
    verbose=1,
    factor=0.5,
    min_lr=0.00001)
#%%
import tensorflow as tf
# TensorFlow 1.x session setup (under TF 2.x these live in tf.compat.v1)
gpu_options = tf.GPUOptions(allow_growth=True)
session = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
epochs = 2
batch_size = 100
history = model.fit_generator(
    datagen.flow(x_train, {
        'dense_3': y_train_root,
        'dense_4': y_train_vowel,
        'dense_5': y_train_consonant
    },
                 batch_size=batch_size),
    epochs=epochs,
    validation_data=(x_test, [y_test_root, y_test_vowel, y_test_consonant]),
    steps_per_epoch=x_train.shape[0] // batch_size,
    callbacks=[
        learning_rate_reduction_root, learning_rate_reduction_vowel,
        learning_rate_reduction_consonant
    ])

histories.append(history)
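
# Note: the stock ImageDataGenerator.flow expects a single label array, so passing a dict
# of outputs as above normally relies on a custom generator subclass. A minimal
# keras.utils.Sequence sketch (assumed names, no augmentation) that yields the three label
# arrays in model-output order:
import numpy as np
from keras.utils import Sequence

class MultiOutputSequence(Sequence):
    def __init__(self, x, y_root, y_vowel, y_consonant, batch_size):
        self.x = x
        self.y = [y_root, y_vowel, y_consonant]
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        # Return (inputs, [outputs...]) in the same order as the model's outputs
        return self.x[sl], [y[sl] for y in self.y]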
Example 8
    directory="./input/images",
    x_col="filepath",
    y_col=['grapheme_root', 'vowel_diacritic', 'consonant_diacritic'],
    batch_size=batch_size,
    color_mode='grayscale',
    seed=42,
    shuffle=True,
    class_mode="multi_output",
    target_size=(64, 64), keys=['dense_3', 'dense_4', 'dense_5'])

val_generator = val_datagen.flow_from_dataframe(
    dataframe=val_df,
    directory="./input/images",
    x_col="filepath",
    y_col=['grapheme_root', 'vowel_diacritic', 'consonant_diacritic'],
    batch_size=batch_size,
    color_mode='grayscale',
    seed=42,
    shuffle=True,
    class_mode="multi_output",
    target_size=(64, 64), keys=['dense_3', 'dense_4', 'dense_5'])

history = model.fit_generator(generator=train_generator,
                              epochs=epochs, validation_data=val_generator,
                              steps_per_epoch=len(train_df) // batch_size,
                              validation_steps=len(val_df) // batch_size)
history_df = pd.DataFrame(history.history)
# history_df[['dense_2_loss', 'val_dense_2_loss']].plot()
# history_df[['dice_coef', 'val_dice_coef']].plot()

history_df.to_csv('../histories/history_convnet.csv')
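
# A short sketch for inspecting the saved history (the overall 'loss'/'val_loss' columns
# always exist; per-output columns follow the model's layer names as in the commented
# lines above):
import matplotlib.pyplot as plt

history_df[['loss', 'val_loss']].plot(title='Total loss')
plt.show()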
Example 9
               activation='relu')(model)
model = BatchNormalization()(model)
model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(model)

model = Conv2D(384, (3, 3), strides=(1, 1), padding='same',
               activation='relu')(model)
model = Conv2D(384, (3, 3), strides=(1, 1), padding='same',
               activation='relu')(model)
model = Conv2D(256, (3, 3), strides=(1, 1), padding='same',
               activation='relu')(model)
model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(model)

model = Flatten()(model)
model = Dense(1024, activation='relu')(model)
model = Dropout(0.5)(model)
model = Dense(1024, activation='relu')(model)
model = Dropout(0.5)(model)
output = Dense(classes, activation='softmax')(model)
model = Model(inputs=input, outputs=output)

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
#tensorboard = TensorBoard(log_dir=r'E:\sansu\python\tensorboard')
#es=EarlyStopping(monitor='val_acc', patience=2, verbose=0,min_delta=0.002,mode='max')
model.fit_generator(train_data,
                    steps_per_epoch=(train_data.samples // train_batch_size),
                    epochs=40,
                    validation_data=test_data,
                    validation_steps=(test_data.samples // test_batch_size))
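
# Example 9 begins mid-model, so `input` and the opening convolutional block are not shown.
# A plausible AlexNet-style stem is sketched below purely for illustration; the real input
# shape, filter counts and strides may differ. (`input` is kept to match the snippet above,
# even though it shadows the Python builtin.)
from keras.layers import Input, Conv2D, BatchNormalization, MaxPooling2D

input = Input(shape=(227, 227, 3))
model = Conv2D(96, (11, 11), strides=(4, 4), padding='valid', activation='relu')(input)
model = BatchNormalization()(model)
model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(model)
model = Conv2D(256, (5, 5), strides=(1, 1), padding='same', activation='relu')(model)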