from keras.layers import Input, Conv2D, Activation, add
from keras.models import Model
from keras.optimizers import Adam


def model_instance_master(channels, loss_rate, clip_norm):
    # loss_rate is used as the Adam learning rate; clip_norm caps the gradient norm
    input_img = Input((None, None, channels))
    model = Conv2D(64, (3, 3), padding='same',
                   kernel_initializer='he_normal')(input_img)

    # padding='same' applies zero padding, so the spatial size is preserved
    for ii in range(18):
        model = Activation('relu')(model)
        model = Conv2D(64, (3, 3),
                       padding='same',
                       kernel_initializer='he_normal')(model)

    model = Activation('relu')(model)
    model = Conv2D(1, (3, 3), padding='same',
                   kernel_initializer='he_normal')(model)
    res_img = model

    output_img = add([res_img, input_img])

    model = Model(input_img, output_img)

    adam = Adam(lr=loss_rate, clipnorm=clip_norm)

    # the loss is hard-coded to MSE, which generally works well for this residual model
    model.compile(optimizer=adam,
                  loss='mean_squared_error',
                  metrics=['mean_squared_error'])

    return model
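
A hypothetical usage sketch (the argument values below are illustrative only; loss_rate is the Adam learning rate):

vdsr_like = model_instance_master(channels=1, loss_rate=1e-4, clip_norm=1.0)
vdsr_like.summary()
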
Example #2
def residual_TCN_LSTM(n_nodes,
                      conv_len,
                      n_classes,
                      n_feat,
                      max_len,
                      loss='categorical_crossentropy',
                      online=False,
                      optimizer="rmsprop",
                      depth=3,
                      return_param_str=False):
    n_layers = len(n_nodes)

    inputs = Input(shape=(max_len, n_feat))
    model = inputs
    prev = Conv1D(n_nodes[0], conv_len, padding='same')(model)

    # encoder
    for i in range(n_layers):

        for j in range(depth):
            # convolution over the temporal dimension
            current = encoder_identify_block(prev, n_nodes[i], conv_len)
            # residual connection within residual block
            if j != 0:
                model = add([prev, current])
                model = Activation('relu')(model)
            prev = current

        if i < (n_layers - 1):
            model = MaxPooling1D(2)(model)

    # decoder
    # for i in range(n_layers):
    #
    #     for j in range(depth):
    #
    #         current = decoder_identify_block(model, n_nodes[-i - 1])
    #         model = add([prev, current])
    #         prev = current
    #     model = UpSampling1D(2)(model)

    # Output FC layer
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(inputs=inputs, outputs=model)
    model.compile(loss=loss,
                  optimizer=optimizer,
                  sample_weight_mode="temporal",
                  metrics=['accuracy'])
    model.summary()

    if return_param_str:
        param_str = "ED-TCN_C{}_L{}".format(conv_len, n_layers)
        if online:
            param_str += "_online"

        return model, param_str
    else:
        return model
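
A hypothetical call sketch; the values are illustrative, and encoder_identify_block is a project helper that is not shown in this snippet:

model, name = residual_TCN_LSTM(n_nodes=[64, 96], conv_len=25, n_classes=10,
                                n_feat=128, max_len=512, return_param_str=True)
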
Example #3
    def build_model(self):
        inputs = Input(shape=(self.img_dims[0], self.img_dims[1],
                              self.num_consecutive_frames))
        model = Conv2D(32, (8, 8), strides=(4, 4), padding='valid')(inputs)
        model = Activation('relu')(model)
        model = Conv2D(64, (4, 4), strides=(2, 2), padding='valid')(model)
        model = Activation('relu')(model)
        model = Conv2D(64, (3, 3), strides=(1, 1), padding='valid')(model)
        model = Activation('relu')(model)
        model = Flatten()(model)
        model = Dense(512)(model)
        model = Activation('relu')(model)
        model = Dense(self.num_actions)(model)

        model = Model(inputs=inputs, outputs=model)
        adam = Adam(lr=self.learning_rate)
        model.compile(loss='mse', optimizer=adam)

        print("Finished building the model")
        self.model = model
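
A quick shape check for the convolution stack above, assuming the classic 84x84 Atari frame size for self.img_dims (an assumption; the real dimensions come from the agent's configuration):

# hypothetical shape walk-through for an 84x84 input with 'valid' padding
h = (84 - 8) // 4 + 1   # 20 after Conv2D(32, (8, 8), strides=(4, 4))
h = (h - 4) // 2 + 1    # 9 after Conv2D(64, (4, 4), strides=(2, 2))
h = (h - 3) // 1 + 1    # 7 after Conv2D(64, (3, 3), strides=(1, 1))
print(h * h * 64)       # 3136 features reach Flatten() before Dense(512)
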
Example #4
    def build_model(self):
        inputs = Input(shape=(self.img_dims[0], self.img_dims[1],
                              self.num_consecutive_frames))
        model = Conv2D(32, (8, 8), strides=(4, 4), padding='valid')(inputs)
        model = Activation('relu')(model)
        model = Conv2D(64, (4, 4), strides=(2, 2), padding='valid')(model)
        model = Activation('relu')(model)
        model = Conv2D(64, (3, 3), strides=(1, 1), padding='valid')(model)
        model = Activation('relu')(model)

        # dueling architecture: split into separate advantage and value streams
        stream = Flatten()(model)

        advantage = Dense(self.num_actions, name='Advantage_stream')(stream)
        value = Dense(1, name='Value_stream')(stream)

        def mean(x):
            # per-sample mean of the advantages, kept as a (batch, 1) tensor
            import keras.backend
            return keras.backend.mean(x, axis=1, keepdims=True)

        meanRes = Lambda(function=mean, name='Mean_layer')(advantage)

        # broadcast the (batch, 1) mean across all actions so it can be subtracted
        from keras.layers import Concatenate
        concatenations = [meanRes] * self.num_actions
        meanRes = Concatenate(name='Mean_broadcasting')(concatenations)

        # Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
        advantage = keras.layers.subtract([advantage, meanRes], name='A_z')
        qOut = keras.layers.add([value, advantage], name='Q_out')

        model = Model(inputs=inputs, outputs=qOut)
        adam = Adam(lr=self.learning_rate)
        model.compile(loss='mse', optimizer=adam)

        print("Finished building the model")
        self.model = model
        self.target_model = Agent.copy_model(self.model)
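
The Concatenate-based broadcasting above reproduces the dueling combination Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)). A more compact sketch of the same centring step (not the author's code; it relies on the backend broadcasting that the add([value, advantage]) call above already depends on):

from keras.layers import Lambda, add
import keras.backend as K

centred_advantage = Lambda(lambda a: a - K.mean(a, axis=1, keepdims=True),
                           name='A_zero_mean')(advantage)
qOut = add([value, centred_advantage], name='Q_out')
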
Example #5
def model_train(img_size,
                batch_size,
                epochs,
                optimizer,
                learning_rate,
                train_list,
                validation_list,
                style=2):

    print('Style {}.'.format(style))

    if style == 1:
        input_img = Input(shape=img_size)

        #model = Sequential()

        # 20 convolution layers in total: the first conv, 18 hidden convs, and a
        # final single-channel conv that predicts the residual image
        model = Conv2D(64, (3, 3),
                       padding='same',
                       kernel_initializer='he_normal')(input_img)
        model = Activation('relu')(model)
        for _ in range(18):
            model = Conv2D(64, (3, 3),
                           padding='same',
                           kernel_initializer='he_normal')(model)
            model = Activation('relu')(model)
        model = Conv2D(1, (3, 3),
                       padding='same',
                       kernel_initializer='he_normal')(model)
        res_img = model

        output_img = merge.Add()([res_img, input_img])

        model = Model(input_img, output_img)

        #model.load_weights('vdsr_model_edges.h5')

        adam = Adam(lr=0.000005)
        #sgd = SGD(lr=1e-3, momentum=0.9, decay=1e-4, nesterov=False)
        sgd = SGD(lr=0.01, momentum=0.9, decay=0.001, nesterov=False)
        #model.compile(sgd, loss='mse', metrics=[PSNR, "accuracy"])
        model.compile(adam,
                      loss='mse',
                      metrics=[ssim, ssim_metric, PSNR, "accuracy"])

        model.summary()

    else:

        input_img = Input(shape=img_size)

        model = Conv2D(64, (3, 3),
                       padding='valid',
                       kernel_initializer='he_normal')(input_img)
        model_0 = Activation('relu')(model)

        total_conv = 22  # should be an even number
        total_conv -= 2  # subtract the first and last convolutions
        residual_block_num = 5  # number of residual blocks; should divide the remaining convs evenly

        for _ in range(residual_block_num):  # residual block
            model = Conv2D(64, (3, 3),
                           padding='same',
                           kernel_initializer='he_normal')(model_0)
            model = Activation('relu')(model)
            for _ in range(int(total_conv / residual_block_num) - 1):
                model = Conv2D(64, (3, 3),
                               padding='same',
                               kernel_initializer='he_normal')(model)
                model = Activation('relu')(model)
                model_0 = add([model, model_0])

        model = Conv2D(1, (3, 3),
                       padding='valid',
                       kernel_initializer='he_normal')(model)
        res_img = model

        input_img1 = crop(1, 2, -2)(input_img)
        input_img1 = crop(2, 2, -2)(input_img1)

        print(input_img.shape)
        print(input_img1.shape)
        output_img = merge.Add()([res_img, input_img1])
        # output_img = res_img
        model = Model(input_img, output_img)

        # model.load_weights('./vdsr_model_edges.h5')
        # adam = Adam(lr=learning_rate)
        adam = Adadelta()
        # sgd = SGD(lr=1e-7, momentum=0.9, decay=1e-2, nesterov=False)
        sgd = SGD(lr=learning_rate,
                  momentum=0.9,
                  decay=1e-4,
                  nesterov=False,
                  clipnorm=1)
        if optimizer == 0:
            model.compile(adam, loss='mse', metrics=[ssim, ssim_metric, PSNR])
        else:
            model.compile(sgd, loss='mse', metrics=[ssim, ssim_metric, PSNR])

        model.summary()

    mycallback = MyCallback(model)
    timestamp = time.strftime("%m%d-%H%M", time.localtime(time.time()))
    csv_logger = callbacks.CSVLogger(
        'data/callbacks/training_{}.log'.format(timestamp))
    filepath = "./checkpoints/weights-improvement-{epoch:03d}-{PSNR:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='PSNR', verbose=1, mode='max')
    callbacks_list = [mycallback, checkpoint, csv_logger]

    # print('Loading training data.')
    # x = load_images(DATA_PATH)
    # print('Loading data label.')
    # y = load_images(LABEL_PATH)
    # print('Loading validation data.')
    # val = load_images(VAL_PATH)
    # print('Loading validation label.')
    # val_label = load_images(VAL_LABEL_PATH)

    # print(x.shape)
    # print(y.shape)
    # print(val.shape)
    # print(val_label.shape)

    with open('./model/vdsr_architecture.json', 'w') as f:
        f.write(model.to_json())

    # datagen = ImageDataGenerator(rotation_range=45,
    #                              zoom_range=0.15,
    #                              horizontal_flip=True,
    #                              vertical_flip=True)

    # history = model.fit_generator(datagen.flow(x, y, batch_size=batch_size),
    #                     steps_per_epoch=len(x) // batch_size,
    #                     validation_data=(val, val_label),
    #                     validation_steps=len(val) // batch_size,
    #                     epochs=epochs,
    #                     callbacks=callbacks_list,
    #                     verbose=1,
    #                     shuffle=True,
    #                     workers=256,
    #                     use_multiprocessing=True)

    history = model.fit_generator(
        image_gen(train_list, batch_size=batch_size),
        steps_per_epoch=384400 * (len(train_list)) // batch_size,
        # steps_per_epoch=4612800//batch_size,
        validation_data=image_gen(validation_list, batch_size=batch_size),
        validation_steps=384400 * (len(validation_list)) // batch_size,
        epochs=epochs,
        workers=1024,
        callbacks=callbacks_list,
        verbose=1)

    print("Done training!!!")

    print("Saving the final model ...")

    model.save('vdsr_model.h5')  # creates an HDF5 file
    del model  # deletes the existing model

    # plt.plot(history.history['accuracy'])
    # plt.plot(history.history['val_accuracy'])
    # plt.title('Model accuracy')
    # plt.ylabel('Accuracy')
    # plt.xlabel('Epoch')
    # plt.legend(['Train', 'validation'], loc='upper left')
    # # plt.show()
    # plt.savefig('accuracy.png')

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'validation'], loc='upper left')
    # plt.show()
    plt.savefig('loss.png')

    plt.plot(history.history['PSNR'])
    plt.plot(history.history['val_PSNR'])
    plt.title('Model PSNR')
    plt.ylabel('PSNR')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'validation'], loc='upper left')
    # plt.show()
    plt.savefig('PSNR.png')
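
The PSNR, ssim, and ssim_metric functions passed as metrics above are project helpers defined outside this snippet. A minimal PSNR sketch, assuming pixel values scaled to [0, 1], could look like:

import keras.backend as K

def PSNR(y_true, y_pred):
    # peak signal-to-noise ratio in dB for inputs normalized to [0, 1]
    mse = K.mean(K.square(y_pred - y_true))
    return 10.0 * K.log(1.0 / (mse + K.epsilon())) / K.log(10.0)
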
Example #6
model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
model = Activation('relu')(model)
model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
model = Activation('relu')(model)
model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
model = Activation('relu')(model)
model = Conv2D(1, (3, 3), padding='same', kernel_initializer='he_normal')(model)
res_img = model

output_img = add([res_img, input_img])

model = Model(input_img, output_img)

# model.load_weights('./checkpoints/weights-improvement-20-26.93.hdf5')

adam = Adam(lr=0.001, decay=1e-5, clipvalue=0.1, epsilon=1e-8)
sgd = SGD(lr=1e-2, momentum=0.9, decay=1e-4, nesterov=False)
model.compile(adam, loss='mse', metrics=[PSNR, "accuracy"])

model.summary()
# save a checkpoint every epoch
filepath = "./checkpoints2/vdsr-{epoch:02d}-{PSNR:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='PSNR', verbose=1, mode='max')
# log the loss and PSNR values of each epoch
train_log = CSVLogger(filename="train.log")
callbacks_list = [checkpoint, train_log]
model.fit(x=train_input, y=train_label, batch_size=BATCH_SIZE, epochs=EPOCHS,
          callbacks=callbacks_list, shuffle=True)

print("Done training!!!")

Example #7
def ED_TCN(n_nodes,
           conv_len,
           n_classes,
           n_feat,
           max_len,
           loss='categorical_crossentropy',
           online=False,
           optimizer="rmsprop",
           activation='norm_relu',
           attention=True,
           return_param_str=False):
    n_layers = len(n_nodes)

    # inputs = Input(shape=(max_len, n_feat))
    inputs = Input(shape=(None, n_feat))

    # attention layer, apply weightings to input
    if attention:
        model = attention_block(inputs, 100)
    else:
        model = inputs

    # ---- Encoder ----
    for i in range(n_layers):
        # Pad beginning of sequence to prevent usage of future data
        if online: model = ZeroPadding1D((conv_len // 2, 0))(model)
        # convolution over the temporal dimension
        model = Conv1D(n_nodes[i], conv_len, padding='same')(model)
        if online: model = Cropping1D((0, conv_len // 2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization,
                           name="encoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

        # max pooling halves the temporal dimension; after the last iteration this
        # is the encoder's hidden feature layer
        model = MaxPooling1D(2)(model)

    # ---- Decoder ----
    for i in range(n_layers):
        model = UpSampling1D(2)(model)
        if online: model = ZeroPadding1D((conv_len // 2, 0))(model)
        model = Conv1D(n_nodes[-i - 1], conv_len, padding='same')(model)
        if online: model = Cropping1D((0, conv_len // 2))(model)

        model = SpatialDropout1D(0.3)(model)

        if activation == 'norm_relu':
            model = Activation('relu')(model)
            model = Lambda(channel_normalization,
                           name="decoder_norm_{}".format(i))(model)
        elif activation == 'wavenet':
            model = WaveNet_activation(model)
        else:
            model = Activation(activation)(model)

    # Output FC layer
    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(inputs=inputs, outputs=model)
    model.compile(loss=loss,
                  optimizer=optimizer,
                  sample_weight_mode="temporal",
                  metrics=['accuracy'])
    model.summary()

    if return_param_str:
        param_str = "ED-TCN_C{}_L{}".format(conv_len, n_layers)
        if online:
            param_str += "_online"

        return model, param_str
    else:
        return model
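
A hypothetical call sketch; activation='relu' and attention=False keep it to stock Keras layers, since attention_block, channel_normalization, and WaveNet_activation are project helpers not shown here:

model = ED_TCN(n_nodes=[64, 96], conv_len=25, n_classes=12, n_feat=128,
               max_len=None, activation='relu', attention=False)
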
Example #8
def define_model_weighted(structure, filters, dropouts, init_shape,
                          weight_decay, num_classes, weight=1., temperature=1):
    """
    This model is a modified version of the previous one. The only difference is that this one has
    two outputs: one with soft labels that depend on the temperature (same as the previous model)
    and an additional output with the hard labels (softmax with temperature 1). The soft labels are
    trained by distillation from the complex network and the hard labels with the real labels from
    the dataset. Cross-entropy loss is used for both outputs, but with different, temperature-normalized
    weights.

    Small Pack: conv2d -> elu -> BN
    Big Pack: at least 1 Small Pack + maxPool + DropOut
    :param structure: list of integers. Each number indicates one "Big Pack", and the number itself
    the number of "Small Packs" in the "Big Pack". structure = [2, 2, 2] is the one used for the
    complex network
    :param filters: list of integers, number of filters in the conv2d of each Big Pack.
    filters = [32, 64, 128] for the complex network
    :param dropouts: drop-out ratio for the drop-out layer at the end of each Big Pack. For the
    complex network, dropouts = [0.2, 0.3, 0.4]
    :return: model
    """

    # Check that the input of the function has sense

    assert len(structure) == len(filters) == len(dropouts), "The lengths of the inputs don't match"
    assert min(structure) > 0, "All the Big Packs should include at least one Small Pack"
    assert min(dropouts) > 0 and max(dropouts) < 1, "Drop-outs should be between 0 and 1"

    # Definition of the network, Functional API in order to handle multiple outputs
    inp = Input(init_shape)

    for i, number_small_pack in enumerate(structure):

        for j in range(number_small_pack):
            if i + j == 0:
                model = Conv2D(filters[i], (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay))(inp)
            else:
                model = Conv2D(filters[i], (3, 3), padding='same',
                               kernel_regularizer=regularizers.l2(weight_decay))(model)
            model = Activation('elu')(model)
            model = BatchNormalization()(model)

        model = MaxPooling2D(pool_size=(2,2))(model)
        model = Dropout(dropouts[i])(model)


    model = Flatten()(model)
    dense = Dense(num_classes)(model)
    out_hard = Activation('softmax', name='out_hard')(dense)

    out_soft = Lambda(lambda x: x / temperature)(dense)
    out_soft = Activation('softmax', name='out_soft')(out_soft)

    model = Model(inputs=inp, outputs=[out_hard, out_soft])

    # handle multiple losses and weights

    losses = {
        "out_hard": "categorical_crossentropy",
        "out_soft": "categorical_crossentropy",
    }
    lossWeights = {"out_soft": temperature**2, "out_hard": weight}

    model.compile(loss=losses, loss_weights=lossWeights,
                  optimizer='adam', metrics=['accuracy'])

    return model
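
A hypothetical call sketch using the complex-network configuration mentioned in the docstring (init_shape, weight_decay, num_classes, and temperature are made-up values):

student = define_model_weighted(structure=[2, 2, 2], filters=[32, 64, 128],
                                dropouts=[0.2, 0.3, 0.4], init_shape=(32, 32, 3),
                                weight_decay=1e-4, num_classes=10,
                                weight=1.0, temperature=5)
# training would pass one label array per named output, e.g.
# student.fit(x, {'out_hard': y_true, 'out_soft': y_teacher_soft}, ...)
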
Example #9
        model.fit(X_train, y[train_idx], batch_size=cm.bs, epochs=cm.n_ep,
                  verbose=0, callbacks=[cm.custom_stopping(value=cm.loss, verbose=2)],
                  validation_data=(X_train, y[train_idx]))


        hyper_net = LatentHyperNet(n_comp=19, model=model, layers=layers, dm_method='pls')
        hyper_net.fit(X_train, y[train_idx])
        X_train = hyper_net.transform(X_train)
        X_test = hyper_net.transform(X_test)

        inp = Input((X_train.shape[1],))
        fc = Dense(n_class)(inp)
        model = Activation('softmax')(fc)
        model = Model(inp, model)

        model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='Adadelta')
        callbacks = [cm.custom_stopping(value=cm.loss, verbose=2)]

        # the drawback of the method is that it requires more iterations to
        # converge (loss <= cm.loss), hence the 4 * cm.n_ep epochs
        model.fit(X_train, y[train_idx], batch_size=len(X_train),
                  epochs=4 * cm.n_ep, verbose=0, callbacks=callbacks,
                  validation_data=(X_train, y[train_idx]))

        y_pred = model.predict(X_test)
        y_pred = np.argmax(y_pred, axis=1)

        y_true = np.argmax(y[test_idx], axis=1)

        acc_fold = accuracy_score(y_true, y_pred)
        avg_acc.append(acc_fold)

        recall_fold = recall_score(y_true, y_pred, average='macro')
Example #10
               activation='relu',
               kernel_initializer='he_normal')(model)
model = Conv2D(1, (1, 1), padding='same',
               kernel_initializer='he_normal')(model)
res_img = model

#Skip Connection
output_img = add([res_img, input_img])

model = Model(input_img, output_img)
#model.load_weights('./Saved Models/sr_PBNet_model_with200epoch')

adam = Adam(lr=0.001, decay=1e-4)
sgd = SGD(lr=1e-5, momentum=0.9, decay=1e-4, nesterov=False)
#custom_loss = mae_mssim_loss(alpha=0.8)
model.compile(adam, loss='mae', metrics=[PSNR, SSIM])
model.summary()
filepath = "./saved_weights/weights-improvement-{epoch:02d}-{val_PSNR:.2f}.h5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_PSNR',
                             verbose=1,
                             save_best_only=True,
                             save_weights_only=True,
                             mode='max')
lrate = LearningRateScheduler(step_decay)
callbacks_list = [checkpoint, lrate]

print("Started training")
history = model.fit_generator(image_gen(train_list),
                              steps_per_epoch=len(train_list) // BATCH_SIZE,
                              validation_data=image_gen(test_list),
                              validation_steps=len(test_list) // BATCH_SIZE,
                              epochs=EPOCHS, workers=32,
                              callbacks=callbacks_list, verbose=1)
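
The step_decay function handed to LearningRateScheduler above is a project helper that is not part of this snippet; a common step-decay sketch (all constants are assumptions) is:

import math

def step_decay(epoch):
    # assumed schedule: start at 1e-3 and halve the learning rate every 20 epochs
    initial_lr, drop, epochs_drop = 1e-3, 0.5, 20.0
    return initial_lr * math.pow(drop, math.floor(epoch / epochs_drop))
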
Example #11
def model_train(img_size, batch_size, epochs, optimizer, learning_rate, train_list, validation_list, style=2):

    print('Style {}.'.format(style))

    if style == 1:
        input_img = Input(shape=img_size)

        #model = Sequential()

        # 20 convolution layers in total: the first conv, 18 hidden convs, and a
        # final single-channel conv that predicts the residual image
        model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(input_img)
        model = Activation('relu')(model)
        for _ in range(18):
            model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
            model = Activation('relu')(model)
        model = Conv2D(1, (3, 3), padding='same', kernel_initializer='he_normal')(model)
        res_img = model

        output_img = merge.Add()([res_img, input_img])

        model = Model(input_img, output_img)

        #model.load_weights('vdsr_model_edges.h5')

        adam = Adam(lr=0.000005)
        sgd = SGD(lr=0.01, momentum=0.9, decay=0.001, nesterov=False)
        model.compile(adam, loss='mse', metrics=[ssim, ssim_metric, PSNR, "accuracy"])

        model.summary()

    else:

        input_img = Input(shape=img_size)

        model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', use_bias=False)(input_img)
        model = BatchNormalization()(model)
        model_0 = Activation('relu')(model)

        total_conv = 22  # should be an even number
        total_conv -= 2  # subtract the first and last convolutions
        residual_block_num = 5  # number of residual blocks; should divide the remaining convs evenly

        for _ in range(residual_block_num):  # residual block
            model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', use_bias=False)(model_0)
            model = BatchNormalization()(model)
            model = Activation('relu')(model)
            print(_)
            for _ in range(int(total_conv/residual_block_num)-1):
                model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', use_bias=False)(model)
                model = BatchNormalization()(model)
                model = Activation('relu')(model)
                model_0 = add([model, model_0])
                print(_)

        model = Conv2DTranspose(64, (5, 5), padding='valid', kernel_initializer='he_normal', use_bias=False)(model)
        model = BatchNormalization()(model)
        model = LeakyReLU()(model)
        model = Conv2D(1, (5, 5), padding='valid', kernel_initializer='he_normal')(model)
        
        res_img = model

        #input_img1 = crop(1,22,-22)(input_img)
        #input_img1 = crop(2,22,-22)(input_img1)

        print(input_img.shape)
        output_img = merge.Add()([res_img, input_img])
        # output_img = res_img
        model = Model(input_img, output_img)

        # model.load_weights('./vdsr_model_edges.h5')
        # adam = Adam(lr=learning_rate)
        adam = Adadelta()
        sgd = SGD(lr=learning_rate, momentum=0.9, decay=1e-4, nesterov=False, clipnorm=1)
        if optimizer == 0:
            model.compile(adam, loss='mse', metrics=[ssim, ssim_metric, PSNR])
        else:
            model.compile(sgd, loss='mse', metrics=[ssim, ssim_metric, PSNR])


        model.summary()

    mycallback = MyCallback(model)
    timestamp = time.strftime("%m%d-%H%M", time.localtime(time.time()))
    csv_logger = callbacks.CSVLogger('data/callbacks/deconv/training_{}.log'.format(timestamp))
    filepath = "./checkpoints/deconv/weights-improvement-{epoch:03d}-{PSNR:.2f}-{ssim:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='PSNR', verbose=1, mode='max')
    callbacks_list = [mycallback, checkpoint, csv_logger]

    with open('./model/deconv/vdsr_architecture.json', 'w') as f:
        f.write(model.to_json())

    history = model.fit_generator(image_gen(train_list, batch_size=batch_size), 
                        steps_per_epoch=(409600//8)*len(train_list) // batch_size,
                        validation_data=image_gen(validation_list,batch_size=batch_size),
                        validation_steps=(409600//8)*len(validation_list) // batch_size,
                        epochs=epochs,
                        workers=1024,
                        callbacks=callbacks_list,
                        verbose=1)

    print("Done training!!!")

    print("Saving the final model ...")

    model.save('vdsr_model.h5')  # creates an HDF5 file
    del model  # deletes the existing model


    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'validation'], loc='upper left')
    # plt.show()
    plt.savefig('loss.png')

    plt.plot(history.history['PSNR'])
    plt.plot(history.history['val_PSNR'])
    plt.title('Model PSNR')
    plt.ylabel('PSNR')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'validation'], loc='upper left')
    # plt.show()
    plt.savefig('PSNR.png')
Example #12
model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
model = Activation('relu')(model)
model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(model)
model = Activation('relu')(model)
model = Conv2D(1, (3, 3), padding='same', kernel_initializer='he_normal')(model)
res_img = model

output_img = add([res_img, input_img])

model = Model(input_img, output_img)

# model.load_weights('./checkpoints/weights-improvement-20-26.93.hdf5')

adam = Adam(lr=0.00001)
sgd = SGD(lr=1e-5, momentum=0.9, decay=1e-4, nesterov=False)
model.compile(adam, loss='mse', metrics=[PSNR, "accuracy"])

model.summary()

filepath = "./checkpoints/weights-improvement-{epoch:02d}-{PSNR:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='PSNR', verbose=1, mode='max')
callbacks_list = [checkpoint]

model.fit_generator(image_gen(train_list),
                    steps_per_epoch=len(train_list) // BATCH_SIZE,
                    validation_data=image_gen(test_list),
                    validation_steps=len(test_list) // BATCH_SIZE,
                    epochs=EPOCHS, workers=8, callbacks=callbacks_list)

print("Done training!!!")

print("Saving the final model ...")