def main(id):

    img_path = '/media/lq/C13E-1ED0/dataset/UCF_Crimes/Imgs/RoadAccidents/'
    weights_dir = '/home/lq/Documents/Thesis/Thesis/weight'
    model_weight_filename = os.path.join(weights_dir, 'sports1M_weights_tf.h5')
    # model_weight_filename = '/media/lq/C13E-1ED0/dataset/UCF_Crimes/results/adam_temporal_1m/weights/weights.16.hdf5'
    N_classes = 2
    batch_size = 16
    epochs = 32
    input_shape = (16, 120, 160, 3)  # (frames, height, width, channels)

    windows_length = 16  # frames per clip; matches input_shape[0]

    model = c3d_temportal_model(input_shape, N_classes)

    def my_l2(y_true, y_pred):
        # auxiliary loss on the second output: plain L2 norm of the predictions
        # (y_true is ignored; tf.nn.l2_loss already returns a scalar, so no
        # extra reduction is needed)
        return tf.nn.l2_loss(y_pred)

    # Learning-rate multipliers: keep the pretrained layers at the base rate
    # and learn the new fc8 head 10x faster.
    LR_mult_dict = {
        'conv1': 1, 'conv2': 1, 'conv3a': 1, 'conv3b': 1,
        'conv4a': 1, 'conv4b': 1, 'conv5a': 1, 'conv5b': 1,
        'fc6': 1, 'fc7': 1, 'fc8': 10,
    }

    # Set up the optimizer. Note: `multipliers` is not an argument of the
    # stock Keras Adam; this relies on a customized Adam that applies the
    # per-layer learning-rate multipliers above.
    base_lr = 0.000005
    adam = Adam(lr=base_lr, decay=0.00005, multipliers=LR_mult_dict)
    # sgd = SGD(lr=base_lr, decay=0.00005, multipliers=LR_mult_dict)
    opt = adam
    loss_weights = [1, 0.001]  # main loss vs. auxiliary L2 term

    # Freeze the early convolutional blocks; conv5a/b and the fc layers stay
    # trainable.
    convLayers = ['conv1', 'conv2', 'conv3a', 'conv3b', 'conv4a',
                  'conv4b']  # , 'conv5a', 'conv5b'
    for layer in convLayers:
        model.get_layer(layer).trainable = False
    model.compile(loss=['categorical_crossentropy', my_l2],
                  loss_weights=loss_weights,
                  optimizer=opt,
                  metrics=['accuracy'])
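    # NOTE: the model has two outputs, so `loss` is a list: categorical
    # cross-entropy on the class scores plus the auxiliary L2 term defined
    # above, combined according to loss_weights.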
    print('loading weights: {}'.format(model_weight_filename))

    model.load_weights(model_weight_filename,
                       by_name=True,
                       skip_mismatch=True,
                       reshape=True)
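    # by_name + skip_mismatch: layers whose names and shapes match the
    # Sports-1M checkpoint are restored; mismatched layers (such as the new
    # two-class head) keep their fresh initialization.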
    model.summary()

    from dataUtil import load_train_data, load_val_data
    train_AS_windows, train_A_windows, train_BG_windows = load_train_data()

    N_train_samples = len(train_AS_windows) * 2  # half AS, half non-AS
    N_train_iterations = N_train_samples // batch_size

    val_AS_windows, val_A_windows, val_BG_windows = load_val_data()

    N_val_samples = len(val_A_windows) * 2  # validation uses A/BG windows only
    # N_val_samples = len(val_AS_windows) << 1
    N_val_iterations = N_val_samples // batch_size
    # ####################################
    print("--#train AS windows: " + str(len(train_AS_windows)) +
          " #train A windows: " + str(len(train_A_windows)) +
          " #train BG windows: " + str(len(train_BG_windows)))

    # ##################################

    result_dir = '/media/lq/C13E-1ED0/dataset/UCF_Crimes/results/adam_temporal_{}/'.format(id)
    best_weight_dir = result_dir + 'weights'
    best_weight_name = best_weight_dir + '/weights.{epoch:02d}.hdf5'
    if not os.path.isdir(best_weight_dir):
        os.makedirs(best_weight_dir)  # also creates result_dir, its parent
    desp = result_dir + 'desp.txt'  # run-description file for later reference
    with open(desp, 'w') as f:
        f.write('batch_size: {}\nbase_lr: {}\ntrain_samples: {}\n'.format(
            batch_size, base_lr, N_train_samples))
        f.write('loss_weights: {}\n'.format(loss_weights))
        f.write('init_weight: {}\n'.format(model_weight_filename))
        f.write('batch_generator: {}\n'.format(train_batch_generator))
    # callbacks
    csv_logger = CSVLogger(result_dir + '/log.csv', separator=',')
    checkpointer = ModelCheckpoint(filepath=best_weight_name,
                                   verbose=1,
                                   save_best_only=False,
                                   save_weights_only=True)
    # NAME = "THUMOS-{}".format(int(time.time()))
    log_dir = os.path.join(result_dir, 'log')
    tbCallBack = callbacks.TensorBoard(log_dir=log_dir,
                                       batch_size=batch_size,
                                       histogram_freq=0,
                                       write_graph=True,
                                       write_images=True)

    train_generator = train_batch_generator(train_AS_windows, train_A_windows,
                                            train_BG_windows, windows_length,
                                            batch_size, N_train_iterations,
                                            N_classes, img_path)
    # validation draws no AS windows (empty list)
    val_generator = val_batch_generator([], val_A_windows, val_BG_windows,
                                        windows_length, batch_size,
                                        N_val_iterations, N_classes, img_path)

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=N_train_iterations,
        epochs=epochs,
        callbacks=[csv_logger, tbCallBack, checkpointer],
        validation_data=val_generator,
        validation_steps=N_val_iterations,
        verbose=1)

    plot_history(history, result_dir)
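
# `plot_history` is imported elsewhere in the original project and its definition
# is not shown here. The sketch below is an assumption of what it does, inferred
# from the call above: dump the Keras History curves into the result directory.
# The function name, metric keys, and file names are all guesses.
def plot_history_sketch(history, result_dir):
    import os
    import matplotlib
    matplotlib.use('Agg')  # headless backend for training servers
    import matplotlib.pyplot as plt

    for metric in ('loss', 'acc'):
        if metric not in history.history:
            continue  # with multi-output models the keys are per-output names
        plt.figure()
        plt.plot(history.history[metric], label='train')
        if 'val_' + metric in history.history:
            plt.plot(history.history['val_' + metric], label='val')
        plt.xlabel('epoch')
        plt.ylabel(metric)
        plt.legend()
        plt.savefig(os.path.join(result_dir, 'model_{}.png'.format(metric)))
        plt.close()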
Example #2
    # note: `save_it` uses a tuple to avoid the mutable-default pitfall
    def train(self, iterations=5000, batch_size=16, save_it=(2500,), id=0):
        # load the real training windows
        train_AS_windows, train_A_windows, train_BG_windows = load_train_data()

        # real-clip generator for the discriminator (AS + non-AS; the name
        # suggests an AS:A:BG mix of 2:1:1)
        disc_batch_generator = batch_generator_AS_A_BG_2_1_1(
            AS_windows=train_AS_windows,
            A_windows=train_A_windows,
            BG_windows=train_BG_windows,
            windows_length=self.length,
            batch_size=batch_size,
            N_iterations=iterations,
            N_classes=self.n_classes + 1,
            img_path=self.image_path)
        # real-clip generator for the generator step (AS windows only)
        gen_batch_generator = batch_generator_AS(AS_windows=train_AS_windows,
                                                 windows_length=self.length,
                                                 batch_size=batch_size,
                                                 N_iterations=iterations,
                                                 N_classes=self.n_classes + 1,
                                                 img_path=self.image_path)

        # output directories and run description
        result_dir = '/media/lq/C13E-1ED0/dataset/UCF_Crimes/results/gan_{}/'.format(id)
        weight_dir = os.path.join(result_dir, 'weights')
        if not os.path.isdir(weight_dir):
            os.makedirs(weight_dir)  # also creates result_dir, its parent
        log_dir = os.path.join(result_dir, 'logs')
        desp = os.path.join(result_dir, 'desp.txt')
        with open(desp, 'w') as f:
            f.write("c3d weights: {}\n".format(self.c3d_weights))
            f.write("disc_optimizer: {}\n".format(
                self.disc_optimizer.get_config()))
            f.write("gen_optimizer: {}\n".format(
                self.gen_optimizer.get_config()))
        callback = callbacks.TensorBoard(log_dir=log_dir,
                                         batch_size=batch_size,
                                         histogram_freq=0,
                                         write_graph=True,
                                         write_images=True)
        callback.set_model(self.GAN)
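        # set_model attaches the GAN so the callback creates its summary
        # writer; write_log (sketched after this method) reuses that writer
        # for the manual per-iteration scalars below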
        # loss_names = ['disc_train_loss_real', 'disc_train_acc_real', 'disc_train_loss_fake', 'disc_train_acc_fake', 'gen_train_loss']
        loss_names = ['disc_train_loss', 'disc_train_acc', 'gen_train_loss']
        for cnt in tqdm(range(iterations)):
            # ---- discriminator update ----
            #Sample random points in the latent space
            random_latent_vectors = np.random.standard_normal(
                size=(batch_size // 2, self.latent_dim))
            #Decode them to fake images
            generated_features = self.G.predict(random_latent_vectors)

            #real images
            real_images, real_labels = next(disc_batch_generator)
            real_features = self.fixed_c3d.predict(real_images)

            # class indices: 0 => BG, 1-20 => actions, n_classes (=21) => fake
            fake_labels = np.ones(batch_size // 2) * self.n_classes
            fake_labels = np_utils.to_categorical(fake_labels,
                                                  self.n_classes + 1)

            combined_features = np.concatenate(
                [generated_features, real_features])
            labels = np.concatenate([fake_labels, real_labels])

            # Add random noise to the labels - important trick!
            # labels += 0.05 * np.random.random(labels.shape)
            d_loss, d_acc = self.D.train_on_batch(combined_features, labels)
            # d_loss_real, d_acc_real = self.D.train_on_batch(real_features, real_labels)
            # d_loss_fake, d_acc_fake = self.D.train_on_batch(generated_features, fake_labels)
            # ---- generator update (via the GAN model, where the
            # discriminator weights are frozen) ----
            random_latent_vectors = np.random.standard_normal(
                size=(batch_size, self.latent_dim))

            real_AS_images, real_AS_labels = next(gen_batch_generator)
            real_AS_features = self.fixed_c3d.predict(real_AS_images)

            g_loss = self.GAN.train_on_batch(
                [random_latent_vectors, real_AS_features],
                [real_AS_labels])  #the labels are not used

            #tensorboard log
            # logs = [d_loss_real, d_acc_real, d_loss_fake, d_acc_fake, g_loss]
            logs = [d_loss, d_acc, g_loss]
            write_log(callback, loss_names, logs, cnt)
            if cnt in save_it:
                self.save_weights(weight_dir, cnt)
            tqdm.write(
                'iteration: {}, [Discriminator :: d_loss: {}, d_acc: {}], [ Generator :: loss: {}]'
                .format(cnt, d_loss, d_acc, g_loss))
        self.save_weights(weight_dir, iterations)
        print('done')
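
# `write_log` is imported elsewhere in the original project and its definition is
# not shown here. The sketch below is the standard TF 1.x-era pattern for manual
# scalar logging through a Keras TensorBoard callback, which matches the call
# signature above; treat it as an assumption, not the original code.
import tensorflow as tf

def write_log_sketch(callback, names, logs, batch_no):
    # one scalar summary per (name, value) pair, written at step `batch_no`
    for name, value in zip(names, logs):
        summary = tf.Summary()
        summary_value = summary.value.add()
        summary_value.simple_value = value
        summary_value.tag = name
        callback.writer.add_summary(summary, batch_no)
    callback.writer.flush()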
def main(id):
    from data import videoPaths as path
    img_path = path.VALIDATION_IMAGES_PATH
    weights_dir = '/home/lq/Documents/Thesis/Thesis/weight'
    model_weight_filename = os.path.join(weights_dir, 'sports1M_weights_tf.h5')

    N_classes = 20 + 1  # 20 action classes + background
    batch_size = 24
    epochs = 16
    # input_shape = (16,112,112,3)
    windows_length = 16

    model = c3d_model.get_model()

    # Learning-rate multipliers: keep the pretrained layers at the base rate
    # and learn the new fc8 head 10x faster.
    LR_mult_dict = {
        'conv1': 1, 'conv2': 1, 'conv3a': 1, 'conv3b': 1,
        'conv4a': 1, 'conv4b': 1, 'conv5a': 1, 'conv5b': 1,
        'fc6': 1, 'fc7': 1, 'fc8': 10,
    }

    # Set up the optimizer (customized Adam with per-layer learning-rate
    # multipliers; the stock Keras Adam has no `multipliers` argument).
    base_lr = 0.00001
    adam = Adam(lr=base_lr, decay=0.00005, multipliers=LR_mult_dict)
    opt = adam
    # convLayers = ['conv1','conv2','conv3a','conv3b','conv4a']
    # for layer in convLayers:
    #     model.get_layer(layer).trainable = False
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    model.load_weights(model_weight_filename,
                       by_name=True,
                       skip_mismatch=True,
                       reshape=True)
    ######################
    # debug: inspect the loaded weights
    # for layer in model.layers:
    #     weights = layer.get_weights()
    #     print(layer)
    #
    # fc8 = model.get_layer(name='fc8')
    # for w in fc8.get_weights():
    #     print(w.shape)
    #     print(w[10])
    # return 0
    #####################

    model.summary()

    from dataUtil import load_train_data, load_val_data
    train_AS_windows, train_A_windows, train_BG_windows = load_train_data()

    # N_A_samples = len(train_A_windows)
    # N_batch_groups = N_A_samples // (batch_size//2 + batch_size//4)
    # N_train_iterations = N_batch_groups * 2

    N_train_samples = len(train_AS_windows) * 2  # half AS, half non-AS
    N_train_iterations = N_train_samples // batch_size

    val_AS_windows, val_A_windows, val_BG_windows = load_val_data()

    N_val_samples = len(val_AS_windows) * 2
    # N_val_samples = len(val_AS_windows) << 1
    N_val_iterations = N_val_samples // batch_size
    # ####################################
    print("--#train AS windows: " + str(len(train_AS_windows)) +
          " #train A windows: " + str(len(train_A_windows)) +
          " #train BG windows: " + str(len(train_BG_windows)))
    print("-N_val_samples:" + str(N_val_samples) + "\n--#val AS windows: " +
          str(len(val_AS_windows)) + " #val A windows: " +
          str(len(val_A_windows)) + " #val BG windows: " +
          str(len(val_BG_windows)))
    # ##################################

    result_dir = './results/adam_c3d_{}/'.format(id)
    best_weight_dir = result_dir + 'weights'
    best_weight_name = best_weight_dir + '/weights.{epoch:02d}-{val_loss:.3f}.hdf5'
    if not os.path.isdir(best_weight_dir):
        os.makedirs(best_weight_dir)  # also creates result_dir, its parent
    desp = result_dir + 'desp.txt'  # run-description file for later reference
    with open(desp, 'w') as f:
        f.write('batch_size: {}\nbase_lr: {}\ntrain_samples: {}\nval_samples: {}\n'
                .format(batch_size, base_lr, N_train_samples, N_val_samples))
        f.write('init_weight: {}\n'.format(model_weight_filename))
        f.write('batch_generator: {}\n'.format(train_batch_generator))

    # callbacks
    csv_logger = CSVLogger(result_dir + '/log.csv', separator=',')
    checkpointer = ModelCheckpoint(filepath=best_weight_name,
                                   verbose=1,
                                   save_best_only=False,
                                   save_weights_only=True)
    # NAME = "THUMOS-{}".format(int(time.time()))
    log_dir = os.path.join(result_dir, 'log')
    tbCallBack = callbacks.TensorBoard(log_dir=log_dir,
                                       histogram_freq=0,
                                       write_graph=True,
                                       write_images=True)
    train_generator = train_batch_generator(train_AS_windows, train_A_windows,
                                            train_BG_windows, windows_length,
                                            batch_size, N_train_iterations,
                                            N_classes, img_path)
    val_generator = val_batch_generator(val_AS_windows, val_A_windows,
                                        val_BG_windows, windows_length,
                                        batch_size, N_val_iterations,
                                        N_classes, img_path)

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=N_train_iterations,
        epochs=epochs,
        callbacks=[csv_logger, tbCallBack, checkpointer],
        validation_data=val_generator,
        validation_steps=N_val_iterations,
        verbose=1)

    plot_history(history, result_dir)
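
# The `train_batch_generator` / `val_batch_generator` helpers used by both
# training scripts are imported from elsewhere and never shown. The sketch below
# is one plausible reading of their contract, inferred purely from the call
# sites: an endless generator yielding (clips, one-hot labels), mixing AS and
# non-AS windows half-and-half. Frame decoding is stubbed with random arrays,
# and every name and field access here (e.g. `window[-1]` as the label) is an
# assumption, not the original implementation.
import random
import numpy as np
from keras.utils import np_utils


def train_batch_generator_sketch(AS_windows, A_windows, BG_windows,
                                 windows_length, batch_size, N_iterations,
                                 N_classes, img_path):
    non_AS_windows = A_windows + BG_windows
    while True:  # fit_generator expects the generator to loop forever
        clips, labels = [], []
        for i in range(batch_size):
            # 50/50 mix of AS and non-AS windows per batch
            window = random.choice(AS_windows if i % 2 == 0 else non_AS_windows)
            # the real code would decode `windows_length` frames from img_path;
            # a random tensor of the model's input shape stands in for the clip
            clips.append(np.random.rand(windows_length, 112, 112, 3))
            labels.append(window[-1])  # assumed: last field is the class index
        yield (np.asarray(clips, dtype=np.float32),
               np_utils.to_categorical(labels, N_classes))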