def test():
    with tf.Session() as sess:
        model = conv_lstm_deconv_model()
        model.build_model()
        init = tf.global_variables_initializer()
        sess.run(init)

        log_directory_creation(sess)

        # data read iterator
        data = datasets(batch_size=model.batch_size,
                        height=heigth,
                        width=width)

        global_step = 0
        for X_batch, y_batch, filenames in data.test_next_batch():
            # print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
            if not is_correct_batch_shape(X_batch, y_batch, model, "test"):
                # global step not increased !
                continue

            # autoregressive rollout: seed with the first frame only, then
            # feed each predicted frame back in as the next input
            input_data = np.zeros_like(X_batch)
            input_data[:, 0] = X_batch[:, 0]

            for i in range(model.timesteps):
                output_predicted = sess.run(
                    model.model_output, feed_dict={model.inputs: input_data})
                if i < model.timesteps - 1:
                    input_data[:, i + 1] = output_predicted[:, i]
                    print("global step ", global_step, " time step ", i)
            data.frame_ext.generate_output_video(output_predicted, filenames)

            global_step += 1
            print("test step ", global_step)
Example #2
def test_wrapper():
    with tf.Session() as sess:
        model = seq2seq_model()

        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)

        # clear logs !
        log_directory_creation(sess)

        batch_size, heigth, width = model.batch_size, model.H, model.W
        enc_timesteps = model.enc_timesteps
        dec_timesteps = model.dec_timesteps
        timesteps = model.timesteps

        data = datasets(batch_size=batch_size,
                        height=heigth,
                        width=width,
                        custom_test_size=custom_test_size,
                        time_frame=timesteps + 1,
                        interval=interval)

        test_count_iter = 0
        test_writer = tf.summary.FileWriter(log_dir_file_path + "test",
                                            sess.graph)
        test_count_iter = test(sess,
                               model,
                               data,
                               test_writer,
                               test_count_iter,
                               is_store_output=True)
        test_writer.close()
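
# log_directory_creation is not defined in these snippets either. Judging by
# the "clear logs !" comments, a plausible minimal version just recreates the
# log directory so each run starts with empty TensorBoard logs; the sess
# argument may serve saver setup in the original, it is unused here:
import os
import shutil

def log_directory_creation(sess):
    if os.path.exists(log_dir_file_path):
        shutil.rmtree(log_dir_file_path)
    os.makedirs(log_dir_file_path)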
Example #3
def test_wrapper():
    with tf.Session() as sess:
        model = SkipAutoEncoder(heigth_train, width_train, heigth_test,
                                width_test)

        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)

        # clear logs !
        log_directory_creation(sess)

        # summary !
        test_writer = tf.summary.FileWriter(log_dir_file_path + "test",
                                            sess.graph)

        global_step = 0
        test_count_iter = 0
        val_loss_seen = float("inf")

        # data read iterator
        data = datasets(batch_size=batch_size,
                        height=heigth,
                        width=width,
                        custom_test_size=custom_test_size,
                        time_frame=timesteps,
                        interval=interval)

        test_count_iter = test(sess,
                               model,
                               data,
                               test_writer,
                               test_count_iter,
                               is_store_output=True)
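
# save_model_session is assumed to wrap the standard TF1 tf.train.Saver API;
# callers pass a prefix plus a model name (e.g. iterations + "seq2seq_model").
# checkpoint_path is a hypothetical global for where checkpoints live:
def save_model_session(sess, file_name):
    saver = tf.train.Saver()
    saver.save(sess, checkpoint_path + file_name)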
Example #4
def test_wrapper():
    with tf.Session() as sess:
        disc_model = Discriminator(heigth, width,
                                   disc_scale_level_feature_maps,
                                   disc_scale_level_kernel_size,
                                   disc_fc_layer_units)
        gen_model = GenerativeNetwork(heigth_train, width_train, heigth_test,
                                      width_test, scale_level_feature_maps,
                                      scale_level_kernel_size)

        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)

        # clear logs !
        log_directory_creation(sess)

        # summary !
        gen_train_writer = tf.summary.FileWriter(
            log_dir_file_path + "gen_train", sess.graph)
        des_train_writer = tf.summary.FileWriter(
            log_dir_file_path + "des_train", sess.graph)
        test_writer = tf.summary.FileWriter(log_dir_file_path + "test",
                                            sess.graph)
        val_writer = tf.summary.FileWriter(log_dir_file_path + "val",
                                           sess.graph)

        global_step = 0
        gen_count_iter = 0
        des_count_iter = 0
        val_count_iter = 0
        test_count_iter = 0
        val_loss_seen = float("inf")

        # data read iterator
        data = datasets(batch_size=batch_size,
                        height=heigth,
                        width=width,
                        custom_test_size=custom_test_size,
                        time_frame=timesteps,
                        interval=interval)

        test_count_iter = test(sess,
                               gen_model,
                               data,
                               test_writer,
                               test_count_iter,
                               is_store_output=True)
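
# The shared test(...) helper is reconstructed here from its call sites: it
# iterates the test split, logs summaries, optionally stores rendered output,
# and returns the updated counter. model.test_summary_merged is a
# hypothetical attribute name; the rest mirrors the examples above:
def test(sess, model, data, test_writer, test_count_iter,
         is_store_output=False):
    for X_batch, y_batch, filenames in data.test_next_batch():
        summary, output = sess.run(
            [model.test_summary_merged, model.model_output],
            feed_dict={model.inputs: X_batch, model.outputs_exp: y_batch})
        test_writer.add_summary(summary, test_count_iter)
        test_count_iter += 1
        if is_store_output:
            data.frame_ext.generate_output_video(output, filenames)
    return test_count_iter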
Example #5
def test_aslan_data(aslan_log):
    d = bg.datasets()
    with open('../../all_videos.txt') as f:
        videos = [line.rstrip('\n') for line in f]

    # these were previously used before assignment; initialize here
    max_n, min_n = 0, float("inf")
    for video in videos:
        if 'aslan' in video.lower():
            try:
                videodata = skvideo.io.vread(video)
                n = videodata.shape[0]
                print("Checking video {} with frames {}".format(video, n))
                max_n = max(max_n, n)
                min_n = min(min_n, n)
                if n < 35:
                    aslan_log.write(video + '\n')

            except Exception:
                # unreadable video: log it as well
                aslan_log.write(video + '\n')
Example #6
def train():
    global best_l2_loss
    with tf.Session() as sess:
        # conv lstm model
        model = conv_lstm_deconv_model()
        model.build_model()
        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)

        # clear logs !
        log_directory_creation(sess)

        # Tensorflow Summary
        tf.summary.scalar("train_l2_loss", model.l2_loss)
        summary_merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(log_dir_file_path + "/train",
                                             sess.graph)
        test_writer = tf.summary.FileWriter(log_dir_file_path + "/test",
                                            sess.graph)
        global_step = 0

        while True:
            try:
                # data read iterator
                data = datasets(batch_size=model.batch_size,
                                height=heigth,
                                width=width)

                for X_batch, y_batch, _ in data.train_next_batch():
                    # print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
                    if not is_correct_batch_shape(X_batch, y_batch, model,
                                                  "train"):
                        # global step not increased !
                        continue
                    _, summary = sess.run([model.optimizer, summary_merged],
                                          feed_dict={
                                              model.inputs: X_batch,
                                              model.outputs_exp: y_batch
                                          })
                    # print ("summary ... ",global_step)
                    train_writer.add_summary(summary, global_step)

                    if global_step % checkpoint_iterations == 0:
                        save_model_session(
                            sess, iterations + "conv_lstm_deconv_model")

                    if global_step % best_model_iterations == 0:
                        val_l2_loss_history = list()
                        batch_counter = 0
                        # iterate on validation batch ...
                        for X_val, y_val, _ in data.val_next_batch():
                            batch_counter += 1
                            # print ("X_val", X_val.shape, "y_val", y_val.shape)
                            if not is_correct_batch_shape(
                                    X_val, y_val, model,
                                    "val_" + str(batch_counter)):
                                continue
                            test_summary, val_l2_loss = sess.run(
                                [summary_merged, model.l2_loss],
                                feed_dict={
                                    model.inputs: X_val,
                                    model.outputs_exp: y_val
                                })
                            test_writer.add_summary(test_summary, global_step)
                            val_l2_loss_history.append(val_l2_loss)
                        # average validation loss; guard against an empty
                        # history when every batch was skipped
                        if val_l2_loss_history:
                            temp_loss = sum(val_l2_loss_history) * 1.0 / len(
                                val_l2_loss_history)

                            # save if better !
                            if best_l2_loss > temp_loss:
                                best_l2_loss = temp_loss
                                save_model_session(
                                    sess, best + "conv_lstm_deconv_model")

                    print("Iteration ", global_step, " best_l2_loss ",
                          best_l2_loss)
                    global_step += 1
            except Exception:
                pass  # ignore problems and continue looping ...

        train_writer.close()
        test_writer.close()
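
# Loading the "best" checkpoint saved above is the mirror of
# save_model_session; a minimal sketch using the same assumed
# checkpoint_path global and the standard TF1 Saver API:
def restore_model_session(sess, file_name):
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint_path + file_name)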
Example #7
def train():
    global best_loss
    with tf.Session() as sess:

        disc_model = Discriminator(heigth, width,
                                   disc_scale_level_feature_maps,
                                   disc_scale_level_kernel_size,
                                   disc_fc_layer_units)
        gen_model = GenerativeNetwork(heigth_train, width_train, heigth_test,
                                      width_test, scale_level_feature_maps,
                                      scale_level_kernel_size)

        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)

        # clear logs !
        log_directory_creation(sess)

        # summary !
        gen_train_writer = tf.summary.FileWriter(
            log_dir_file_path + "gen_train", sess.graph)
        des_train_writer = tf.summary.FileWriter(
            log_dir_file_path + "des_train", sess.graph)
        test_writer = tf.summary.FileWriter(log_dir_file_path + "test",
                                            sess.graph)
        val_writer = tf.summary.FileWriter(log_dir_file_path + "val",
                                           sess.graph)

        global_step = 0
        gen_count_iter = 0
        des_count_iter = 0
        val_count_iter = 0
        test_count_iter = 0
        val_loss_seen = float("inf")

        while True:
            try:
                # data read iterator
                data = datasets(batch_size=batch_size,
                                height=heigth,
                                width=width,
                                custom_test_size=custom_test_size,
                                time_frame=timesteps,
                                interval=interval)

                for X_batch, y_batch, _ in data.train_next_batch():
                    # print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
                    if not is_correct_batch_shape(X_batch, y_batch, "train"):
                        # global step not increased !
                        continue
                    for each_timesteps in range(
                            time_frames_to_consider,
                            timesteps - time_frames_to_consider):

                        input_train = X_batch[:, each_timesteps -
                                              time_frames_to_consider:
                                              each_timesteps, :, :, :]
                        input_train = images_to_channels(input_train)

                        output_train = X_batch[:, each_timesteps, :, :, :]

                        disc_summary_real, disc_summary_fake, gen_summary = alternate_disc_gen_training(
                            sess, disc_model, gen_model, input_train,
                            output_train)

                        gen_train_writer.add_summary(gen_summary,
                                                     gen_count_iter)
                        gen_count_iter += 1
                        des_train_writer.add_summary(disc_summary_real,
                                                     des_count_iter)
                        des_count_iter += 1
                        des_train_writer.add_summary(disc_summary_fake,
                                                     des_count_iter)
                        des_count_iter += 1

                    if global_step % checkpoint_iterations == 0:
                        save_model_session(sess, iterations + "gan_model")

                    if global_step % best_model_iterations == 0:
                        val_count_iter, curr_loss = validation(
                            sess, gen_model, data, val_writer, val_count_iter)
                        if curr_loss < val_loss_seen:
                            val_loss_seen = curr_loss
                            save_model_session(sess, best + "gan_model")

                    if global_step % test_model_iterations == 0:
                        test_count_iter = test(sess, gen_model, data,
                                               test_writer, test_count_iter)

                    print("Iteration ", global_step, " best_loss ",
                          val_loss_seen)
                    global_step += 1

            except Exception:
                print("error occurred ... skipping ... !")

        gen_train_writer.close()
        des_train_writer.close()
        test_writer.close()
        val_writer.close()
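
# images_to_channels is not shown. From its use above, it flattens a window
# of time_frames_to_consider consecutive frames into the channel axis so the
# generator/discriminator consume a single image. A minimal NumPy sketch,
# assuming (batch, time, height, width, channels) input:
def images_to_channels(batch):
    b, t, h, w, c = batch.shape
    # (b, t, h, w, c) -> (b, h, w, t, c) -> (b, h, w, t * c)
    return batch.transpose(0, 2, 3, 1, 4).reshape(b, h, w, t * c)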
Example #8
def train():
    global best_gdl_l2_loss
    with tf.Session() as sess:
        # conv lstm model
        model = seq2seq_model()
        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)

        # clear logs !
        log_directory_creation(sess)

        # Tensorflow Summary
        train_writer = tf.summary.FileWriter(log_dir_file_path + "train",
                                             sess.graph)
        test_writer = tf.summary.FileWriter(log_dir_file_path + "test",
                                            sess.graph)
        val_writer = tf.summary.FileWriter(log_dir_file_path + "val",
                                           sess.graph)

        global_step = 0
        train_count_iter = 0
        val_count_iter = 0
        test_count_iter = 0
        val_loss_seen = float("inf")

        batch_size, heigth, width = model.batch_size, model.H, model.W
        enc_timesteps = model.enc_timesteps
        dec_timesteps = model.dec_timesteps
        timesteps = model.timesteps

        while True:
            try:
                # data read iterator
                # add one because the output predicts one frame ahead of the input
                data = datasets(batch_size=batch_size,
                                height=heigth,
                                width=width,
                                custom_test_size=custom_test_size,
                                time_frame=timesteps + 1,
                                interval=interval)

                for X_batch, y_batch, _ in data.train_next_batch():
                    # print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
                    if not is_correct_batch_shape(X_batch, y_batch, model,
                                                  model.H, model.W, "train"):
                        # global step not increased !
                        continue

                    input_data = X_batch[:, :timesteps]
                    outputs_exp = X_batch[:, -dec_timesteps:]
                    _, train_summary_merged = sess.run(
                        [model.optimizer, model.train_summary_merged],
                        feed_dict={
                            model.inputs: input_data,
                            model.outputs_exp: outputs_exp
                        })

                    train_writer.add_summary(train_summary_merged,
                                             train_count_iter)
                    train_count_iter += 1

                    if global_step % checkpoint_iterations == 0:
                        save_model_session(sess, iterations + "seq2seq_model")
                    if global_step % best_model_iterations == 0:
                        val_count_iter, curr_loss = validation(
                            sess, model, data, val_writer, val_count_iter)
                        if curr_loss < val_loss_seen:
                            val_loss_seen = curr_loss
                            save_model_session(sess, best + "seq2seq_model")
                    if global_step % test_model_iterations == 0:
                        test_count_iter = test(sess,
                                               model,
                                               data,
                                               test_writer,
                                               test_count_iter,
                                               is_store_output=False)

                    print("Iteration ", global_step, " best_loss ",
                          val_loss_seen)
                    global_step += 1

            except Exception:
                # ignore problems and continue looping ...
                print("Something went wrong !")

        train_writer.close()
        test_writer.close()
        val_writer.close()
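
# validation(...) is likewise reconstructed from its call sites: run the
# validation split, log summaries, and return the updated counter plus the
# mean loss used for best-model selection. model.test_summary_merged and
# model.loss are the same hypothetical attribute names as in the test sketch:
def validation(sess, model, data, val_writer, val_count_iter):
    losses = []
    for X_val, y_val, _ in data.val_next_batch():
        summary, loss = sess.run(
            [model.test_summary_merged, model.loss],
            feed_dict={model.inputs: X_val, model.outputs_exp: y_val})
        val_writer.add_summary(summary, val_count_iter)
        val_count_iter += 1
        losses.append(loss)
    curr_loss = sum(losses) / len(losses) if losses else float("inf")
    return val_count_iter, curr_loss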
Example #9
def train():
    global best_loss
    with tf.Session() as sess:

        model = SkipAutoEncoder(heigth_train, width_train, heigth_test,
                                width_test)

        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)

        # clear logs !
        log_directory_creation(sess)

        # summary !
        train_writer = tf.summary.FileWriter(log_dir_file_path + "train",
                                             sess.graph)
        test_writer = tf.summary.FileWriter(log_dir_file_path + "test",
                                            sess.graph)
        val_writer = tf.summary.FileWriter(log_dir_file_path + "val",
                                           sess.graph)

        global_step = 0
        train_count_iter = 0
        val_count_iter = 0
        test_count_iter = 0
        val_loss_seen = float("inf")

        while True:
            try:
                # data read iterator
                data = datasets(batch_size=batch_size,
                                height=heigth,
                                width=width,
                                custom_test_size=custom_test_size,
                                time_frame=timesteps,
                                interval=interval)

                for X_batch, y_batch, _ in data.train_next_batch():
                    # print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
                    if not is_correct_batch_shape(X_batch, y_batch, "train"):
                        # global step not increased !
                        continue
                    for each_timesteps in range(
                            time_frames_to_consider,
                            timesteps - time_frames_to_consider):

                        input_train = X_batch[:, each_timesteps -
                                              time_frames_to_consider:
                                              each_timesteps, :, :, :]
                        input_train = images_to_channels(input_train)

                        output_train = X_batch[:, each_timesteps, :, :, :]

                        _, train_summary = sess.run(
                            [model.step, model.train_summary_merged],
                            feed_dict={
                                model.input_train: input_train,
                                model.output_train: output_train
                            })

                        train_writer.add_summary(train_summary,
                                                 train_count_iter)
                        train_count_iter += 1

                    if global_step % checkpoint_iterations == 0:
                        save_model_session(
                            sess, iterations + "skip_autoencoder_model")

                    if global_step % best_model_iterations == 0:
                        val_count_iter, curr_loss = validation(
                            sess, model, data, val_writer, val_count_iter)
                        if curr_loss < val_loss_seen:
                            val_loss_seen = curr_loss
                            save_model_session(sess,
                                               best + "skip_autoencoder_model")

                    if global_step % test_model_iterations == 0:
                        test_count_iter = test(sess, model, data, test_writer,
                                               test_count_iter)

                    print("Iteration ", global_step, " best_loss ",
                          val_loss_seen)
                    global_step += 1

            except Exception:
                print("error occurred ... skipping ... !")

        train_writer.close()
        test_writer.close()
        val_writer.close()
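
# Every example relies on a datasets class with the iterator contract below,
# reconstructed from the call sites (constructor arguments and yielded shapes
# only; the bodies and the frame_ext video writer are left out):
class datasets(object):
    def __init__(self, batch_size, height, width, custom_test_size=None,
                 time_frame=None, interval=None):
        pass

    def train_next_batch(self):
        # yields (X_batch, y_batch, filenames); frames are numpy arrays of
        # shape (batch_size, time_frame, height, width, channels)
        raise NotImplementedError

    def val_next_batch(self):
        raise NotImplementedError

    def test_next_batch(self):
        raise NotImplementedError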