Example 1
def main(argv=None):

    # FLAGS.save_dir += FLAGS.dataset_name
    # FLAGS.gen_frm_dir += FLAGS.dataset_name
    # if tf.io.gfile.exists(FLAGS.save_dir):
    #     tf.io.gfile.rmtree(FLAGS.save_dir)
    # tf.io.gfile.makedirs(FLAGS.save_dir)
    # if tf.io.gfile.exists(FLAGS.gen_frm_dir):
    #     tf.io.gfile.rmtree(FLAGS.gen_frm_dir)
    # tf.io.gfile.makedirs(FLAGS.gen_frm_dir)

    FLAGS.save_dir += FLAGS.dataset_name + str(
        FLAGS.seq_length) + FLAGS.num_hidden
    print(FLAGS.save_dir)
    # FLAGS.best_model = FLAGS.save_dir + '/best.ckpt'
    FLAGS.best_model = FLAGS.save_dir + f'/best_channels{FLAGS.img_channel}.ckpt'
    # FLAGS.best_model = FLAGS.save_dir + f'/best_channels{FLAGS.img_channel}_weighted.ckpt'
    # FLAGS.save_dir += FLAGS.dataset_name
    FLAGS.pretrained_model = FLAGS.save_dir

    process_data_dir = os.path.join(FLAGS.valid_data_paths, FLAGS.dataset_name,
                                    'process_0.5')
    node_pos_file_2in1 = os.path.join(process_data_dir, 'node_pos_0.5.npy')
    node_pos = np.load(node_pos_file_2in1)

    test_data_paths = os.path.join(FLAGS.valid_data_paths, FLAGS.dataset_name,
                                   FLAGS.dataset_name + '_' + FLAGS.mode)
    sub_files = preprocess.list_filenames(test_data_paths, [])

    output_path = f'./Results/predrnn/t{FLAGS.test_time}_{FLAGS.mode}/'
    # output_path = f'./Results/predrnn/t14/'
    preprocess.create_directory_structure(output_path)
    # The following indices are the start indices of the 3 images to predict within the 288 time bins
    # (0 to 287) of each daily test file. They are time-zone dependent: Berlin lies in UTC+2, whereas
    # Istanbul and Moscow lie in UTC+3.
    utcPlus2 = [30, 69, 126, 186, 234]
    utcPlus3 = [57, 114, 174, 222, 258]
    indicies = utcPlus3
    if FLAGS.dataset_name == 'Berlin':
        indicies = utcPlus2
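    # 288 bins per daily file means one bin every 5 minutes (24 h * 60 min / 288),
    # so e.g. index 30 marks minutes 150-154 of the day.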

    print("Initializing models", flush=True)
    model = Model()

    step = 6
    se_total = 0.
    se_1 = 0.
    se_2 = 0.
    se_3 = 0.
    gt_list = []
    pred_list = []
    mavg_list = []
    for f in sub_files:
        with h5py.File(os.path.join(test_data_paths, f), 'r') as h5_file:
            data = h5_file['array'][()]
            # Query the Moving Average Data
            prev_data = [data[y - step:y] for y in indicies]
            prev_data = np.stack(prev_data, axis=0)
            # type casting
            # prev_data = prev_data.astype(np.float32) / 255.0
            # mavg_pred = cast_moving_avg(prev_data)
            # mavg_list.append(mavg_pred)

            # get relevant training data pieces
            data = [
                data[y - FLAGS.input_length:y + FLAGS.seq_length -
                     FLAGS.input_length] for y in indicies
            ]
            data = np.stack(data, axis=0)
            # select the desired data channels
            data = data[..., :FLAGS.img_channel]

            # all validation data is applied
            # data = np.reshape(data,(-1, FLAGS.seq_length,
            #                     FLAGS.img_height*FLAGS.patch_size_height, FLAGS.img_width*FLAGS.patch_size_width, 3))
            # type casting
            test_dat = data.astype(np.float32) / 255.0
            test_dat = preprocess.reshape_patch(test_dat,
                                                FLAGS.patch_size_width,
                                                FLAGS.patch_size_height)
            batch_size = data.shape[0]
            mask_true = np.zeros(
                (batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
                 FLAGS.img_height, FLAGS.img_width, FLAGS.patch_size_height *
                 FLAGS.patch_size_width * FLAGS.img_channel))
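            # all-zero mask: at test time no ground-truth frames are fed back
            # after the input_length conditioning frames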
            img_gen = model.test(test_dat, mask_true, batch_size)
            # concat outputs of different gpus along batch
            # img_gen = np.concatenate(img_gen)
            img_gen = img_gen[0]
            img_gen = np.maximum(img_gen, 0)
            img_gen = np.minimum(img_gen, 1)
            img_gen = preprocess.reshape_patch_back(img_gen,
                                                    FLAGS.patch_size_width,
                                                    FLAGS.patch_size_height)
            img_gt = data[:, FLAGS.input_length:, ...].astype(
                np.float32) / 255.0

            gt_list.append(img_gt)
            pred_list.append(img_gen)
            se_total += np.sum((img_gt - img_gen)**2)

            se_1 += np.sum((img_gt[..., 0] - img_gen[..., 0])**2)
            se_2 += np.sum((img_gt[..., 1] - img_gen[..., 1])**2)
            # se_3 += np.sum((img_gt[..., 2] - img_gen[..., 2]) ** 2)

            img_gen = np.uint8(img_gen * 255)
            outfile = os.path.join(output_path, FLAGS.dataset_name,
                                   FLAGS.dataset_name + '_test', f)
            preprocess.write_data(img_gen, outfile)

    # mse = se_total / (len(indicies) * len(sub_files) * 495 * 436 * 3 * 3)
    #
    # mse1 = se_1 / (len(indicies) * len(sub_files) * 495 * 436 * 3)
    # mse2 = se_2 / (len(indicies) * len(sub_files) * 495 * 436 * 3)
    # # mse3 = se_3 / (len(indicies) * len(sub_files) * 495 * 436 * 3)
    # print(FLAGS.dataset_name)
    # print("MSE: ", mse)
    # print("MSE_vol: ", mse1)
    # print("MSE_sp: ", mse2)
    # # print("MSE_hd: ", mse3)
    #
    # pred_list = np.stack(pred_list, axis=0)
    # gt_list = np.stack(gt_list, axis=0)
    # mavg_list = np.stack(mavg_list, axis=0)
    #
    # array_mse = masked_mse_np(mavg_list, gt_list, np.nan)
    # print(f'MAVG {step} MSE: ', array_mse)
    #
    # # adapt pred on non_zero mavg pred only
    # pred_list_copy = np.zeros_like(pred_list)
    # pred_list_copy[mavg_list > 0] = pred_list[mavg_list > 0]
    #
    # array_mse = masked_mse_np(pred_list_copy, gt_list, np.nan)
    # print(f'PRED+MAVG {step} MSE: ', array_mse)
    #
    # # Evaluate on nodes
    # # Check MSE on node_pos
    # img_gt_node = gt_list[:, :, :, node_pos[:, 0], node_pos[:, 1], :].astype(np.float32)
    # img_gen_node = pred_list[:, :, :, node_pos[:, 0], node_pos[:, 1], :].astype(np.float32)
    # mse_node_all = masked_mse_np(img_gen_node, img_gt_node, np.nan)
    # mse_node_volume = masked_mse_np(img_gen_node[..., 0], img_gt_node[..., 0], np.nan)
    # mse_node_speed = masked_mse_np(img_gen_node[..., 1], img_gt_node[..., 1], np.nan)
    # mse_node_direction = masked_mse_np(img_gen_node[..., 2], img_gt_node[..., 2], np.nan)
    # print("Results on Node Pos: ")
    # print("MSE: ", mse_node_all)
    # print("Volume mse: ", mse_node_volume)
    # print("Speed mse: ", mse_node_speed)
    # print("Direction mse: ", mse_node_direction)
    #
    # print("Evaluating on Condensed Graph....")
    # seq_length = np.shape(gt_list)[2]
    # img_height = np.shape(gt_list)[3]
    # img_width = np.shape(gt_list)[4]
    # num_channels = np.shape(gt_list)[5]
    # gt_list = np.reshape(gt_list, [-1, seq_length,
    #                             int(img_height / FLAGS.patch_size_height), FLAGS.patch_size_height,
    #                             int(img_width / FLAGS.patch_size_width), FLAGS.patch_size_width,
    #                             num_channels])
    # gt_list = np.transpose(gt_list, [0, 1, 2, 4, 3, 5, 6])
    #
    # pred_list = np.reshape(pred_list, [-1, seq_length,
    #                                int(img_height / FLAGS.patch_size_height), FLAGS.patch_size_height,
    #                                int(img_width / FLAGS.patch_size_width), FLAGS.patch_size_width,
    #                                num_channels])
    # pred_list = np.transpose(pred_list, [0, 1, 2, 4, 3, 5, 6])
    #
    # node_pos = preprocess.construct_road_network_from_grid_condense(FLAGS.patch_size_height, FLAGS.patch_size_width,
    #                                                                 test_data_paths)
    #
    # img_gt_node = gt_list[:, :, node_pos[:, 0], node_pos[:, 1], ...].astype(np.float32)
    # img_gen_node = pred_list[:, :, node_pos[:, 0], node_pos[:, 1], ...].astype(np.float32)
    # mse_node_all = masked_mse_np(img_gen_node, img_gt_node, np.nan)
    # mse_node_volume = masked_mse_np(img_gen_node[..., 0], img_gt_node[..., 0], np.nan)
    # mse_node_speed = masked_mse_np(img_gen_node[..., 1], img_gt_node[..., 1], np.nan)
    # mse_node_direction = masked_mse_np(img_gen_node[..., 2], img_gt_node[..., 2], np.nan)
    # print("MSE: ", mse_node_all)
    # print("Volume mse: ", mse_node_volume)
    # print("Speed mse: ", mse_node_speed)
    # print("Direction mse: ", mse_node_direction)

    print("Finished...")
Example 2
def main(argv=None):
    if tf.io.gfile.exists(FLAGS.save_dir):
        tf.io.gfile.rmtree(FLAGS.save_dir)
    tf.io.gfile.makedirs(FLAGS.save_dir)
    if tf.io.gfile.exists(FLAGS.gen_frm_dir):
        tf.io.gfile.rmtree(FLAGS.gen_frm_dir)
    tf.io.gfile.makedirs(FLAGS.gen_frm_dir)

    train_data_paths = os.path.join(
        FLAGS.train_data_paths, FLAGS.dataset_name,
        'train_speed_down_sample{}.npz'.format(FLAGS.down_sample))
    valid_data_paths = os.path.join(
        FLAGS.valid_data_paths, FLAGS.dataset_name,
        'valid_speed_down_sample{}.npz'.format(FLAGS.down_sample))
    # load data
    train_input_handle, test_input_handle = datasets_factory.data_provider(
        FLAGS.dataset_name, train_data_paths, valid_data_paths,
        FLAGS.batch_size, True, FLAGS.down_sample, FLAGS.input_length,
        FLAGS.seq_length - FLAGS.input_length)

    cities = ['Berlin', 'Istanbul', 'Moscow']
    # The following indices are the start indices of the 3 images to predict within the 288 time bins
    # (0 to 287) of each daily test file. They are time-zone dependent: Berlin lies in UTC+2, whereas
    # Istanbul and Moscow lie in UTC+3.
    utcPlus2 = [30, 69, 126, 186, 234]
    utcPlus3 = [57, 114, 174, 222, 258]
    indicies = utcPlus3
    if FLAGS.dataset_name == 'Berlin':
        indicies = utcPlus2

    dims = train_input_handle.dims
    FLAGS.img_height = dims[-2]
    FLAGS.img_width = dims[-1]
    print("Initializing models", flush=True)
    model = Model()
    lr = FLAGS.lr

    delta = 0.00002
    base = 0.99998
    eta = 1

    for itr in range(1, FLAGS.max_iterations + 1):
        if train_input_handle.no_batch_left():
            train_input_handle.begin(do_shuffle=True)
        ims = train_input_handle.get_batch()
        ims = preprocess.reshape_patch(ims, FLAGS.patch_size)

        if itr < 50000:
            eta -= delta
        else:
            eta = 0.0
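        # scheduled sampling: positions where mask_true is 1 receive the true
        # frame, positions with 0 receive the model's own prediction; eta decays
        # linearly from 1 to 0 over the first 50000 iterations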
        random_flip = np.random.random_sample(
            (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1))
        true_token = (random_flip < eta)
        #true_token = (random_flip < pow(base,itr))
        ones = np.ones((FLAGS.img_height, FLAGS.img_width,
                        int(FLAGS.patch_size**2 * FLAGS.img_channel)))
        zeros = np.zeros((int(FLAGS.img_height), int(FLAGS.img_width),
                          int(FLAGS.patch_size**2 * FLAGS.img_channel)))
        mask_true = []
        for i in range(FLAGS.batch_size):
            for j in range(FLAGS.seq_length - FLAGS.input_length - 1):
                if true_token[i, j]:
                    mask_true.append(ones)
                else:
                    mask_true.append(zeros)
        mask_true = np.array(mask_true)
        mask_true = np.reshape(
            mask_true,
            (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
             int(FLAGS.img_height), int(FLAGS.img_width),
             int(FLAGS.patch_size**2 * FLAGS.img_channel)))
        cost = model.train(ims, lr, mask_true)
        if FLAGS.reverse_input:
            ims_rev = ims[:, ::-1]
            cost += model.train(ims_rev, lr, mask_true)
            cost = cost / 2

        if itr % FLAGS.display_interval == 0:
            print('itr: ' + str(itr), flush=True)
            print('training loss: ' + str(cost), flush=True)

        if itr % FLAGS.test_interval == 0:
            print('test...', flush=True)
            test_input_handle.begin(do_shuffle=False)
            res_path = os.path.join(FLAGS.gen_frm_dir, str(itr))
            os.mkdir(res_path)
            avg_mse = 0
            batch_id = 0
            img_mse, ssim, psnr, fmae, sharp = [], [], [], [], []
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                img_mse.append(0)
                ssim.append(0)
                psnr.append(0)
                fmae.append(0)
                sharp.append(0)
            mask_true = np.zeros(
                (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
                 FLAGS.img_height, FLAGS.img_width,
                 FLAGS.patch_size**2 * FLAGS.img_channel))
            while not test_input_handle.no_batch_left():
                batch_id = batch_id + 1
                test_ims = test_input_handle.get_batch()
                test_dat = preprocess.reshape_patch(test_ims, FLAGS.patch_size)
                img_gen = model.test(test_dat, mask_true)

                # concat outputs of different gpus along batch
                img_gen = np.concatenate(img_gen)
                img_gen = preprocess.reshape_patch_back(
                    img_gen, FLAGS.patch_size)
                # MSE per frame
                for i in range(FLAGS.seq_length - FLAGS.input_length):
                    x = test_ims[:, i + FLAGS.input_length, :, :, 0]
                    gx = img_gen[:, i, :, :, 0]
                    fmae[i] += metrics.batch_mae_frame_float(gx, x)
                    gx = np.maximum(gx, 0)
                    gx = np.minimum(gx, 1)
                    mse = np.square(x - gx).sum()
                    img_mse[i] += mse
                    avg_mse += mse

                    real_frm = np.uint8(x * 255)
                    pred_frm = np.uint8(gx * 255)
                    psnr[i] += metrics.batch_psnr(pred_frm, real_frm)
                    for b in range(FLAGS.batch_size):
                        sharp[i] += np.max(
                            cv2.convertScaleAbs(cv2.Laplacian(pred_frm[b], 3)))
                        # score, _ = compare_ssim(pred_frm[b],real_frm[b],full=True)
                        # ssim[i] += score

                # save prediction examples
                if batch_id <= 10:
                    path = os.path.join(res_path, str(batch_id))
                    os.mkdir(path)
                    for i in range(FLAGS.seq_length):
                        name = 'gt' + str(i + 1) + '.png'
                        file_name = os.path.join(path, name)
                        img_gt = np.uint8(test_ims[0, i, :, :, :] * 255)
                        cv2.imwrite(file_name, img_gt)
                    for i in range(FLAGS.seq_length - FLAGS.input_length):
                        name = 'pd' + str(i + 1 + FLAGS.input_length) + '.png'
                        file_name = os.path.join(path, name)
                        img_pd = img_gen[0, i, :, :, :]
                        img_pd = np.maximum(img_pd, 0)
                        img_pd = np.minimum(img_pd, 1)
                        img_pd = np.uint8(img_pd * 255)
                        cv2.imwrite(file_name, img_pd)
                test_input_handle.next()
            avg_mse = avg_mse / (batch_id * FLAGS.batch_size)
            print('mse per seq: ' + str(avg_mse), flush=True)
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(img_mse[i] / (batch_id * FLAGS.batch_size))
            psnr = np.asarray(psnr, dtype=np.float32) / batch_id
            fmae = np.asarray(fmae, dtype=np.float32) / batch_id
            sharp = np.asarray(
                sharp, dtype=np.float32) / (FLAGS.batch_size * batch_id)
            print('psnr per frame: ' + str(np.mean(psnr)), flush=True)
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(psnr[i], flush=True)
            print('fmae per frame: ' + str(np.mean(fmae)))
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(fmae[i], flush=True)
            print('sharpness per frame: ' + str(np.mean(sharp)))
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(sharp[i], flush=True)

            # test with file
            valid_data_path = os.path.join(
                FLAGS.train_data_paths, FLAGS.dataset_name,
                '{}_validation'.format(FLAGS.dataset_name))
            files = list_filenames(valid_data_path)
            output_all = []
            labels_all = []
            for f in files:
                valid_file = valid_data_path + '/' + f
                valid_input, raw_output = datasets_factory.test_validation_provider(
                    valid_file,
                    indicies,
                    down_sample=FLAGS.down_sample,
                    seq_len=FLAGS.input_length,
                    horizon=FLAGS.seq_length - FLAGS.input_length)
                valid_input = valid_input.astype(np.float32) / 255.0
                labels_all.append(raw_output)
                num_tests = len(indicies)
                num_partitions = int(np.ceil(num_tests / FLAGS.batch_size))
                for i in range(num_partitions):
                    valid_input_i = valid_input[i * FLAGS.batch_size:(i + 1) *
                                                FLAGS.batch_size]
                    num_input_i = valid_input_i.shape[0]
                    if num_input_i < FLAGS.batch_size:
                        zeros_fill_in = np.zeros(
                            (FLAGS.batch_size - num_input_i, FLAGS.seq_length,
                             FLAGS.img_height, FLAGS.img_width,
                             FLAGS.img_channel))
                        valid_input_i = np.concatenate(
                            [valid_input_i, zeros_fill_in], axis=0)
                    img_gen = model.test(valid_input_i, mask_true)
                    output_all.append(img_gen[0][:num_input_i])

            output_all = np.concatenate(output_all, axis=0)
            labels_all = np.concatenate(labels_all, axis=0)
            origin_height = labels_all.shape[-3]
            origin_width = labels_all.shape[-2]
            output_resize = []
            for i in range(output_all.shape[0]):
                output_i = []
                for j in range(output_all.shape[1]):
                    # speed channel (channel-last, cf. the test loop above)
                    tmp_data = output_all[i, j, :, :, 1]
                    tmp_data = cv2.resize(tmp_data,
                                          (origin_width, origin_height))
                    tmp_data = np.expand_dims(tmp_data, axis=0)
                    output_i.append(tmp_data)
                output_i = np.stack(output_i, axis=0)
                output_resize.append(output_i)
            output_resize = np.stack(output_resize, axis=0)

            output_resize *= 255.0
            labels_all = np.expand_dims(labels_all[..., 1], axis=2)
            valid_mse = masked_mse_np(output_resize, labels_all, np.nan)

            print("validation mse is ", valid_mse, flush=True)

        if itr % FLAGS.snapshot_interval == 0:
            model.save(itr)

        train_input_handle.next()
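
masked_mse_np serves as the validation metric here and in the later examples. A plausible minimal implementation, assuming it follows the usual masked-metric pattern (ignore entries equal to null_val, or NaNs when null_val is NaN):

import numpy as np

def masked_mse_np(preds, labels, null_val=np.nan):
    # build a validity mask; with null_val=np.nan and NaN-free labels,
    # every entry counts
    if np.isnan(null_val):
        mask = ~np.isnan(labels)
    else:
        mask = np.not_equal(labels, null_val)
    mask = mask.astype(np.float32)
    mask /= np.mean(mask)  # renormalise so masked cells do not dilute the mean
    mse = np.square(np.subtract(preds, labels)).astype(np.float32)
    mse = np.nan_to_num(mse * mask)
    return np.mean(mse)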
Example 3
def main(argv=None):

    # FLAGS.save_dir += FLAGS.dataset_name + str(FLAGS.seq_length) + FLAGS.num_hidden + 'squash'
    heading_dict = {1: 1, 2: 85, 3: 170, 4: 255, 0: 0}
    heading = FLAGS.heading
    loss_func = 'L1+L2+VALID'
    # FLAGS.save_dir += FLAGS.dataset_name + str(FLAGS.seq_length) + FLAGS.num_hidden + 'squash' + loss_func + str(
    #     heading)
    # FLAGS.pretrained_model = FLAGS.save_dir

    test_data_paths = os.path.join(FLAGS.valid_data_paths, FLAGS.dataset_name,
                                   FLAGS.dataset_name + '_' + FLAGS.mode)
    sub_files = preprocess.list_filenames(test_data_paths, [])

    output_path = f'../Results/t{FLAGS.test_time}_{FLAGS.mode}/{FLAGS.loss_func}'
    # output_path = f'./Results/predrnn/t14/'
    # preprocess.create_directory_structure(output_path)

    # The following indices are the start indices of the 3 images to predict within the 288 time bins
    # (0 to 287) of each daily test file. They are time-zone dependent: Berlin lies in UTC+2, whereas
    # Istanbul and Moscow lie in UTC+3.
    utcPlus2 = [30, 69, 126, 186, 234]
    utcPlus3 = [57, 114, 174, 222, 258]
    heading_table = np.array([[0, 0], [-1, 1], [1, 1], [-1, -1], [1, -1]],
                             dtype=np.float32)

    indicies = utcPlus3
    if FLAGS.dataset_name == 'Berlin':
        indicies = utcPlus2

    epsilon = 0.2 * 255
    capsule_combine_list = []
    gt_list = []
    mavg_list = []
    for f in sub_files:
        with h5py.File(os.path.join(test_data_paths, f), 'r') as h5_file:
            data = h5_file['array'][()]
            # get relevant training data pieces
            data = [
                data[y - FLAGS.input_length:y + FLAGS.seq_length -
                     FLAGS.input_length] for y in indicies
            ]
            test_ims = np.stack(data, axis=0)
            gt = test_ims[:, FLAGS.input_length:, :, :, :]
            capsule_combine = np.zeros_like(
                test_ims[:, FLAGS.input_length:, :, :, :2], dtype=np.float32)

            mavg_results = cast_moving_avg(test_ims[:, :FLAGS.input_length,
                                                    ...])
            for heading_i in range(1, 5):
                outfile = os.path.join(output_path, FLAGS.dataset_name,
                                       FLAGS.dataset_name + '_test',
                                       f'{heading_i}' + f)
                with h5py.File(outfile, 'r') as heading_file:
                    capsule_i = heading_file['array'][()]
                    capsule_combine += capsule_i

                    gt_head_pos = gt[..., 2] == heading_dict[heading_i]
                    gt_head = np.zeros_like(gt)
                    gt_head[gt_head_pos] = gt[gt_head_pos]

                    val_results_speed = np.sqrt(capsule_i[..., 0]**2 +
                                                capsule_i[..., 1]**2) * 255.0

                    # print("val speed: ", val_results_speed, flush=True)
                    val_results_heading = np.zeros_like(capsule_i[..., 1])
                    val_results_heading[(capsule_i[..., 0] > 0)
                                        & (capsule_i[..., 1] > 0)] = 85.0
                    val_results_heading[(capsule_i[..., 0] > 0)
                                        & (capsule_i[..., 1] < 0)] = 255.0
                    val_results_heading[(capsule_i[..., 0] < 0)
                                        & (capsule_i[..., 1] < 0)] = 170.0
                    val_results_heading[(capsule_i[..., 0] < 0)
                                        & (capsule_i[..., 1] > 0)] = 1.0

                    # val_results_heading[mavg_results[:, :, :, :, 1] < epsilon] = \
                    #     mavg_results[:, :, :, :, 2][mavg_results[:, :, :, :, 1] < epsilon]

                    gen_speed_heading = np.stack(
                        [val_results_speed, val_results_heading], axis=-1)

                    print("Heading ", heading_i)
                    print("MSE on all pixels")
                    mse = masked_mse_np(gen_speed_heading,
                                        gt_head[..., 1:],
                                        null_val=np.nan)
                    speed_mse = masked_mse_np(gen_speed_heading[..., 0],
                                              gt_head[..., 1],
                                              null_val=np.nan)
                    direction_mse = masked_mse_np(gen_speed_heading[..., 1],
                                                  gt_head[..., 2],
                                                  null_val=np.nan)
                    print("The output mse is ", mse / 255**2)
                    print("The speed mse is ", speed_mse / 255**2)
                    print("The direction mse is ", direction_mse / 255**2)

                    print("MSE on heading POS")
                    mse = masked_mse_np(gen_speed_heading,
                                        gt_head[..., 1:],
                                        null_val=0.0)
                    speed_mse = masked_mse_np(gen_speed_heading[..., 0],
                                              gt_head[..., 1],
                                              null_val=0.0)
                    direction_mse = masked_mse_np(gen_speed_heading[..., 1],
                                                  gt_head[..., 2],
                                                  null_val=0.0)
                    print("The output mse is ", mse / 255**2)
                    print("The speed mse is ", speed_mse / 255**2)
                    print("The direction mse is ", direction_mse / 255**2)

            mavg_list.append(mavg_results)
            gt_list.append(gt)
            capsule_combine_list.append(capsule_combine)

    gt_list = np.stack(gt_list, axis=0)
    capsule_combine_list = np.stack(capsule_combine_list, axis=0)
    mavg_list = np.stack(mavg_list, axis=0)

    # print("img_gen shape is ", gx.shape)
    val_results_speed = np.sqrt(capsule_combine_list[..., 0]**2 +
                                capsule_combine_list[..., 1]**2) * 255.0
    # print("val speed: ", val_results_speed, flush=True)
    val_results_heading = np.zeros_like(capsule_combine_list[..., 1])
    val_results_heading[(capsule_combine_list[..., 0] > 0)
                        & (capsule_combine_list[..., 1] > 0)] = 85.0
    val_results_heading[(capsule_combine_list[..., 0] > 0)
                        & (capsule_combine_list[..., 1] < 0)] = 255.0
    val_results_heading[(capsule_combine_list[..., 0] < 0)
                        & (capsule_combine_list[..., 1] < 0)] = 170.0
    val_results_heading[(capsule_combine_list[..., 0] < 0)
                        & (capsule_combine_list[..., 1] > 0)] = 1.0
    gen_speed_heading = np.stack([val_results_speed, val_results_heading],
                                 axis=-1)

    #
    # print("Evaluate on every pixels....")
    speed_threshold = mavg_list[..., 1] < 200

    gen_speed_heading[..., 1][speed_threshold] = mavg_list[...,
                                                           2][speed_threshold]
    mse = masked_mse_np(gen_speed_heading, gt_list[..., 1:], null_val=np.nan)
    speed_mse = masked_mse_np(gen_speed_heading[..., 0],
                              gt_list[..., 1],
                              null_val=np.nan)
    direction_mse = masked_mse_np(gen_speed_heading[..., 1],
                                  gt_list[..., 2],
                                  null_val=np.nan)
    print("The output mse is ", mse / 255**2)
    print("The speed mse is ", speed_mse / 255**2)
    print("The direction mse is ", direction_mse / 255**2)

    # print("Evaluate on valid pixels for Transformation...")
    print("Moving Avg is ")
    mse = masked_mse_np(mavg_list, gt_list, null_val=np.nan)
    speed_mse = masked_mse_np(mavg_list[..., 1],
                              gt_list[..., 1],
                              null_val=np.nan)
    direction_mse = masked_mse_np(mavg_list[..., 2],
                                  gt_list[..., 2],
                                  null_val=np.nan)
    print("The output mse is ", mse / 255**2)
    print("The speed mse is ", speed_mse / 255**2)
    print("The direction mse is ", direction_mse / 255**2)
Example 4
    def preprocessing(self, down_sample=1, seq_len=12, horizon=3):

        current_dir = os.path.dirname(self.paths[0])
        raw_data_dir = os.path.join(current_dir, self.name)
        files = list_filenames(raw_data_dir)
        num_days = len(files)
        time_slots = 288
        input_raw_data = [[], [], []]
        for f in files:
            try:
                with h5py.File(raw_data_dir + '/' + f, 'r') as fr:
                    data = fr['array'][()]
            except (OSError, KeyError):
                # skip files that cannot be read
                continue
            data = np.transpose(data, (0, 3, 1, 2))
            for j in range(data.shape[1]):
                for i in range(data.shape[0]):
                    tmp_data = data[i, j, :, :]
                    n_rows, n_cols = tmp_data.shape
                    # down sample the image
                    tmp_data = cv2.resize(
                        tmp_data,
                        (n_cols // down_sample, n_rows // down_sample))
                    input_raw_data[j].append(tmp_data)

        input_raw_data_channel_1 = np.stack(input_raw_data[0],
                                            axis=0)  # volume
        input_raw_data_channel_2 = np.stack(input_raw_data[1], axis=0)  # speed
        input_raw_data_channel_3 = np.stack(input_raw_data[2],
                                            axis=0)  # heading

        # expand dims on axis1
        input_raw_data_channel_1 = np.expand_dims(input_raw_data_channel_1,
                                                  axis=1)
        input_raw_data_channel_2 = np.expand_dims(input_raw_data_channel_2,
                                                  axis=1)
        input_raw_data_channel_3 = np.expand_dims(input_raw_data_channel_3,
                                                  axis=1)

        resulted_cols = input_raw_data_channel_1.shape[-1]
        resulted_rows = input_raw_data_channel_1.shape[-2]
        dims = np.array([[1, resulted_rows, resulted_cols]], np.int64)

        # construct clips
        num_batches_one_day = time_slots // (seq_len + horizon)
        input_starts = np.arange(0, time_slots, (seq_len + horizon),
                                 np.int64)[:num_batches_one_day]
        output_starts = np.arange(seq_len, time_slots, (seq_len + horizon),
                                  np.int64)
        day_index = np.repeat(np.arange(num_days),
                              num_batches_one_day) * time_slots
        input_starts = np.tile(input_starts, num_days) + day_index
        output_starts = np.tile(output_starts, num_days) + day_index

        input_seq_lens = np.array([seq_len] * (num_days * num_batches_one_day),
                                  np.int64)
        output_horizons = np.array([horizon] *
                                   (num_days * num_batches_one_day), np.int64)
        input_clips = np.stack([input_starts, input_seq_lens], axis=1)
        output_clips = np.stack([output_starts, output_horizons], axis=1)
        clips = np.stack([input_clips, output_clips], axis=0)
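        # clips[0, k] = (start, length) of the k-th input window on the
        # flattened time axis; clips[1, k] = (start, horizon) of its target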

        # save different files
        volume_path = current_dir + '/{}_volume_down_sample{}.npz'.format(
            self.mode, down_sample)
        speed_path = current_dir + '/{}_speed_down_sample{}.npz'.format(
            self.mode, down_sample)
        heading_path = current_dir + '/{}_heading_down_sample{}.npz'.format(
            self.mode, down_sample)
        np.savez(volume_path,
                 dims=dims,
                 clips=clips,
                 input_raw_data=input_raw_data_channel_1)
        np.savez(speed_path,
                 dims=dims,
                 clips=clips,
                 input_raw_data=input_raw_data_channel_2)
        np.savez(heading_path,
                 dims=dims,
                 clips=clips,
                 input_raw_data=input_raw_data_channel_3)
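
A quick sanity check of one saved archive (the path is hypothetical; the expected shapes follow from the code above):

import numpy as np

arc = np.load('Berlin/train_speed_down_sample1.npz')
print(arc['dims'])                   # [[1, resulted_rows, resulted_cols]]
print(arc['clips'].shape)            # (2, num_days * num_batches_one_day, 2)
print(arc['input_raw_data'].shape)   # (num_days * 288, 1, rows, cols)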
Example 5
def main(argv=None):

    # FLAGS.save_dir += FLAGS.dataset_name + str(FLAGS.seq_length) + FLAGS.num_hidden + 'squash'
    heading_dict = {1: 1, 2: 85, 3: 170, 4: 255, 0: 0}
    heading = FLAGS.heading
    loss_func = FLAGS.loss_func
    FLAGS.save_dir += FLAGS.dataset_name + str(
        FLAGS.seq_length
    ) + FLAGS.num_hidden + 'squash' + FLAGS.loss_func + str(heading)
    FLAGS.pretrained_model = FLAGS.save_dir

    test_data_paths = os.path.join(FLAGS.valid_data_paths, FLAGS.dataset_name,
                                   FLAGS.dataset_name + '_' + FLAGS.mode)
    sub_files = preprocess.list_filenames(test_data_paths, [])

    output_path = f'./Results/predrnn/t{FLAGS.test_time}_{FLAGS.mode}/{FLAGS.loss_func}'
    # output_path = f'./Results/predrnn/t14/'
    preprocess.create_directory_structure(output_path)

    # The following indices are the start indices of the 3 images to predict within the 288 time bins
    # (0 to 287) of each daily test file. They are time-zone dependent: Berlin lies in UTC+2, whereas
    # Istanbul and Moscow lie in UTC+3.
    utcPlus2 = [30, 69, 126, 186, 234]
    utcPlus3 = [57, 114, 174, 222, 258]
    heading_table = np.array([[0, 0], [-1, 1], [1, 1], [-1, -1], [1, -1]],
                             dtype=np.float32)

    indicies = utcPlus3
    if FLAGS.dataset_name == 'Berlin':
        indicies = utcPlus2

    # dims = train_input_handle.dims
    print("Initializing models", flush=True)
    model = Model()

    avg_mse = 0
    gt_list = []
    pred_list = []
    pred_list_all = []
    pred_vec = []
    move_avg = []

    for f in sub_files:
        with h5py.File(os.path.join(test_data_paths, f), 'r') as h5_file:
            data = h5_file['array'][()]
            # get relevant training data pieces
            data = [
                data[y - FLAGS.input_length:y + FLAGS.seq_length -
                     FLAGS.input_length] for y in indicies
            ]
            test_ims = np.stack(data, axis=0)
            batch_size = len(indicies)
            gt_list.append(test_ims[:, FLAGS.input_length:, :, :, 1:])

            tem_data = test_ims.copy()
            heading_image = test_ims[:, :, :, :, 2]
            heading_image = (heading_image // 85).astype(np.int8) + 1
            heading_image[tem_data[:, :, :, :, 2] == 0] = 0

            # convert the data into speed vectors
            heading_selected = np.zeros_like(heading_image, np.int8)
            heading_selected[heading_image == heading] = heading
            heading_image = heading_selected
            heading_image = heading_table[heading_image]
            speed_on_axis = np.expand_dims(
                test_ims[:, :, :, :, 1].astype(np.float32) / 255.0 /
                np.sqrt(2),
                axis=-1)
            test_ims = speed_on_axis * heading_image
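            # heading_table rows are (+-1, +-1) with norm sqrt(2); dividing the
            # normalised speed by sqrt(2) gives per-pixel vectors whose
            # magnitude equals speed / 255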

            test_dat = preprocess.reshape_patch(test_ims,
                                                FLAGS.patch_size_width,
                                                FLAGS.patch_size_height)

            mask_true = np.zeros(
                (batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
                 FLAGS.img_height, FLAGS.img_width, FLAGS.patch_size_height *
                 FLAGS.patch_size_width * FLAGS.img_channel))

            img_gen = model.test(test_dat, mask_true, batch_size)

            # concat outputs of different gpus along batch
            img_gen = np.concatenate(img_gen)
            img_gen = preprocess.reshape_patch_back(img_gen,
                                                    FLAGS.patch_size_width,
                                                    FLAGS.patch_size_height)

            outfile = os.path.join(output_path, FLAGS.dataset_name,
                                   FLAGS.dataset_name + '_test',
                                   f'{heading}' + f)
            preprocess.write_data(img_gen, outfile)