Code example #1
import os
from datetime import datetime

import tensorflow as tf

# Project-local imports (configs, get_loader, EVFlowNet) are assumed to be available.


def main():
    args = configs()
    if args.training_instance:
        args.load_path = os.path.join(args.load_path, args.training_instance)
        args.summary_path = os.path.join(args.summary_path, args.training_instance)
    else:
        # Build the run name once so load_path and summary_path share the same
        # timestamp; calling datetime.now() twice could yield different suffixes.
        run_name = "evflownet_{}".format(datetime.now().strftime("%m%d_%H%M%S"))
        args.load_path = os.path.join(args.load_path, run_name)
        args.summary_path = os.path.join(args.summary_path, run_name)
    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)
    if not os.path.exists(args.summary_path):
        os.makedirs(args.summary_path)

    # Fix the random seed for reproducibility.
    # Remove this if you are using this code for something else!
    tf.set_random_seed(12345)
        
    event_img_loader, prev_img_loader, next_img_loader, _, n_ima = get_loader(
        args.data_path, args.batch_size, args.image_width, args.image_height,
        split='train',
        shuffle=True)
    print("Number of images: {}".format(n_ima))
    
    trainer = EVFlowNet(args,
                        event_img_loader,
                        prev_img_loader,
                        next_img_loader,
                        n_ima,
                        is_training=True)
    trainer.train()
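
All of these examples revolve around `configs()` from the project's `config` module, whose definition is not part of this listing. As a point of reference only, here is a minimal sketch of what it presumably looks like, assuming an argparse-based parser whose flags mirror the attributes used above (`training_instance`, `load_path`, `summary_path`, `data_path`, `batch_size`, `image_width`, `image_height`):

import argparse

def configs():
    # Hypothetical sketch; the real parser defines many more options.
    parser = argparse.ArgumentParser(description="EV-FlowNet configuration")
    parser.add_argument("--training_instance", type=str, default="",
                        help="Existing run directory to resume; empty starts a new run.")
    parser.add_argument("--load_path", type=str, default="data/saver")
    parser.add_argument("--summary_path", type=str, default="data/summary")
    parser.add_argument("--data_path", type=str, default="data/mvsec")
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--image_width", type=int, default=256)
    parser.add_argument("--image_height", type=int, default=256)
    return parser.parse_args()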
Code example #2
# Method excerpt from a dataset-loader class (indentation normalized to 4 spaces).
def __init__(self, data_folder_path, split, count_only=False, time_only=False, skip_frames=False):
    self._data_folder_path = data_folder_path
    self._split = split
    self._count_only = count_only
    self._time_only = time_only
    self._skip_frames = skip_frames
    self.args = configs()
    self.event_data_paths, self.n_ima = self.read_file_paths(self._data_folder_path, self._split)
Code example #3
File: clean_data.py  Project: Keyspan/exploring_nlp
"""
Created on Tue Aug 15 20:42:42 2017

@author: py
"""

import csv
import itertools

import nltk
from nltk.tokenize import ToktokTokenizer

from config import configs

config = configs()

train_size = config.train_size


class clean_data():
    def __init__(self):
        data_path = config.data_path
        ratio = config.freq_ratio
        start_vocabs = config.start_vocabs
        self.buckets = config.buckets
        # (excerpt ends here; __init__ continues in the original file)
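
The excerpt ends before ToktokTokenizer is actually used. For orientation, NLTK's Toktok tokenizer is typically applied like this (a standalone sketch, not code from the project):

from nltk.tokenize import ToktokTokenizer

tokenizer = ToktokTokenizer()
# Toktok is a fast, rule-based word tokenizer; unlike word_tokenize it needs no punkt data.
print(tokenizer.tokenize("The quick brown fox jumps over the lazy dog."))
# ['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog', '.']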
Code example #4
def main():
    args = configs()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    log_interval = args.logging_interval

    if args.training_instance:
        args.load_path = os.path.join(args.load_path, args.training_instance)
    else:
        args.load_path = os.path.join(
            args.load_path,
            "evflownet_{}".format(datetime.now().strftime("%m%d_%H%M%S")))
    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)

    # TODO: remove this part
    voxel_method = {
        'method': 'k_events',
        'k': 20000,
        't': 0.5,
        'sliding_window_w': 20000,
        'sliding_window_t': 0.1
    }

    print("Load Data Begin. ")
    gt_path = "/mnt/Data3/mvsec/data/outdoor_day1/outdoor_day1_gt.hdf5"  # outdoor2

    # h5Dataset = DynamicH5Dataset('data/office.h5', voxel_method=voxel_method)
    h5Dataset = DynamicH5Dataset('data/office_spiral.h5',
                                 voxel_method=voxel_method)
    # h5Dataset = DynamicH5Dataset('data/outdoor_day2_data.h5', voxel_method=voxel_method)
    h5DataLoader = torch.utils.data.DataLoader(dataset=h5Dataset,
                                               batch_size=1,
                                               num_workers=6,
                                               shuffle=False)
    camIntrinsic = np.array([[223.9940010790056, 0, 170.7684322973841],
                             [0, 223.61783486959376, 128.18711828338436],
                             [0, 0, 1]])
    predict_camera_frame = []
    gt_interpolated = []

    # model
    print("Load Model Begin. ")
    EVFlowNet_model = EVFlowNet(args).to(device)
    EVFlowNet_model.load_state_dict(
        torch.load('data/model/evflownet_0922_032701_office_spiral/model1'))

    # Optimizer, scheduler, and loss are constructed as in training,
    # but unused in this evaluation loop.
    optimizer = torch.optim.Adam(EVFlowNet_model.parameters(),
                                 lr=args.initial_learning_rate,
                                 weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(
        optimizer=optimizer, gamma=args.learning_rate_decay)
    loss_fun = TotalLoss(args.smoothness_weight, args.photometric_loss_weight)

    EVFlowNet_model.eval()
    print("Start Evaluation. ")
    for iteration, item in enumerate(tqdm(h5DataLoader)):

        # if iteration < 100:
        #     continue
        if iteration > 500:
            break

        voxel = item['voxel'].to(device)
        events = item['events'].to(device)
        frame = item['frame'].to(device)
        frame_ = item['frame_'].to(device)
        num_events = item['num_events'].to(device)
        flow_dict = EVFlowNet_model(voxel)

        sensor_size = (176, 240)
        image_name = "results/img_{:07d}.png".format(iteration)

        events_vis = events[0].detach().cpu()
        flow_vis = flow_dict["flow3"][0].detach().cpu()

        # Compose the event image and warp the event image with flow
        ev_bgn, ev_end, ev_img, timestamps = get_forward_backward_flow_torch(
            events_vis, flow_vis, sensor_size)

        start_t = item['timestamp_begin'].cpu().numpy()[0]
        end_t = item['timestamp_end'].cpu().numpy()[0]

        # Convert to numpy format
        ev_img_raw = torch_to_numpy(ev_img[0])
        ev_img_bgn = torch_to_numpy(ev_img[1])
        ev_img_end = torch_to_numpy(ev_img[2])
        ev_bgn_xs = torch_to_numpy(ev_bgn[0])
        ev_bgn_ys = torch_to_numpy(ev_bgn[1])
        ev_end_xs = torch_to_numpy(ev_end[0])
        ev_end_ys = torch_to_numpy(ev_end[1])

        timestamps_before = torch_to_numpy(timestamps[0])
        timestamps_after = torch_to_numpy(timestamps[1])
        frame_vis = torch_to_numpy(item['frame'][0])
        frame_vis_ = torch_to_numpy(item['frame_'][0])
        flow_vis = torch_to_numpy(flow_dict["flow3"][0])

        p1 = np.dstack([ev_bgn_xs, ev_bgn_ys]).squeeze()
        p2 = np.dstack([ev_end_xs, ev_end_ys]).squeeze()

        E, mask = cv2.findEssentialMat(p1,
                                       p2,
                                       cameraMatrix=camIntrinsic,
                                       method=cv2.RANSAC,
                                       prob=0.999,
                                       threshold=1.5)
        points, R, t, mask = cv2.recoverPose(E, p1, p2, mask=mask)

        S = np.eye(4)
        S[0:3, 0:3] = R
        S[0:3, 3] = np.squeeze(t)
        predict_camera_frame.append(S)

        cvshow_all_eval(ev_img_raw, ev_img_bgn, ev_img_end, (ev_bgn_xs, ev_bgn_ys), \
            (ev_end_xs, ev_end_ys), timestamps_before, timestamps_after, frame_vis, \
            frame_vis_, flow_vis, image_name, sensor_size)

    # gt_interpolated = np.array(gt_interpolated)
    # gt_interpolated = relative_to_absolute_pose(gt_interpolated)

    predict_camera_frame = np.array(predict_camera_frame)
    predict_world_frame = relative_to_absolute_pose(predict_camera_frame)

    # gt_interpolated = np.array(gt_interpolated)
    # visualize_trajectory(gt_interpolated)
    # relative_pose_error(gt_interpolated, predict_camera_frame)
    visualize_trajectory(predict_world_frame)
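
`torch_to_numpy` and `relative_to_absolute_pose` are project helpers that do not appear in the excerpt. Plausible minimal versions, assuming the former merely detaches to NumPy and the latter chains 4x4 relative SE(3) transforms by accumulated matrix products (the real helpers may differ, e.g. by transposing CHW to HWC for display):

import numpy as np

def torch_to_numpy(tensor):
    # Assumed helper: detach from the autograd graph, move to CPU, convert to NumPy.
    return tensor.detach().cpu().numpy()

def relative_to_absolute_pose(relative_poses):
    # Assumed helper: accumulate relative SE(3) matrices into absolute poses,
    # starting from the identity.
    poses = [np.eye(4)]
    for T in relative_poses:
        poses.append(poses[-1] @ T)
    return np.array(poses)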
Code example #5
        # Excerpt from the tail of get_loader(): batch the decoded tensors via TF1 input queues.
        values_batch = tf.train.batch([
            events, n_events, event_image, prev_image, next_image, timestamps
        ],
                                      num_threads=4,
                                      batch_size=batch_size,
                                      capacity=10 * batch_size,
                                      dynamic_pad=True)

        return (values_batch[0], values_batch[1], values_batch[2],
                values_batch[3], values_batch[4], values_batch[5], n_ima)


if __name__ == "__main__":
    from config import configs

    args = configs()
    with tf.Session() as sess:

        results = get_loader(args.data_path,
                             args.batch_size,
                             args.image_width,
                             args.image_height,
                             split='test',
                             shuffle=False,
                             rotation=False,
                             sequence=args.sequences,
                             num_epochs=1)
        events, lengths, event_image = results[:3]
        timestamps = results[5]

        sess.run(tf.global_variables_initializer())
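
Note that `tf.train.batch` is fed by TF1 input queues, so the `__main__` block above would hang on its first `sess.run` of the batched tensors until queue runners are started. A sketch of the standard TF1 boilerplate that would follow inside the session block (not part of the original snippet):

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            batch_np = sess.run([events, lengths, event_image, timestamps])
        finally:
            coord.request_stop()
            coord.join(threads)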
Code example #6
def main():
    args = configs()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    log_interval = args.logging_interval

    if args.training_instance:
        args.load_path = os.path.join(args.load_path, args.training_instance)
    else:
        args.load_path = os.path.join(
            args.load_path,
            "evflownet_{}".format(datetime.now().strftime("%m%d_%H%M%S")))
    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)

    # TODO: remove this part
    voxel_method = {
        'method': 'k_events',
        'k': 60000,
        't': 0.5,
        'sliding_window_w': 2500,
        'sliding_window_t': 0.1
    }

    # EventDataset = EventData(args.data_path, 'train')
    # EventDataLoader = torch.utils.data.DataLoader(dataset=EventDataset, batch_size=args.batch_size, shuffle=True)

    # h5Dataset = DynamicH5Dataset('data/office.h5', voxel_method=voxel_method)
    h5Dataset = DynamicH5Dataset('data/outdoor_day1_data.h5',
                                 voxel_method=voxel_method)
    # h5Dataset = DynamicH5Dataset('data/office_spiral.h5', voxel_method=voxel_method)
    # h5Dataset = DynamicH5Dataset('data/indoor_flying1_data.h5', voxel_method=voxel_method)
    h5DataLoader = torch.utils.data.DataLoader(dataset=h5Dataset,
                                               batch_size=6,
                                               num_workers=6,
                                               shuffle=True)

    # model
    EVFlowNet_model = EVFlowNet(args).to(device)
    # EVFlowNet_model.load_state_dict(torch.load('data/saver/evflownet_0906_041812_outdoor_dataset1/model1'))

    # optimizer
    optimizer = torch.optim.Adam(EVFlowNet_model.parameters(),
                                 lr=args.initial_learning_rate,
                                 weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(
        optimizer=optimizer, gamma=args.learning_rate_decay)
    loss_fun = TotalLoss(args.smoothness_weight, args.photometric_loss_weight)

    EVFlowNet_model.train()
    for epoch in range(100):
        total_loss = 0.0
        running_loss = 0.0
        print(f'****************** epoch: {epoch} ******************')

        for iteration, item in enumerate(tqdm(h5DataLoader)):

            voxel = item['voxel'].to(device)
            events = item['events'].to(device)
            frame = item['frame'].to(device)
            frame_ = item['frame_'].to(device)
            num_events = item['num_events'].to(device)

            optimizer.zero_grad()
            flow_dict = EVFlowNet_model(voxel)
            loss, ev_loss, smooth_loss, ph_loss = loss_fun(
                flow_dict, events, frame, frame_, num_events, EVFlowNet_model)

            if iteration % log_interval == 0:
                # Use true division here; `//` would floor the average loss.
                print(f'iteration: {iteration} avg loss: {running_loss / log_interval} '
                      f'event loss: {int(ev_loss)} smooth loss: {int(smooth_loss)}, '
                      f'photo loss: {int(ph_loss)}')
                running_loss = 0.0
                # sensor_size = (176, 240)
                sensor_size = (256, 336)
                image_name = "results/img_{:03}_{:07d}.png".format(
                    epoch, iteration)

                events_vis = events[0].detach().cpu()
                flow_vis = flow_dict["flow3"][0].detach().cpu()

                # Compose the event image and warp the event image with flow
                ev_img, ev_img_ = warp_events_with_flow_torch(
                    events_vis, flow_vis, sensor_size)

                # Convert to numpy format
                ev_img = torch_to_numpy(ev_img)
                ev_img_ = torch_to_numpy(ev_img_)
                frame_vis = torch_to_numpy(item['frame'][0])
                frame_vis_ = torch_to_numpy(item['frame_'][0])
                flow_vis = torch_to_numpy(flow_dict["flow3"][0])

                cvshow_all(ev_img, flow_vis, frame_vis, frame_vis_, ev_img_,
                           image_name, sensor_size)

            if iteration % 1000 == 999:
                print("scheduler.step()")
                scheduler.step()
                torch.save(EVFlowNet_model.state_dict(),
                           args.load_path + '/model%d' % epoch)

            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            running_loss += loss.item()

        if epoch % 4 == 3:
            print("scheduler.step()")
            scheduler.step()

        torch.save(EVFlowNet_model.state_dict(),
                   args.load_path + '/model%d' % epoch)
        print(f'Epoch {epoch} - Avg loss: {total_loss / len(h5DataLoader)}')
Code example #7
def main():
    args = configs()

    args.restore_path = None
    if args.training_instance:
        if ".ckpt" in args.training_instance:
            training_dir, _ = os.path.splitext(args.training_instance)
            args.restore_path = args.training_instance
        else:
            args.restore_path = tf.train.latest_checkpoint(
                args.training_instance)
            training_dir = args.training_instance
        print("Restoring checkpoint:", args.restore_path)

        args.load_path = os.path.join(args.load_path, training_dir)
        args.summary_path = os.path.join(args.summary_path, training_dir)
    else:
        args.load_path = os.path.join(
            args.load_path,
            "evflownet_{}_{}".format(datetime.now().strftime("%m%d_%H%M%S"),
                                     args.exp_name))
        args.summary_path = os.path.join(
            args.summary_path,
            "evflownet_{}_{}".format(datetime.now().strftime("%m%d_%H%M%S"),
                                     args.exp_name))

        os.makedirs(args.load_path)
        dump_to_yaml(args, os.path.join(args.load_path, "args.yaml"))

    if args.sacred:
        sacred_exp = Experiment(args.exp_name)
        sacred_exp.captured_out_filter = apply_backspaces_and_linefeeds
        conf = vars(args)
        conf.update({'log_dir': args.load_path})
        conf.update({'summary_path': args.summary_path})
        sacred_exp.add_config(mongo_compatible(conf))

        if not args.mongodb_disable:
            url = "{0.mongodb_url}:{0.mongodb_port}".format(args)
            db_name = args.mongodb_name

            overwrite = None
            if args.restore_path is not None:
                client = pymongo.MongoClient(url)
                database = client[db_name]
                runs = database["runs"]
                matches = runs.find({"config.log_dir": args.load_path})
                if matches.count() > 1:
                    raise ValueError(
                        "Multiple MongoDB entries found with the specified path!"
                    )
                elif matches.count() == 0:
                    raise ValueError(
                        "No MongoDB entry found with the specified path!")
                else:
                    overwrite = matches[0]['_id']

            print(
                colored('Connect to MongoDB@{}:{}'.format(url, db_name),
                        "green"))
            sacred_exp.observers.append(
                MongoObserver.create(url=url,
                                     db_name=db_name,
                                     overwrite=overwrite))

    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)
    if not os.path.exists(args.summary_path):
        os.makedirs(args.summary_path)

    # Fix the random seed for reproducibility.
    # Remove this if you are using this code for something else!
    tf.set_random_seed(12345)

    if args.do_aug_rewind:
        if args.no_aug_rot is False:
            raise ValueError(
                "no_aug_rot = False Not supported when do_aug_rewind = True")

        print("Using Event Loader for rewind augmentation!")
        loader_vals = get_loader_events(
            args.data_path,
            args.batch_size,
            args.image_width,
            args.image_height,
            split='train',
            shuffle=True,
            sequence=args.sequences,
            rotation=not args.no_aug_rot,
            rewind=args.do_aug_rewind,
            flip_updown=args.do_aug_flip_updown,
            nskips=args.loader_n_skips,
            binarize_polarity=args.loader_binarize_polarity)
        (events_loader, lengths_loader, event_img_loader, prev_img_loader,
         next_img_loader, _, rot_angle, crop_bbox, n_ima) = loader_vals
    else:
        event_img_loader, prev_img_loader, next_img_loader, _, n_ima = get_loader(
            args.data_path,
            args.batch_size,
            args.image_width,
            args.image_height,
            split='train',
            shuffle=True,
            sequence=args.sequences,
            rotation=not args.no_aug_rot,
            flip_updown=args.do_aug_flip_updown,
            nskips=args.loader_n_skips,
            gzip=args.gzip)
    print("Number of images: {}".format(n_ima))

    trainer = EVFlowNet(args,
                        event_img_loader,
                        prev_img_loader,
                        next_img_loader,
                        n_ima,
                        is_training=True)

    if args.sacred:

        @sacred_exp.main
        def train_wrapped():
            return trainer.train()

        sacred_exp.run()
    else:
        trainer.train()
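
`dump_to_yaml` and `mongo_compatible` are small project helpers not shown here. A plausible sketch of the former, assuming it simply serializes the parsed arguments (the real helper may filter out entries YAML cannot represent):

import yaml

def dump_to_yaml(args, path):
    # Assumed helper: write the argparse Namespace as a YAML mapping.
    with open(path, "w") as f:
        yaml.safe_dump(vars(args), f, default_flow_style=False)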
Code example #8
def main():
    args = configs()

    if args.training_instance:
        args.load_path = os.path.join(args.load_path, args.training_instance)
    else:
        args.load_path = os.path.join(args.load_path,
                                      "evflownet_{}".format(datetime.now()
                                                            .strftime("%m%d_%H%M%S")))
    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)

    EventDataset = EventData(args.data_path, 'train')
    EventDataLoader = torch.utils.data.DataLoader(dataset=EventDataset, batch_size=args.batch_size, shuffle=True)

    # model
    EVFlowNet_model = EVFlowNet(args).cuda()

    #para = np.load('D://p.npy', allow_pickle=True).item()
    #EVFlowNet_model.load_state_dict(para)
    EVFlowNet_model.load_state_dict(torch.load(args.load_path+'/../model'))

    #EVFlowNet_parallelmodel = torch.nn.DataParallel(EVFlowNet_model)
    # optimizer
    optimizer = torch.optim.Adam(EVFlowNet_model.parameters(), lr=args.initial_learning_rate)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=args.learning_rate_decay)
    loss_fun = TotalLoss(args.smoothness_weight)

    iteration = 0
    size = 0
    EVFlowNet_model.train()
    for epoch in range(100):
        loss_sum = 0.0
        print('*****************************************')
        print('epoch:'+str(epoch))
        for event_image, prev_image, next_image, _ in tqdm(EventDataLoader):
            event_image = event_image.cuda()
            prev_image = prev_image.cuda()
            next_image = next_image.cuda()

            optimizer.zero_grad()
            flow_dict = EVFlowNet_model(event_image)

            loss = loss_fun(flow_dict, prev_image, next_image, EVFlowNet_model)
            
            loss.backward()
            optimizer.step()
            loss_sum += loss.item()
            iteration += 1
            size += 1
            if iteration % 100 == 0:
                print('iteration:', iteration)
                print('loss:', loss_sum/100)
                loss_sum = 0.0
            # NOTE: this checkpoint is rewritten after every batch; dedent it one
            # level to save once per epoch instead.
            torch.save(EVFlowNet_model.state_dict(), args.load_path+'/model%d'%epoch)
        if epoch % 4 == 3:
            scheduler.step()
        print('iteration:', iteration)
        print('loss:', loss_sum/size)
        size = 0
Code example #9
File: ys.py  Project: msencer/selenium-ys
# Method excerpt (indentation normalized). configs() is called once and cached
# in a local variable instead of being re-parsed four times.
def __init__(self):
    cfg = configs()
    self.USER_NAME = cfg.user['email']
    self.PASSWORD = cfg.user['password']
    self.ADDRESSID = cfg.ys['address']
    self.PAYMENTMETHOD = cfg.ys['payment']
Code example #10
import torch
from config import configs
from model.model import Model
from dataloader import loader_data
from utils import *
from torchnet import meter

DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')  # 'cuda: 0' (with a space) is not a valid device string

if __name__ == '__main__':
    opt = configs()
    experiment_dir = os.path.join(opt.checkpoints_dir, opt.name)
    mkdirs(experiment_dir)

    opt.display_server = 'http://xxx.xx.xxx.xxx'  # IP
    opt.display_port = 1128
    opt.display_env = 'check'
    vis = Visualizer(server=opt.display_server,
                     port=opt.display_port,
                     env=opt.display_env)

    loss_meter = meter.AverageValueMeter()
    loss_file = os.path.join(experiment_dir, 'loss.txt')
    if os.path.exists(loss_file):
        os.remove(loss_file)
        print("Delete the obsolete loss files: %s!" % loss_file)

    data = loader_data(opt, TrainOrTest='train')
    model = Model(opt, DEVICE)
    model.initial()
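
Judging by the `server`/`port`/`env` keywords, `Visualizer` is presumably a thin wrapper around the `visdom` package. A minimal sketch of such a wrapper (an assumption; the project's class is not shown):

import numpy as np
import visdom

class Visualizer:
    # Hypothetical wrapper matching the constructor call above.
    def __init__(self, server='http://localhost', port=8097, env='main'):
        self.vis = visdom.Visdom(server=server, port=port, env=env)

    def plot_loss(self, step, value, win='loss'):
        # Append one point to a named line plot; the first call creates the window.
        self.vis.line(X=np.array([step]), Y=np.array([value]), win=win,
                      update='append' if step > 0 else None)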
Code example #11
def main():
    args = configs()

    if args.training_instance:
        args.load_path = os.path.join(args.load_path, args.training_instance)
    else:
        args.load_path = os.path.join(
            args.load_path,
            "evflownet_{}".format(datetime.now().strftime("%m%d_%H%M%S")))
    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)

    # EventDataset = EventData(args.data_path, 'train')
    # EventDataLoader = torch.utils.data.DataLoader(dataset=EventDataset, batch_size=args.batch_size, shuffle=True)

    h5Dataset = DynamicH5Dataset(
        '/home/mingyip/Documents/EVFlowNet-pytorch/data/outdoor_day1_data.h5')
    h5DataLoader = torch.utils.data.DataLoader(dataset=h5Dataset,
                                               batch_size=6,
                                               num_workers=6,
                                               shuffle=True)

    # model
    EVFlowNet_model = EVFlowNet(args).cuda()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # optimizer
    optimizer = torch.optim.Adam(EVFlowNet_model.parameters(),
                                 lr=args.initial_learning_rate)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(
        optimizer=optimizer, gamma=args.learning_rate_decay)
    loss_fun = TotalLoss(args.smoothness_weight)

    iteration = 0
    size = 0
    EVFlowNet_model.train()
    for epoch in range(100):
        loss_sum = 0.0
        print('*****************************************')
        print('epoch:' + str(epoch))
        for iteration, item in enumerate(tqdm(h5DataLoader)):

            voxel = item['voxel'].to(device)

            xs, ys, ts, ps = item['events']
            xs = xs.to(device)
            ys = ys.to(device)
            ts = ts.to(device)
            ps = ps.to(device)

            if iteration % 100 == 0:
                xs_ = xs[0].clone().detach().unsqueeze(0)
                ys_ = ys[0].clone().detach().unsqueeze(0)
                ts_ = ts[0].clone().detach().unsqueeze(0)
                ps_ = ps[0].clone().detach().unsqueeze(0)

            optimizer.zero_grad()
            flow_dict = EVFlowNet_model(voxel)

            loss = loss_fun(flow_dict, (xs, ys, ts, ps), None, None,
                            EVFlowNet_model)

            if iteration % 100 == 0:
                print('iteration:', iteration)
                print('loss:', loss_sum / 100)
                loss_sum = 0.0

                flow = flow_dict["flow3"].clone().detach()
                # flow = -1 * flow[0].unsqueeze(0)
                flow_x = flow[0, 0]
                flow_y = flow[0, 1]

                print(flow.shape,
                      torch.mean(flow_x[flow_x > 0]).item(),
                      torch.mean(flow_x[flow_x < 0]).item(),
                      torch.mean(flow_y[flow_y > 0]).item(),
                      torch.mean(flow_y[flow_y < 0]).item())

                voxel_ = voxel.cpu().numpy().squeeze()
                voxel_ = np.sum(voxel_, axis=0)

                vis_events_and_flows(
                    voxel_, (xs_, ys_, ts_, ps_),
                    None,
                    None,
                    flow,
                    sensor_size=flow.shape[-2:],
                    image_name="results/img{:07d}.png".format(epoch * 10000 +
                                                              iteration))

            loss.backward()
            optimizer.step()
            loss_sum += loss.item()
            # enumerate() already drives `iteration`, so the redundant manual
            # increment was dropped.
            size += 1

        scheduler.step()
        torch.save(EVFlowNet_model.state_dict(),
                   args.load_path + '/model%d' % epoch)

        print('iteration:', iteration)
        print('loss:', loss_sum / size)
        size = 0
Code example #12
def main():
    args = configs()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    log_interval = args.logging_interval

    if args.training_instance:
        args.load_path = os.path.join(args.load_path, args.training_instance)
    else:
        args.load_path = os.path.join(
            args.load_path,
            "evflownet_{}".format(datetime.now().strftime("%m%d_%H%M%S")))
    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)

    for ep in experiment_params:

        rpe_stats = []
        rre_stats = []
        trans_e_stats = []

        rpe_rre_info = []
        trans_e_info = []
        gt_interpolated = []
        predict_camera_frame = []
        predict_ts = []

        print(f"{ep['name']}")
        base_path = f"results/{ep['name']}"
        if not os.path.exists(base_path):
            os.makedirs(base_path)


        # Map dataset names to parameter sets; an unknown name now raises a
        # KeyError instead of silently reusing the previous dataset_param.
        dataset_param = {
            'outdoor1': outdoor1_params,
            'outdoor2': outdoor2_params,
            'poster_6dof': poster_6dof_params,
            'hdr_boxes': hdr_boxes_params,
            'poster_translation': poster_translation_params,
            'indoor1': indoor1_params,
            'indoor2': indoor2_params,
            'indoor3': indoor3_params,
            'indoor4': indoor4_params,
            'outdoor_night1': outdoor_night1_params,
            'outdoor_night2': outdoor_night2_params,
            'outdoor_night3': outdoor_night3_params,
        }[ep['dataset']]


        with open(f"{base_path}/config.txt", "w") as f:
            f.write("experiment params:\n")
            f.write(pprint.pformat(ep))
            f.write("\n\n\n")
            f.write("dataset params:\n")
            f.write(pprint.pformat(dataset_param))



        print("Load Data Begin. ")
        start_frame = ep['start_frame']
        end_frame = ep['end_frame']
        model_path = ep['model']
        voxel_method = ep['voxel_method']
        select_events = ep['select_events']
        voxel_threshold = ep['voxel_threshold']
        findE_threshold = ep['findE_threshold']
        findE_prob = ep['findE_prob']
        reproject_err_threshold = ep['reproject_err_threshold']


        # Set parameters
        gt_path = dataset_param['gt_path']
        sensor_size = dataset_param['sensor_size']
        camIntrinsic = dataset_param['camera_intrinsic']
        h5Dataset = DynamicH5Dataset(dataset_param['dataset_path'], voxel_method=voxel_method)
        h5DataLoader = torch.utils.data.DataLoader(dataset=h5Dataset, batch_size=1, num_workers=6, shuffle=False)
        
        # model
        print("Load Model Begin. ")
        EVFlowNet_model = EVFlowNet(args).to(device)
        EVFlowNet_model.load_state_dict(torch.load(model_path))
        EVFlowNet_model.eval()
        # EVFlowNet_model.load_state_dict(torch.load('data/model/evflownet_1001_113912_outdoor2_5k/model0'))

        # Optimizer, scheduler, and loss are constructed as in training,
        # but unused in this evaluation loop.
        optimizer = torch.optim.Adam(EVFlowNet_model.parameters(),
                                     lr=args.initial_learning_rate,
                                     weight_decay=args.weight_decay)
        scheduler = torch.optim.lr_scheduler.ExponentialLR(
            optimizer=optimizer, gamma=args.learning_rate_decay)
        loss_fun = TotalLoss(args.smoothness_weight, args.photometric_loss_weight)

        print("Start Evaluation. ")    
        for iteration, item in enumerate(tqdm(h5DataLoader)):

            if iteration < start_frame:
                continue

            if iteration > end_frame:
                break

            voxel = item['voxel'].to(device)
            events = item['events'].to(device)
            frame = item['frame'].to(device)
            frame_ = item['frame_'].to(device)
            num_events = item['num_events'].to(device)
            image_name = "{}/img_{:07d}.png".format(base_path, iteration)

            events_vis = events[0].detach().cpu()
            flow_dict = EVFlowNet_model(voxel)
            flow_vis = flow_dict["flow3"][0].detach().cpu()

            # Compose the event images and warp the events with flow
            if select_events == 'only_pos':
                ev_bgn, ev_end, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, 1, sensor_size)

            elif select_events == 'only_neg':
                ev_bgn, ev_end, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, -1, sensor_size)

            elif select_events == 'mixed':
                ev_bgn_pos, ev_end_pos, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, 1, sensor_size)
                ev_bgn_neg, ev_end_neg, ev_img_neg, timestamps_neg = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, -1, sensor_size)

                ev_bgn_x = torch.cat([ev_bgn_pos[0], ev_bgn_neg[0]])
                ev_bgn_y = torch.cat([ev_bgn_pos[1], ev_bgn_neg[1]])
                ev_end_x = torch.cat([ev_end_pos[0], ev_end_neg[0]])
                ev_end_y = torch.cat([ev_end_pos[1], ev_end_neg[1]])
                ev_bgn = (ev_bgn_x, ev_bgn_y)
                ev_end = (ev_end_x, ev_end_y)


            start_t = item['timestamp_begin'].cpu().numpy()[0]
            end_t = item['timestamp_end'].cpu().numpy()[0]

            # Convert to numpy format
            ev_img_raw = torch_to_numpy(ev_img[0])
            ev_img_bgn = torch_to_numpy(ev_img[1])
            ev_img_end = torch_to_numpy(ev_img[2])
            ev_bgn_xs = torch_to_numpy(ev_bgn[0])
            ev_bgn_ys = torch_to_numpy(ev_bgn[1])
            ev_end_xs = torch_to_numpy(ev_end[0])
            ev_end_ys = torch_to_numpy(ev_end[1])

            timestamps_before = torch_to_numpy(timestamps[0])
            timestamps_after = torch_to_numpy(timestamps[1])
            frame_vis = torch_to_numpy(item['frame'][0])
            frame_vis_ = torch_to_numpy(item['frame_'][0])
            flow_vis = torch_to_numpy(flow_dict["flow3"][0])


            METHOD = "opencv"
            # METHOD = "opengv"

            if METHOD == "opencv":

                ######### Opencv (calculate R and t) #########
                p1 = np.dstack([ev_bgn_xs, ev_bgn_ys]).squeeze()
                p2 = np.dstack([ev_end_xs, ev_end_ys]).squeeze()

                E, mask = cv2.findEssentialMat(p1, p2, cameraMatrix=camIntrinsic, method=cv2.RANSAC, prob=findE_prob, threshold=findE_threshold)
                points, R, t, mask1, triPoints = cv2.recoverPose(E, p1, p2, cameraMatrix=camIntrinsic, mask=mask, distanceThresh=5000)

            elif METHOD == "opengv":

                #### Calculate bearing vector manually ####
                ev_bgn_xs_undistorted = (ev_bgn_xs - 170.7684322973841) / 223.9940010790056
                ev_bgn_ys_undistorted = (ev_bgn_ys - 128.18711828338436) / 223.61783486959376
                ev_end_xs_undistorted = (ev_end_xs - 170.7684322973841) / 223.9940010790056
                ev_end_ys_undistorted = (ev_end_ys - 128.18711828338436) / 223.61783486959376

                bearing_p1 = np.dstack([ev_bgn_xs_undistorted, ev_bgn_ys_undistorted, np.ones_like(ev_bgn_xs)]).squeeze()
                bearing_p2 = np.dstack([ev_end_xs_undistorted, ev_end_ys_undistorted, np.ones_like(ev_end_xs)]).squeeze()

                bearing_p1 /= np.linalg.norm(bearing_p1, axis=1)[:, None]
                bearing_p2 /= np.linalg.norm(bearing_p2, axis=1)[:, None]

                bearing_p1 = bearing_p1.astype('float64')
                bearing_p2 = bearing_p2.astype('float64')

                # focal_length = 223.75
                # reproject_err_threshold = 0.1
                ransac_threshold = 1e-6
                ransac_transformation = pyopengv.relative_pose_ransac(bearing_p1, bearing_p2, "NISTER", threshold=ransac_threshold, iterations=1000, probability=0.999)
                R = ransac_transformation[:, 0:3]
                t = ransac_transformation[:, 3]

            # Interpolate Tw1 and Tw2
            Tw1 = get_interpolated_gt_pose(gt_path, start_t)
            Tw2 = get_interpolated_gt_pose(gt_path, end_t)
            Tw2_inv = inverse_se3_matrix(Tw2)

            normed_t = t.squeeze() / np.linalg.norm(t)
            gt_t = Tw2[0:3, 3] - Tw1[0:3, 3]
            normed_gt = gt_t / np.linalg.norm(gt_t)

            # Relative pose error: angle (deg) between the predicted and
            # ground-truth translation directions.
            rpe = np.rad2deg(np.arccos(np.dot(normed_t, normed_gt)))

            predict_ts.append(start_t)

            # Store gt vector for later visualization
            gt_interpolated.append(Tw1)
            gt_scale = np.linalg.norm(Tw2[0:3, 3] - Tw1[0:3, 3])
            pd_scale = np.linalg.norm(t)

            t *= gt_scale / pd_scale  # scale translation vector with gt_scale

            # Predicted relative pose
            P = create_se3_matrix_with_R_t(R, t)
            P_inv = inverse_se3_matrix(P)

            # Relative translation error for the pose and its inverse
            E = Tw2_inv @ Tw1 @ P
            trans_e = np.linalg.norm(E[0:3, 3])

            E_inv = Tw2_inv @ Tw1 @ P_inv
            trans_e_inv = np.linalg.norm(E_inv[0:3, 3])

            # Relative rotation error: magnitude of the rotation log-map
            rre = np.linalg.norm(logm(E[:3, :3]))
            rre_inv = np.linalg.norm(logm(E_inv[:3, :3]))

            rpe_stats.append(rpe)
            rre_stats.append(rre_inv)
            rpe_rre_info.append([rpe, rre, rre_inv])


            # Heuristic: if the inverted pose gives a large scaled error, keep P;
            # otherwise prefer P_inv.
            if trans_e_inv/gt_scale > 1.85:
                predict_camera_frame.append(P)

                trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale])
                print(trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale, " || ", rpe, rre, rre_inv)

                trans_e_stats.append(trans_e/gt_scale)
            else:                
                trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale])
                print(trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale, " || ", rpe, rre, rre_inv)

                trans_e = trans_e_inv
                predict_camera_frame.append(P_inv)

                trans_e_stats.append(trans_e_inv/gt_scale)

            cvshow_all_eval(ev_img_raw, ev_img_bgn, ev_img_end, (ev_bgn_xs, ev_bgn_ys), \
                (ev_end_xs, ev_end_ys), timestamps_before, timestamps_after, frame_vis, \
                frame_vis_, flow_vis, image_name, sensor_size, trans_e, gt_scale)


            predict_world_frame = relative_to_absolute_pose(np.array(predict_camera_frame))
            visualize_trajectory(predict_world_frame, "{}/path_{:07d}.png".format(base_path, iteration))
            visualize_trajectory(np.array(gt_interpolated), "{}/gt_path_{:07d}.png".format(base_path, iteration))


        rpe_stats = np.array(rpe_stats)
        rre_stats = np.array(rre_stats)
        trans_e_stats = np.array(trans_e_stats)

        with open(f"{base_path}/final_stats.txt", "w") as f:
            f.write("rpe_median, arpe_deg, arpe_outliner_10, arpe_outliner_15\n")
            f.write(f"{np.median(rpe_stats)}, {np.mean(rpe_stats)}, {100*len(rpe_stats[rpe_stats>10])/len(rpe_stats)}, {100*len(rpe_stats[rpe_stats>15])/len(rpe_stats)}\n\n")
            
            print("rpe_median, arpe_deg, arpe_outliner_10, arpe_outliner_15")
            print(f"{np.median(rpe_stats)}, {np.mean(rpe_stats)}, {100*len(rpe_stats[rpe_stats>10])/len(rpe_stats)}, {100*len(rpe_stats[rpe_stats>15])/len(rpe_stats)}\n")

            f.write("rre_median, arre_rad, arre_outliner_0.05, arpe_outliner_0.1\n")
            f.write(f"{np.median(rre_stats)}, {np.mean(rre_stats)}, {100*len(rre_stats[rre_stats>0.05])/len(rre_stats)}, {100*len(rre_stats[rre_stats>0.1])/len(rre_stats)}\n\n")

            print("rre_median, arre_rad, arre_outliner_0.05, arpe_outliner_0.1")
            print(f"{np.median(rre_stats)}, {np.mean(rre_stats)}, {100*len(rre_stats[rre_stats>0.05])/len(rre_stats)}, {100*len(rre_stats[rre_stats>0.1])/len(rre_stats)}\n\n")

            f.write("trans_e_median, trans_e_avg, trans_e_outliner_0.5, trans_e_outliner_1.0\n")
            f.write(f"{np.median(trans_e_stats)}, {np.mean(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>0.5])/len(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>1.0])/len(trans_e_stats)}\n")

            print("trans_e_median, trans_e_avg, trans_e_outliner_0.5, trans_e_outliner_1.0\n")
            print(f"{np.median(trans_e_stats)}, {np.mean(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>0.5])/len(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>1.0])/len(trans_e_stats)}\n")




        with open(f"{base_path}/rpe_rre.txt", "w") as f:
            for row in rpe_rre_info:
                for item in row:
                    f.write(f"{item}, ")
                f.write("\n")

        with open(f"{base_path}/trans_e.txt", "w") as f:
            for row in trans_e_info:
                for item in row:
                    f.write(f"{item}, ")
                f.write("\n")
        
        with open(f"{base_path}/predict_pose.txt", "w") as f:
            for p in predict_world_frame:
                f.write(f"{p}\n")
        
        with open(f"{base_path}/gt_pose.txt", "w") as f:
            for p in np.array(gt_interpolated):
                f.write(f"{p}\n")

        with open(f"{base_path}/predict_tum.txt", "w") as f:
            for ts, p in zip(predict_ts, predict_world_frame):
                qx, qy, qz, qw = rotation_matrix_to_quaternion(p[:3, :3])
                tx, ty, tz = p[:3, 3]
                f.write(f"{ts} {tx} {ty} {tz} {qx} {qy} {qz} {qw}\n")

        with open(f"{base_path}/gt_tum.txt", "w") as f:
            for ts, p in zip(predict_ts, np.array(gt_interpolated)):
                qx, qy, qz, qw = rotation_matrix_to_quaternion(p[:3, :3])
                tx, ty, tz = p[:3, 3]
                f.write(f"{ts} {tx} {ty} {tz} {qx} {qy} {qz} {qw}\n")


        predict_world_frame = relative_to_absolute_pose(np.array(predict_camera_frame))
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path00.png", show=True)
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path01.png", rotate='x')
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path02.png", rotate='y')
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path03.png", rotate='z')