def clean_data(self):
        """Remove invalid entries from the raw dataset into work_dir/cleaned_data."""
        dataset_dir = self.config['dataset_dir']
        work_dir = self.config['work_dir']

        # route this module's log output to its own file
        self.logger.set_log_file(os.path.join(work_dir, 'logs/log_clean_data.txt'))

        # time the whole cleaning step
        local_timer = Timer('Data cleaning Module')
        local_timer.start()

        # always start from an empty output directory
        cleaned_data_dir = os.path.join(work_dir, 'cleaned_data')
        if os.path.exists(cleaned_data_dir):
            shutil.rmtree(cleaned_data_dir)
        os.mkdir(cleaned_data_dir)

        # normalize a single directory into a list of directories
        if not isinstance(dataset_dir, (list, tuple)):
            dataset_dir = [dataset_dir]
        clean_data(dataset_dir, cleaned_data_dir)

        local_timer.mark('Data cleaning done')
        logging.info(local_timer.summary())
Beispiel #2
0
    def clean_data_general(self, ift):
        """Remove invalid entries from the raw dataset into work_dir/cleaned_data.

        Args:
            ift: input file type, forwarded to the clean_data_general() helper.
        """
        dataset_dir = self.config["dataset_dir"]
        work_dir = self.config["work_dir"]

        # route this module's log output to its own file
        self.logger.set_log_file(os.path.join(work_dir, "logs/log_clean_data.txt"))

        # time the whole cleaning step
        local_timer = Timer("Data cleaning Module")
        local_timer.start()

        # always start from an empty output directory
        cleaned_data_dir = os.path.join(work_dir, "cleaned_data")
        if os.path.exists(cleaned_data_dir):
            shutil.rmtree(cleaned_data_dir)
        os.mkdir(cleaned_data_dir)

        # normalize a single directory into a list of directories
        if not isinstance(dataset_dir, (list, tuple)):
            dataset_dir = [dataset_dir]
        clean_data_general(dataset_dir, cleaned_data_dir, ift=ift)

        local_timer.mark("Data cleaning done")
        logging.info(local_timer.summary())
    def __init__(self):
        """Set up logging, the multiprocessing manager, queues, and worker processes."""
        # Configure a stream logger dedicated to the crawler.
        self.__logger = logging.getLogger('file_crawler')
        self.__logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
        self.__logger.addHandler(handler)

        self.__manager = FileCrawlerManager()
        self.__manager.start()

        # Process-safe CLI arguments shared through the manager.
        self.__cli_args = get_cli_args(self.__manager)

        # FIXME: This doesn't work correctly on Windows.
        # Per https://docs.python.org/2.7/library/multiprocessing.html#logging
        # only the logger level should be inherited by child processes, but in
        # practice even the level is not inherited; several attempts to pass
        # the level down have not worked.
        if self.__cli_args.verbose:
            self.__logger.setLevel(logging.DEBUG)

        self.__timer = Timer(
            start_message="Starting to scan %s for files matching %s" %
            (self.__cli_args.root_dir, self.__cli_args.keyword.pattern))

        # Work queues for directories to walk and files to process.
        self.__dir_queue = self.__manager.Queue()
        self.__file_queue = self.__manager.Queue()

        self.__results = self.__manager.FileCrawlerResults(
            self.__logger.getEffectiveLevel())

        self.__processes = list()
        self._create_processes()
Beispiel #4
0
    def run_crop_image_general(
        self,
        ift,
        oft,
        execute_parallel,
        remove_aux_file,
        apply_tone_mapping,
        joint_tone_mapping,
    ):
        """Crop images (optionally tone-mapping them), logging timings.

        All arguments are forwarded unchanged to image_crop_general().
        """
        work_dir = self.config["work_dir"]

        # route this module's log output to its own file
        self.logger.set_log_file(os.path.join(work_dir, "logs/log_crop_image.txt"))

        # time the whole cropping step
        local_timer = Timer("Image cropping module")
        local_timer.start()

        # delegate the actual cropping / tone mapping
        image_crop_general(
            work_dir,
            ift,
            oft,
            execute_parallel,
            remove_aux_file,
            apply_tone_mapping,
            joint_tone_mapping,
        )

        local_timer.mark("image cropping done")
        logging.info(local_timer.summary())
    def run_inspect_sfm_perspective(self):
        """Inspect the perspective-camera SfM results.

        Runs a SparseInspector over both the 'tri' and 'tri_ba'
        reconstructions under colmap/sfm_perspective, writing output to a
        sibling 'inspect_<subdir>' directory (recreated from scratch).
        """
        work_dir = os.path.abspath(self.config['work_dir'])

        log_file = os.path.join(work_dir, 'logs/log_inspect_sfm_perspective.txt')
        self.logger.set_log_file(log_file)
        local_timer = Timer('inspect sfm')
        local_timer.start()

        # inspect sfm perspective
        sfm_dir = os.path.join(work_dir, 'colmap/sfm_perspective')
        # the COLMAP database is shared by both reconstructions; hoisted out
        # of the loop since it is loop-invariant
        db_path = os.path.join(sfm_dir, 'database.db')

        for subdir in ['tri', 'tri_ba']:
            # renamed from 'dir' to avoid shadowing the builtin
            recon_dir = os.path.join(sfm_dir, subdir)
            logging.info('\ninspecting {} ...'.format(recon_dir))

            # start from a clean inspection directory
            inspect_dir = os.path.join(sfm_dir, 'inspect_' + subdir)
            if os.path.exists(inspect_dir):
                shutil.rmtree(inspect_dir)

            sfm_inspector = SparseInspector(recon_dir, db_path, inspect_dir, camera_model='PERSPECTIVE')
            sfm_inspector.inspect_all()

        # stop local timer
        local_timer.mark('inspect sfm perspective done')
        logging.info(local_timer.summary())
Beispiel #6
0
    def __init__(self, sess, model, data, logger):
        """Load the VOC-2007 trainval imdb/roidb and initialize trainer state."""
        super(CTPNTrainer, self).__init__(sess, model, data, logger)
        self.imdb = data.load_imdb('voc_2007_trainval')
        self.roidb = data.get_training_roidb(self.imdb)
        # Fall back to None when no pretrained model is configured
        # (empty string / falsy config value).
        self.pretrained_model = cfg.PRETRAINED_MODEL or None
        self.timer = Timer()
    def train(self):
        """Run the optimization loop until ``config.opt.max_iter`` iterations.

        Resumes from ``self.curr_iter``.  Periodically steps the LR scheduler,
        saves checkpoints, and (on the master process only) logs batch
        statistics to TensorBoard and the log file.
        """

        curr_iter = self.curr_iter
        data_loader = self.data_loader
        data_loader_iter = self.data_loader.__iter__()
        data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()

        total_loss = 0
        total_num = 0.0

        while (curr_iter < self.config.opt.max_iter):

            curr_iter += 1
            # fractional epoch: iterations completed / iterations per epoch
            epoch = curr_iter / len(self.data_loader)
            batch_loss, batch_pos_loss, batch_neg_loss = self._train_iter(
                data_loader_iter, [data_meter, data_timer, total_timer])
            total_loss += batch_loss
            total_num += 1

            # update the LR on a fixed schedule (and once at the very first iter)
            if curr_iter % self.lr_update_freq == 0 or curr_iter == 1:
                lr = self.scheduler.get_last_lr()  # read the LR before stepping
                self.scheduler.step()
                if self.is_master:
                    logging.info(f" Epoch: {epoch}, LR: {lr}")
                    self._save_checkpoint(curr_iter,
                                          'checkpoint_' + str(curr_iter))

            # periodic stats: only the master process writes/logs
            if curr_iter % self.config.trainer.stat_freq == 0 and self.is_master:
                self.writer.add_scalar('train/loss', batch_loss, curr_iter)
                self.writer.add_scalar('train/pos_loss', batch_pos_loss,
                                       curr_iter)
                self.writer.add_scalar('train/neg_loss', batch_neg_loss,
                                       curr_iter)
                logging.info(
                    "Train Epoch: {:.3f} [{}/{}], Current Loss: {:.3e}".format(
                        epoch, curr_iter, len(self.data_loader), batch_loss) +
                    "\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}, LR: {}"
                    .format(data_meter.avg, total_timer.avg - data_meter.avg,
                            total_timer.avg, self.scheduler.get_last_lr()))
                # reset meters so each report covers only the last window
                data_meter.reset()
                total_timer.reset()
    def run_aggregate_3d(self):
        """Fuse per-view results into an aggregated 3D output, logging timings."""
        work_dir = self.config['work_dir']
        # route this module's log output to its own file
        self.logger.set_log_file(os.path.join(work_dir, 'logs/log_aggregate_3d.txt'))

        # time the aggregation step
        local_timer = Timer('3D aggregation module')
        local_timer.start()

        aggregate_3d.run_fuse(work_dir)

        local_timer.mark('3D aggregation done')
        logging.info(local_timer.summary())
Beispiel #9
0
def extract_features_batch(model, config, source_path, target_path, voxel_size,
                           device):
    """Extract and save point-cloud features for every 3DMatch folder.

    Walks each non-evaluation folder under ``source_path``, extracts features
    for every ``.ply`` file, and writes one compressed ``.npz`` (points,
    downsampled xyz, features) per cloud into ``target_path``.  A ``list.txt``
    index mapping folder name to file count is written alongside.

    Args:
        model: feature-extraction network; put into eval mode here.
        config: unused here; kept for interface compatibility with callers.
        source_path: root directory containing the 3DMatch folders.
        target_path: output directory for the .npz files and list.txt.
        voxel_size: downsampling voxel size passed to extract_features().
        device: torch device the model runs on.
    """
    folders = get_folder_list(source_path)
    assert len(
        folders) > 0, f"Could not find 3DMatch folders under {source_path}"
    logging.info(folders)
    list_file = os.path.join(target_path, "list.txt")
    timer, tmeter = Timer(), AverageMeter()
    num_feat = 0
    model.eval()

    # BUGFIX: open list.txt with a context manager so the handle is closed
    # even if feature extraction raises part-way through.
    with open(list_file, "w") as f:
        for fo in folders:
            if 'evaluation' in fo:
                continue
            files = get_file_list(fo, ".ply")
            fo_base = os.path.basename(fo)
            f.write("%s %d\n" % (fo_base, len(files)))
            for i, fi in enumerate(files):
                # Extract features from a file
                pcd = o3d.io.read_point_cloud(fi)
                save_fn = "%s_%03d" % (fo_base, i)
                if i % 100 == 0:
                    logging.info(f"{i} / {len(files)}: {save_fn}")

                timer.tic()
                xyz_down, feature = extract_features(model,
                                                     xyz=np.array(pcd.points),
                                                     rgb=None,
                                                     normal=None,
                                                     voxel_size=voxel_size,
                                                     device=device,
                                                     skip_check=True)
                t = timer.toc()
                # skip the first (warm-up) file when accumulating timings
                if i > 0:
                    tmeter.update(t)
                    num_feat += len(xyz_down)

                np.savez_compressed(os.path.join(target_path, save_fn),
                                    points=np.array(pcd.points),
                                    xyz=xyz_down,
                                    feature=feature.detach().cpu().numpy())
                if i % 20 == 0 and i > 0:
                    # the last term is the per-point feature-extraction time
                    logging.info(
                        f'Average time: {tmeter.avg}, FPS: {num_feat / tmeter.sum}, time / feat: {tmeter.sum / num_feat}, '
                    )
    def run_crop_image(self):
        """Crop images and tone map them, logging timings."""
        work_dir = self.config['work_dir']

        # route this module's log output to its own file
        self.logger.set_log_file(os.path.join(work_dir, 'logs/log_crop_image.txt'))

        # time the whole cropping step
        local_timer = Timer('Image cropping module')
        local_timer.start()

        # delegate the actual cropping / tone mapping
        image_crop(work_dir)

        local_timer.mark('image cropping done')
        logging.info(local_timer.summary())
    def run_aggregate_2p5d(self):
        """Fuse per-view results into an aggregated 2.5D output, logging timings."""
        work_dir = self.config['work_dir']
        # route this module's log output to its own file
        self.logger.set_log_file(os.path.join(work_dir, 'logs/log_aggregate_2p5d.txt'))

        # time the aggregation step
        local_timer = Timer('2.5D aggregation module')
        local_timer.start()

        # optional cap on worker processes; -1 means no explicit limit
        max_processes = -1
        if 'aggregate_max_processes' in self.config:
            max_processes = self.config['aggregate_max_processes']

        aggregate_2p5d.run_fuse(work_dir, max_processes=max_processes)

        local_timer.mark('2.5D aggregation done')
        logging.info(local_timer.summary())
Beispiel #12
0
def calibrate_neighbors(dataset,
                        config,
                        collate_fn,
                        keep_ratio=0.8,
                        samples_threshold=2000):
    """Estimate per-layer neighborhood-size limits from batch statistics.

    Collates dataset samples one at a time, histograms the neighbor counts of
    every layer, and returns, for each layer, the neighbor count that covers
    ``keep_ratio`` of all observed neighborhoods.
    """
    timer = Timer()
    last_display = timer.total_time

    # Upper bound on neighbors per point: volume of a sphere of radius
    # (deform_radius + 1), per the config.
    hist_n = int(np.ceil(4 / 3 * np.pi * (config.deform_radius + 1)**3))
    neighb_hists = np.zeros((config.num_layers, hist_n), dtype=np.int32)

    # Accumulate neighborhood-size histograms over at most one epoch.
    for sample_idx in range(len(dataset)):
        timer.tic()
        batched_input = collate_fn([dataset[sample_idx]],
                                   config,
                                   neighborhood_limits=[hist_n] * 5)

        # Valid neighbors have an index below the matrix height; padded
        # entries use an out-of-range index, so they are excluded here.
        counts = [
            torch.sum(neighb_mat < neighb_mat.shape[0], dim=1).numpy()
            for neighb_mat in batched_input['neighbors']
        ]
        neighb_hists += np.vstack(
            [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts])
        timer.toc()

        # Throttled progress display.
        if timer.total_time - last_display > 0.1:
            last_display = timer.total_time
            print(f"Calib Neighbors {sample_idx:08d}: timings {timer.total_time:4.2f}s")

        # Stop once every layer has seen enough samples.
        if np.min(np.sum(neighb_hists, axis=1)) > samples_threshold:
            break

    cumsum = np.cumsum(neighb_hists.T, axis=0)
    # Per layer: the smallest bin index covering keep_ratio of all samples.
    percentiles = np.sum(cumsum < (keep_ratio * cumsum[hist_n - 1, :]), axis=0)

    print('\n')

    return percentiles
    def run_colmap_mvs(self, window_radius=3):
        """Run COLMAP MVS: a photometric pass followed by consistency filtering.

        Args:
            window_radius: matching-window radius forwarded to both passes.
        """
        work_dir = self.config['work_dir']
        mvs_dir = os.path.join(work_dir, 'colmap/mvs')

        # route this module's log output to its own file
        self.logger.set_log_file(os.path.join(work_dir, 'logs/log_mvs.txt'))
        # time the whole MVS stage
        local_timer = Timer('Colmap MVS Module')
        local_timer.start()

        # first run PMVS without filtering
        run_photometric_mvs(mvs_dir, window_radius)

        # next do forward-backward checking and filtering
        run_consistency_check(mvs_dir, window_radius)

        local_timer.mark('Colmap MVS done')
        logging.info(local_timer.summary())
    def run_derive_approx(self):
        """Derive camera approximations (affine lat/lon/alt and perspective ENU)."""
        work_dir = self.config['work_dir']

        # route this module's log output to its own file
        self.logger.set_log_file(os.path.join(work_dir, 'logs/log_derive_approx.txt'))

        # time the approximation step
        local_timer = Timer('Derive Approximation Module')
        local_timer.start()

        # derive both approximations for later pipeline stages
        appr = CameraApprox(work_dir)
        appr.approx_affine_latlonalt()
        appr.approx_perspective_enu()

        local_timer.mark('Derive approximation done')
        logging.info(local_timer.summary())
Beispiel #15
0
    def ctpn(self, sess, net, image_name):
        """Detect text in a single image with CTPN and draw the result.

        :param sess: TensorFlow session
        :param net: the test network to run
        :param image_name: path of the single image to test
        :return: None (detected boxes are rendered via ``self.draw_boxes``)
        """
        timer = Timer()
        timer.tic()

        # Load the image
        image = cv2.imread(image_name)
        shape = image.shape[:2]  # (height, width)
        # resize_im returns the rescaled image and the scale factor,
        # defined as resized_size / original_size
        img, scale = TestClass.resize_im(image,
                                         scale=self._cfg.TEST.SCALE,
                                         max_scale=self._cfg.TEST.MAX_SCALE)

        # Subtract the per-channel pixel means
        im_orig = img.astype(np.float32, copy=True)
        im_orig -= self._cfg.TRAIN.PIXEL_MEANS

        # Forward the resized, mean-subtracted image through the network;
        # returns scores and text proposals mapped back to original-image coordinates
        scores, boxes = TestClass.test_ctpn(sess, net, im_orig, scale)

        # Assemble the text proposals into full text lines
        textdetector = TextDetector(self._cfg)
        """
        Arguments, in order:
        an N x 4 matrix, one text-fragment box per row, already mapped back to the original image
        an N-dim vector of the corresponding scores
        a 2-dim vector: height and width of the original image
        Returns:
        an N x 9 matrix of N merged, complete text boxes; in each row the first eight
        values are the top-left, top-right, bottom-left, bottom-right corners, and
        the last value is the box score
        """
        boxes = textdetector.detect(boxes, scores, shape)
        self.draw_boxes(image, image_name, boxes, scale)
        timer.toc()
        print(('Detection took {:.3f}s for '
               '{:d} object proposals').format(timer.total_time,
                                               boxes.shape[0]))
    def run_colmap_sfm_perspective(self, weight=0.01):
        """Run COLMAP SfM with a perspective camera model.

        Args:
            weight: regularization weight forwarded to
                colmap_sfm_perspective.run_sfm.
        """
        work_dir = os.path.abspath(self.config['work_dir'])
        sfm_dir = os.path.join(work_dir, 'colmap/sfm_perspective')
        if not os.path.exists(sfm_dir):
            os.mkdir(sfm_dir)

        log_file = os.path.join(work_dir, 'logs/log_sfm_perspective.txt')
        self.logger.set_log_file(log_file)
        # create a local timer
        local_timer = Timer('Colmap SfM Module, perspective camera')
        local_timer.start()

        # create a symlink to avoid copying the images
        images_link = os.path.join(sfm_dir, 'images')
        # BUGFIX: os.path.exists() returns False for a broken symlink, so a
        # dangling 'images' link was never removed and os.symlink() failed;
        # os.path.lexists() detects the link itself regardless of its target.
        if os.path.lexists(images_link):
            os.unlink(images_link)
        os.symlink(os.path.relpath(os.path.join(work_dir, 'colmap/subset_for_sfm/images'), sfm_dir),
                   images_link)
        init_camera_file = os.path.join(work_dir, 'colmap/subset_for_sfm/perspective_dict.json')
        colmap_sfm_perspective.run_sfm(work_dir, sfm_dir, init_camera_file, weight)

        # stop local timer
        local_timer.mark('Colmap SfM done')
        logging.info(local_timer.summary())
from lib.spark import spark, sc
from lib.plotly import py
from lib.timer import Timer
import plotly.graph_objs as go
from pyspark.ml.feature import VectorAssembler
from scipy.spatial import ConvexHull


with Timer('read', 'Reading data'):
    # Load the full trip table; keep an untouched reference in df_base.
    df = df_base = spark.read.csv('data/yellow_tripdata_2016-01.csv', header=True, inferSchema=True)


with Timer('sample', 'Sampling data'):
    # Work on a 0.5% sample (without replacement) to keep the pipeline fast.
    df = df.sample(False, 0.005)

from lib.process import process

with Timer('process', 'Cleaning invalid data'):
    # Drop invalid rows via the project's cleaning routine.
    df = process(df)

from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType
from pyspark import StorageLevel

# Partition the 24 hours of the day into K contiguous groups of N hours each,
# e.g. K=6 gives groups of 4 hours: {0: 0-3, 1: 4-7, ...}.
K = 6
N = 24//K
groups = {i: range(i*N, i*N+N) for i in range(K)}


@udf(returnType=IntegerType())
def get_group(d):
  def _train_epoch(self, epoch):
    """Train for one epoch using the triplet loss.

    Gradients are accumulated over ``self.iter_size`` mini-batches before
    each optimizer step (Caffe-style iter_size); loss and distance stats are
    logged every ``config.stat_freq`` effective iterations.

    Args:
      epoch: 1-based epoch index, used to offset the global step counter.
    """
    config = self.config

    gc.collect()
    self.model.train()

    # Epoch starts from 1
    total_loss = 0
    total_num = 0.0
    data_loader = self.data_loader
    data_loader_iter = self.data_loader.__iter__()
    iter_size = self.iter_size
    data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()
    pos_dist_meter, neg_dist_meter = AverageMeter(), AverageMeter()
    start_iter = (epoch - 1) * (len(data_loader) // iter_size)
    for curr_iter in range(len(data_loader) // iter_size):
      self.optimizer.zero_grad()
      batch_loss = 0
      data_time = 0
      total_timer.tic()
      for iter_idx in range(iter_size):
        data_timer.tic()
        # BUGFIX: iterator.next() is Python 2 only; use the builtin next()
        input_dict = next(data_loader_iter)
        data_time += data_timer.toc(average=False)

        # pairs consist of (xyz1 index, xyz0 index)
        sinput0 = ME.SparseTensor(
            input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.device)
        F0 = self.model(sinput0).F

        sinput1 = ME.SparseTensor(
            input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.device)
        F1 = self.model(sinput1).F

        pos_pairs = input_dict['correspondences']
        loss, pos_dist, neg_dist = self.triplet_loss(
            F0,
            F1,
            pos_pairs,
            num_pos=config.triplet_num_pos * config.batch_size,
            num_hn_samples=config.triplet_num_hn * config.batch_size,
            num_rand_triplet=config.triplet_num_rand * config.batch_size)
        # normalize so the accumulated gradient matches one large batch
        loss /= iter_size
        loss.backward()
        batch_loss += loss.item()
        pos_dist_meter.update(pos_dist)
        neg_dist_meter.update(neg_dist)

      self.optimizer.step()
      gc.collect()

      torch.cuda.empty_cache()

      total_loss += batch_loss
      total_num += 1.0
      total_timer.toc()
      data_meter.update(data_time)

      if curr_iter % self.config.stat_freq == 0:
        self.writer.add_scalar('train/loss', batch_loss, start_iter + curr_iter)
        logging.info(
            "Train Epoch: {} [{}/{}], Current Loss: {:.3e}, Pos dist: {:.3e}, Neg dist: {:.3e}"
            .format(epoch, curr_iter,
                    len(self.data_loader) //
                    iter_size, batch_loss, pos_dist_meter.avg, neg_dist_meter.avg) +
            "\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}".format(
                data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg))
        # reset meters so each report covers only the last window
        pos_dist_meter.reset()
        neg_dist_meter.reset()
        data_meter.reset()
        total_timer.reset()
  def _train_epoch(self, epoch):
    """Train for one epoch with the hardest-contrastive loss.

    Gradients are accumulated over ``self.iter_size`` mini-batches per
    optimizer step; total/positive/negative loss components are logged every
    ``config.stat_freq`` effective iterations.

    Args:
      epoch: 1-based epoch index, used to offset the global step counter.
    """
    gc.collect()
    self.model.train()
    # Epoch starts from 1
    total_loss = 0
    total_num = 0.0
    data_loader = self.data_loader
    data_loader_iter = self.data_loader.__iter__()
    iter_size = self.iter_size
    data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()
    start_iter = (epoch - 1) * (len(data_loader) // iter_size)
    for curr_iter in range(len(data_loader) // iter_size):
      self.optimizer.zero_grad()
      batch_pos_loss, batch_neg_loss, batch_loss = 0, 0, 0

      data_time = 0
      total_timer.tic()
      for iter_idx in range(iter_size):
        data_timer.tic()
        # BUGFIX: iterator.next() is Python 2 only; use the builtin next()
        input_dict = next(data_loader_iter)
        data_time += data_timer.toc(average=False)

        sinput0 = ME.SparseTensor(
            input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.device)
        F0 = self.model(sinput0).F

        sinput1 = ME.SparseTensor(
            input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.device)

        F1 = self.model(sinput1).F

        pos_pairs = input_dict['correspondences']
        pos_loss, neg_loss = self.contrastive_hardest_negative_loss(
            F0,
            F1,
            pos_pairs,
            num_pos=self.config.num_pos_per_batch * self.config.batch_size,
            num_hn_samples=self.config.num_hn_samples_per_batch *
            self.config.batch_size)

        # normalize so the accumulated gradient matches one large batch
        pos_loss /= iter_size
        neg_loss /= iter_size
        loss = pos_loss + self.neg_weight * neg_loss
        loss.backward()

        batch_loss += loss.item()
        batch_pos_loss += pos_loss.item()
        batch_neg_loss += neg_loss.item()

      self.optimizer.step()
      gc.collect()

      torch.cuda.empty_cache()

      total_loss += batch_loss
      total_num += 1.0
      total_timer.toc()
      data_meter.update(data_time)

      if curr_iter % self.config.stat_freq == 0:
        self.writer.add_scalar('train/loss', batch_loss, start_iter + curr_iter)
        self.writer.add_scalar('train/pos_loss', batch_pos_loss, start_iter + curr_iter)
        self.writer.add_scalar('train/neg_loss', batch_neg_loss, start_iter + curr_iter)
        logging.info(
            "Train Epoch: {} [{}/{}], Current Loss: {:.3e} Pos: {:.3f} Neg: {:.3f}"
            .format(epoch, curr_iter,
                    len(self.data_loader) //
                    iter_size, batch_loss, batch_pos_loss, batch_neg_loss) +
            "\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}".format(
                data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg))
        # reset meters so each report covers only the last window
        data_meter.reset()
        total_timer.reset()
  def _valid_epoch(self):
    """Evaluate feature matching and registration on the validation set.

    For each pair, extracts features, finds correspondences, estimates a
    transform, and accumulates loss / RTE / RRE / hit-ratio statistics.

    Returns:
      dict with 'loss', 'rre', 'rte', 'feat_match_ratio' and 'hit_ratio'
      averages over the evaluated pairs.
    """
    # Change the network to evaluation mode
    # NOTE(review): consider wrapping the loop in torch.no_grad() to save
    # memory — left unchanged to preserve behavior.
    self.model.eval()
    self.val_data_loader.dataset.reset_seed(0)
    num_data = 0
    hit_ratio_meter, feat_match_ratio, loss_meter, rte_meter, rre_meter = AverageMeter(
    ), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
    data_timer, feat_timer, matching_timer = Timer(), Timer(), Timer()
    tot_num_data = len(self.val_data_loader.dataset)
    if self.val_max_iter > 0:
      tot_num_data = min(self.val_max_iter, tot_num_data)
    data_loader_iter = self.val_data_loader.__iter__()

    for batch_idx in range(tot_num_data):
      data_timer.tic()
      # BUGFIX: iterator.next() is Python 2 only; use the builtin next()
      input_dict = next(data_loader_iter)
      data_timer.toc()

      # pairs consist of (xyz1 index, xyz0 index)
      feat_timer.tic()
      sinput0 = ME.SparseTensor(
          input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.device)
      F0 = self.model(sinput0).F

      sinput1 = ME.SparseTensor(
          input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.device)
      F1 = self.model(sinput1).F
      feat_timer.toc()

      matching_timer.tic()
      xyz0, xyz1, T_gt = input_dict['pcd0'], input_dict['pcd1'], input_dict['T_gt']
      xyz0_corr, xyz1_corr = self.find_corr(xyz0, xyz1, F0, F1, subsample_size=5000)
      T_est = te.est_quad_linear_robust(xyz0_corr, xyz1_corr)

      loss = corr_dist(T_est, T_gt, xyz0, xyz1, weight=None)
      loss_meter.update(loss)

      # relative translation error
      rte = np.linalg.norm(T_est[:3, 3] - T_gt[:3, 3])
      rte_meter.update(rte)
      # relative rotation error from the trace of R_est^T R_gt
      rre = np.arccos((np.trace(T_est[:3, :3].t() @ T_gt[:3, :3]) - 1) / 2)
      if not np.isnan(rre):
        rre_meter.update(rre)

      hit_ratio = self.evaluate_hit_ratio(
          xyz0_corr, xyz1_corr, T_gt, thresh=self.config.hit_ratio_thresh)
      hit_ratio_meter.update(hit_ratio)
      # a pair "matches" if more than 5% of correspondences are hits
      feat_match_ratio.update(hit_ratio > 0.05)
      matching_timer.toc()

      num_data += 1
      torch.cuda.empty_cache()

      if batch_idx % 100 == 0 and batch_idx > 0:
        logging.info(' '.join([
            f"Validation iter {num_data} / {tot_num_data} : Data Loading Time: {data_timer.avg:.3f},",
            f"Feature Extraction Time: {feat_timer.avg:.3f}, Matching Time: {matching_timer.avg:.3f},",
            f"Loss: {loss_meter.avg:.3f}, RTE: {rte_meter.avg:.3f}, RRE: {rre_meter.avg:.3f},",
            f"Hit Ratio: {hit_ratio_meter.avg:.3f}, Feat Match Ratio: {feat_match_ratio.avg:.3f}"
        ]))
        data_timer.reset()

    logging.info(' '.join([
        f"Final Loss: {loss_meter.avg:.3f}, RTE: {rte_meter.avg:.3f}, RRE: {rre_meter.avg:.3f},",
        f"Hit Ratio: {hit_ratio_meter.avg:.3f}, Feat Match Ratio: {feat_match_ratio.avg:.3f}"
    ]))
    return {
        "loss": loss_meter.avg,
        "rre": rre_meter.avg,
        "rte": rte_meter.avg,
        'feat_match_ratio': feat_match_ratio.avg,
        'hit_ratio': hit_ratio_meter.avg
    }
  def _train_epoch(self, epoch):
    """Train for one epoch with a hand-rolled contrastive loss.

    Positive pairs come from ground-truth correspondences; negative pairs
    are sampled uniformly at random.  Gradients are accumulated over
    ``self.iter_size`` mini-batches per optimizer step.

    Args:
      epoch: 1-based epoch index, used to offset the global step counter.
    """
    gc.collect()
    self.model.train()
    # Epoch starts from 1
    total_loss = 0
    total_num = 0.0

    data_loader = self.data_loader
    data_loader_iter = self.data_loader.__iter__()

    iter_size = self.iter_size
    start_iter = (epoch - 1) * (len(data_loader) // iter_size)

    data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()

    # Main training
    for curr_iter in range(len(data_loader) // iter_size):
      self.optimizer.zero_grad()
      batch_pos_loss, batch_neg_loss, batch_loss = 0, 0, 0

      data_time = 0
      total_timer.tic()
      for iter_idx in range(iter_size):
        # Caffe iter size
        data_timer.tic()
        # BUGFIX: iterator.next() is Python 2 only; use the builtin next()
        input_dict = next(data_loader_iter)
        data_time += data_timer.toc(average=False)

        # pairs consist of (xyz1 index, xyz0 index)
        sinput0 = ME.SparseTensor(
            input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.device)
        F0 = self.model(sinput0).F

        sinput1 = ME.SparseTensor(
            input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.device)
        F1 = self.model(sinput1).F

        N0, N1 = len(sinput0), len(sinput1)

        pos_pairs = input_dict['correspondences']
        neg_pairs = self.generate_rand_negative_pairs(pos_pairs, max(N0, N1), N0, N1)
        pos_pairs = pos_pairs.long().to(self.device)
        neg_pairs = torch.from_numpy(neg_pairs).long().to(self.device)

        neg0 = F0.index_select(0, neg_pairs[:, 0])
        neg1 = F1.index_select(0, neg_pairs[:, 1])
        pos0 = F0.index_select(0, pos_pairs[:, 0])
        pos1 = F1.index_select(0, pos_pairs[:, 1])

        # Positive loss: squared distance between matched features
        pos_loss = (pos0 - pos1).pow(2).sum(1)

        # Negative loss: hinge on the distance between random non-matches
        # (the 1e-4 keeps sqrt differentiable at zero)
        neg_loss = F.relu(self.neg_thresh -
                          ((neg0 - neg1).pow(2).sum(1) + 1e-4).sqrt()).pow(2)

        pos_loss_mean = pos_loss.mean() / iter_size
        neg_loss_mean = neg_loss.mean() / iter_size

        # Weighted loss
        loss = pos_loss_mean + self.neg_weight * neg_loss_mean
        loss.backward(
        )  # To accumulate gradient, zero gradients only at the begining of iter_size
        batch_loss += loss.item()
        batch_pos_loss += pos_loss_mean.item()
        batch_neg_loss += neg_loss_mean.item()

      self.optimizer.step()

      torch.cuda.empty_cache()

      total_loss += batch_loss
      total_num += 1.0
      total_timer.toc()
      data_meter.update(data_time)

      # Print logs
      if curr_iter % self.config.stat_freq == 0:
        self.writer.add_scalar('train/loss', batch_loss, start_iter + curr_iter)
        self.writer.add_scalar('train/pos_loss', batch_pos_loss, start_iter + curr_iter)
        self.writer.add_scalar('train/neg_loss', batch_neg_loss, start_iter + curr_iter)
        logging.info(
            "Train Epoch: {} [{}/{}], Current Loss: {:.3e} Pos: {:.3f} Neg: {:.3f}"
            .format(epoch, curr_iter,
                    len(self.data_loader) //
                    iter_size, batch_loss, batch_pos_loss, batch_neg_loss) +
            "\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}".format(
                data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg))
        # reset meters so each report covers only the last window
        data_meter.reset()
        total_timer.reset()
Beispiel #22
0
    def _valid_epoch(self, data_loader_iter):
        """Validate dense-feature matching on image pairs.

        For each pair, finds nearest-neighbor matches from image0 features to
        image1 features, measures the hit ratio against ground-truth pixel
        correspondences, and additionally tests reciprocity (mutual nearest
        neighbors).

        Args:
            data_loader_iter: iterator over validation batches, consumed via
                ``self.get_data``.

        Returns:
            dict with 'hit_ratio', 'reciprocity_ratio' and
            'reciprocity_hit_ratio' averages.
        """
        # Change the network to evaluation mode
        self.model.eval()
        num_data = 0
        hit_ratio_meter, reciprocity_ratio_meter = AverageMeter(
        ), AverageMeter()
        reciprocity_hit_ratio_meter = AverageMeter()
        data_timer, feat_timer = Timer(), Timer()
        tot_num_data = len(self.val_data_loader.dataset)
        if self.val_max_iter > 0:
            tot_num_data = min(self.val_max_iter, tot_num_data)

        for curr_iter in range(tot_num_data):
            data_timer.tic()
            input_dict = self.get_data(data_loader_iter)
            data_timer.toc()

            # pairs consist of (xyz1 index, xyz0 index)
            feat_timer.tic()
            with torch.no_grad():
                F0 = self.model(input_dict['img0'].to(self.device))
                F1 = self.model(input_dict['img1'].to(self.device))
            feat_timer.toc()

            # Test self.num_pos_per_batch * self.batch_size features only.
            _, _, H0, W0 = F0.shape
            _, _, H1, W1 = F1.shape
            for batch_idx, pair in enumerate(input_dict['pairs']):
                N = len(pair)
                # subsample the ground-truth correspondences for speed
                sel = np.random.choice(N,
                                       min(N, self.config.num_pos_per_batch),
                                       replace=False)
                curr_pair = pair[sel]
                # map full-resolution pixel coords to feature-map coords
                w0, h0, w1, h1 = torch.floor(curr_pair.t() /
                                             self.out_tensor_stride).long()
                feats0 = F0[batch_idx, :, h0, w0]
                nn_inds1 = find_nn_gpu(feats0,
                                       F1[batch_idx, :].view(F1.shape[1], -1),
                                       nn_max_n=self.config.nn_max_n,
                                       transposed=True)

                # Convert the index to coordinate: BxCxHxW
                xs1 = nn_inds1 % W1
                ys1 = nn_inds1 // W1

                # Test reciprocity
                nn_inds0 = find_nn_gpu(F1[batch_idx, :, ys1, xs1],
                                       F0[batch_idx, :].view(F0.shape[1], -1),
                                       nn_max_n=self.config.nn_max_n,
                                       transposed=True)

                # Convert the index to coordinate: BxCxHxW
                xs0 = nn_inds0 % W0
                ys0 = nn_inds0 // W0

                # a match is a hit if the NN lands within the inlier radius
                # (threshold is given in pixels, rescaled to feature stride)
                dist_sq = (w1 - xs1)**2 + (h1 - ys1)**2
                is_correct = dist_sq < (self.config.ucn_inlier_threshold_pixel
                                        / self.out_tensor_stride)**2
                hit_ratio_meter.update(is_correct.sum().item() /
                                       len(is_correct))

                # Recipocity test result
                dist_sq_nn = (w0 - xs0)**2 + (h0 - ys0)**2
                mask = dist_sq_nn < (self.config.ucn_inlier_threshold_pixel /
                                     self.out_tensor_stride)**2
                reciprocity_ratio_meter.update(mask.sum().item() /
                                               float(len(mask)))
                # NOTE(review): 'eps' is a module-level constant defined
                # outside this view; it guards against division by zero
                reciprocity_hit_ratio_meter.update(
                    is_correct[mask].sum().item() / (mask.sum().item() + eps))

                torch.cuda.empty_cache()
                # visualize_image_correspondence(input_dict['img0'][batch_idx, 0].numpy() + 0.5,
                #                                input_dict['img1'][batch_idx, 0].numpy() + 0.5,
                #                                F0[batch_idx], F1[batch_idx], curr_iter,
                #                                self.config)

            num_data += 1

            if num_data % 100 == 0:
                logging.info(', '.join([
                    f"Validation iter {num_data} / {tot_num_data} : Data Loading Time: {data_timer.avg:.3f}",
                    f"Feature Extraction Time: {feat_timer.avg:.3f}, Hit Ratio: {hit_ratio_meter.avg}",
                    f"Reciprocity Ratio: {reciprocity_ratio_meter.avg}, Reci Filtered Hit Ratio: {reciprocity_hit_ratio_meter.avg}"
                ]))
                data_timer.reset()

        logging.info(', '.join([
            f"Validation : Data Loading Time: {data_timer.avg:.3f}",
            f"Feature Extraction Time: {feat_timer.avg:.3f}, Hit Ratio: {hit_ratio_meter.avg}",
            f"Reciprocity Ratio: {reciprocity_ratio_meter.avg}, Reci Filtered Hit Ratio: {reciprocity_hit_ratio_meter.avg}"
        ]))

        return {
            'hit_ratio': hit_ratio_meter.avg,
            'reciprocity_ratio': reciprocity_ratio_meter.avg,
            'reciprocity_hit_ratio': reciprocity_hit_ratio_meter.avg,
        }
Beispiel #23
0
    def _train_epoch(self, epoch, data_loader_iter):
        """Run one training epoch with gradient accumulation.

        Accumulates gradients over ``self.iter_size`` mini-batches before each
        optimizer step, logging loss/timing stats every ``stat_freq`` steps.

        Args:
            epoch: 1-based epoch index (used only for logging).
            data_loader_iter: iterator over training batches, consumed via
                ``self.get_data``.
        """
        # Epoch-level running sums (kept for parity; not returned).
        running_loss = 0
        num_steps = 0.0
        accum_steps = self.iter_size
        data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()

        for curr_iter in range(self.train_max_iter):
            self.optimizer.zero_grad()

            # Per-optimizer-step accumulators.
            step_pos_loss = 0
            step_neg_loss = 0
            step_loss = 0
            load_time = 0

            total_timer.tic()
            # Accumulate gradients over `accum_steps` mini-batches.
            for _ in range(accum_steps):
                data_timer.tic()
                input_dict = self.get_data(data_loader_iter)
                load_time += data_timer.toc(average=False)

                F0 = self.model(input_dict['img0'].to(self.device))
                F1 = self.model(input_dict['img1'].to(self.device))

                pos_loss, neg_loss = self.contrastive_loss(
                    input_dict['img0'].numpy() + 0.5,
                    input_dict['img1'].numpy() + 0.5,
                    F0,
                    F1,
                    input_dict['pairs'],
                    num_pos=self.config.num_pos_per_batch,
                    num_hn_samples=self.config.num_hn_samples_per_batch)

                # Scale so the accumulated gradient matches one large batch.
                pos_loss /= accum_steps
                neg_loss /= accum_steps
                loss = pos_loss + self.neg_weight * neg_loss
                loss.backward()

                step_loss += loss.item()
                step_pos_loss += pos_loss.item()
                step_neg_loss += neg_loss.item()

            self.optimizer.step()
            gc.collect()

            torch.cuda.empty_cache()

            running_loss += step_loss
            num_steps += 1.0
            total_timer.toc()
            data_meter.update(load_time)
            torch.cuda.empty_cache()

            if curr_iter % self.config.stat_freq == 0:
                self.writer.add_scalar('train/loss', step_loss, curr_iter)
                self.writer.add_scalar('train/pos_loss', step_pos_loss,
                                       curr_iter)
                self.writer.add_scalar('train/neg_loss', step_neg_loss,
                                       curr_iter)
                logging.info(
                    "Train epoch {}, iter {}, Current Loss: {:.3e} Pos: {:.3f} Neg: {:.3f}"
                    .format(epoch, curr_iter, step_loss, step_pos_loss,
                            step_neg_loss) +
                    "\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}"
                    .format(data_meter.avg,
                            total_timer.avg - data_meter.avg,
                            total_timer.avg))
                data_meter.reset()
                total_timer.reset()
import numpy as np
from lib.timer import Timer
import breakout_detection
import runners.data_loader as data_loader
import matplotlib.pyplot as plt

SAMPLE_FILE_PATH = '../data/demo7.csv'

if __name__ == '__main__':
    # Time only the breakout-detection step, not the CSV load.
    stopwatch = Timer()
    data = data_loader.load_data(SAMPLE_FILE_PATH)
    stopwatch.start()

    detector = breakout_detection.EdmMulti()
    # Kept for the (commented-out) normalization variant below; the max with
    # 1 guards against division by zero or a non-positive series.
    max_snp = max(max(data.values), 1)
    # series = [x / float(max_snp) for x in data.values]
    series = [value for value in data.values]
    detector.evaluate(series, min_size=24, beta=0.001, degree=1)
    print(stopwatch.elapsed(f'data length: {len(data.values)},  using time:'))

    # Plot the raw series and mark each detected breakout location.
    x_axis = np.asarray(data.index).tolist()
    plt.plot(x_axis, series)
    result = detector.getLoc()
    print(result)
    for loc in result:
        plt.axvline(x_axis[loc], color='#FF4E24')
    plt.show()
    def run_reparam_depth(self):
        """Reparametrize depth for perspective MVS.

        Prepares ``work_dir/colmap/mvs`` (symlinks back into the
        sfm_perspective outputs plus the stereo subdirectories), computes
        depth ranges, and writes COLMAP's patch-match / fusion config files.
        """
        work_dir = self.config['work_dir']

        # Route this module's log output to its own file and start timing.
        self.logger.set_log_file(os.path.join(work_dir, 'logs/log_reparam_depth.txt'))
        local_timer = Timer('reparametrize depth')
        local_timer.start()

        # Dense-reconstruction layout: work_dir/colmap/mvs.
        colmap_dir = os.path.join(work_dir, 'colmap')
        mvs_dir = os.path.join(colmap_dir, 'mvs')
        if not os.path.exists(mvs_dir):
            os.mkdir(mvs_dir)

        # Relative symlinks into the sfm_perspective results:
        # (link name inside mvs_dir, target inside colmap_dir).
        for link_name, target in (('images', 'sfm_perspective/images'),
                                  ('sparse', 'sfm_perspective/tri_ba')):
            link_path = os.path.join(mvs_dir, link_name)
            if os.path.exists(link_path):
                os.unlink(link_path)
            os.symlink(os.path.relpath(os.path.join(colmap_dir, target), mvs_dir),
                       link_path)

        # Compute depth ranges and generate last_rows.txt.
        reparam_depth(os.path.join(mvs_dir, 'sparse'), mvs_dir, camera_model='perspective')

        # Create the stereo working directories COLMAP expects.
        stereo_dir = os.path.join(mvs_dir, 'stereo')
        for subdir in (stereo_dir,
                       os.path.join(stereo_dir, 'depth_maps'),
                       os.path.join(stereo_dir, 'normal_maps'),
                       os.path.join(stereo_dir, 'consistency_graphs')):
            if not os.path.exists(subdir):
                os.mkdir(subdir)

        image_names = sorted(os.listdir(os.path.join(mvs_dir, 'images')))

        # patch-match.cfg: let COLMAP choose 20 source images per reference.
        with open(os.path.join(stereo_dir, 'patch-match.cfg'), 'w') as fp:
            for img_name in image_names:
                fp.write(img_name + '\n__auto__, 20\n')

        # fusion.cfg: fuse every image.
        with open(os.path.join(stereo_dir, 'fusion.cfg'), 'w') as fp:
            fp.writelines(img_name + '\n' for img_name in image_names)

        # Stop local timer and log the summary.
        local_timer.mark('reparam depth done')
        logging.info(local_timer.summary())
Beispiel #26
0
def main(config):
    """Evaluate trained features with RANSAC-based registration on the test set.

    For each test pair this extracts per-point features with the restored
    model, matches them with Open3D's feature-based RANSAC, and accumulates
    translation error (RTE), rotation error (RRE) and success statistics.

    Args:
        config: experiment configuration (model name, checkpoint directory,
            voxel size, data-loader settings, ...).
    """
    test_loader = make_data_loader(
        config, config.test_phase, 1, num_threads=config.test_num_workers, shuffle=True)

    num_feats = 1  # dummy one-dimensional input feature per point

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Build the model and restore the trained weights.
    Model = load_model(config.model)
    model = Model(
        num_feats,
        config.model_n_out,
        bn_momentum=config.bn_momentum,
        conv1_kernel_size=config.conv1_kernel_size,
        normalize_feature=config.normalize_feature)
    checkpoint = torch.load(config.save_dir + '/checkpoint.pth')
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)
    model.eval()

    success_meter, rte_meter, rre_meter = AverageMeter(), AverageMeter(), AverageMeter()
    data_timer, feat_timer, reg_timer = Timer(), Timer(), Timer()

    test_iter = test_loader.__iter__()
    N = len(test_iter)
    n_gpu_failures = 0

    for i in range(N):
        data_timer.tic()
        try:
            # Bug fix: use the builtin next(); iterator objects have no
            # .next() method in Python 3.
            data_dict = next(test_iter)
        except ValueError:
            # The loader signals unusable pairs with ValueError; skip them.
            n_gpu_failures += 1
            logging.info(f"# Erroneous GPU Pair {n_gpu_failures}")
            continue
        data_timer.toc()

        xyz0, xyz1 = data_dict['pcd0'], data_dict['pcd1']
        T_gth = data_dict['T_gt']
        xyz0np, xyz1np = xyz0.numpy(), xyz1.numpy()

        pcd0 = make_open3d_point_cloud(xyz0np)
        pcd1 = make_open3d_point_cloud(xyz1np)

        # Feature extraction; no gradients needed at test time.
        with torch.no_grad():
            feat_timer.tic()
            sinput0 = ME.SparseTensor(
                data_dict['sinput0_F'].to(device), coordinates=data_dict['sinput0_C'].to(device))
            F0 = model(sinput0).F.detach()
            sinput1 = ME.SparseTensor(
                data_dict['sinput1_F'].to(device), coordinates=data_dict['sinput1_C'].to(device))
            F1 = model(sinput1).F.detach()
            feat_timer.toc()

        feat0 = make_open3d_feature(F0, 32, F0.shape[0])
        feat1 = make_open3d_feature(F1, 32, F1.shape[0])

        # Feature-matching RANSAC registration.
        reg_timer.tic()
        distance_threshold = config.voxel_size * 1.0
        ransac_result = o3d.registration.registration_ransac_based_on_feature_matching(
            pcd0, pcd1, feat0, feat1, distance_threshold,
            o3d.registration.TransformationEstimationPointToPoint(False), 4, [
                o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
                o3d.registration.CorrespondenceCheckerBasedOnDistance(
                    distance_threshold)
            ], o3d.registration.RANSACConvergenceCriteria(4000000, 10000))
        T_ransac = torch.from_numpy(
            ransac_result.transformation.astype(np.float32))
        reg_timer.toc()

        # Translation error (meters) and rotation error (radians).
        rte = np.linalg.norm(T_ransac[:3, 3] - T_gth[:3, 3])
        rre = np.arccos((np.trace(T_ransac[:3, :3].t() @ T_gth[:3, :3]) - 1) / 2)

        # RANSAC counts as successful if rte < 2m and rre < 5 degrees, following
        # http://openaccess.thecvf.com/content_ECCV_2018/papers/Zi_Jian_Yew_3DFeat-Net_Weakly_Supervised_ECCV_2018_paper.pdf
        if rte < 2:
            rte_meter.update(rte)

        if not np.isnan(rre) and rre < np.pi / 180 * 5:
            rre_meter.update(rre)

        if rte < 2 and not np.isnan(rre) and rre < np.pi / 180 * 5:
            success_meter.update(1)
        else:
            success_meter.update(0)
            logging.info(f"Failed with RTE: {rte}, RRE: {rre}")

        if i % 10 == 0:
            logging.info(
                f"{i} / {N}: Data time: {data_timer.avg}, Feat time: {feat_timer.avg}," +
                f" Reg time: {reg_timer.avg}, RTE: {rte_meter.avg}," +
                f" RRE: {rre_meter.avg}, Success: {success_meter.sum} / {success_meter.count}"
                + f" ({success_meter.avg * 100} %)")
            data_timer.reset()
            feat_timer.reset()
            reg_timer.reset()

    logging.info(
        f"RTE: {rte_meter.avg}, var: {rte_meter.var}," +
        f" RRE: {rre_meter.avg}, var: {rre_meter.var}, Success: {success_meter.sum} " +
        f"/ {success_meter.count} ({success_meter.avg * 100} %)")
Beispiel #27
0
                    help='store program log to file, dir is ./log/')
parser.add_argument('--debug', action='store_true', help='enable debug mode')
args = parser.parse_args()

# Log file path: ./log/scl_agent_<switch id>.log when --log2file is set,
# otherwise None so logging.basicConfig falls back to stderr.
LOG_FN = 'log/scl_agent_%s.log' % str(args.sw_id) \
        if args.log2file else None
# --debug raises root-logger verbosity from INFO to DEBUG.
LEVEL = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y%m%d %H:%M:%S',
    level=LEVEL,
    filename=LOG_FN,
    filemode='w')
logger = logging.getLogger(__name__)

# Agent plumbing: shared timer, stream bookkeeping, I/O selector, and the
# channel toward the local switch controller.
timer = Timer(logger)
streams = scl.Streams(logger)
selector = Selector()
scl2sw = scl.Scl2Sw(local_ctrl_host, local_ctrl_port, streams, logger)
# Channel toward the SCL proxies: UDP or TCP depending on --channel.
if args.channel == 'udp':
    scl2scl = scl.Scl2SclUdp(streams, logger, agent_list[args.sw_id],
                             scl_agent_port, scl_proxy_port)
else:
    scl2scl = scl.Scl2SclTcp(streams, logger, agent_list[args.sw_id],
                             proxy_list, scl_proxy_port, timer)

timer.start()  # another thread, daemonize

# Main event loop: block on the selector, then let each component handle its
# ready events. NOTE(review): scl2sw is constructed but never waited on here —
# presumably it registers itself elsewhere; confirm against the full module.
while True:
    timer.wait(selector)
    scl2scl.wait(selector)
Beispiel #28
0
    def test_net(self, graph):
        """Run CTPN text detection over every image in the test directory.

        Restores the latest checkpoint, then runs ``self.ctpn`` on each image
        in ``TEST.DATA_DIR``, writing text/picture results into the (freshly
        recreated) configured result directories.

        Args:
            graph: the tf.Graph containing the detection network.

        Raises:
            RuntimeError: if no usable checkpoint can be restored.
            AssertionError: if the test directory contains no images.
        """
        timer = Timer()
        timer.tic()

        # Start from clean result directories.
        if os.path.exists(self._cfg.TEST.RESULT_DIR_TXT):
            shutil.rmtree(self._cfg.TEST.RESULT_DIR_TXT)
        os.makedirs(self._cfg.TEST.RESULT_DIR_TXT)

        if os.path.exists(self._cfg.TEST.RESULT_DIR_PIC):
            shutil.rmtree(self._cfg.TEST.RESULT_DIR_PIC)
        os.makedirs(self._cfg.TEST.RESULT_DIR_PIC)

        saver = tf.train.Saver()
        # Create a session; cap the GPU memory fraction (too large a value
        # causes allocation errors).
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allocator_type = 'BFC'
        config.gpu_options.per_process_gpu_memory_fraction = 0.7

        sess = tf.Session(config=config, graph=graph)

        # Restore model parameters from the latest checkpoint.
        ckpt = tf.train.get_checkpoint_state(self._cfg.COMMON.CKPT)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restoring from {}...'.format(ckpt.model_checkpoint_path), end=' ')
            try:
                saver.restore(sess, ckpt.model_checkpoint_path)
            except Exception as exc:
                # Bug fix: the original raised a plain string, which is itself
                # a TypeError in Python 3 — raise a real exception instead.
                raise RuntimeError('Check your pretrained {:s}'.format(
                    ckpt.model_checkpoint_path)) from exc
            print('done')
        else:
            raise RuntimeError('Check your pretrained {:s}'.format(
                self._cfg.TEST.RESULT_DIR))

        im_names = os.listdir(self._cfg.TEST.DATA_DIR)

        assert len(im_names) > 0, "Nothing to test"
        i = 0
        for im in im_names:
            im_name = os.path.join(self._cfg.TEST.DATA_DIR, im)
            try:
                self.ctpn(sess, self._net, im_name)
            except NoPositiveError:
                print("Warning!!, get no region of interest in picture {}".format(im))
                continue
            except Exception:
                # Best-effort: report and keep going on unexpected per-image
                # failures (narrowed from the original bare `except:` so
                # KeyboardInterrupt/SystemExit are no longer swallowed).
                print("the pic {} may has problems".format(im))
                continue
            i += 1
            if i % 10 == 0:
                timer.toc()
                print('Detection took {:.3f}s for 10 pic'.format(timer.total_time))

        # Finally, release the session.
        sess.close()
Beispiel #29
0
from lib.spark import spark, sc
from lib.plotly import py
from lib.timer import Timer
from lib.process import process
import plotly.graph_objs as go
from pyspark.ml.feature import VectorAssembler
from scipy.spatial import ConvexHull
import pickle
import numpy as np

# Load the raw trip CSV (January-2016 yellow-taxi data, judging by the file
# name) into a Spark DataFrame, inferring column types from the file.
with Timer('read', 'Reading data'):
    df = df_base = spark.read.csv('data/yellow_tripdata_2016-01.csv',
                                  header=True,
                                  inferSchema=True)

# Drop/clean invalid rows; the actual rules live in lib.process.
with Timer('process', 'Cleaning invalid data'):
    df = process(df)

from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType
from pyspark import StorageLevel

from pyspark.ml.clustering import GaussianMixture

# 1000-component Gaussian mixture used for the clustering pass below.
gmm = GaussianMixture(k=1000)

# Accumulates per-(weekday, hour) clustering output.
result = []
with Timer('clustering', 'Computing clusters'):
    for weekday in range(7):
        for hour in range(24):
            with Timer('clustering',
Beispiel #30
0
    def _valid_epoch(self):
        """Run one validation pass over the registration benchmark.

        For each validation pair: extract sparse features for both point
        clouds, find feature correspondences, estimate a rigid transform,
        and accumulate loss / RTE / RRE / hit-ratio statistics.

        Returns:
            dict with the averaged 'loss', 'rre', 'rte', 'feat_match_ratio'
            and 'hit_ratio' over the evaluated pairs.
        """
        # Change the network to evaluation mode and fix the dataset seed so
        # validation runs are comparable across epochs.
        self.model.eval()
        self.val_data_loader.dataset.reset_seed(0)
        num_data = 0
        hit_ratio_meter, feat_match_ratio, loss_meter, rte_meter, rre_meter = AverageMeter(
        ), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
        data_timer, feat_timer, matching_timer = Timer(), Timer(), Timer()
        tot_num_data = len(self.val_data_loader.dataset)
        if self.val_max_iter > 0:
            tot_num_data = min(self.val_max_iter, tot_num_data)
        data_loader_iter = self.val_data_loader.__iter__()

        for batch_idx in range(tot_num_data):
            data_timer.tic()
            # Bug fix: use the builtin next(); iterator objects have no
            # .next() method in Python 3.
            input_dict = next(data_loader_iter)
            data_timer.toc()

            # Feature extraction for both clouds.
            feat_timer.tic()
            sinput0 = ME.SparseTensor(
                input_dict['sinput0_F'].to(self.device),
                coordinates=input_dict['sinput0_C'].to(self.device).type(torch.float))
            F0 = self.model(sinput0).F

            sinput1 = ME.SparseTensor(
                input_dict['sinput1_F'].to(self.device),
                coordinates=input_dict['sinput1_C'].to(self.device).type(torch.float))
            F1 = self.model(sinput1).F
            feat_timer.toc()

            # Correspondence search and robust transform estimation.
            matching_timer.tic()
            xyz0, xyz1, T_gt = input_dict['pcd0'], input_dict['pcd1'], input_dict['T_gt']
            xyz0_corr, xyz1_corr = self.find_corr(xyz0, xyz1, F0, F1, subsample_size=5000)

            T_est = te.est_quad_linear_robust(xyz0_corr, xyz1_corr)

            loss = corr_dist(T_est, T_gt, xyz0, xyz1, weight=None)
            loss_meter.update(loss)

            # Relative translation / rotation error against ground truth.
            rte = np.linalg.norm(T_est[:3, 3] - T_gt[:3, 3])
            rte_meter.update(rte)
            rre = np.arccos((np.trace(T_est[:3, :3].t() @ T_gt[:3, :3]) - 1) / 2)
            # arccos can return NaN from numerical noise; skip those samples.
            if not np.isnan(rre):
                rre_meter.update(rre)

            hit_ratio = self.evaluate_hit_ratio(
                xyz0_corr, xyz1_corr, T_gt, thresh=self.config.hit_ratio_thresh)
            hit_ratio_meter.update(hit_ratio)
            # A pair counts as "feature matched" above a 5% hit ratio.
            feat_match_ratio.update(hit_ratio > 0.05)
            matching_timer.toc()

            num_data += 1
            torch.cuda.empty_cache()

            if batch_idx % 100 == 0 and batch_idx > 0:
                logging.info(' '.join([
                    f"Validation iter {num_data} / {tot_num_data} : Data Loading Time: {data_timer.avg:.3f},",
                    f"Feature Extraction Time: {feat_timer.avg:.3f}, Matching Time: {matching_timer.avg:.3f},",
                    f"Loss: {loss_meter.avg:.3f}, RTE: {rte_meter.avg:.3f}, RRE: {rre_meter.avg:.3f},",
                    f"Hit Ratio: {hit_ratio_meter.avg:.3f}, Feat Match Ratio: {feat_match_ratio.avg:.3f}"
                ]))
                data_timer.reset()

        logging.info(' '.join([
            f"Final Loss: {loss_meter.avg:.3f}, RTE: {rte_meter.avg:.3f}, RRE: {rre_meter.avg:.3f},",
            f"Hit Ratio: {hit_ratio_meter.avg:.3f}, Feat Match Ratio: {feat_match_ratio.avg:.3f}"
        ]))
        return {
            "loss": loss_meter.avg,
            "rre": rre_meter.avg,
            "rte": rte_meter.avg,
            'feat_match_ratio': feat_match_ratio.avg,
            'hit_ratio': hit_ratio_meter.avg
        }