Code example #1
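Builds a CHAOS prior generator for every requested modality, subject count, and slice count, then runs it over the corresponding Train_Sets file list.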
def main(unused_argv):
    for modality in FLAGS.modality:
        if modality == "CT":
            num_class = dataset_infos._ISBI_CHAOS_INFORMATION_CT.num_classes
        elif modality == "MR_T1":
            num_class = dataset_infos._ISBI_CHAOS_INFORMATION_MR_T1.num_classes
        elif modality == "MR_T2":
            num_class = dataset_infos._ISBI_CHAOS_INFORMATION_MR_T2.num_classes
        else:
            raise ValueError("Unknown data modality.")

        for sub in FLAGS.num_subject:
            for slices in FLAGS.prior_slice:
                prior_generator = build_chaos_prior(
                    output_dir=FLAGS.output_dir,
                    num_subject=sub,
                    num_slice=slices,
                    num_class=num_class,
                    modality=modality,
                    save_prior_in_npy=FLAGS.save_prior_in_npy,
                    save_prior_in_img=FLAGS.save_prior_in_images)
                data_dir = os.path.join(FLAGS.data_dir, "Train_Sets",
                                        modality.split("_")[0])
                file_list = file_utils.get_file_list(data_dir, None, None)
                _ = prior_generator(file_list)
Code example #2
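Resolves the subject directory for a CHAOS modality and data type, then returns the sorted image or label files found there.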
def _get_files(data_path, modality, img_or_label):
    """Gets files for the specified modality and data type.

    Args:
        data_path: String, root directory of the dataset.
        modality: String, data modality ("CT", "MR_T1" or "MR_T2").
        img_or_label: String, desired data type ("image" or "label").

    Returns:
        A list of sorted file names.
    """
    if "CT" in modality:
        subject_path = os.path.join(data_path, _FOLDERS_MAP[img_or_label])
    elif "MR" in modality:
        subject_path = os.path.join(data_path, _MODALITY_MAP[modality][1],
                                    _FOLDERS_MAP[img_or_label])
        if "MR_T1" in modality and _FOLDERS_MAP[img_or_label] == _FOLDERS_MAP[
                "image"]:
            subject_path = os.path.join(subject_path,
                                        _MODALITY_MAP[modality][2])
    else:
        raise ValueError("Unknown data modality")

    filenames = file_utils.get_file_list(
        subject_path,
        fileStr=_POSTFIX_MAP[modality][img_or_label],
        fileExt=[_DATA_FORMAT_MAP[img_or_label]],
        sort_files=True)
    return filenames
Code example #3
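Reads every recorded path file for each obstacle environment and reshapes the raw binary data into (n, dim) waypoint arrays.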
def load_path_list(self):
    for obs_idx in range(self.st_idx, self.end_idx):
        path_root = join(self.data_root, "e{}".format(obs_idx))
        path_file_list = get_file_list(path_root)
        self.obs2path[obs_idx] = []  # initialize the entry before appending
        for path_file in path_file_list:
            path = np.fromfile(path_file)
            # each file holds a flat array of waypoints; reshape to (n, dim)
            path = path.reshape(-1, self.dim)
            self.obs2path[obs_idx].append(path)
Code example #4
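Picks the train, val, or test subdirectory and collects a sorted list of its files.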
def __init__(self, root, is_val=False, is_test=False):
    super(ObstacleDataSet, self).__init__()
    if is_val:
        self.data_root = root + '/val'
    elif is_test:
        self.data_root = root + '/test'
    else:
        self.data_root = root + '/train'
    self.file_names = get_file_list(self.data_root)
    self.file_names.sort()
Code example #5
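The same prior-generation loop as example #1, specialized to the MICCAI abdominal dataset's label files.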
def main(unused_argv):
    for sub in FLAGS.num_subject:
        for slices in FLAGS.prior_slice:
            prior_generator = build_miccai_prior(
                output_dir=FLAGS.output_dir,
                num_subject=sub,
                num_slice=slices,
                num_class=dataset_infos._MICCAI_ABDOMINAL_INFORMATION.num_classes,
                save_prior_in_npy=FLAGS.save_prior_in_npy,
                save_prior_in_img=FLAGS.save_prior_in_images)
            data_dir = os.path.join(FLAGS.data_dir, "Train_Sets", "label/")
            file_list = file_utils.get_file_list(path=data_dir,
                                                 fileStr=None,
                                                 fileExt=None)
            _ = prior_generator(file_list)
Code example #6
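An end-to-end MPNet demo: it loads obstacle clouds and path files for ten environments, then repeatedly plans with pretrained CAE/MLP weights and visualizes any path found.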
def main():
    mpnet_data_root = "./dataset"
    mpnet_data_root = "/home/raeyo/dev_tools/MotionPlanning/MPNet/MPNetDataset/S2D/dataset"
    cloud_data_path = join(mpnet_data_root, "obs_cloud")
    obc_file_list = []
    for i in range(10):
        target_path = join(cloud_data_path, "obc{}.dat".format(100 + i))
        obc_file_list.append(target_path)
    path_file_list = []
    for i in range(10):
        target_path = join(mpnet_data_root, "e{}".format(100 + i))
        path_list = get_file_list(target_path)
        path_file_list.append(path_list)
    
    cae_weight = "pretrained_weights/cae_weight.tar"
    mlp_weight = "pretrained_weights/mlp_weight_495.tar"
    planner = MPNetPlanner(cae_weight, mlp_weight)
    
    for i in range(100):
        target_idx = random.randint(0, 9)  # index into the 10 environments loaded above
        target_obc = obc_file_list[target_idx]
        target_path = random.choice(path_file_list[target_idx])
        env = MPNetSimple2D(obc_file=target_obc, path_file=target_path)
        planner.reset(env)
        env.set_decoder_view(planner.obs_dec)
        # for j in range(100):
        #     next_config = planner.get_next_config()
        #     env.visualize_with_decodedview(next_config)
        #     planner.update_current_config(next_config)
        #     if planner.is_reaching_target():
        #         break
        # if planner.is_reaching_target():
        #     print("Success to reach target")
        # else:
        #     print("Fail to reach target")
        

        path = planner.planning()
    
        if path is not None:
            for conf in path:
                env.visualize_with_decodedview(conf)
            print("Succeeded in reaching the target")
        else:
            print("Failed to reach the target")
Code example #7
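Lists NIfTI files for a dataset split and, when explicit training indices are given, partitions them into train and val subsets.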
def _get_files(data, data_dir, dataset_split, train_split_indices=None):
    """Gets files for the specified data type and dataset split.

    Args:
        data: String, desired data type ("image" or "label").
        data_dir: String, root directory of the dataset.
        dataset_split: String, dataset split ("train" or "val").
        train_split_indices: Optional list of indices selecting the training
            subset; the validation split uses the complementary indices.

    Returns:
        A list of sorted file names.
    """
    filenames = file_utils.get_file_list(
        os.path.join(data_dir, _SPLIT_MAP[dataset_split], _FOLDERS_MAP[data]),
        fileStr=[_POSTFIX_MAP[data]],
        fileExt=["nii.gz"],
        sort_files=True)

    if train_split_indices is not None:
        if max(train_split_indices) >= len(filenames):
            raise ValueError("Train split index is out of range.")

        if dataset_split == "train":
            split_indices = train_split_indices
        elif dataset_split == "val":
            split_indices = list(
                set(range(len(filenames))) - set(train_split_indices))
        else:
            raise ValueError("Unknown dataset split.")
        filenames = [filenames[idx] for idx in split_indices]
    return filenames
Code example #8
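A dataset constructor that encodes obstacle representations with a pretrained encoder and loads the per-environment paths shown in example #3.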
    def __init__(self, root, encoder, is_val=False):
        super(PathDataSet, self).__init__()
        
        self.data_root = join(root, "dataset")
        if is_val:
            self.st_idx = 100
            self.end_idx = self.st_idx + 10
        else:
            self.st_idx = 0
            self.end_idx = self.st_idx + 100
        self.encoder = encoder # pretrained
        self.dim = 2

        self.obs2path = {}
        self.obs_representation = self.get_obs_representation()
        self.load_path_list()

        self.dataset = self.create_dataset()
        
        self.file_names = get_file_list(self.data_root)
        self.file_names.sort()
Code example #9
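Extracts face crops from video files with a DALI pipeline, falls back to OpenCV decoding for files DALI cannot handle, and writes results asynchronously through a process pool.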
def prepare_data(start: int,
                 end: int,
                 chunk_dirs: List[str] = None,
                 gpu='0',
                 args: Dict[str, Any] = None) -> None:
    df = read_labels(args.data_dir, chunk_dirs=chunk_dirs, label=args.label)
    device = torch.device('cuda:{}'.format(gpu))
    cfg = detector_cfg(args)
    detector = init_detector(cfg, args.det_weights, device).to(device)
    detect_fn = partial(detect, model=detector, cfg=cfg, device=device)
    file_list_path = '{}_{}'.format(args.file_list_path, gpu)
    tasks = []

    def save(images: List[np.ndarray], idx: int, t0: float,
             pipe_name: str) -> float:
        meta = df.iloc[idx]
        dir_path = os.path.join(args.save_dir, meta.dir)
        file_name = '%s_%d' % (meta.name[:-4], int(meta.label))
        if len(images) > 0:
            task = subproc.submit(dump_to_disk,
                                  images,
                                  dir_path,
                                  file_name,
                                  args.img_format,
                                  scale=args.img_scale,
                                  pack=args.pack,
                                  lossy=args.lossy)
            tasks.append(task)
        else:
            print('No frames found %s/%s.mp4' % (dir_path, file_name))
        t1 = time.time()  # always computed so the returned timestamp is valid
        if args.verbose:
            print('%s | %4s| %6d| %.02f s| %s/%s' %
                  (str(device), pipe_name, idx, t1 - t0, meta.dir, meta.name))
        return t1

    def maybe_wait(tasks: List[fut.Future]) -> List[fut.Future]:
        nw = args.num_workers
        if len(tasks) > args.task_queue_depth * nw:
            old_tasks, new_tasks = tasks[:-nw], tasks[-nw:]
            fut.wait(old_tasks)
            for task in old_tasks:
                _ = task.result()
            return new_tasks
        else:
            return tasks

    with fut.ProcessPoolExecutor(args.num_workers) as subproc:
        for offset in range(start, end, args.max_open_files):
            last = min(offset + args.max_open_files, end)
            files = get_file_list(df, offset, last, args.data_dir)
            print('{} | parsing meta for {} files'.format(device, len(files)))
            files_meta = parse_meta(files)
            min_unique_res_freq = int(len(files) * 0.02)
            splits, size_factors = split_files_by_res(files_meta,
                                                      min_unique_res_freq)
            if not len(files):
                print('No files were read by {}'.format(device))
                break
            handled_files = np.zeros(len(files), dtype=bool)  # np.bool was removed in NumPy 1.24

            for s, mask in enumerate(splits):
                write_file_list(files, path=file_list_path, mask=mask)
                seq_len = int(args.num_frames / args.num_pass /
                              size_factors[s])
                pipe = VideoPipe(file_list_path,
                                 seq_len=seq_len,
                                 stride=args.stride,
                                 device_id=int(gpu))
                pipe.build()
                num_samples_read = pipe.epoch_size('reader')

                if num_samples_read > 0:
                    data_iter = DALIGenericIterator([pipe],
                                                    ['frames', 'label'],
                                                    num_samples_read,
                                                    dynamic_shape=True)
                    t0 = time.time()  # start the timer unconditionally; save() needs it
                    prev_idx = None
                    faces = []

                    for video_batch in data_iter:
                        frames = video_batch[0]['frames'].squeeze(0)
                        read_idx = video_batch[0]['label'].item()
                        new_faces = find_faces(frames, detect_fn,
                                               args.max_face_num_thresh)
                        del video_batch, frames

                        if prev_idx is None or prev_idx == read_idx:
                            faces += new_faces
                        else:
                            t0 = save(faces, offset + prev_idx, t0, 'dali')
                            faces = new_faces
                        prev_idx = read_idx
                        handled_files[read_idx] = True
                        tasks = maybe_wait(tasks)
                    # save the last video of this split
                    if prev_idx is not None:
                        save(faces, offset + prev_idx, t0, 'dali')
                    del data_iter

                del pipe
                gc.collect()

            unhandled_files = (~handled_files).nonzero()[0]
            num_bad_samples = len(unhandled_files)
            if num_bad_samples > 0:
                print('Unable to parse %d videos with DALI\n'
                      'Running fallback decoding through OpenCV...' %
                      num_bad_samples)
                for idx in unhandled_files:
                    t0 = time.time()
                    frames = read_frames_cv2(files[idx], args.num_frames)
                    if frames is not None:
                        faces = find_faces(frames, detect_fn,
                                           args.max_face_num_thresh)
                        t0 = save(faces, offset + idx, t0, 'cv2')
                    tasks = maybe_wait(tasks)
    print('{}: DONE'.format(device))
Code example #10
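Reads all medical images under a directory and stacks them into a single volume along the third axis.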
def load_data(path):
    file_list = file_utils.get_file_list(path)
    data = []
    for f in file_list:
        data.append(file_utils.read_medical_images(f))
    # stack the 2D slices into a (height, width, num_slices) volume
    return np.stack(data, axis=2)
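
All of the snippets above delegate directory listing to some variant of get_file_list, whose implementation is not shown on this page. The following is a minimal sketch consistent with the call sites in examples #2, #5, and #7 (a directory path plus optional fileStr substring filters, fileExt extension filters, and a sort_files flag); the filtering semantics are an assumption inferred from those call sites, not the real file_utils implementation.

import os
from typing import List, Sequence, Union


def get_file_list(path: str,
                  fileStr: Union[str, Sequence[str], None] = None,
                  fileExt: Union[str, Sequence[str], None] = None,
                  sort_files: bool = True) -> List[str]:
    """Hypothetical sketch: list files under path, optionally filtered by
    name substrings (fileStr) and file extensions (fileExt)."""

    def as_list(value):
        # The call sites pass a scalar, a list, or None interchangeably.
        if value is None:
            return []
        return list(value) if isinstance(value, (list, tuple)) else [value]

    substrings, extensions = as_list(fileStr), as_list(fileExt)
    files = []
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        if not os.path.isfile(full_path):
            continue
        if substrings and not any(s in name for s in substrings):
            continue
        if extensions and not any(name.endswith(ext) for ext in extensions):
            continue
        files.append(full_path)
    return sorted(files) if sort_files else files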