Example #1
    def embed(self, force=False):
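        """Run the TCN over every trial and camera position and cache the
        per-frame embeddings to 'embed_<pos>.json' in each trial folder;
        aborts if any embedding contains NaN or inf."""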
        self.tcn.switch_mode('eval')
        for root in poem(sorted(self.roots, reverse=True), "EMBEDDING"):
            for trial in poem(Tools.list_dirs(root), Tools.fname(root)):
                for pos_path in poem(Tools.list_dirs(trial),
                                     Tools.fname(trial)):
                    pos = pos_path.split('/')[-1]
                    dataset = EmbedderSet(root_dir=pos_path)
                    embeddings = {}
                    for X, paths in poem(dataset, Tools.fname(pos_path)):
                        if len(paths) > 0:
                            y = self._fwd(X)
                            if not np.isfinite(y).all():
                                pyout('before', paths)
                                sys.exit(0)

                            for ii, path in enumerate(paths):
                                embeddings[path.split('/')[-1]] = np.copy(
                                    y[ii, :]).tolist()
                    for key in embeddings:
                        if np.isnan(embeddings[key]).any():
                            pyout('after', pos_path, key)
                            sys.exit(0)

                    with open(os.path.join(trial, 'embed_' + pos + '.json'),
                              'w+') as f:
                        json.dump(embeddings, f, indent=1)
                    del dataset
                    del embeddings
Example #2
def alphapose(indir=None):
    # run AlphaPose on every trial/camera folder, writing results into the
    # corresponding '3d' subdirectory
    ap = AlphaPose()
    for trial_path in Tools.list_dirs(indir):
        for pos_path in Tools.list_dirs(trial_path):
            ap.process_dir(
                pos_path,
                os.path.join(pos_path, '3d'))
Example #3
    def __init__(self, root=None):
        self.POS = tuple([
            Tools.fname(f) for f in Tools.list_dirs(Tools.list_dirs(root)[0])
        ])
        Tools.debug(self.POS)
        if 'steady' in self.POS:
            self.POS = 'steady'
        elif 'middle' in self.POS:
            self.POS = 'middle'
        else:
            self.POS = self.POS[0]
        am = AlignMatrix(root)
        self.alignments = am.load()
Example #4
    def __init__(self, device=None, state_dict_root=None, root=None):
        self.device = device
        self.POS = tuple([
            Tools.fname(f) for f in Tools.list_dirs(Tools.list_dirs(root)[0])
        ])
        self.VAE_DICT, self.EMB_SIZE = self._load_vae_dicts(root)
        am = AlignMatrix(root)
        self.alignments = am.load()
        self.VAE = [
            VAE(state_dict_path=os.path.join(state_dict_root, pos,
                                             'vae_mdl.pth')).to(device)
            for pos in poem(self.POS, "LOADING VAE MODELS")
        ]
        self.cv_hist, self.labels = self._init_hist(
            (240, 240 * len(self.POS)), root)
        self.lbl_dict = self._init_labels(root)
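This constructor loads one VAE per camera position from <state_dict_root>/<pos>/vae_mdl.pth, along with the cached per-frame VAE embeddings (_load_vae_dicts) and the trial alignment matrix for the dataset root.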
Example #5
    def __init__(self,
                 root_dir=None,
                 pos=None,
                 batch_size=Config.TCN_BATCH,
                 input_size=(3, 224, 224),
                 output_size=(3, 224, 224),
                 augment=False):
        self.root = root_dir
        assert pos is not None
        self.batch_size = batch_size
        self.input_size = input_size
        self.output_size = output_size
        self.augment = augment

        frames = []

        for trial_folder in Tools.list_dirs(root_dir):
            for frame_pth in Tools.list_files(
                    os.path.join(trial_folder, pos), end='.jpg'):
                frames.append(frame_pth)

        if augment:
            random.shuffle(frames)

        self.batches = [[]]
        for frame in poem(frames, 'LOADING ' + Tools.fname(root_dir)):
            if len(self.batches[-1]) >= batch_size:
                self.batches.append([frame])
            else:
                self.batches[-1].append(frame)
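The batch-building loop above chunks frames into lists of at most batch_size; for a non-empty frame list it is equivalent to this minimal sketch:

    batches = [frames[i:i + batch_size]
               for i in range(0, len(frames), batch_size)]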
Example #6
    def __init__(self,
                 root_dir='./res/datasets/folding/train',
                 batch_size=Config.TCN_BATCH,
                 input_size=(3,) + Config.TCN_IMG_SIZE,
                 pos_range=Config.TCN_POS_RANGE,
                 negative_multiplier=Config.TCN_NEGATIVE_MULTIPLIER,
                 transform=None,  # unused
                 augment=False):

        self.root = root_dir
        self.batch_size = batch_size
        self.input_size = input_size
        self.pos_range = pos_range
        self.m = negative_multiplier
        self.augment = augment
        self.trial_names = []
        self.seeds = []  # use same seed when sampling eval
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225])])  # standard ImageNet statistics

        for f in Tools.list_dirs(root_dir):
            self.trial_names.append(f)
            if not augment:
                self.seeds.append(random.randint(0, 9001))
Example #7
    def wrist_dict(indir):
        datadir = indir.replace('kinect-recordings-3', 'grabbing')
        ou_dict = {}
        for trial_folder in Tools.list_dirs(indir):
            trial_name = trial_folder.split('/')[-1]

            D_l = Prep.init_joint_dict(os.path.join(
                datadir, trial_name, '0', '3d', 'alphapose-results.json'))
            D_m = Prep.init_joint_dict(os.path.join(
                datadir, trial_name, '1', '3d', 'alphapose-results.json'))
            D_r = Prep.init_joint_dict(os.path.join(
                datadir, trial_name, '2', '3d', 'alphapose-results.json'))

            ou_dict[os.path.join(trial_folder, 'color-recording-left.avi')] = \
                Prep.req_frames(D_l)
            ou_dict[os.path.join(
                trial_folder, 'color-recording-middle.avi')] = \
                Prep.req_frames(D_m)
            ou_dict[os.path.join(
                trial_folder, 'color-recording-right.avi')] = \
                Prep.req_frames(D_r)

        with open('/media/roblaundry/kinect-recordings-3/wrist_positions.json',
                  'w+') as f:
            json.dump(ou_dict, f, indent=2)
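The resulting wrist_positions.json maps each color recording (left/middle/right, unpacked to folders 0/1/2) to the frames required by its AlphaPose joint dictionary.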
Example #8
    def make_align_dict(self, root, trial_root, lock):
        dict_ = {}
        for anchor_root in Tools.list_dirs(root):
            if trial_root != anchor_root:
                dict_[anchor_root] = self._align(trial_root, anchor_root, lock)

        with open(os.path.join(trial_root, 'alignment.json'), 'w+') as f:
            json.dump(dict_, f, indent=1)
Example #9
    def load(self):
        alignment = {}
        for trial_root in Tools.tqdm_pbar(Tools.list_dirs(self.root),
                                          "LOADING ALIGNMENT DATA"):
            with open(os.path.join(trial_root, 'alignment.json'), 'r') as f:
                alignment[trial_root] = json.load(f)

        return alignment
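As in example #3 above (am = AlignMatrix(root); alignments = am.load()), the returned dict has the form alignment[trial_root][anchor_root] -> alignment data, i.e. the per-trial alignment.json files written by make_align_dict (examples #8 and #10).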
Example #10
    def make_align_dict(self, root, trial_root, lock):
        dict_ = {}
        self.root = root
        self.trial_root = trial_root
        for anchor_root in [
                f for f in Tools.list_dirs(root) if 'fake' not in f
        ]:
            self.anchor_root = anchor_root
            if trial_root != anchor_root:
                dict_[anchor_root] = self._align(trial_root, anchor_root, lock)

        with open(os.path.join(trial_root, 'alignment.json'), 'w+') as f:
            json.dump(dict_, f, indent=1)
Example #11
    def _load_vae_dicts(self, root):
        vae_dict = {}
        emb_size = None
        for trial_folder in poem(Tools.list_dirs(root),
                                 "LOADING VAE EMBEDDINGS"):
            vae_dict[trial_folder] = {}
            for pos in self.POS:
                vae_dict[trial_folder][pos] = {}
                with open(os.path.join(trial_folder, pos + '_vae.json'),
                          'r') as f:
                    data = json.load(f)
                for frm in list(data):
                    vae_dict[trial_folder][pos][frm] = data[frm]
                    if emb_size is None:
                        emb_size = len(data[frm])
        return vae_dict, emb_size
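The returned structure is vae_dict[trial_folder][pos][frame] -> embedding (a list of floats); emb_size is the length of the first embedding encountered.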
Example #12
    def _construct_matrix(self, root):
        def align(trial_root, anchor_root):
            A = Aligner()
            path = A.align(trial_root, anchor_root)
            return (trial_root, anchor_root, path)

        pool = multiprocessing.Pool(Config.N_WORKERS)
        lock = multiprocessing.Manager().Lock()
        tasks = []
        for trial_root in Tools.list_dirs(root):
            tasks.append((root, trial_root, lock))

        for _ in Tools.tqdm_pbar(pool.imap_unordered(self._align, tasks),
                                 "ALIGNING (multiprocessing pool)",
                                 total=len(tasks)):
            pass
        pool.close()
        pool.join()
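Note that imap_unordered passes each task tuple as a single argument, so self._align presumably unpacks (root, trial_root, lock) itself. A Manager lock is used because its proxy, unlike a bare multiprocessing.Lock, can be pickled into pool workers. A minimal standalone sketch of the pattern (the worker name is hypothetical):

import multiprocessing

def _align(task):                   # hypothetical stand-in worker
    root, trial_root, lock = task   # imap_unordered passes one tuple
    with lock:                      # serialize the critical section
        print(root, trial_root)

if __name__ == '__main__':
    lock = multiprocessing.Manager().Lock()
    tasks = [('root', 't%d' % i, lock) for i in range(4)]
    with multiprocessing.Pool(2) as pool:
        for _ in pool.imap_unordered(_align, tasks):
            pass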
Example #13
    def _init_model(self, data_root, n_components, n_clusters):

        # Load embeddings
        trials_embeddings = []
        for trial_folder in Tools.tqdm_pbar(Tools.list_dirs(data_root),
                                            'LOAD TCN EMBEDDINGS'):
            trials_embeddings.append(self._load_embeddings(trial_folder))

        # compute distance matrix & perform dimensionality reduction
        dist_matrix = self._compute_dist_matrix(trials_embeddings)
        reduce_embd = self._reduce(dist_matrix, n_components=n_components)

        # do clustering
        labels = self._cluster(dist_matrix, n_clusters=n_clusters)

        # write results to csv
        self._write_csv(data_root, trials_embeddings, reduce_embd, labels)
Example #14
def unpack_videos(indir, oudir, devices):
    found_positive = False
    for iter_path in sorted(Tools.list_dirs(indir)):
        # only unpack non-empty recordings whose videos all report a length
        # of at least 60 (per Prep.video_length)
        if all(Prep.video_length(v) >= 60
               for v in Tools.list_files(iter_path)) and \
                len(Tools.list_files(iter_path)) > 0:
            found_positive = True
            print('Unpacking', iter_path)
            iter_targ_path = os.path.join(
                oudir, iter_path.split('/')[-1])
            Tools.makedirs(iter_targ_path, delete=True)
            for vid_path in Tools.list_files(iter_path):
                if 'color' in vid_path:
                    if 'left' in vid_path:
                        Prep.unpack_video(
                            vid_path, os.path.join(iter_targ_path, '0'))
                    elif 'middle' in vid_path:
                        Prep.unpack_video(
                            vid_path, os.path.join(iter_targ_path, '1'))
                    elif 'right' in vid_path:
                        Prep.unpack_video(
                            vid_path, os.path.join(iter_targ_path, '2'))
                    else:
                        print('Error unpacking', vid_path)
                elif 'depth' in vid_path:
                    if 'left' in vid_path:
                        Prep.unpack_video(
                            vid_path,
                            os.path.join(iter_targ_path, '0/3d'),
                            rm_outline=True)
                    elif 'middle' in vid_path:
                        Prep.unpack_video(
                            vid_path,
                            os.path.join(iter_targ_path, '1/3d'),
                            rm_outline=True)
                    elif 'right' in vid_path:
                        Prep.unpack_video(
                            vid_path,
                            os.path.join(iter_targ_path, '2/3d'),
                            rm_outline=True)
                    else:
                        print('Error unpacking', vid_path)
        else:
            print('Ignore', iter_path)
    return found_positive
Example #15
    def copy(self, root, in_pos, ou_pos):
        """
            extract frames from video files

            Args:
                root: string - path to store video files
                in_pos: string - camera perspective (e.g. left, middle, right)
                ou_pos: string - name for camera perspective directory in
                                 dataset
        """
        for trial in Tools.tqdm_pbar(Tools.list_dirs(root), 'COPYING'):
            Tools.pyout(trial)
            fname = '_'.join(Tools.fname(trial).split('_')[:-1])
            Tools.makedirs(os.path.join(trial, ou_pos))

            vid_folder = self._find(fname)
            frames = []
            for frame in Tools.list_files(os.path.join(trial, in_pos)):
                frames.append(int(Tools.fname(frame).split('.')[0]))

            path = os.path.join(vid_folder,
                                'color-recording-' + ou_pos + '.avi')
            if not os.path.isfile(path):
                path = path.replace('.avi', '-x265.mp4')
            cap = cv2.VideoCapture(path)
            if not cap.isOpened():
                Tools.pyout("ERROR OPENING VideoCapture")
                raise FileNotFoundError('in copy(): "' + path + '"')

            ii = 0
            with tqdm(total=len(frames)) as pbar:
                while cap.isOpened():
                    ret, frame = cap.read()
                    if ret:
                        if ii in frames:
                            img = np.copy(frame)
                            cv2.imwrite(
                                os.path.join(trial, ou_pos,
                                             str(ii).zfill(5) + '.jpg'), img)
                            pbar.update(1)
                    else:
                        break
                    ii += 1
            cap.release()
Example #16
    def walk_dirs(self, ou_path):
        """
            find trials and store

            Args:
                ou_path: string - path to output file
        """
        recording_paths = []

        for f0 in Tools.tqdm_pbar(Tools.list_dirs(self.root), "GATHERING"):
            fname = Tools.fname(f0)
            if fname == '2017-11-09-to-encode':
                for f1 in Tools.list_dirs(f0):
                    for f2 in Tools.list_dirs(f1):
                        if self._valid_folder(f2):
                            if self._length(f2, encoded=False) >= 60:
                                recording_paths.append(f2)
            elif fname == '2017-11-06-still some parts to compress':
                for f1 in Tools.list_dirs(f0):
                    if self._valid_folder(f1):
                        if self._length(f1, encoded=False) >= 60:
                            recording_paths.append(f1)
            elif fname == 'toencode':
                for f1 in Tools.list_dirs(f0):
                    fname1 = Tools.fname(f1)
                    if "copy" not in fname1 and not fname1 == "encoded":
                        for f2 in Tools.list_dirs(f1):
                            if self._valid_folder(f2):
                                if self._length(f2, encoded=False) >= 60:
                                    recording_paths.append(f2)
            else:
                for f1 in Tools.list_dirs(f0):
                    if self._valid_folder(f1):
                        if self._length(f1) >= 60:
                            recording_paths.append(f1)

        with open(ou_path, 'w+') as f:
            f.write('\n'.join(recording_paths))
Example #17
from src.labeling import Labeler
from src.utils.tools import Tools

IN_ROOT = ''

labeler = Labeler(npos=3,
                  labels=('isolated_grasping', 'unfold', 'flatten',
                          'folding_progress', 'stack'))

for trial in Tools.list_dirs(IN_ROOT):
    labeler.label_video(trial)
Example #18
    def _paste(self):
        root_dir = self.dataset.root.replace('datasets', 'plots')
        for trial_pth in Tools.list_dirs(root_dir):
            l_pths = sorted(Tools.list_files(os.path.join(trial_pth, '0')))
            m_pths = sorted(Tools.list_files(os.path.join(trial_pth, '1')))
            r_pths = sorted(Tools.list_files(os.path.join(trial_pth, '2')))

            minlen = min(len(l_pths), len(m_pths), len(r_pths))
            l_pths = l_pths[:minlen]
            m_pths = m_pths[:minlen]
            r_pths = r_pths[:minlen]

            for L, M, R in zip(l_pths, m_pths, r_pths):
                plot_l = cv2.imread(L, 1)
                plot_m = cv2.imread(M, 1)
                plot_r = cv2.imread(R, 1)

                fram_l = cv2.imread(L.replace('plots', 'datasets'), 1)
                fram_m = cv2.imread(M.replace('plots', 'datasets'), 1)
                fram_r = cv2.imread(R.replace('plots', 'datasets'), 1)

                bc = cv2.BORDER_CONSTANT
                white = [255, 255, 255]
                fram_l = cv2.copyMakeBorder(fram_l,
                                            82,
                                            83,
                                            162,
                                            163,
                                            bc,
                                            value=white)
                fram_m = cv2.copyMakeBorder(fram_m,
                                            82,
                                            83,
                                            162,
                                            163,
                                            bc,
                                            value=white)
                fram_r = cv2.copyMakeBorder(fram_r,
                                            82,
                                            83,
                                            162,
                                            163,
                                            bc,
                                            value=white)

                out = cv2.copyMakeBorder(fram_l,
                                         0,
                                         480,
                                         0,
                                         1280,
                                         bc,
                                         value=white)

                for c in range(3):
                    out[0:480, 640:1280, c] = fram_m[:, :, c]
                    out[0:480, 1280:1920, c] = fram_r[:, :, c]
                    out[480:960, 0:640, c] = plot_l[:, :, c]
                    out[480:960, 640:1280, c] = plot_m[:, :, c]
                    out[480:960, 1280:1920, c] = plot_r[:, :, c]
                cv2.imwrite(L.replace(os.path.join(trial_pth, '0'), trial_pth),
                            out)
            shutil.rmtree(os.path.join(trial_pth, '0'))
            shutil.rmtree(os.path.join(trial_pth, '1'))
            shutil.rmtree(os.path.join(trial_pth, '2'))
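The composite written per frame is 1920x960: the top row holds the three white-padded camera frames (left, middle, right) and the bottom row the corresponding plot images; the per-camera subfolders 0/1/2 are removed once pasted.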
Example #19
tasks = []
with open('fileslist.txt', 'r') as f:
    for line in f:
        tasks.append(line.replace('\n', ''))
for task in Tools.tqdm_pbar(tasks, description="SEGMENTING", total=len(tasks)):
    segmenter.segment_video(task, OU_ROOT)
os.remove('fileslist.txt')

# EXTRACT SELECTED FRAMES FROM VIDEOS
copyer = Copyer()
copyer.copy(OU_ROOT, 'left', 'middle')
copyer.copy(OU_ROOT, 'left', 'right')
copyer.copy(OU_ROOT, 'middle', 'left')

# BGR->RGB, RESIZE, AND CROP IMAGES
for trial in tqdm(Tools.list_dirs(IN_ROOT)):
    for pos in Tools.list_dirs(trial):
        for frame_pth in Tools.list_files(pos, end='.jpg'):
            if RGB:
                img = cv2.imread(frame_pth)
            else:
                img = cv2.imread(frame_pth)[:, :, ::-1]
            if img.shape[0] != 368 or img.shape[1] != 368:
                img = cv2.resize(img, (552, 368))
                if 'left' in pos:
                    D = -398
                    img = img[:, D:D + 368, :]
                if 'middle' in pos:
                    D = 100
Example #20
    def __getitem__(self, idx):
        # if val set, use same random seed
        if not self.augment:
            random.seed(self.seeds[idx])
        trial_folder = self.trial_names[idx]
        if 'fake' in trial_folder:
            return (None, None, None, trial_folder)

        X = np.zeros((self.batch_size,) + self.input_size)
        labels = np.zeros((self.batch_size))
        perspectives = np.zeros((self.batch_size,))
        paths = []
        frames_used = [-float("inf")]
        n = 0
        fails = 0
        while n < self.batch_size // 2:
            # sample two perspectives
            samples_pos = random.sample(Tools.list_dirs(trial_folder), 2)

            # sample anchor frame
            a_val, a_pth, a_idx = self._sample_frame(
                samples_pos[0], frames_used)
            # sample positive frame
            p_val, p_pth, p_idx = self._sample_frame(
                samples_pos[1], frames_used, anchor_idx=a_idx)

            # deal with failing to find a valid pair
            if not a_val or not p_val:
                fails += 1
                if fails > self.batch_size:  # give up
                    break
            else:
                # add anchor frame to batch
                paths.append(a_pth)
                img_a = Transformer.transform(
                    cv2.imread(a_pth), BGR=False)
                X[n * 2, :, :,
                    :] = self.transform(Image.fromarray(img_a)).numpy()
                labels[n * 2] = n
                perspectives[n * 2] = self.pos2num[a_pth.split('/')[-2]]

                # add positive frame to batch
                paths.append(p_pth)
                img_p = Transformer.transform(
                    cv2.imread(p_pth), BGR=False)
                X[n * 2 + 1, :, :,
                    :] = self.transform(Image.fromarray(img_p)).numpy()
                labels[n * 2 + 1] = n
                perspectives[n * 2 + 1] = self.pos2num[p_pth.split('/')[-2]]

                n += 1

        # if batch is not entirely full, cut off zero padding
        X = X[:n * 2, :, :, :]
        labels = labels[:n * 2]
        perspectives = perspectives[:n * 2]
        if X.shape[0] == 0:
            return (None, None, None, trial_folder)
        else:
            X = torch.FloatTensor(X)
            labels = torch.FloatTensor(labels)
            perspectives = torch.FloatTensor(perspectives)

            assert not Tools.contains_nan(X)
            assert not Tools.contains_nan(labels)
            assert not Tools.contains_nan(perspectives)

            return (X, labels, perspectives, paths)
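Each item stacks anchor/positive pairs: X has shape (2n,) + input_size, labels[2*i] == labels[2*i+1] == i marks the i-th pair, and perspectives holds a camera index per frame; empty or 'fake' trials yield (None, None, None, trial_folder). A minimal consumption sketch (iteration details assumed):

    for X, labels, perspectives, paths in dataset:
        if X is None:   # empty or 'fake' trial folder
            continue
        # ... feed X, labels and perspectives to the TCN training step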
Example #21
import cv2
import os
import shutil

from src.utils.tools import Tools
from src.segmenter_grasping import SegmenterGrasping

IN_ROOT = ''
OU_ROOT = ''
RGB = False  # whether videos are in RGB (true) format or BGR (false) format

# MANUALLY SEGMENT RECORDINGS
s = SegmenterGrasping()
for trial in Tools.tqdm_pbar(Tools.list_dirs(IN_ROOT),
                             description="SEGMENTING"):
    s.segment(trial, os.path.join(OU_ROOT, Tools.fname(trial)))

# COPY ACTUAL FRAMES TO OU_ROOT
for trial in Tools.tqdm_pbar(Tools.list_dirs(OU_ROOT),
                             description="MOVING FRAMES"):
    for pos in Tools.list_dirs(trial):
        with open(os.path.join(pos, 'frames.txt'), 'r') as f:
            for path in f:
                shutil.copy(
                    path.replace('\n', ''),
                    os.path.join(pos, Tools.fname(path.replace('\n', ''))))
        os.remove(os.path.join(pos, 'frames.txt'))

# BGR->RGB, RESIZE, AND CROP IMAGES
for trial in Tools.tqdm_pbar(Tools.list_dirs(IN_ROOT), description="CROPPING"):
    for pos in Tools.list_dirs(trial):
Example #22
    def visualize(self, in_folder, ou_folder):
        os.makedirs(ou_folder, exist_ok=True)
        # Tools.pyout(ou_folder)
        N_perspectives = len(Tools.list_dirs(in_folder))
        folder_name = in_folder.split('/')[-1]

        # define codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        writer = cv2.VideoWriter(os.path.join(
            ou_folder, in_folder.split('/')[-1] + '.mp4'),
            fourcc, 16, (240 * N_perspectives, 480))
        Tools.pyout(os.path.join(
            ou_folder, in_folder.split('/')[-1] + '.mp4'))

        # init frame
        main_frm = np.zeros((480, 240 * N_perspectives, 3), dtype=np.uint8)

        # init plots
        plots = self._init_plots(N_perspectives)

        # load embedding dicts
        dicts = self._load_embedding_dicts(in_folder)

        # loop over all frames (the union of frame names across all dicts)
        frames = sorted(set().union(*dicts))
        max_frm = int(frames[-1].split('.')[0])
        min_frm = int(frames[0].split('.')[0])

        for frame in poem(frames, folder_name):
            for ii, pos in enumerate(sorted(Tools.list_dirs(in_folder))):
                try:
                    dic = dicts[ii]
                    side_str = pos.split('/')[-1]
                    lpx = 240 * (ii)
                    rpx = 240 * (ii + 1)
                    plot = plots[ii]

                    # add frame from perspective to main frame
                    frame_img = cv2.imread(
                        os.path.join(in_folder, side_str, frame))
                    frame_img = cv2.resize(frame_img, (240, 240))
                    main_frm[0:240, lpx:rpx, :] = np.copy(
                        frame_img[:, :, :3])

                    # get dimred from TCN embedding of frame
                    embedding = dic[frame]
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        projection = tuple(
                            self.transform(self.reduction_model.transform(
                                np.array([embedding])).squeeze()))

                    # plot embedding in plot frame
                    plt_img = plot.plot(projection, prog=(int(
                        frame.split('.')[0]) - min_frm) / (max_frm - min_frm))

                    # add plot to main frame
                    main_frm[240:480, lpx:rpx, :] = np.copy(
                        plt_img[:, :, :3])
                except cv2.error:
                    pass

            # Tools.render(main_frm, ascii=True)

            writer.write(main_frm)
        writer.release()
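Each output frame stacks, per perspective, the 240x240 camera image (top half) above its 240x240 projection plot (bottom half); the video is written at 16 fps.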
Example #23
            raise e

    plot = None
    if V_RED_FIT:
        Tools.pyout("V_PCA_FIT")
        plot = UMAPPlot(data_root=os.path.join(root, 'val'), n_components=2)
        plot.save_model(os.path.join('./res/models', save_root, 'reduce'))

    if VIZ_EMBED:
        Tools.pyout("VIZ_EMBED")
        OUTPUT_FOLDER = os.path.join(root, 'results/embeddings')
        if plot is None:
            plot = UMAPPlot(joblib_path=os.path.join(
                './res/models', save_root, 'reduce/reduction.joblib'))
        for trial_folder in poem(Tools.list_dirs(os.path.join(root, 'val')),
                                 "PLOTTING TCN EMBEDDINGS"):
            plot.visualize(trial_folder, OUTPUT_FOLDER)
        Tools.ffmpeg(OUTPUT_FOLDER)

    if TEMP_SEGM:
        Tools.pyout("TEMP_SEGM")
        OUTPUT_FOLDER = os.path.join(root, 'results/WT')
        if 'manual_folding' in root:
            n_clusters = 6
        elif 'pouring' in root:
            n_clusters = 5
        else:
            n_clusters = 3
        clusterer = WTCluster(joblib_path=os.path.join(
            './res/models', save_root, 'reduce/reduction.joblib'),
Example #24
    def _load_matrix(self, root):
        for trial_root in Tools.tqdm_pbar(Tools.list_dirs(root),
                                          "LOADING ALIGNMENT DATA"):
            pass