Example #1
    def embed(self, force=False):
        self.tcn.switch_mode('eval')
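        # iterate over every dataset root, trial folder and camera-position folder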
        for root in poem(sorted(self.roots, reverse=True), "EMBEDDING"):
            for trial in poem(Tools.list_dirs(root), Tools.fname(root)):
                for pos_path in poem(Tools.list_dirs(trial),
                                     Tools.fname(trial)):
                    pos = pos_path.split('/')[-1]
                    dataset = EmbedderSet(root_dir=pos_path)
                    embeddings = {}
                    for X, paths in poem(dataset, Tools.fname(pos_path)):
                        if len(paths) > 0:
                            y = self._fwd(X)
                            if not np.isfinite(y).all():
                                pyout('before', paths)
                                sys.exit(0)

                            for ii, path in enumerate(paths):
                                embeddings[path.split('/')[-1]] = np.copy(
                                    y[ii, :]).tolist()
                    for key in embeddings:
                        if np.isnan(embeddings[key]).any():
                            pyout('after', pos_path, key)
                            sys.exit(0)

                    with open(os.path.join(trial, 'embed_' + pos + '.json'),
                              'w+') as f:
                        json.dump(embeddings, f, indent=1)
                    del dataset
                    del embeddings
    def __init__(self,
                 root_dir=None,
                 pos=None,
                 batch_size=Config.TCN_BATCH,
                 input_size=(3, 224, 224),
                 output_size=(3, 224, 224),
                 augment=False):
        self.root = root_dir
        assert pos is not None
        self.batch_size = batch_size
        self.input_size = input_size
        self.output_size = output_size
        self.augment = augment

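        # collect the path of every frame for the requested camera position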
        frames = []

        for trial_folder in Tools.list_dirs(root_dir):
            for frame_pth in Tools.list_files(
                    os.path.join(trial_folder, pos), end='.jpg'):
                frames.append(frame_pth)

        if augment:
            random.shuffle(frames)

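        # group the frame paths into batches of at most batch_size frames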
        self.batches = [[]]
        for frame in poem(frames, 'LOADING ' + Tools.fname(root_dir)):
            if len(self.batches[-1]) >= batch_size:
                self.batches.append([frame])
            else:
                self.batches[-1].append(frame)
Example #3
    def _load_embedding_dicts(self, folder):
        try:
            out = []
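            # one embed_<pos>.json file exists per camera position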
            for json_filename in sorted(
                    Tools.list_files(folder, end='.json', substr='embed_')):
                pos = Tools.fname(json_filename).replace('embed_', '').replace(
                    '.json', '')
                root_folder = os.path.join(folder, pos)

                with open(json_filename, 'r') as f:
                    D = json.load(f)
                X = np.zeros((len(D), len(D[list(D)[0]])), dtype=float)
                frames = sorted(list(D))
                for ii, frame in enumerate(frames):
                    X[ii, :] = np.array(D[frame])

                out.append((root_folder, frames, X, self._reduce(X)))

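            # optionally replace each position's embedding matrix with the mean over all positions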
            if self.averaged:
                X_avg = np.stack([pos_data[2] for pos_data in out], axis=0)
                X_avg = np.mean(X_avg, axis=0)
                for ii, (fldr, frms, _, rdcd) in enumerate(out):
                    out[ii] = (fldr, frms, X_avg, rdcd)
            return out
        except Exception as e:
            Tools.debug(e)
            Tools.debug(folder, ex=0)
    def _load_images(self, path):
        """
            read images into working memory

            Args:
                path: string - trial folder containing a 'left' subdirectory

            Returns:
                imgs: list(np.array) - list of frames from 'left' perspective
                N:    int            - number of frames
        """
        N = len(Tools.list_files(os.path.join(path, 'left')))
        imgs = np.zeros((N, 240, 360, 3), dtype=np.uint8)

        load_path = os.path.join(path, 'left')
        for fii, frm_pth in Tools.tqdm_pbar(enumerate(
                Tools.list_files(load_path)),
                                            path,
                                            total=N):
            if self.zfill_n is None:
                self.zfill_n = len(Tools.fname(frm_pth).split('.')[0])

            img = cv2.imread(frm_pth)
            img = cv2.resize(img, (360, 240))
            imgs[fii, :, :, :] = np.copy(img[:, :, :])
        return imgs, N
    def copy(self, root, in_pos, ou_pos):
        """
            extract frames from video files

            Args:
                root: string - dataset root containing the trial folders to
                               copy frames into
                in_pos: string - camera perspective (e.g. left, middle, right)
                ou_pos: string - name for camera perspective directory in
                                 dataset
        """
        for trial in Tools.tqdm_pbar(Tools.list_dirs(root), 'COPYING'):
            Tools.pyout(trial)
            fname = '_'.join(Tools.fname(trial).split('_')[:-1])
            Tools.makedirs(os.path.join(trial, ou_pos))

            vid_folder = self._find(fname)
            frames = []
            for frame in Tools.list_files(os.path.join(trial, in_pos)):
                frames.append(int(Tools.fname(frame).split('.')[0]))

            path = os.path.join(vid_folder,
                                'color-recording-' + ou_pos + '.avi')
            if not os.path.isfile(path):
                path = path.replace('.avi', '-x265.mp4')
            cap = cv2.VideoCapture(path)
            if not cap.isOpened():
                Tools.pyout("ERROR OPENING VideoCapture")
                raise FileNotFoundError('in copy(): "' + path + '"')

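            # step through the video and write out only the frame indices listed in frames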
            ii = 0
            with tqdm(total=len(frames)) as pbar:
                while cap.isOpened():
                    ret, frame = cap.read()
                    if ret:
                        if ii in frames:
                            img = np.copy(frame)
                            cv2.imwrite(
                                os.path.join(trial, ou_pos,
                                             str(ii).zfill(5) + '.jpg'), img)
                            pbar.update(1)
                    else:
                        break
                    ii += 1
    def visualize(self, in_folder, ou_folder):
        Tools.makedirs(ou_folder)

        nn = self._nearest_neighbor(in_folder)

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        writer = cv2.VideoWriter(
            os.path.join(ou_folder,
                         in_folder.split('/')[-1] + '.mp4'), fourcc, 16,
            (480, 480))

        frames = [
            Tools.fname(f)
            for f in Tools.list_files(os.path.join(in_folder, self.POS),
                                      end='.jpg')
        ]
        cv_plot = CVLine((240, 480), minx=0, maxx=len(frames), miny=0, maxy=1)

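        # layout: current frame (top left), aligned anchor frame (top right), reward plot (bottom)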
        main_frm = np.zeros((480, 480, 3), dtype=np.uint8)
        for fii, frame in Tools.tqdm_pbar(enumerate(frames),
                                          Tools.fname(in_folder),
                                          total=len(frames)):
            frame = cv2.imread(os.path.join(in_folder, self.POS, frame))
            try:
                ancho = cv2.imread(
                    os.path.join(nn, self.POS,
                                 self.alignments[in_folder][nn][fii][0]))
            except Exception as e:
                Tools.debug(self.alignments[in_folder][nn])
                Tools.debug(e, ex=0)

            frame = cv2.resize(frame, (240, 240))
            ancho = cv2.resize(ancho, (240, 240))

            main_frm[:240, :240, :] = frame[:, :, :]
            main_frm[:240, 240:, :] = ancho[:, :, :]

            R = self._reward_score(in_folder, fii)
            main_frm[240:, :, :] = cv_plot.plot((fii, R))

            writer.write(main_frm)
        writer.release()
    def _reward_score(self, trial, index):
        cum_gain = 0.
        cum_weight = 0.
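        # average the normalised progression of the aligned frame over all matched trials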
        for match_trial in self.alignments[trial]:
            match_frm, weight = self.alignments[trial][match_trial][index]
            match_nmr = int(''.join(filter(str.isdigit, match_frm)))
            try:
                match_min = min(
                    int(''.join(filter(str.isdigit, Tools.fname(f))))
                    for f in Tools.list_files(
                        os.path.join(match_trial, self.POS), end='.jpg'))
                match_max = max(
                    int(''.join(filter(str.isdigit, Tools.fname(f))))
                    for f in Tools.list_files(
                        os.path.join(match_trial, self.POS), end='.jpg'))
            except Exception as e:
                Tools.debug(os.path.join(Tools.pathstr(match_trial), self.POS))
                Tools.debug(e, ex=0)

            cum_gain += (match_nmr - match_min) / (match_max - match_min)
            cum_weight += 1
        return cum_gain / cum_weight
    def __init__(self, root=None):
        self.POS = tuple([
            Tools.fname(f) for f in Tools.list_dirs(Tools.list_dirs(root)[0])
        ])
        Tools.debug(self.POS)
        if 'steady' in self.POS:
            self.POS = 'steady'
        elif 'middle' in self.POS:
            self.POS = 'middle'
        else:
            self.POS = self.POS[0]
        am = AlignMatrix(root)
        self.alignments = am.load()
    def walk_dirs(self, ou_path):
        """
            find valid recording folders and write their paths to a file

            Args:
                ou_path: string - path to output file
        """
        recording_paths = []

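        # the archive folders use different nesting depths, so each known layout is handled separately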
        for f0 in Tools.tqdm_pbar(Tools.list_dirs(self.root), "GATHERING"):
            fname = Tools.fname(f0)
            if fname == '2017-11-09-to-encode':
                for f1 in Tools.list_dirs(f0):
                    for f2 in Tools.list_dirs(f1):
                        if self._valid_folder(f2):
                            if self._length(f2, encoded=False) >= 60:
                                recording_paths.append(f2)
            elif fname == '2017-11-06-still some parts to compress':
                for f1 in Tools.list_dirs(f0):
                    if self._valid_folder(f1):
                        if self._length(f1, encoded=False) >= 60:
                            recording_paths.append(f1)
            elif fname == 'toencode':
                for f1 in Tools.list_dirs(f0):
                    fname1 = Tools.fname(f1)
                    if "copy" not in fname1 and not fname1 == "encoded":
                        for f2 in Tools.list_dirs(f1):
                            if self._valid_folder(f2):
                                if self._length(f2, encoded=False) >= 60:
                                    recording_paths.append(f2)
            else:
                for f1 in Tools.list_dirs(f0):
                    if self._valid_folder(f1):
                        if self._length(f1) >= 60:
                            recording_paths.append(f1)

        with open(ou_path, 'w+') as f:
            f.write('\n'.join(recording_paths))
Example #10
    def __init__(self, device=None, state_dict_root=None, root=None):
        self.device = device
        self.POS = tuple([
            Tools.fname(f) for f in Tools.list_dirs(Tools.list_dirs(root)[0])
        ])
        self.VAE_DICT, self.EMB_SIZE = self._load_vae_dicts(root)
        am = AlignMatrix(root)
        self.alignments = am.load()
        self.VAE = [
            VAE(state_dict_path=os.path.join(state_dict_root, pos,
                                             'vae_mdl.pth')).to(device)
            for pos in poem(self.POS, "LOADING VAE MODELS")
        ]
        self.cv_hist, self.labels = self._init_hist((240, 240 * len(self.POS)),
                                                    root)
        self.lbl_dict = self._init_labels(root)
Example #11
    def _write_csv(self, data_root, trials_embeddings, reduce_embd, labels):
        Tools.pyout("WRITING CSV FILE")
        SAVE_ROOT = '/media/roblaundry/' + \
            data_root.split('/')[-2] + '/results/BT'
        Tools.makedirs(SAVE_ROOT)
        with open(os.path.join(SAVE_ROOT, 'data.csv'), 'w+') as csv_file:
            fieldnames = ['trial_name', 'x', 'y', 'c']
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)

            writer.writeheader()
            for ii, embedding in enumerate(trials_embeddings):
                writer.writerow({
                    'trial_name': Tools.fname(embedding[0]),
                    'x': reduce_embd[ii][0],
                    'y': reduce_embd[ii][1],
                    'c': labels[ii].replace(',', ';')
                })
        Tools.pyout("----------> DONE")
Example #12
    def visualize(self, in_folder, ou_folder):
        Tools.makedirs(ou_folder)
        data = self._load_embedding_dicts(in_folder)
        main_frm = np.zeros((480, len(data) * 240, 3), dtype=np.uint8)
        N = data[0][2].shape[0]

        # define codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        writer = cv2.VideoWriter(
            os.path.join(ou_folder,
                         in_folder.split('/')[-1] + '.mp4'), fourcc, 16,
            (240 * len(data), 480))
        Tools.pyout(os.path.join(ou_folder, in_folder.split('/')[-1] + '.mp4'))

        for ii, pos_data in enumerate(data):
            data[ii] = self._cluster(*pos_data)

        for n in poem(range(N), Tools.fname(in_folder)):
            for ii, pos_data in enumerate(data):
                fldr, frms, embd, rdcd, lbls, plot = pos_data

                # load frame image
                img_pth = os.path.join(fldr, frms[n])
                frame = cv2.imread(img_pth)
                frame = cv2.resize(frame, (240, 240)).astype(np.uint8)

                # draw cluster color in frame
                color = plot._prog2color(lbls[n])
                cv2.rectangle(frame, (0, 0), (frame.shape[1], frame.shape[0]),
                              color, 10)

                # draw embeddings
                plt_img = plot.plot(rdcd[n], color=color, scope=False)

                # add subfigures to frame
                main_frm[0:240, ii * 240:(ii + 1) * 240, :] = frame[:, :, :]
                main_frm[240:480, ii * 240:(ii + 1) * 240, :] = \
                    plt_img[:, :, :]
            # Tools.render(main_frm, waitkey=50, name="SEGMENTATION")
            writer.write(main_frm)
        writer.release()
    def embed(self):
        dict_ = None
        folder = None

        with torch.no_grad():
            for ii in poem(range(len(self.dataset)),
                           "STORING EMBEDDINGS " + self.pos):
                X, _, paths = self.dataset[ii]
                X = X.to(self.device)
                _, mu, _ = self.vae(X)
                mu = mu.cpu().numpy().tolist()

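                # flush the accumulated embeddings to a JSON file whenever the source folder changes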
                for pth, emb in zip(paths, mu):
                    if folder != os.path.join(*pth.split('/')[:-1]):
                        if dict_ is not None:
                            with open('/' + folder + '_vae.json', 'w+') as f:
                                json.dump(dict_, f, indent=1)
                        folder = os.path.join(*pth.split('/')[:-1])
                        dict_ = {}
                    dict_[Tools.fname(pth)] = emb

        with open('/' + folder + '_vae.json', 'w+') as f:
            json.dump(dict_, f, indent=1)
import cv2
import os
import shutil

from src.utils.tools import Tools
from src.segmenter_grasping import SegmenterGrasping

IN_ROOT = ''
OU_ROOT = ''
RGB = False  # whether videos are in RGB (true) format or BGR (false) format

# MANUALLY SEGMENT RECORDINGS
s = SegmenterGrasping()
for trial in Tools.tqdm_pbar(Tools.list_dirs(IN_ROOT),
                             description="SEGMENTING"):
    s.segment(trial, os.path.join(OU_ROOT, Tools.fname(trial)))

# COPY ACTUAL FRAMES TO OU_ROOT
for trial in Tools.tqdm_pbar(Tools.list_dirs(OU_ROOT),
                             description="MOVING FRAMES"):
    for pos in Tools.list_dirs(trial):
        with open(os.path.join(pos, 'frames.txt'), 'r') as f:
            for path in f:
                shutil.copy(
                    path.replace('\n', ''),
                    os.path.join(pos, Tools.fname(path.replace('\n', ''))))
        os.remove(os.path.join(pos, 'frames.txt'))

# BGR->RGB, RESIZE, AND CROP IMAGES
for trial in Tools.tqdm_pbar(Tools.list_dirs(IN_ROOT), description="CROPPING"):
    for pos in Tools.list_dirs(trial):
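        # The body of this loop is not shown above; the lines below are a
        # minimal assumed sketch (not the original implementation) of the
        # BGR->RGB conversion, resize and crop step announced by the comment.
        for frame_pth in Tools.list_files(pos, end='.jpg'):
            img = cv2.imread(frame_pth)  # OpenCV loads images as BGR
            if RGB:
                # frames were stored as RGB, so swap channels back to BGR
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            h, w = img.shape[:2]
            s = min(h, w)  # centre crop to a square
            y0, x0 = (h - s) // 2, (w - s) // 2
            img = cv2.resize(img[y0:y0 + s, x0:x0 + s],
                             (240, 240))  # assumed target size
            cv2.imwrite(frame_pth, img)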
Example #15
roots = []
roots.append('/media/roblaundry/folding_full')
roots.append('/media/roblaundry/folding_single')
roots.append('/media/roblaundry/grasping')
roots.append('/media/roblaundry/grasping_reverse')
roots.append('/media/roblaundry/pouring')

os.system('clear')
Tools.pyout("STARTING PIPELINE")

devices = (torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
           torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))

for root in roots:
    if Config.SERVER:
        save_root = Tools.fname(root)
    else:
        save_root = root  # assumed fallback so save_root is always defined

    # train TCN on specified dataset
    if TRAIN_TCN:
        Tools.pyout("TRAIN_TCN")
        STATE_DICT_PATHS = (None, None)
        TRAIN_ROOT = os.path.join(root, 'train')
        VAL_ROOT = os.path.join(root, 'val')
        SAVE_LOC = os.path.join(save_root, 'tcn')
        EPOCHS = 100
        trainer = TCNTrainer(devices=devices,
                             state_dict_paths=STATE_DICT_PATHS,
                             train_root=TRAIN_ROOT,
                             val_root=VAL_ROOT,
                             save_loc=SAVE_LOC)
        trainer.train_loop(0, EPOCHS)
Example #16
    def visualize(self, in_folder, ou_folder):
        Tools.makedirs(ou_folder)

        self.len = [
            len(Tools.list_files(os.path.join(in_folder, pos)))
            for pos in self.POS
        ]

        frames = [
            Tools.fname(f) for f in Tools.list_files(
                os.path.join(in_folder, min(self.POS, key=lambda x: len(x))))
        ]

        n_zfill = len(frames[0].split('.')[0])

        # define codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        writer = cv2.VideoWriter(
            os.path.join(ou_folder,
                         in_folder.split('/')[-1] + '.mp4'), fourcc, 16,
            (240 * len(self.POS), 480 if self.lbl_dict is None else 720))

        # init frame
        # if os.path.exists(in_folder.replace('_view', '') + '.json'):
        if self.lbl_dict is not None:
            main_frm = np.zeros((720, len(self.POS) * 240, 3), dtype=np.uint8)
        else:
            main_frm = np.zeros((480, len(self.POS) * 240, 3), dtype=np.uint8)

        for fii, frame in poem(enumerate(frames),
                               Tools.fname(in_folder),
                               total=len(frames)):
            try:
                vae_embds = self._imagine(in_folder, fii, n_zfill)
                for pii, pos in enumerate(self.POS):
                    orig_img = cv2.imread(os.path.join(in_folder, pos, frame))
                    orig_img = cv2.resize(orig_img, (240, 240))
                    main_frm[0:240, pii * 240:(pii + 1) * 240, :] = \
                        np.copy(orig_img[:, :, :3])

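                    # decode the predicted VAE embedding back into an image for this camera position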
                    with torch.no_grad():
                        X = torch.FloatTensor(vae_embds[pos]).to(
                            self.device).unsqueeze(0)
                        y = self.VAE[pii].decode(X)
                        imag_img = y.cpu().numpy().squeeze()
                    imag_img = (imag_img + 1.) * 127.5
                    imag_img = imag_img.transpose(1, 2, 0)[:, :, ::-1].astype(
                        np.uint8)
                    imag_img = cv2.resize(imag_img, (240, 240))
                    main_frm[240:480, pii * 240:(pii + 1) * 240, :] = \
                        np.copy(imag_img)
                if self.lbl_dict is not None:
                    est_lbls = self._estimate_labels(in_folder, fii, n_zfill)
                    vals = tuple([(float(self.lbl_dict[in_folder][frame][lbl]),
                                   el)
                                  for lbl, el in zip(self.labels, est_lbls)])
                    main_frm[480:720, :, :] = self.cv_hist.plot(values=vals)
            except IndexError:
                pass
            writer.write(main_frm)
        writer.release()
    def segment_video(self, in_folder, ou_folder):
        """
            Extract trials from video

            Args:
                in_folder: string - path to input video folder
                ou_folder: string - path to output folder root
        """
        try:
            l_color = self._load_videos(in_folder)[0]

            img = np.copy(l_color[0])
            img = cv2.resize(img, (240, 160))
            Tools.render(img[:, :, ::-1])

        except Exception as e:
            print(in_folder)
            print(e)
            time.sleep(1000)
            raise e

        progression = [0]
        created_folders = []
        ii = 0
        key = ''
        try:
            with tqdm(total=len(l_color)) as pbar:
                while key != ord('+'):
                    feedback = ''
                    key = self.stdscr.getch()
                    self.stdscr.addch(20, 25, key)
                    self.stdscr.refresh()
                    if key == ord('d'):
                        # move forwards slow
                        ii = min(ii + 1, len(l_color) - 1)
                    elif key == ord('e'):
                        # move forwards medium
                        ii = min(ii + 5, len(l_color) - 1)
                    elif key == ord('3'):
                        # move forwards fast
                        ii = min(ii + 25, len(l_color) - 1)

                    elif key == ord('a'):
                        # move backwards slow
                        ii = max(progression[-1], ii - 1)
                    elif key == ord('q'):
                        # move backwards medium
                        ii = max(progression[-1], ii - 5)
                    elif key == ord('1'):
                        # move backwards fast
                        ii = max(progression[-1], ii - 25)

                    elif key == ord(']'):
                        # STORE SEGMENT
                        # make new folder
                        store_folder_number = len(
                            [f for f in created_folders if f is not None])
                        savepath = os.path.join(
                            ou_folder,
                            Tools.fname(in_folder) + '_' +
                            str(store_folder_number).zfill(3))
                        self._makedirs(savepath)

                        # store images
                        self._store_imgs(l_color, savepath + '/left',
                                         progression[-1], ii)

                        # update progression data
                        progression.append(ii)
                        created_folders.append(savepath)

                        feedback = 'store'

                    elif key == ord('u'):
                        # skip
                        progression.append(ii)
                        created_folders.append(None)

                        feedback = 'skip'
                    elif key == ord('.'):
                        # undo
                        if len(progression) > 1:
                            if created_folders[-1] is not None:
                                shutil.rmtree(created_folders[-1])

                            progression.pop(-1)
                            created_folders.pop(-1)
                            ii = progression[-1]

                        feedback = 'back'

                    pbar.update(ii - pbar.n)
                    img = np.copy(l_color[ii])
                    img = cv2.resize(img, (240, 160))

                    if feedback == 'store':
                        cv2.circle(img, (10, 10), 10, (0, 255, 0), -1)
                    if feedback == 'skip':
                        cv2.circle(img, (10, 10), 10, (255, 0, 0), -1)
                    if feedback == 'back':
                        cv2.circle(img, (10, 10), 10, (0, 0, 255), -1)

                    Tools.render(img[:, :, ::-1])
        except Exception as e:
            print(e)
            time.sleep(1000)
            raise e

        with open('./res/finished_files.txt', 'a+') as f:
            f.write(in_folder + '\n')