def _store(self, in_root, path, frm, to):
    """
    Store selected fragment.

    Creates, for each camera position, a ``frames.txt`` file listing the
    absolute paths of the frame images belonging to the trial.

    Args:
        in_root: string - path to input folder
        path: string - path to output folder
        frm: int - first frame of trial
        to: int - last frame of trial
    """
    Tools.makedirs(path)
    for pos in ('left', 'middle', 'right'):
        pos_dir = os.path.join(path, pos)
        Tools.makedirs(pos_dir)
        # One path per line, frames `frm`..`to` inclusive, zero-padded names.
        lines = [
            os.path.join(in_root, pos,
                         str(idx).zfill(self.zfill_n) + '.jpg') + '\n'
            for idx in range(frm, to + 1)
        ]
        with open(os.path.join(pos_dir, 'frames.txt'), 'w+') as f:
            f.writelines(lines)
def visualize(self, in_folder, ou_folder):
    """
    Render an mp4 visualization of the clustered embeddings.

    For every frame index, stacks the camera image (top, with a border
    colored by its cluster label) above the embedding scatter plot
    (bottom), one 240px-wide column per camera position.

    Args:
        in_folder: string - folder holding the embedding dicts
        ou_folder: string - folder the mp4 is written to
    """
    Tools.makedirs(ou_folder)
    data = self._load_embedding_dicts(in_folder)

    # Canvas: 480 rows (image + plot), one 240px column per position.
    canvas = np.zeros((480, len(data) * 240, 3), dtype=np.uint8)
    n_frames = data[0][2].shape[0]

    # Define codec and create VideoWriter object.
    video_path = os.path.join(ou_folder, in_folder.split('/')[-1] + '.mp4')
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(video_path, fourcc, 16,
                             (240 * len(data), 480))
    Tools.pyout(video_path)

    # Augment each position's tuple with its cluster assignment.
    data = [self._cluster(*pos_data) for pos_data in data]

    for frame_idx in poem(range(n_frames), Tools.fname(in_folder)):
        for col, (fldr, frms, embd, rdcd, lbls, plot) in enumerate(data):
            # Load and normalize the camera frame.
            img = cv2.imread(os.path.join(fldr, frms[frame_idx]))
            img = cv2.resize(img, (240, 240)).astype(np.uint8)

            # Border color encodes the frame's cluster label.
            color = plot._prog2color(lbls[frame_idx])
            cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]),
                          color, 10)

            # Embedding scatter plot for this frame.
            plt_img = plot.plot(rdcd[frame_idx], color=color, scope=False)

            # Compose this position's column: image on top, plot below.
            x0, x1 = col * 240, (col + 1) * 240
            canvas[0:240, x0:x1, :] = img[:, :, :]
            canvas[240:480, x0:x1, :] = plt_img[:, :, :]
        # Tools.render(canvas, waitkey=50, name="SEGMENTATION")
        writer.write(canvas)
    writer.release()
def visualize(self, in_folder, ou_folder):
    """
    Render an mp4 comparing real frames with VAE reconstructions.

    Top row: the original camera images; middle row: images decoded from
    the imagined VAE embeddings; optional bottom strip: a histogram of
    ground-truth vs. estimated labels (only when ``self.lbl_dict`` is set).

    Args:
        in_folder: string - trial folder containing one subfolder per
            position in ``self.POS``
        ou_folder: string - folder the mp4 is written to
    """
    Tools.makedirs(ou_folder)
    self.len = [
        len(Tools.list_files(os.path.join(in_folder, pos)))
        for pos in self.POS
    ]
    # Iterate over the position folder with the FEWEST frames so that a
    # frame index is valid for every position. (Previously this keyed on
    # len(pos) — the length of the position *name* — which always picked
    # 'left' regardless of actual frame counts.)
    shortest_pos = min(zip(self.POS, self.len), key=lambda p: p[1])[0]
    frames = [
        Tools.fname(f) for f in Tools.list_files(
            os.path.join(in_folder, shortest_pos))
    ]
    # Frame filenames are zero-padded indices; infer the pad width.
    n_zfill = len(frames[0].split('.')[0])

    # define codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(
        os.path.join(ou_folder, in_folder.split('/')[-1] + '.mp4'), fourcc,
        16, (240 * len(self.POS), 480 if self.lbl_dict is None else 720))

    # init frame: extra 240px strip at the bottom for the label histogram
    # if os.path.exists(in_folder.replace('_view', '') + '.json'):
    if self.lbl_dict is not None:
        main_frm = np.zeros((720, len(self.POS) * 240, 3), dtype=np.uint8)
    else:
        main_frm = np.zeros((480, len(self.POS) * 240, 3), dtype=np.uint8)

    for fii, frame in poem(enumerate(frames), Tools.fname(in_folder),
                           total=len(frames)):
        try:
            vae_embds = self._imagine(in_folder, fii, n_zfill)
            for pii, pos in enumerate(self.POS):
                # Top row: the real camera image.
                orig_img = cv2.imread(os.path.join(in_folder, pos, frame))
                orig_img = cv2.resize(orig_img, (240, 240))
                main_frm[0:240, pii * 240:(pii + 1) * 240, :] = \
                    np.copy(orig_img[:, :, :3])

                # Middle row: decode the imagined embedding back to pixels.
                with torch.no_grad():
                    X = torch.FloatTensor(vae_embds[pos]).to(
                        self.device).unsqueeze(0)
                    y = self.VAE[pii].decode(X)
                imag_img = y.cpu().numpy().squeeze()
                # Decoder output is in [-1, 1]; map to [0, 255] and
                # convert CHW/RGB -> HWC/BGR for OpenCV.
                imag_img = (imag_img + 1.) * 127.5
                imag_img = imag_img.transpose(1, 2, 0)[:, :, ::-1].astype(
                    np.uint8)
                imag_img = cv2.resize(imag_img, (240, 240))
                main_frm[240:480, pii * 240:(pii + 1) * 240, :] = \
                    np.copy(imag_img)

            if self.lbl_dict is not None:
                # Bottom strip: ground-truth vs. estimated label histogram.
                est_lbls = self._estimate_labels(in_folder, fii, n_zfill)
                vals = tuple([(float(self.lbl_dict[in_folder][frame][lbl]),
                               el)
                              for lbl, el in zip(self.labels, est_lbls)])
                main_frm[480:720, :, :] = self.cv_hist.plot(values=vals)
        except IndexError:
            # Best-effort: on a missing frame/label, re-emit the previous
            # composite so the video stays in sync.
            pass
        writer.write(main_frm)
    writer.release()
def save_model(self, save_loc):
    """
    Persist the fitted reduction model.

    Ensures ``save_loc`` exists, then serializes
    ``self.reduction_model`` to ``reduction.joblib`` inside it.

    Args:
        save_loc: string - destination folder for the model file
    """
    Tools.makedirs(save_loc)
    target = os.path.join(save_loc, 'reduction.joblib')
    dump(self.reduction_model, target)