def readFromCSV(self, fpath, scale=1.0):
    """Populate this table from a CSV file.

    Lines whose first character is '#' are treated as comments and skipped.
    Cells that parse as numbers are multiplied by `scale` and stored as
    strings; anything non-numeric is stored verbatim.

    Args:
        fpath: path of the CSV file to read.
        scale: multiplier applied to every numeric cell (default 1.0).
    """
    with open(fpath) as f:
        # filter() runs over raw lines (not parsed rows), dropping '#' comments
        # before csv.reader ever sees them.
        tablereader = csv.reader(filter(lambda row: row[0] != '#', f))
        for row in tablereader:
            tr = TableRow()
            for elt in row:
                try:
                    tr.addElement(Element(str(float(elt) * scale)))
                except ValueError:
                    # Only a failed float() conversion means "non-numeric cell".
                    # (The original bare `except:` also swallowed
                    # KeyboardInterrupt/SystemExit and real bugs.)
                    tr.addElement(Element(elt))
            self.addRow(tr)
def readFromCSV(self, fpath, scale=1.0):
    """Read a CSV file into this table.

    '#'-prefixed lines are skipped as comments. Numeric cells are scaled by
    `scale` and stringified; non-numeric cells are added unchanged.

    Args:
        fpath: path of the CSV file.
        scale: factor applied to numeric cells (default 1.0).
    """
    with open(fpath) as f:
        # The lambda filters raw text lines, so comment lines never reach
        # the csv parser.
        tablereader = csv.reader(filter(lambda row: row[0] != '#', f))
        for row in tablereader:
            tr = TableRow()
            for elt in row:
                try:
                    tr.addElement(Element(str(float(elt) * scale)))
                except ValueError:
                    # Narrowed from a bare `except:` — a ValueError from
                    # float() is the only "expected" failure here.
                    tr.addElement(Element(elt))
            self.addRow(tr)
import sys
sys.path.append('/home/rohit/Software/vis/PyHTMLWriter/src')
from Element import Element
from TableRow import TableRow
from Table import Table
from TableWriter import TableWriter
import numpy as np

# Demo script: build a 100x10 HTML table in which every cell shows the same
# image annotated with a 17-joint pose overlay, then write it to 'out'.
# The first row is rendered as a header row.
t = Table()
for row_idx in range(100):
    row = TableRow(isHeader=True) if row_idx == 0 else TableRow()
    for _ in range(10):
        cell = Element()
        # 2x17 (x;y) joint coordinates, transposed to a list of [x, y] pairs.
        pose = np.array([
            [587, 569, 490, 535, 621, 630, 512, 490, 488,
             483, 576, 479, 457, 522, 550, 571, 491],
            [561, 447, 411, 393, 430, 537, 402, 298, 275,
             201, 394, 377, 305, 289, 348, 367, 376],
        ]).transpose().tolist()
        cell.addImg("http://sun.pc.cs.cmu.edu/~rohit/Public/Images/himym_chairs.jpg",
                    poses=[pose], width=500, imsize=[1280, 768])
        row.addElement(cell)
    t.addRow(row)
tw = TableWriter(t, 'out')
tw.write()
# NOTE(review): Python 2 fragment (`xrange`, `print` statement) — inconsistent
# with the Python 3 snippets elsewhere in this file; confirm intended runtime.
# NOTE(review): the first loop writes to an `f` that must have been opened
# before this chunk — its definition is not visible here.

# Write the expected gif filenames (0001..0500) one per line.
for i in xrange(1,501):
    filename = '%04d' %i + '_input_pred.gif'
    f.write(filename+'\n')

# upload on a webpage: read the name list back and lay the gifs out
# 10-per-row in an HTML table.
t = Table()
srcpath = './gifs/'
image_set_file = 'path_gifs.txt'
with open(image_set_file) as f:
    image_index = [x.strip() for x in f.readlines()]
trajnum = len(image_index) // 10  # one table row per group of 10 gifs
for r in range(trajnum):
    idx = r
    # if r == 0:
    #     r = TableRow(isHeader = True)
    # else:
    r = TableRow()
    for e in range(10):
        # gifs are 1-indexed: row r covers files idx*10+1 .. idx*10+10
        idx2 = idx * 10 + e + 1
        e = Element()
        tpath = srcpath + '%04d' % idx2 + '_input_pred.gif'
        print tpath
        e.addImg(tpath)
        r.addElement(e)
    t.addRow(r)
tw = TableWriter(t, 'out')
tw.write()
# NOTE(review): truncated fragment, kept verbatim. The opening `return([], [])
# ... return (imnames, scores)` is the tail of a readScores-style helper whose
# `def` line is outside this chunk (it parses ':'-separated "index:score"
# matches, mapping indices into `imgslist`); the rest builds a comparison
# table and is itself cut off mid-loop. `int(temp[0]) / 10000` is py2-style
# integer division — would need `//` under py3; confirm intended runtime.
return([], []) f.close() matches = s.strip().split() imnames = [] scores = [] for m in matches[:50]: temp = m.strip().split(':') imname = imgslist[int(temp[0]) / 10000 - 1] score = float(temp[1]) imnames.append(imname) scores.append(score) return (imnames, scores) t = Table() r= TableRow(isHeader=True) r.addElement(Element('Sno')) r.addElement(Element('Method')) r.addElement(Element('Query/Top Match')) r.addElement(Element('Matches...')) for i in range(500): fpath = os.path.join(respath, str(i + 1) + '.txt'); fpath2 = os.path.join(respath_f, str(i + 2) + '.txt'); imnames, scores = readScores(fpath) imnames2, scores2 = readScores2(fpath2) if len(imnames) == 0 or len(imnames2) == 0: continue r = TableRow(rno=i) r.addElement(Element('Background')) for j in range(20): e = Element()
# NOTE(review): export() was flattened by whatever extracted this file — the
# statements below are collapsed onto a few physical lines and the original
# indentation is lost, so the body is kept verbatim rather than risk a wrong
# re-nesting. What it appears to do: strip the model head, compute features
# for the whole dataset, run the configured clustering algorithm, save
# centroids/model state to '<resume>.clus' (plus a faiss PCA transform to
# '<resume>.pca'), then for each cluster render a pose map and exemplar gifs
# into an HTML table written via TableWriter; returns the `out` dict.
# NOTE(review): `idxs = idxs[np.argsort(idxs)]` followed by
# `features = features[np.argsort(idxs)]` argsorts the *already sorted* idxs
# (an identity permutation), so `features` is likely never reordered as
# intended — the argsort of the original idxs should probably be computed
# once and applied to both. Confirm against dc_main.compute_features.
# NOTE(review): `if args.group > 1: args.group = ...` mutates the shared args
# namespace; later code (commented-out exemplar sampling) depends on it.
def export(args, model, dataloader, dataset): # remove head model.top_layer = None model.classifier = nn.Sequential(*list(model.classifier.children())[:-1]) # get the features for the whole dataset features, idxs, pos1 = dc_main.compute_features(dataloader, model, len(dataset), args) idxs = idxs[np.argsort(idxs)] features = features[np.argsort(idxs)] if args.group > 1: args.group = args.ep_length - args.traj_length + 1 # clustering algorithm to use deepcluster = clustering.__dict__[args.clustering](args.nmb_cluster, group=args.group) # cluster the features clustering_loss = deepcluster.cluster(features, verbose=args.verbose) centroids = deepcluster.clus.centroids # centroids = faiss.vector_float_to_array(deepcluster.clus.get_means_and_variances) # centroids = centroids.reshape(nmb_cluster, 256) # import pdb; pdb.set_trace() # self_index = faiss.IndexFlatL2(centroids.shape[1]) # build the index # self_index.add(centroids) # self_dists = self_index.search(centroids, centroids.shape[0]) _, (mean, std), _, _ = vis_utils.make_transform(args.data) model.features = model.features.module c_mean, c_cov, c_var = get_means_and_variances(deepcluster, features, args) resume = args.resume if len(args.resume) > 0 else args.exp out = { 'state_dict': model.state_dict(), 'centroids': centroids, 'pca_path': resume + '.pca', 'mean': mean, 'std': std, # 'cluster_mean': c_mean, 'cluster_cov': c_cov, 'clus': deepcluster.clus, } if args.export > 0: faiss.write_VectorTransform(deepcluster.mat, resume + '.pca') torch.save(out, resume + '.clus') out['pca'] = deepcluster.mat T = args.traj_length pos = pos1 if sum(sum(pos)) == 0: meta = torch.load('%s/meta.dict' % args.data) pos = np.array(meta['pos']) pos_idx = np.arange(pos.shape[0]*pos.shape[1]) pos_idx = pos_idx.reshape(pos.shape[0], pos.shape[1])[:, T-1:] pos_idx = pos_idx.reshape(pos_idx.shape[0] * pos_idx.shape[1]) pos = pos.reshape(pos.shape[0]*pos.shape[1], pos.shape[2]) else: meta = 
torch.load('/data3/ajabri/vizdoom/single_env_hard_fixed1/0/meta.dict') # import pdb; pdb.set_trace() sz = 30 from scipy.ndimage.filters import gaussian_filter def get_obj_masks(objs): out = np.zeros((3, sz, sz)) for o in objs[0]: # import pdb; pdb.set_trace() x, y = o x, y = int((x - x0)/x1 *sz), int((y-y0)/y1 * sz) out[:, x:x+1, y:y+1] = 1 return out def get_mask_from_coord(coord): import matplotlib.cm as cm x, y, a = coord x, y = int(x), int(y) out = np.zeros((3, sz, sz)) out[:, x, y] = cm.jet(a)[:3] return out # import pdb; pdb.set_trace() # sorted_self_dists = np.argsort(self_dists[0][:, 1])[::-1] # sorted_self_dists = np.argsort(self_dists[0].sum(axis=-1))[::-1] smoother1 = models.mini_models.GaussianSmoothing(3, 5, 5) smoother2 = models.mini_models.GaussianSmoothing(3, 7, 5) smoother3 = models.mini_models.GaussianSmoothing(3, 7, 7) smoother4 = models.mini_models.GaussianSmoothing(3, 9, 7) exp_name = args.resume.split('/')[-2] if args.resume != '' else args.exp.split('/')[-1] out_root = '%s/%s' % (args.export_path, exp_name) # import pdb; pdb.set_trace() if not os.path.exists(out_root): os.makedirs(out_root) table = Table() num_show = 8 sorted_variance = np.argsort(c_var)[::-1] sorted_sizes = np.argsort([len(ll) for ll in deepcluster.images_dists])[::-1] # import pdb; pdb.set_trace() for c, clus_idx in enumerate(sorted_sizes): # for c, clus_idx in enumerate(sorted_variance): # for c, clus_idx in enumerate(sorted_self_dists): l = deepcluster.images_dists[clus_idx] if len(l) == 0: continue ll = [ii[0] for ii in sorted(l, key=lambda x: x[1])[::-1]][:num_show//2] ll += [ii[0] for ii in random.sample(l, min(num_show//2, len(l)))] l = [ii[0] for ii in l] row = TableRow(rno=c) e = Element() e.addTxt('size: %s <br>variance: %s' % (len(deepcluster.images_dists[clus_idx]), c_var[clus_idx])) row.addElement(e) # import pdb; pdb.set_trace() ## MAP poo = [] for t in range(T): poo += [pos[np.array(l) - t]] posum = env.make_pose_map(np.concatenate(poo), meta['objs'][0], 
sz=sz) # posum *= 255.0 # vis.image((posum*255.).astype(np.uint8), opts=dict(width=300, height=300)) # vis.image(gaussian_filter((posum*255.), sigma=1).astype(np.uint8), opts=dict(width=300, height=300)) # gifname = '%s/%s_%s.png' % (exp_name, c, 'map') gifname = '%s_%s.png' % (c, 'map') gifpath = '%s/%s' % (out_root, gifname) imageio.imwrite(gifpath, cv2.resize((posum*255.).astype(np.uint8).transpose(1, 2, 0), (0,0), fx=5, fy=5, interpolation = cv2.INTER_AREA)) e = Element() e.addImg(gifname, width=180) row.addElement(e) ## EXEMPLARS for iii, i in enumerate(ll): # import pdb; pdb.set_trace() imgs = vis_utils.unnormalize_batch(dataset[i][0], mean, std) # vis.images(imgs, opts=dict(title=f"{c} of length {len(l)}")) # vis.images(smoother1(torch.Tensor(imgs)).numpy(), opts=dict(title=f"{c} of length {len(l)}")) # vis.images(smoother2(torch.Tensor(imgs)).numpy(), opts=dict(title=f"{c} of length {len(l)}")) # vis.images(smoother3(torch.Tensor(imgs)).numpy(), opts=dict(title=f"{c} of length {len(l)}")) # vis.images(smoother4(torch.Tensor(imgs)).numpy(), opts=dict(title=f"{c} of length {len(l)}")) # gifname = '%s/%s_%s.gif' % (exp_name, c, i) gifname = '%s_%s.gif' % (c, i) gifpath = '%s/%s' % (out_root, gifname) vis_utils.make_gif_from_tensor(imgs.astype(np.uint8), gifpath) e = Element() if iii < num_show // 2: e.addTxt('rank %i<br>' % iii) else: e.addTxt('random<br>') e.addImg(gifname, width=128) row.addElement(e) ## EXEMPLARS # import visdom # vis = visdom.Visdom(port=8095, env='main', use_incoming_socket=False) # vis.images(vis_utils.unnormalize_batch( # np.stack([dataset[iii][0][0] for iii in range(-100, -50)]), mean, std # )) # import pdb; pdb.set_trace() # gl = np.array(l).reshape(-1, args.group) # if args.group > 10: # exemplars = gl[random.sample(list(range(gl.shape[0])), 4)] # else: # exemplars = gl[random.sample(list(range(gl.shape[0])), 10)] # for iii, i in enumerate(exemplars): # # import pdb; pdb.set_trace() # # imgs = 
vis_utils.unnormalize_batch(dataset[i][0], mean, std) # imgs = np.stack([dataset[_idx][0][0] for _idx in i]) # imgs = vis_utils.unnormalize_batch(imgs, mean, std) # # import pdb; pdb.set_trace() # # imgs = vis_utils.unnormalize_batch(dataset[i][0], mean, std) # # vis.images(imgs, opts=dict(title=f"{c} of length {len(l)}")) # # vis.images(smoother1(torch.Tensor(imgs)).numpy(), opts=dict(title=f"{c} of length {len(l)}")) # # vis.images(smoother2(torch.Tensor(imgs)).numpy(), opts=dict(title=f"{c} of length {len(l)}")) # # vis.images(smoother3(torch.Tensor(imgs)).numpy(), opts=dict(title=f"{c} of length {len(l)}")) # # vis.images(smoother4(torch.Tensor(imgs)).numpy(), opts=dict(title=f"{c} of length {len(l)}")) # # gifname = '%s/%s_%s.gif' % (exp_name, c, i) # gifname = '%s_%s.gif' % (c, i[0]) # gifpath = '%s/%s' % (out_root, gifname) # vis_utils.make_gif_from_tensor(imgs.astype(np.uint8), gifpath) # e = Element() # if iii < num_show // 2: # e.addTxt('rank %i<br>' % iii) # else: # e.addTxt('random<br>') # e.addImg(gifname, width=128) # row.addElement(e) table.addRow(row) tw = TableWriter(table, '%s/%s' % (args.export_path, exp_name), rowsPerPage=min(args.nmb_cluster,100)) tw.write() # import pdb; pdb.set_trace() return out
def writeHTML(video_ls, output_folder_name, genre):
    """Render `video_ls` as an HTML table and write it to `output_folder_name`.

    Each entry of `video_ls` is expected to be a sequence whose elements are
    [video_id, hand_score, is_beauty_flag, ...].  The table gets one header
    row followed by one row per video: running number, id, genre, scores,
    four YouTube thumbnails and a watch link.

    Args:
        video_ls: list of video records as described above.
        output_folder_name: destination folder passed to TableWriter.
        genre: genre label written into every row.
    """
    length = len(video_ls)
    t = Table()
    print('length : ', length)
    # range(length + 1): index 0 is the header row, 1..length are the videos.
    # (The original used range(length), which silently dropped the last video
    # because data rows index video_ls[count - 1].)
    for r in range(length + 1):
        count = r
        if r == 0:
            r = TableRow(isHeader=True)
            # "Video_ID" fixes the original's "Viedo_ID" typo in the header.
            for title in ("No.", "Video_ID", "Genre", "Handscore",
                          "Is_beauty", "Is_cartoon",
                          "Thumbnail_1", "Thumbnail_2",
                          "Thumbnail_3", "Thumbnail_4", "Link"):
                cell = Element()
                cell.addTxt(title)
                r.addElement(cell)
            t.addRow(r)
            continue
        r = TableRow()
        a = Element()  # running row number
        a.addTxt("No." + str(count))
        r.addElement(a)
        b = Element()  # video id
        b.addTxt(video_ls[count - 1][0])
        r.addElement(b)
        c = Element()  # genre
        c.addTxt(genre)
        r.addElement(c)
        c = Element()  # hand score
        c.addTxt(video_ls[count - 1][1])
        r.addElement(c)
        c = Element()  # is_beauty flag
        c.addTxt(video_ls[count - 1][2])
        r.addElement(c)
        # NOTE(review): the original writes the video id again under the
        # "Is_cartoon" column — looks like a copy/paste slip; kept as-is.
        c = Element()
        c.addTxt(video_ls[count - 1][0])
        r.addElement(c)
        for i in range(4):  # 4 thumbnails
            f = Element()
            f.addImg("https://img.youtube.com/vi/" + video_ls[count - 1][0]
                     + "/" + str(i) + ".jpg")
            r.addElement(f)
        g = Element("click me")  # link
        g.addLink("https://www.youtube.com/watch?v=" + video_ls[count - 1][0])
        r.addElement(g)
        t.addRow(r)
    tw = TableWriter(t, output_folder_name)
    tw.write()
import sys
sys.path.append('/home/rohit/Software/vis/PyHTMLWriter/src')
from Element import Element
from TableRow import TableRow
from Table import Table
from TableWriter import TableWriter
import numpy as np

# Demo script: 100x10 table of the same image annotated with a 17-joint pose.
# NOTE(review): this copy of the script is truncated here — the visible code
# ends after adding cells to a row; no t.addRow / TableWriter follows.
t = Table()
for row_idx in range(100):
    row = TableRow(isHeader=True) if row_idx == 0 else TableRow()
    for _ in range(10):
        cell = Element()
        # 2x17 (x;y) joint coordinates, transposed to [x, y] pairs.
        pose = np.array([
            [587, 569, 490, 535, 621, 630, 512, 490, 488,
             483, 576, 479, 457, 522, 550, 571, 491],
            [561, 447, 411, 393, 430, 537, 402, 298, 275,
             201, 394, 377, 305, 289, 348, 367, 376],
        ]).transpose().tolist()
        cell.addImg(
            "http://sun.pc.cs.cmu.edu/~rohit/Public/Images/himym_chairs.jpg",
            poses=[pose], width=500, imsize=[1280, 768])
        row.addElement(cell)
import sys
sys.path.append('/home/dragon123/Affordances/PyHTMLWriter/src')
from Element import Element
from TableRow import TableRow
from Table import Table
from TableWriter import TableWriter

# Demo script: a header row of checkbox-enabled images with two bounding
# boxes drawn on each, plus a second row where the same image gets an
# overlay instead; the table is written to 'out'.
t = Table()
for i in range(1):
    row = TableRow(isHeader=True) if i == 0 else TableRow(rno=i)
    for _ in range(10):
        cell = Element()
        cell.setDrawCheck()  # render a selection checkbox for this cell
        cell.addImg('eiffeltower.jpg',
                    bboxes=[[900, 500, 50, 34], [100, 100, 100, 100]])
        row.addElement(cell)
    t.addRow(row)

overlay_row = TableRow(100)
for _ in range(10):
    cell = Element()
    cell.setDrawCheck()
    cell.addImg('eiffeltower.jpg', overlay_path='../taj.jpg')
    overlay_row.addElement(cell)
t.addRow(overlay_row)

tw = TableWriter(t, 'out')
tw.write()
# NOTE(review): truncated fragment, kept verbatim. The opening `return ([], [])
# ... return (imnames, scores)` is the tail of a readScores-style helper whose
# `def` line is outside this chunk (it parses ':'-separated "index:score"
# matches into parallel name/score lists via `imgslist`); the table-building
# script after it is also cut off. `int(temp[0]) / 10000` is py2-style integer
# division — would need `//` under py3; confirm intended runtime.
return ([], []) f.close() matches = s.strip().split() imnames = [] scores = [] for m in matches[:50]: temp = m.strip().split(':') imname = imgslist[int(temp[0]) / 10000 - 1] score = float(temp[1]) imnames.append(imname) scores.append(score) return (imnames, scores) t = Table() r = TableRow(isHeader=True) r.addElement(Element('Sno')) r.addElement(Element('Method')) r.addElement(Element('Query/Top Match')) r.addElement(Element('Matches...')) for i in range(500): fpath = os.path.join(respath, str(i + 1) + '.txt') fpath2 = os.path.join(respath_f, str(i + 2) + '.txt') imnames, scores = readScores(fpath) imnames2, scores2 = readScores2(fpath2) if len(imnames) == 0 or len(imnames2) == 0: continue r = TableRow(rno=i) r.addElement(Element('Background'))