def preprocessing(self, imgs, msks, sz=(48, 48), augx=0):
    """ resize and normalize images/masks, optionally augmenting with random crops and horizontal flips """
    print len(imgs)
    if augx > 0:
        print "augmenting train data ..."
        # augmentation factor: augx ~= 2 * (n_sample + 1)
        # (n_sample random crops per image, then horizontal flips of everything)
        n_sample = np.int(augx / 2.0) - 1
        imH, imW = imgs[0].shape[0:2]
        borderH = np.int(imH * 0.2)
        borderW = np.int(imW * 0.2)
        w = imW - borderW
        h = imH - borderH
        x1s = np.random.randint(0, borderW, n_sample)
        y1s = np.random.randint(0, borderH, n_sample)
        # work on copies so the caller's lists are not extended in place
        imgs_crop = list(imgs)
        msks_crop = list(msks)
        for img, msk in zip(imgs, msks):
            imgs_crop += [imcrop(img, [x1, y1, w, h]) for x1, y1 in zip(x1s, y1s)]
            msks_crop += [imcrop(msk, [x1, y1, w, h]) for x1, y1 in zip(x1s, y1s)]
        print len(imgs_crop)
        imgs_flip = [pl.fliplr(im) for im in imgs_crop]
        msks_flip = [pl.fliplr(im) for im in msks_crop]
        imgs = imgs_crop + imgs_flip
        msks = msks_crop + msks_flip
        print len(imgs)

    imgs_rs = [imresize(im, sz, interp="bicubic") for im in imgs]
    imgs_norm = [imnormalize(im) for im in imgs_rs]
    msks_norm = [imresize(im, sz, interp="bicubic") for im in msks]
    imgs_final, msks_final = self.convert_data(imgs_norm, msks_norm)
    print len(imgs_final)
    return imgs_final, msks_final
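# Illustrative sketch (not part of the original pipeline): the augmentation above turns
# N input samples into roughly augx * N samples, because each image gains n_sample random
# crops and the whole set is then mirrored horizontally. The helper below reproduces that
# bookkeeping with plain numpy slicing on synthetic H x W x 3 images; the custom
# imcrop/imnormalize helpers from the real code are deliberately not used here.
def _augment_count_demo(n_images=4, augx=6, imH=128, imW=48):
    n_sample = int(augx / 2.0) - 1
    borderH, borderW = int(imH * 0.2), int(imW * 0.2)
    w, h = imW - borderW, imH - borderH
    x1s = np.random.randint(0, borderW, n_sample)
    y1s = np.random.randint(0, borderH, n_sample)
    imgs = [np.zeros((imH, imW, 3), dtype=np.uint8) for _ in range(n_images)]
    crops = list(imgs)
    for img in imgs:
        crops += [img[y1:y1 + h, x1:x1 + w] for x1, y1 in zip(x1s, y1s)]
    flipped = [np.fliplr(im) for im in crops]
    out = crops + flipped
    # 2 * n_images * (n_sample + 1) == augx * n_images when augx is even
    assert len(out) == 2 * n_images * (n_sample + 1)
    return len(out)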
def _load_images_salmaps(self, datapath=None, imgext="bmp"):
    """ load preliminary data (images, segmentations, and salience maps) """
    # resolve the home directory first; it is also needed for the feature path below
    if sys.platform == "darwin":
        homedir = "/Users/rzhao/"
    else:
        homedir = "/home/rzhao/"
    if datapath is None:
        datapath = homedir + "Dropbox/ongoing/reid_jrnl/salgt/data_viper/"

    filepath = datapath + "query/"
    imgfiles = sorted(glob(filepath + "*." + imgext))
    self.nPerson = len(imgfiles)
    imgs = [imread(im) for im in imgfiles]

    salfilepath = datapath + "labels.pkl"
    data = loadfile(salfilepath)
    segmsks, salmsks = data

    imgs = [imresize(im, size=(self.imH, self.imW)) for im in imgs]
    segmsks = [imresize(im, size=(self.imH, self.imW)) for im in segmsks]
    salmsks = [imresize(im, size=(self.imH, self.imW)) / 255.0 for im in salmsks]

    self.imgs = np.asarray(imgs)
    self.segmsks = np.asarray(segmsks)
    self.salmsks = np.asarray(salmsks)

    # load dense colorsift features
    labeled_imidx_path = "../data_viper/labeled_imidx.mat"
    tmp = loadfile(labeled_imidx_path)
    labeled_imidx = tmp["labeled_imidx"].flatten()

    feat_path = homedir + "Dropbox/ongoing/reid_jrnl/salgt/data_viper/features.mat"
    tmp = loadfile(feat_path)
    self.feats = tmp["features"].astype(np.float)[labeled_imidx]
def _load_images_salmaps(self, datapath=None, imgext="bmp"):
    """ load preliminary data (images, segmentations, and salience maps) """
    if datapath is None:
        if sys.platform == "darwin":
            homedir = "/Users/rzhao/"
        else:
            homedir = "/home/rzhao/"
        datapath = homedir + "Dropbox/ongoing/reid_jrnl/salgt/data_viper/"

    filepath = datapath + "query/"
    imgfiles = sorted(glob(filepath + "*." + imgext))
    imgs = [imread(im) for im in imgfiles]

    salfilepath = datapath + "labels.pkl"
    data = loadfile(salfilepath)
    segmsks, salmsks = data

    imgs = [imresize(im, size=segmsks[0].shape, interp="bicubic") for im in imgs]
    # imgs_norm = [imnormalize(im) for im in imgs]
    # return imgs, segmsks, salmsks
    self.imgs = imgs
    self.segmsks = segmsks
    self.salmsks = salmsks
def main():
    ''' pipeline for testing and evaluation '''
    # preset parameters
    save_path = '../data_viper/model_feat/'

    # load data
    # imL = 48
    bs = 100
    datafile_viper = '../data_viper/viper.pkl'
    viper = loadfile(datafile_viper)

    # load model
    modelfile_viper = '../data_viper/model_feat/model.pkl'
    model = loadfile(modelfile_viper)

    # evaluation and testing
    # test_x = viper.test_x.get_value(borrow=True)
    test_x = np.asarray(viper.test_feat)
    test_y = viper.test_y
    n_test = test_x.shape[0]

    test_ypred = model.predict(viper.test_feat)
    test_ypred = np.asarray(test_ypred).flatten()
    # test_ims = test_x.reshape((n_test, imL, imL, -1))

    # assign predicted scores to images
    h, w = viper.imgs[0].shape[:2]
    mh, mw = len(np.unique(viper.yy)), len(np.unique(viper.xx))
    msk0 = np.zeros(mh * mw).astype(np.uint8)
    msks = [msk0.copy() for im in viper.imgs]

    showlist = []
    for i in range(n_test):
        imgid = viper.test_imgids[i]
        patid = viper.test_ctrids[i]
        score = test_ypred[i]
        msks[imgid][patid] = score * 255

    # resize predicted salience map to match image size
    msks_rs = [imresize(msk.reshape((mw, mh)).T, size=(h, w)) / 255. for msk in msks]

    # save salience map for comparison
    test_imids = np.asarray(np.unique(viper.test_imgids))
    salmap_gt = np.asarray(viper.salmsks)  # np.asarray([viper.salmsks[imid] for imid in test_imids])
    salmap_pred = np.asarray(msks_rs)  # np.asarray([msks_rs[imid]/255. for imid in test_imids])
    savefile(save_path + 'salmaps_comparison.pkl', [test_imids, salmap_gt, salmap_pred])

    # quantize to show different test patches
    # kmeans = KMeans(init='k-means++', n_clusters=10, n_init=10)
    # kmeans.fit(test_ypred.reshape(n_test, 1))
    # # save to result folder
    # for i in range(10):
    #     idx = kmeans.labels_ == i
    #     if any(idx):
    #         im = immontage(list(test_ims[idx]))
    #         imsave(save_path + '{}.jpg'.format(kmeans.cluster_centers_[i]), im)

    print 'testing finished'
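# Hedged follow-up sketch: `main` saves [test_imids, salmap_gt, salmap_pred] to
# salmaps_comparison.pkl. Assuming `savefile` writes an ordinary pickle, the helper below
# (illustrative name, not part of the original code) reloads it and reports the mean
# absolute error between ground-truth and predicted salience maps, both scaled to [0, 1].
def eval_salmaps(pklfile='../data_viper/model_feat/salmaps_comparison.pkl'):
    import pickle
    with open(pklfile, 'rb') as f:
        test_imids, salmap_gt, salmap_pred = pickle.load(f)
    # per-image mean absolute error over the test images only
    maes = [np.abs(salmap_gt[i] - salmap_pred[i]).mean() for i in test_imids]
    print 'mean MAE over {} test images: {:.4f}'.format(len(maes), np.mean(maes))
    return np.asarray(maes)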
def test_knn():
    datafile_viper = '../data_viper/viper.pkl'
    viper = loadfile(datafile_viper)
    viper = downsample_data(viper)

    # from sklearn.neighbors import KNeighborsRegressor
    # model = KNeighborsRegressor(n_neighbors=5, weights='uniform', metric='euclidean')
    # model.fit(viper.train_feat, viper.train_y)
    from sklearn.neighbors import KDTree

    # divide into stripes
    nStripe = 10
    y_max = viper.yy.max()
    y_min = viper.yy.min()
    y_len = np.int((y_max - y_min) / float(nStripe))
    y_centers = np.round(np.linspace(y_min + y_len, y_max - y_len, nStripe))

    k = 5
    y_ctr = y_centers[k]
    stripe_idx = np.where((viper.yy[viper.train_ctrids] >= y_ctr - y_len) &
                          (viper.yy[viper.train_ctrids] < y_ctr + y_len))[0]
    model = KDTree(viper.train_feat[stripe_idx, :288], metric='euclidean')

    train_patset = viper.get_patchset('train')
    test_patset = viper.get_patchset('test')

    test_ids = np.where((viper.yy[viper.test_ctrids] >= y_ctr - y_len) &
                        (viper.yy[viper.test_ctrids] < y_ctr + y_len))[0]
    np.random.shuffle(test_ids)

    for i in test_ids:
        get_testrect = lambda i: [viper.xx[viper.test_ctrids[i]] - viper.patL/2,
                                  viper.yy[viper.test_ctrids[i]] - viper.patL/2,
                                  viper.patL, viper.patL]
        get_trainrect = lambda i: [viper.xx[viper.train_ctrids[i]] - viper.patL/2,
                                   viper.yy[viper.train_ctrids[i]] - viper.patL/2,
                                   viper.patL, viper.patL]
        gray2color = lambda grayim: np.dstack((grayim, grayim, grayim))

        imlist = []
        patlist = []
        maplist = []
        patlist.append(imresize(test_patset[i], size=(100, 100)))
        imlist.append(drawrect(viper.imgs[viper.test_imgids[i]], get_testrect(i)))
        maplist.append(viper.salmsks[viper.test_imgids[i]])

        # KDTree.query expects a 2D array of query points
        dist, ind = model.query(viper.test_feat[i, :288].reshape(1, -1), k=30,
                                return_distance=True)
        print viper.test_y[i]
        hist = np.histogram(viper.train_y[stripe_idx[ind[0]]])
        print hist[0]
        print hist[1]
        print dist

        for id in stripe_idx[ind[0]]:
            patlist.append(imresize(train_patset[id], size=(100, 100)))
            imlist.append(drawrect(viper.imgs[viper.train_imgids[id]], get_trainrect(id)))
            maplist.append(viper.salmsks[viper.train_imgids[id]])

        pats = immontage(patlist)
        imgs = immontage(imlist)
        maps = immontage(maplist)
        imsave('tmp1.jpg', pats)
        imsave('tmp2.jpg', imgs)
        imsave('tmp3.jpg', maps)

        raw_input()
        os.system('xdg-open tmp1.jpg')
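# Hedged sketch of the regressor variant that test_knn leaves commented out: fit a
# KNeighborsRegressor on the same stripe-restricted features and predict continuous
# salience scores for the test patches in that stripe. The function name and argument
# list are illustrative; the viper attribute names (train_feat, train_y, test_feat,
# test_y) follow the object used above.
def knn_regress_stripe(viper, stripe_idx, test_ids, n_neighbors=5):
    from sklearn.neighbors import KNeighborsRegressor
    model = KNeighborsRegressor(n_neighbors=n_neighbors, weights='uniform',
                                metric='euclidean')
    model.fit(viper.train_feat[stripe_idx, :288], viper.train_y[stripe_idx])
    ypred = model.predict(viper.test_feat[test_ids, :288])
    # crude fit quality: mean absolute error against the labeled scores
    print 'stripe MAE: {:.4f}'.format(np.mean(np.abs(ypred - viper.test_y[test_ids])))
    return ypred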
def print_labeling(data_path=None):
    # if data_path is None:
    #     newDialog = QDialog()
    #     fpath = QFileDialog.getExistingDirectory(newDialog, "Select data directory", '../')
    #     if len(fpath) == 0:
    #         QMessageBox.warning(None, 'Warning!', 'Nothing loaded.')
    #         return
    #     data_path = str(fpath) + '/'

    # loaded path
    src_file = data_path + 'parts.pkl'
    usr_file = sorted(glob(data_path + '#*.pkl'))

    src = DataMan(src_file)
    srcdata = src.load()

    usrhits = []
    for f in usr_file:
        tmp = DataMan(f)
        tmpdata = tmp.load()
        usrhits.append(tmpdata['scores'])

    save_path = data_path + 'result/'
    qfiles = sorted(glob(data_path + 'query/*'))
    im = imread(qfiles[0])
    imsz = im.shape[0:2]

    msk0 = np.zeros(srcdata['labels'][0].shape)
    segmsks = []
    salmsks = []
    for i in range(len(qfiles)):
        im = imread(qfiles[i])
        seg = msk0.copy()
        sal = msk0.copy()
        for k in usrhits[0][i].keys():
            idx = srcdata['labels'][i] == k
            nhits = np.asarray([nhit[i][k] for nhit in usrhits])
            sal[idx] = hit2score(nhits)
            seg[idx] = k
        salmsks.append(sal)
        segmsks.append(seg)

    # normalize all msk
    # scaler = MinMaxScaler()
    # salscores = scaler.fit_transform(np.asarray(salmsks))

    # save label and salience score map
    savefile([segmsks, salmsks], data_path + 'labels.pkl')

    for i in range(len(qfiles)):
        im = imread(qfiles[i])
        msk = salmsks[i] * 255.
        im_rs = imresize(im, msk0.shape, interp='bicubic')
        pl.figure(1)
        pl.clf()
        pl.subplot(1, 2, 1)
        pl.imshow(im_rs)
        pl.subplot(1, 2, 2)
        pl.imshow(color.rgb2grey(im_rs), cmap='gray', alpha=0.6)
        pl.imshow(msk, cmap='hot', vmin=0, vmax=255, alpha=0.6)
        pl.savefig(save_path + '{0:03d}.jpg'.format(i))
        print save_path + '{0:03d}.jpg'.format(i) + ' saved!'

    visualize_imfolder(save_path)