def main(args):
    vis = args.vis
    debug = args.debug
    save = args.save
    nparticles = args.particles
    root_path = '/home/ccjiang/Documents/py-faster-rcnn/caffe-fast-rcnn/examples/tracker/'
    dataset_path = args.dataset  # "/data/OTB100"
    dataset100_seq = ['Bird2', 'BlurCar1', 'BlurCar3', 'BlurCar4', 'Board', 'Bolt2', 'Boy', 'Car2', 'Car24',
                      'Coke', 'Coupon', 'Crossing', 'Dancer', 'Dancer2', 'David2', 'David3', 'Dog', 'Dog1',
                      'Doll', 'FaceOcc1', 'FaceOcc2', 'Fish', 'FleetFace', 'Football1', 'Freeman1', 'Freeman3',
                      'Girl2', 'Gym', 'Human2', 'Human5', 'Human7', 'Human8', 'Jogging', 'KiteSurf', 'Lemming',
                      'Man', 'Mhyang', 'MountainBike', 'Rubik', 'Singer1', 'Skater', 'Skater2', 'Subway', 'Suv',
                      'Tiger1', 'Toy', 'Trans', 'Twinnings', 'Vase']
    dataset50_seq = ['Basketball', 'Bird1', 'BlurBody', 'BlurCar2', 'BlurFace', 'BlurOwl', 'Bolt', 'Box', 'Car1',
                     'Car4', 'CarDark', 'CarScale', 'ClifBar', 'Couple', 'Crowds', 'David', 'Deer', 'Diving',
                     'DragonBaby', 'Dudek', 'Football', 'Freeman4', 'Girl', 'Human3', 'Human4', 'Human6',
                     'Human9', 'Ironman', 'Jump', 'Jumping', 'Liquor', 'Matrix', 'MotorRolling', 'Panda',
                     'RedTeam', 'Shaking', 'Singer2', 'Skating1', 'Skating2', 'Skiing', 'Soccer', 'Surfer',
                     'Sylvester', 'Tiger2', 'Trellis', 'Walking', 'Walking2', 'Woman']
    datafull_seq = dataset100_seq + dataset50_seq
    if "OTB50" in dataset_path:
        data_seq = dataset50_seq
    else:
        data_seq = dataset100_seq
    log_name = 'log_1119.txt'
    log_file = open(log_name, 'w')
    records_success = []  # defaultdict(list)
    records_precision = []  # defaultdict(list)
    records_reinit = defaultdict(list)
    model_def = os.path.join(root_path, args.prototxt)
    model_weight = os.path.join(root_path, args.caffemodel)
    vggnet = VGGnet.VGGnet(model_def, model_weight)
    thre_max_neg = 0.3  # 0.5
    test_times = 1  # 0
    for t in range(test_times):
        print 'Test round: %d' % t
        log_file.write('Test round: %d\n' % t)
        # sequences = ['Fish']
        for sequence in datafull_seq:  # datafull_seq
            if sequence in dataset50_seq:
                dataset_path = "/data/OTB50"
            else:
                dataset_path = "/data/OTB100"
            for t in os.walk(os.path.join(dataset_path, sequence, sequence, "img")):
                if t[0] == os.path.join(dataset_path, sequence, sequence, "img"):
                    nFrame = len(t[2])
            print 'Processing: %s' % sequence
            log_file.write('Processing: %s\n' % sequence)
            print "Total frames are: ", nFrame
            log_file.write('Total frames are: %d\n' % nFrame)
            gt_path = os.path.join(dataset_path, sequence, sequence, "groundtruth_rect.txt")
            gt_boxes = utils.get_boxes_all(gt_path)
            conf_hist = []
            iou_hist = []
            area_hist = []
            pred_hist = []  # (x1,y1,x2,y2)
            eig_hist = []
            reinit = 0
            nFrame = np.minimum(nFrame, gt_boxes.shape[0])
            id_shift = 0
            init_id = False
            update_recent = False
            for id in np.arange(0, nFrame):
                frame_name = "img/%04d.jpg" % (id + 1)
                # print "Start processing: %s" % frame_name
                frame_path = os.path.join(dataset_path, sequence, sequence, frame_name)
                if os.path.exists(frame_path) == False:
                    id_shift = id_shift + 1
                    continue
                id = id - id_shift
                frame_data = caffe.io.load_image(frame_path)  # (432,576,3), in [0,1]
                gt_box = gt_boxes[id]
                if init_id == False:
                    h, w, c = frame_data.shape
                    frame_shape = [c, w, h]
                    fps = 20
                    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
                    video_writer = cv2.VideoWriter("res_%s.avi" % sequence, fourcc, fps, (w, h))
                    fail_times = 0
                    box_w = gt_box[2] - gt_box[0]
                    box_h = gt_box[3] - gt_box[1]
                    area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
                    ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1])  # ratio=w/h
                    # set up net.blobs['im_info']
                    print "Image Size: ", w, h
                    log_file.write('Image Size: %d %d\n' % (w, h))
                    b = gt_box[np.newaxis, :]
                    vggnet.reshape(w=w, h=h, nbox=b.shape[0])
                    features0 = vggnet.get_features("conv3_3", frame_data, boxes_raw=b)  # shape:(256,hs,ws),conv3_3,res3b3
                    features0 = np.squeeze(features0)
                    pca_f, scaler_f = featmap_pca2(features0, ncompnents=128)  # 128
                    box_w = gt_box[2] - gt_box[0]
                    box_h = gt_box[3] - gt_box[1]
                    vggnet.reshape(w=w, h=h, nbox=nparticles)
                    pfilter = PFfilter.PFfilter(utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h, nparticles)
                    pfilter.create_particles()
                    pfilter.restrict_particles(w, h)
                    area_hist.append(pfilter.cur_a)
                    pred_hist.append(np.array(gt_box).reshape(1, -1))
                    # pca
                    # test sample_iou
                    num_true = 500
                    num_false = 1000  # 1000
                    # data augument
                    gt_box_otb = gt_box.copy()
                    gt_box_otb[2] -= gt_box_otb[0]
                    gt_box_otb[3] -= gt_box_otb[1]
                    boxes_train = []
                    ids = np.zeros(num_false + num_true)
                    imgs = []
                    for i in np.arange(4):
                        if i == 0:
                            img1, gt1, img2, gt2, img3, gt3 = DataAugment(frame_data, gt_box_otb, True)
                            gt1[2] += gt1[0]
                            gt1[3] += gt1[1]
                            gt2[2] += gt2[0]
                            gt2[3] += gt2[1]
                            gt3[2] += gt3[0]
                            gt3[3] += gt3[1]
                            box_true1, iou_true = pfilter.sample_iou_pred_box(gt1, 0.05, 0.01, 0.01, 20, 0.8, 1.0)
                            box_true2, iou_true = pfilter.sample_iou_pred_box(gt2, 0.05, 0.01, 0.01, 40, 0.8, 1.0)
                            box_true3, iou_true = pfilter.sample_iou_pred_box(gt3, 0.05, 0.01, 0.01, 20, 0.8, 1.0)
                            box_true1[0, ...] = gt1
                            box_true2[0, ...] = gt2
                            box_true3[0, ...] = gt3
                            boxes_train.append(box_true1)
                            boxes_train.append(box_true2)
                            boxes_train.append(box_true3)
                            imgs.append(img1)
                            imgs.append(img2)
                            imgs.append(img3)
                            ids[20:60] = 1
                            ids[60:80] = 2
                        else:
                            img1, gt1, img2, gt2 = DataAugment(frame_data, gt_box_otb, False)
                            gt1[2] += gt1[0]
                            gt1[3] += gt1[1]
                            gt2[2] += gt2[0]
                            gt2[3] += gt2[1]
                            box_true1, iou_true = pfilter.sample_iou_pred_box(gt1, 0.05, 0.01, 0.01, 20, 0.8, 1.0)
                            box_true2, iou_true = pfilter.sample_iou_pred_box(gt2, 0.05, 0.01, 0.01, 20, 0.8, 1.0)
                            box_true1[0, ...] = gt1
                            box_true2[0, ...] = gt2
                            boxes_train.append(box_true1)
                            boxes_train.append(box_true2)
                            imgs.append(img1)
                            imgs.append(img2)
                            cur_i = 80 + (i - 1) * 40
                            ids[cur_i:(cur_i + 20)] = 3 + (i - 1) * 2
                            ids[(cur_i + 20):(cur_i + 40)] = 3 + (i - 1) * 2 + 1
                    # boxes_train_neg=[]
                    try:
                        # Q=[[1,0],[0,1]] #for pixel wise
                        Q = 0.05  # box_w,box_h,0.05
                        sample_box_true, sample_iou_true = pfilter.sample_iou_pred_box(gt_box, Q, 0.01, 0.01, num_true - 200, 0.8, 1.0)  # 0.8
                    except OverflowError as e:
                        print "too many loops in sample in Initialize--TRUE."
                    boxes_train.append(sample_box_true)
                    try:
                        # Q=[[36,0],[0,36]]#for pixel wise
                        Q = 0.2  # 0.2
                        sample_box_false, sample_iou_false = pfilter.sample_iou(gt_box, Q, 0.2, 0.01, num_false / 2, 0, thre_max_neg)  # 0.2,0.01
                    except OverflowError as e:
                        print "too many loops in sample in Initialize--FALSE."
                    # print sample_box_false[:10]
                    # print sample_box_false.shape[0]
                    # print sample_iou_false[:10]
                    # print "average iou: ", np.mean(sample_iou_false)
                    boxes_train.append(sample_box_false)
                    try:
                        # Q=[[36,0],[0,36]]#for pixel wise
                        Q = 0.2  # 0.2
                        sample_box_false, sample_iou_false = pfilter.sample_iou(gt_box, Q, 0.01, 0.2, num_false / 2, 0, thre_max_neg)  # 0.01,0.2
                    except OverflowError as e:
                        print "too many loops in sample in Initialize--FALSE."
                    boxes_train.append(sample_box_false)
                    boxes_train = np.vstack(boxes_train)
                    imgs.append(frame_data)
                    imgs = np.stack(imgs, axis=0)  # (10,h,w,c)
                    ids[200:] = 9
                    y_train_true = np.ones((num_true,))
                    y_train_false = np.zeros((num_false,))
                    ids_save = np.ones((num_true + num_false))
                    ids_save[num_true:] = 0
                    ids_save[20:60] = 2
                    y_train = np.hstack([y_train_true, y_train_false])
                    # permutation
                    ind_perm = np.random.permutation(range(num_false + num_true))
                    boxes_train = boxes_train[ind_perm, :]
                    ids_save = ids_save[ind_perm]
                    y_train = y_train[ind_perm]
                    ids = ids[ind_perm]
                    ind_pos = np.where(y_train == 1)[0]
                    ind_neg = np.where(y_train == 0)[0]
                    vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0], batch_size=10)
                    # features = vggnet.get_features_first_raw(frame_data, boxes_raw=boxes_train, id=id)
                    features = vggnet.get_features_first_id(imgs, boxes_raw=boxes_train, id=ids)
                    # features = vggnet.get_features_first_sel(frame_data, boxes_raw=boxes_train, id=id, sel=f_inds)
                    for k, v in features.iteritems():
                        # print k,v.shape
                        if k == 'f3':
                            # pca3, scaler1, nPCA = utils.skl_pca2(v)
                            v = feat_transformpca(pca_f, scaler_f, v)  # (N,128,7,7)
                            # pca3,scaler,nPCA=utils.skl_pca2(v)
                            # v_pca3 = pca3.transform(scaler.transform(v))
                            # np.save("pca_results/testpca_%s.npy"%sequence,v_pca3)
                            # np.save('labelpca.npy',y_train)
                            # np.save("pca_results/label_pca_%s"%sequence,ids_save)
                            pca3_pos = np.zeros((num_true, pca_f.n_components_ * 49), dtype=np.float32)
                            pca3_neg = np.zeros((num_false, pca_f.n_components_ * 49), dtype=np.float32)
                            pca3_pos[...] = v[ind_pos, :]
                            pca3_neg[...] = v[ind_neg, :]
                            # utils.vis_as_image(v_pca3)
                            # plt.imshow(v_pca3)
                            # plt.title("PCA features")
                            # plt.show()
                            # plt.close()
                            # logistic regression
                            y_weight = sklearn.utils.class_weight.compute_class_weight(class_weight='balanced', classes=np.array([0, 1]), y=y_train)
                            # print y_weight
                            class_weight = {0: y_weight[0], 1: y_weight[1]}
                            clf3 = linear_model.LogisticRegression(fit_intercept=True, solver='liblinear')
                            clf3.fit(v, y_train)
                    vis_feature = False
                    if vis_feature:
                        utils.vis_features(features, id)
                    start_time = time.time()
                else:
                    if fail_times >= 5:  # reinitialize
                        update_recent = False
                        reinit += 1
                        area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
                        ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1])
                        pfilter = PFfilter.PFfilter(utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h, nparticles)
                        # filter.reset(utils.bbox_to_states(gt_box, area, ratio), area, ratio)
                        pfilter.create_particles()
                        pfilter.restrict_particles(w, h)
                        area_hist.append(pfilter.cur_a)
                        pred_box = gt_box
                        pred_hist.append(np.array(pred_box).reshape(1, -1))
                        conf_hist.append(-0.1)
                        boxes_train = []
                        # boxes_train_neg=[]
                        iou_train = []
                        try:
                            # Q=[[1,0],[0,1]] #for pixel wise
                            Q = 0.05  # box_w,box_h,0.05
                            sample_box_true, sample_iou_true = pfilter.sample_iou_pred_box(gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0)
                        except OverflowError as e:
                            print "too many loops in sample in Reinitialize--TRUE."
                        boxes_train.append(sample_box_true)
                        iou_train.append(sample_iou_true)
                        try:
                            # Q=[[36,0],[0,36]]#for pixel wise
                            Q = 0.2  # 0.2
                            sample_box_false, sample_iou_false = pfilter.sample_iou(gt_box, Q, 0.01, 0.2, num_false / 2, 0, thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample in Reinitialize--FALSE."
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)
                        try:
                            # Q=[[36,0],[0,36]]#for pixel wise
                            Q = 0.2  # 0.2
                            sample_box_false, sample_iou_false = pfilter.sample_iou(gt_box, Q, 0.2, 0.01, num_false / 2, 0, thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample in Reinitialize--FALSE."
                        # print sample_box_false[:10]
                        # print sample_box_false.shape[0]
                        # print sample_iou_false[:10]
                        # print "average iou: ", np.mean(sample_iou_false)
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)
                        boxes_train = np.vstack(boxes_train)
                        iou_train = np.vstack(iou_train)
                        y_train_true = np.ones((num_true,))
                        y_train_false = np.zeros((num_false,))
                        y_train = np.hstack([y_train_true, y_train_false])
                        # permutation
                        ind_perm = np.random.permutation(range(num_false + num_true))
                        boxes_train = boxes_train[ind_perm, :]
                        iou_train = iou_train[ind_perm]
                        y_train = y_train[ind_perm]
                        ind_pos = np.where(y_train == 1)[0]
                        ind_neg = np.where(y_train == 0)[0]
                        vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                        features = vggnet.get_features_first_raw(frame_data, boxes_raw=boxes_train, id=id)
                        # features=feat_transformpca(pca_f,scaler_f,features)
                        # features = vggnet.get_features_first_sel(frame_data, boxes_raw=boxes_train, id=id, sel=f_inds)
                        for k, v in features.iteritems():
                            # print k, v.shape
                            if k == 'f3':
                                v = feat_transformpca(pca_f, scaler_f, v)  # (N,128,7,7)
                                # v_pca3 = pca3.transform(scaler.transform(v))
                                pca3_pos[...] = v[ind_pos, :]
                                pca3_neg[...] = v[ind_neg, :]
                                clf3 = linear_model.LogisticRegression(fit_intercept=True, solver='liblinear')
                                clf3.fit(v, y_train)
                                # score3 = clf3.score(v_pca3, y_train)
                                # print 'score3: ', score3
                                # prob=clf3.predict_proba(v_pca3)
                                # print clf3.classes_
                        fail_times = 0
                        continue
                    pfilter.predict_particles(Q=0.2, cr=0.01, ca=0.01)  # 0.2,0.01
                    pfilter.restrict_particles(w, h)
                    area_hist.append(pfilter.cur_a)
                    # compute conf
                    # conf = np.zeros(pfilter.weights.shape)
                    # np.save('particles.npy',filter.particles)
                    pred_boxes = utils.state_to_bbox(pfilter.particles, area, ratio)
                    # add Gaussian regularization
                    if id > 1:
                        gauss_sig = 0.5
                        gauss_w = np.exp(-np.square((pfilter.particles[:, 0] - pred_state[0]) / (gauss_sig * box_w) / 2.0) - np.square((pfilter.particles[:, 1] - pred_state[1]) / (gauss_sig * box_h) / 2.0))
                        pfilter.update_particles(gauss_w)
                        print gauss_w
                    vggnet.reshape(w, h, pfilter.num_particles)
                    features = vggnet.get_features_first_raw(frame_data, boxes_raw=pred_boxes, id=id)
                    # features=feat_transformpca(pca_f,scaler_f,features)
                    # features = vggnet.get_features_first_sel(frame_data, boxes_raw=pred_boxes, id=id, sel=f_inds)
                    for k, v in features.iteritems():
                        # print k,v.shape
                        if k == 'f3':
                            v = feat_transformpca(pca_f, scaler_f, v)  # (N,128,7,7)
                            vf = v
                            # v_pca3 = pca3.transform(scaler.transform(v))
                            conf = clf3.predict_proba(v)[:, 1]
                    # process preds to find out pred_box in terms of conf
                    conf = np.array(conf)
                    conf_max = np.max(conf)
                    conf_min = np.min(conf)
                    pfilter.update_particles(conf)
                    # do resample first or estimate first?
                    # filter.resample()  # always resample
                    pred_state, s_particles, r_particles = pfilter.estimate(k=10)
                    pfilter.resample()
                    pred_box = utils.state_to_bbox(pred_state.reshape((-1, 6)), area, ratio)
                    hard, hard_negv = nms_pred(pred_box, pred_boxes, vf, conf)
                    if hard:
                        hard_negvN = hard_negv.shape[0]
                        # print hard_negv.shape
                    else:
                        hard_negvN = 0
                    avg_pos = np.mean(pfilter.particles[:, :2], axis=0, keepdims=True)
                    # avg_pos[:,0]/=w
                    # avg_pos[:,1]/=h
                    ptls_avg = (pfilter.particles[:, :2] - avg_pos) / np.array([[box_w, box_h]])
                    cov_particles = np.dot(ptls_avg.T, ptls_avg) / pfilter.particles.shape[0]
                    eigval, eigvec = np.linalg.eig(cov_particles)
                    max_val = eigval[0]
                    eig_hist.append(max_val)
                    print 'Max eigvalue: %f' % max_val
                    # print 'conf is: ',conf
                    if conf_max > 0.5:  # 0.8
                        fail_times = 0
                        update_recent = False
                    else:
                        fail_times += 1
                    show_sr = False
                    if show_sr:
                        count, xedge, yedge, tmp_im = plt.hist2d(s_particles, r_particles, bins=10, weights=pfilter.weights.squeeze(), cmap=plt.cm.gray)
                        top3 = np.argsort(-count, axis=None)[:3]
                        row_ind = top3[:] / count.shape[1]
                        col_ind = top3[:] % count.shape[0]
                        plt.show()
                    print pred_box
                    iou = utils.calc_iou(gt_box, pred_box)
                    # print 'iou is: ', iou
                    pred_hist.append(pred_box)
                    conf_hist.append(conf_max)
                    iou_hist.append(iou)
                    if conf_max >= 0.7:  # 0.5
                        # update pca3_pos and pca3_neg
                        new_true = 100  # 100
                        new_false = 400  # 200
                        boxes_train = []
                        iou_train = []
                        Q = 0.05  # 0.02
                        try:
                            sample_box_true, sample_iou_true = pfilter.sample_iou_pred_box(pred_box, Q, 0.01, 0.01, new_true, 0.85, 1.0)
                        except OverflowError as e:
                            print "too many loops in sample in Update--TRUE."
                        # print sample_box_true[:10]
                        # print sample_box_true.shape[0]
                        # print sample_iou_true[:10]
                        # print "average iou: ", np.mean(sample_iou_true)
                        boxes_train.append(sample_box_true)
                        iou_train.append(sample_iou_true)
                        # part_iou=utils.calc_iou(pred_box,pred_boxes)
                        # ind_iou=np.where(part_iou<0.3)[0]
                        # ind_n=np.minimum(new_false/2,ind_iou.shape[0])
                        # boxes_train.append(pred_boxes[ind_iou[:ind_n],:])
                        # iou_train.append(part_iou[ind_iou])
                        new_false_left = new_false - hard_negvN  # -ind_n
                        try:
                            Q = 0.2  # 0.2
                            sample_box_false, sample_iou_false = pfilter.sample_iou_pred_box(pred_box, Q, 0.2, 0.01, (new_false_left + 1) / 2, 0, thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample in Update--FALSE."
                        # print sample_box_false[:10]
                        # print sample_box_false.shape[0]
                        # print sample_iou_false[:10]
                        # print "average iou: ", np.mean(sample_iou_false)
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)
                        try:
                            Q = 0.2  # 0.2
                            sample_box_false, sample_iou_false = pfilter.sample_iou_pred_box(pred_box, Q, 0.01, 0.2, new_false_left / 2, 0, thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample in Update--FALSE."
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)
                        boxes_train = np.vstack(boxes_train)
                        # iou_train = np.vstack(iou_train)
                        vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                        features = vggnet.get_features_second_raw(boxes_raw=boxes_train, id=id)
                        # features = feat_transformpca(pca_f,scaler_f,features)
                        # features = vggnet.get_features_second_sel(boxes_raw=boxes_train, id=id, sel=f_inds)
                        for k, v in features.iteritems():
                            # print k, v.shape
                            if k == 'f3':
                                v = feat_transformpca(pca_f, scaler_f, v)  # (N,128,7,7)
                                # v_pca3 = pca3.transform(scaler.transform(v))
                                if hard:
                                    print v.shape
                                    print hard_negv.shape
                                    v = np.vstack([v, hard_negv])
                                y_train_true = np.ones((new_true,))
                                y_train_false = np.zeros((new_false,))
                                y_train = np.hstack([y_train_true, y_train_false])
                                # permutation
                                ind_perm = np.random.permutation(range(new_false + new_true))
                                # boxes_train = boxes_train[ind_perm, :]
                                v = v[ind_perm, :]
                                y_train = y_train[ind_perm]
                                new_y = np.zeros(y_train.shape)
                                new_y[...] = y_train
                                ind_pos = np.where(y_train == 1)[0]
                                ind_neg = np.where(y_train == 0)[0]
                                # random substitude
                                pca3_cur_pos = v[ind_pos, :]
                                pca3_cur_neg = v[ind_neg, :]
                                to_subst = random.sample(range(num_true), new_true)
                                pca3_pos[to_subst, :] = pca3_cur_pos
                                to_subst = random.sample(range(num_false), new_false)
                                pca3_neg[to_subst, :] = pca3_cur_neg
                    if conf_max < 1 and fail_times >= 2 and update_recent == False:  # if id%10==0:
                        update_recent = True
                        pca3_train = np.vstack([pca3_pos, pca3_neg])
                        y_train_true = np.ones((num_true,))
                        y_train_false = np.zeros((num_false,))
                        y_train = np.hstack([y_train_true, y_train_false])
                        # permutation
                        ind_perm = np.random.permutation(range(num_false + num_true))
                        pca3_train = pca3_train[ind_perm, :]
                        y_train = y_train[ind_perm]
                        # logistic regression
                        clf3 = linear_model.LogisticRegression(fit_intercept=True, solver='liblinear')
                        clf3.fit(pca3_train, y_train)
                        # print 'score is: ',clf3.score(pca3_train,y_train)
                # (B,G,R)
                frame_data_cv = frame_data * 255  # [0,1]-->[0,255]
                frame_data_cv = frame_data_cv[:, :, ::-1]  # RGB->BGR
                frame_data_cv = frame_data_cv.astype('uint8')
                cv2.rectangle(frame_data_cv, (int(gt_box[0]), int(gt_box[1])), (int(gt_box[2]), int(gt_box[3])), (255, 0, 0), 2, 1)
                if id > 0 and init_id == True:
                    cv2.rectangle(frame_data_cv, (int(pred_box[0, 0]), int(pred_box[0, 1])), (int(pred_box[0, 2]), int(pred_box[0, 3])), (0, 255, 0), 2, 1)
                if init_id == False:
                    init_id = True
                show_particles = False
                if show_particles:
                    for i in range(pfilter.num_particles):
                        cx = pfilter.particles[i, 0]
                        cy = pfilter.particles[i, 1]
                        cv2.circle(frame_data_cv, (int(cx), int(cy)), 1, (0, 0, 255), thickness=1)
                show_box = False
                if show_box:
                    n = 0
                    for i in ind_pos:
                        if n % 5 == 0:
                            cv2.rectangle(frame_data_cv, (int(boxes_train[i, 0]), int(boxes_train[i, 1])), (int(boxes_train[i, 2]), int(boxes_train[i, 3])), (0, 0, 255), 2, 1)
                        n += 1
                    n = 0
                show_particles_init = False
                if show_particles_init:
                    for i in range(pfilter.num_particles):
                        cx = pfilter.particles[i, 0]
                        cy = pfilter.particles[i, 1]
                        cv2.circle(frame_data_cv, (int(cx), int(cy)), 1, (0, 255, 0), thickness=1)
                show_frame = False
                cv2.circle(frame_data_cv, (int(pfilter.cur_c[0]), int(pfilter.cur_c[1])), 2, (0, 0, 255), thickness=1)
                if show_frame:
                    cv2.imshow(sequence, frame_data_cv)
                    c = cv2.waitKey(1)
                    if c != -1:
                        cv2.destroyWindow(sequence)
                        break
                else:
                    video_writer.write(frame_data_cv)
            end_time = time.time()
            video_writer.release()
            print "Average FPS: %f" % (nFrame / (end_time - start_time))
            log_file.write("Average FPS: %f\n" % (nFrame / (end_time - start_time)))
            conf_hist = np.array(conf_hist)
            iou_hist = np.array(iou_hist)
            area_hist = np.array(area_hist)
            pred_hist = np.vstack(pred_hist)
            precisions, auc_pre = utils.calc_prec(gt_boxes, pred_hist)
            suc, auc_iou = utils.calc_success(iou_hist)
            records_precision.append(precisions * nFrame)
            records_success.append(suc * nFrame)
            print 'Precision @20 is: %f' % precisions[19]
            print 'Auc of Precision is: %f' % auc_pre
            print 'Auc of Success is: %f' % auc_iou
            print 'Reinit times: %d' % reinit
            log_file.write("Precision @20 is: %f\n" % precisions[19])
            log_file.write('Auc of Precision is: %f\n' % auc_pre)
            log_file.write('Auc of Success is: %f\n' % auc_iou)
            log_file.write('Reinit times: %d\n' % reinit)
            # log_file.write('Selected feature maps: %d\n' % f_inds.shape[0])
            # log_file.write('PCA components: %d\n' % nPCA)
            # res_f = open('results11/%s.txt'%sequence,'w')
            # pred_hist[:,2:] = pred_hist[:,2:] - pred_hist[:,:2]
            # res_f = write_res(pred_hist,res_f)
            # res_f.close()
    log_file.close()
    pkl = open('results_1031.pkl', 'w')
    pickle.dump([records_precision, records_success], pkl)
    pkl.close()
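
# --------------------------------------------------------------------------
# Reference sketch (not part of the original pipeline): the reporting block
# above relies on utils.calc_prec / utils.calc_success, whose internals are
# not included in this file.  The helpers below are a minimal, self-contained
# stand-in for the standard OTB metrics they presumably implement (the
# center-location-error precision curve and the IoU success curve); the names
# calc_prec_sketch / calc_success_sketch are hypothetical, not the project's
# actual API.  Both assume gt_boxes and pred_boxes are aligned frame-by-frame.
import numpy as np


def calc_prec_sketch(gt_boxes, pred_boxes, max_thresh=50):
    """Precision curve: fraction of frames whose predicted box center lies
    within t pixels of the ground-truth center, for t = 1..max_thresh.
    Boxes are (x1, y1, x2, y2); precisions[19] is the usual Precision@20."""
    gt_c = (gt_boxes[:, :2] + gt_boxes[:, 2:4]) / 2.0
    pr_c = (pred_boxes[:, :2] + pred_boxes[:, 2:4]) / 2.0
    dist = np.sqrt(np.sum((gt_c - pr_c) ** 2, axis=1))
    thresholds = np.arange(1, max_thresh + 1)
    precisions = np.array([np.mean(dist <= t) for t in thresholds])
    auc = np.mean(precisions)  # normalized area under the curve
    return precisions, auc


def calc_success_sketch(iou_hist, nbins=21):
    """Success curve: fraction of frames whose IoU exceeds each overlap
    threshold in [0, 1]; the mean of this curve is the usual success AUC."""
    iou_hist = np.asarray(iou_hist)
    thresholds = np.linspace(0.0, 1.0, nbins)
    success = np.array([np.mean(iou_hist >= t) for t in thresholds])
    auc = np.mean(success)
    return success, auc
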
def main(args): vis = args.vis debug = args.debug save = args.save nparticles = args.particles root_path = '/home/ccjiang/Documents/caffe-fast-rcnn/examples/tracker/' dataset_path = "/data/OTB100" sequence = args.sequence model_def = os.path.join(root_path, args.prototxt) model_weight = os.path.join(root_path, args.caffemodel) for t in os.walk(os.path.join(dataset_path, sequence, sequence, "img")): if t[0] == os.path.join(dataset_path, sequence, sequence, "img"): nFrame = len(t[2]) print "Total frames are: ", nFrame gt_path = os.path.join(dataset_path, sequence, sequence, "groundtruth_rect.txt") gt_boxes = utils.get_boxes_all(gt_path) vggnet = VGGnet.VGGnet(model_def, model_weight) thre_min_neg = 0.0 thre_max_neg = 0.4 #0.5 thre_min_pos = 0.8 thre_max_pos = 1.0 conf_hist = [] iou_hist = [] area_hist = [] eig_hist = [] pred_hist = [] #(x1,y1,x2,y2) reinit = 0 nFrame = np.minimum(nFrame, gt_boxes.shape[0]) for id in np.arange(0, nFrame): #nFrame frame_name = "img/%04d.jpg" % (id + 1) print "Start processing: %s" % frame_name frame_path = os.path.join(dataset_path, sequence, sequence, frame_name) frame_data = caffe.io.load_image(frame_path) # (432,576,3), in [0,1] gt_box = gt_boxes[id] if id == 0: h, w, c = frame_data.shape frame_shape = [c, w, h] fps = 20 fourcc = cv2.VideoWriter_fourcc(*'MJPG') video_writer = cv2.VideoWriter("res_%s.avi" % sequence, fourcc, fps, (w, h)) fail_times = 0 area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1]) ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1] ) #ratio=w/h # set up net.blobs['im_info'] print "Image Size: ", w, h vggnet.reshape(w=w, h=h, nbox=nparticles) filter = PFfilter.PFfilter( utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h, nparticles) filter.create_particles() filter.restrict_particles(w, h) area_hist.append(filter.cur_a) pred_hist.append(np.array(gt_box).reshape(1, -1)) #pca # test sample_iou num_true = 500 num_false = 1000 boxes_train = [] #boxes_train_neg=[] iou_train = [] try: #Q=[[1,0],[0,1]] #for pixel wise Q = 0.05 #box_w,box_h sample_box_true, sample_iou_true = filter.sample_iou( gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0) except OverflowError as e: print "too many loops in sample." # print sample_box_true[:10] # print sample_box_true.shape[0] # print sample_iou_true[:10] print "average iou: ", np.mean(sample_iou_true) boxes_train.append(sample_box_true) iou_train.append(sample_iou_true) try: #Q=[[36,0],[0,36]]#for pixel wise Q = 0.2 #0.15 sample_box_false, sample_iou_false = filter.sample_iou( gt_box, Q, 0.2, 0.01, num_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." # print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) try: #Q=[[36,0],[0,36]]#for pixel wise Q = 0.2 sample_box_false, sample_iou_false = filter.sample_iou( gt_box, Q, 0.01, 0.2, num_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." 
# print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) boxes_train = np.vstack(boxes_train) iou_train = np.vstack(iou_train) y_train_true = np.ones((num_true, )) y_train_false = np.zeros((num_false, )) y_train = np.hstack([y_train_true, y_train_false]) #permutation ind_perm = np.random.permutation(range(num_false + num_true)) boxes_train = boxes_train[ind_perm, :] iou_train = iou_train[ind_perm] y_train = y_train[ind_perm] ind_pos = np.where(y_train == 1)[0] ind_neg = np.where(y_train == 0)[0] vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0]) features = vggnet.get_features_first_raw(frame_data, boxes_raw=boxes_train, id=id) for k, v in features.iteritems(): print k, v.shape if k == 'f3': pca3 = utils.skl_pca(v) v_pca3 = pca3.transform(v) pca3_pos = np.zeros((num_true, pca3.n_components_), dtype=np.float32) pca3_neg = np.zeros((num_false, pca3.n_components_), dtype=np.float32) pca3_pos[...] = v_pca3[ind_pos, :] pca3_neg[...] = v_pca3[ind_neg, :] #utils.vis_as_image(v_pca3) #plt.imshow(v_pca3) #plt.title("PCA features") #plt.show() #plt.close() #logistic regression y_weight = sklearn.utils.class_weight.compute_class_weight( class_weight='balanced', classes=np.array([0, 1]), y=y_train) #print y_weight class_weight = {0: y_weight[0], 1: y_weight[1]} clf3 = SVC(kernel="linear") #clf3=linear_model.LogisticRegression(fit_intercept=True,solver='liblinear') clf3.fit(v_pca3, y_train) score3 = clf3.score(v_pca3, y_train) print 'score3: ', score3 #prob=clf3.predict_proba(v_pca3) print clf3.classes_ #print prob vis_feature = False if vis_feature: utils.vis_features(features, id) start_time = time.time() else: if fail_times >= 5: #reinitialize reinit += 1 area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1]) ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1]) filter = PFfilter.PFfilter( utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h, nparticles) #filter.reset(utils.bbox_to_states(gt_box, area, ratio), area, ratio) filter.create_particles() filter.restrict_particles(w, h) area_hist.append(filter.cur_a) pred_box = gt_box boxes_train = [] pred_hist.append(np.array(gt_box).reshape(1, -1)) #pred_hist.append(pred_box) conf_hist.append(-0.1) # boxes_train_neg=[] iou_train = [] try: # Q=[[1,0],[0,1]] #for pixel wise Q = 0.05 # box_w,box_h sample_box_true, sample_iou_true = filter.sample_iou( gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0) except OverflowError as e: print "too many loops in sample." # print sample_box_true[:10] # print sample_box_true.shape[0] # print sample_iou_true[:10] print "average iou: ", np.mean(sample_iou_true) boxes_train.append(sample_box_true) iou_train.append(sample_iou_true) try: # Q=[[36,0],[0,36]]#for pixel wise Q = 0.2 #0.15 sample_box_false, sample_iou_false = filter.sample_iou( gt_box, Q, 0.2, 0.01, num_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." # print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) try: # Q=[[36,0],[0,36]]#for pixel wise Q = 0.2 sample_box_false, sample_iou_false = filter.sample_iou( gt_box, Q, 0.01, 0.2, num_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." 
# print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) boxes_train = np.vstack(boxes_train) iou_train = np.vstack(iou_train) y_train_true = np.ones((num_true, )) y_train_false = np.zeros((num_false, )) y_train = np.hstack([y_train_true, y_train_false]) # permutation ind_perm = np.random.permutation(range(num_false + num_true)) boxes_train = boxes_train[ind_perm, :] iou_train = iou_train[ind_perm] y_train = y_train[ind_perm] ind_pos = np.where(y_train == 1)[0] ind_neg = np.where(y_train == 0)[0] vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0]) features = vggnet.get_features_first_raw(frame_data, boxes_raw=boxes_train, id=id) for k, v in features.iteritems(): print k, v.shape if k == 'f3': v_pca3 = pca3.transform(v) pca3_pos[...] = v_pca3[ind_pos, :] pca3_neg[...] = v_pca3[ind_neg, :] clf3.fit(v_pca3, y_train) score3 = clf3.score(v_pca3, y_train) print 'score3: ', score3 # prob=clf3.predict_proba(v_pca3) print clf3.classes_ fail_times = 0 continue filter.predict_particles(Q=0.02, cr=0.05, ca=0.05) #0.02,0.0005,0.005 filter.restrict_particles(w, h) area_hist.append(filter.cur_a) #compute conf conf = np.zeros(filter.weights.shape) #np.save('particles.npy',filter.particles) pred_boxes = utils.state_to_bbox(filter.particles, area, ratio) vggnet.reshape(w, h, filter.num_particles) features = vggnet.get_features_first_raw(frame_data, boxes_raw=pred_boxes, id=id) for k, v in features.iteritems(): print k, v.shape if k == 'f3': v_pca3 = pca3.transform(v) #utils.vis_as_image(v_pca3) #plt.imshow(v_pca3) #plt.title("PCA features") #plt.show() #plt.close() #logistic regression #conf=clf3.predict_proba(v_pca3)[:,1] conf = -clf3.decision_function(v_pca3) conf_max = np.max(conf) conf_min = np.min(conf) print 'conf_max: ', conf_max print 'conf_min: ', conf_min filter.update_particles(conf) # pred_state = filter.estimate() print filter.weights filter.resample() # always resample pred_state, s_particles, r_particles = filter.estimate(k=10) cov_particles = np.dot( filter.particles[:, :4].T, filter.particles[:, :4]) / filter.particles.shape[0] eigval, eigvec = np.linalg.eig(cov_particles) max_val = eigval[0] eig_hist.append(max_val) print 'Max eigvalue: %f' % max_val #print 'conf is: ',conf if conf_max > 0 and max_val < 200000: fail_times = 0 else: fail_times += 1 #filter.update_particles(conf) #pred_state=filter.estimate() #filter.resample() #pred_state, s_particles, r_particles = filter.estimate(k=10) print "conf_max too low, not update particles " pred_box = utils.state_to_bbox(pred_state.reshape((-1, 6)), area, ratio) print 'ground truth bbox is: ', gt_box print "pred_box is: ", pred_box show_sr = False if show_sr: plt.hist2d(s_particles, r_particles, bins=50, weights=filter.weights.squeeze()) ''' plt.scatter(s_particles,r_particles,c='r',marker='.',linewidths=1) plt.xlabel('Area') plt.ylabel('Aspect ratio') plt.title('Area and Ratio of particles') plt.axis('equal') ''' plt.show() iou = utils.calc_iou(gt_box, pred_box) print 'iou is: ', iou pred_hist.append(pred_box) conf_hist.append(conf_max) iou_hist.append(iou) if conf_max >= 0.1: #0.5 #update pca3_pos and pca3_neg new_true = 100 #50 new_false = 200 #100 boxes_train = [] iou_train = [] Q = 0.02 try: sample_box_true, sample_iou_true = filter.sample_iou( pred_box, Q, 0.01, 0.01, new_true, 0.85, 1.0) except OverflowError as e: print "too many loops in sample." 
# print sample_box_true[:10] # print sample_box_true.shape[0] # print sample_iou_true[:10] print "average iou: ", np.mean(sample_iou_true) boxes_train.append(sample_box_true) iou_train.append(sample_iou_true) try: Q = 0.2 sample_box_false, sample_iou_false = filter.sample_iou( pred_box, Q, 0.2, 0.01, new_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." # print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) try: Q = 0.2 sample_box_false, sample_iou_false = filter.sample_iou( pred_box, Q, 0.01, 0.2, new_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." # print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) boxes_train = np.vstack(boxes_train) iou_train = np.vstack(iou_train) y_train_true = np.ones((new_true, )) y_train_false = np.zeros((new_false, )) y_train = np.hstack([y_train_true, y_train_false]) # permutation ind_perm = np.random.permutation(range(new_false + new_true)) boxes_train = boxes_train[ind_perm, :] y_train = y_train[ind_perm] new_y = np.zeros(y_train.shape) new_y[...] = y_train ind_pos = np.where(y_train == 1)[0] ind_neg = np.where(y_train == 0)[0] vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0]) features = vggnet.get_features_first_raw(frame_data, boxes_raw=boxes_train, id=id) for k, v in features.iteritems(): print k, v.shape if k == 'f3': v_pca3 = pca3.transform(v) #random substitude pca3_cur_pos = v_pca3[ind_pos, :] pca3_cur_neg = v_pca3[ind_neg, :] to_subst = random.sample(range(num_true), new_true) pca3_pos[to_subst, :] = pca3_cur_pos to_subst = random.sample(range(num_false), new_false) pca3_neg[to_subst, :] = pca3_cur_neg if conf_max < 0.1 and fail_times >= 2: #0.99 #if conf_max<0.95 and conf_max>0.5: #update classification model print 'updating model...' 
pca3_train = np.vstack([pca3_pos, pca3_neg]) y_train_true = np.ones((num_true, )) y_train_false = np.zeros((num_false, )) y_train = np.hstack([y_train_true, y_train_false]) # permutation ind_perm = np.random.permutation(range(num_false + num_true)) pca3_train = pca3_train[ind_perm, :] y_train = y_train[ind_perm] #logistic regression clf3.fit(pca3_train, y_train) print 'score is: ', clf3.score(pca3_train, y_train) #fail_times=0 # (B,G,R) frame_data_cv = frame_data * 255 # [0,1]-->[0,255] frame_data_cv = frame_data_cv[:, :, ::-1] # RGB->BGR frame_data_cv = frame_data_cv.astype('uint8') #cv2.rectangle(frame_data_cv, (int(gt_box[0]), int(gt_box[1])), (int(gt_box[2]), int(gt_box[3])), # (255, 0, 0), 2, 1) if id > 0: cv2.rectangle(frame_data_cv, (int(pred_box[0, 0]), int(pred_box[0, 1])), (int(pred_box[0, 2]), int(pred_box[0, 3])), (0, 255, 0), 2, 1) show_particles = False if show_particles: for i in range(filter.num_particles): cx = filter.particles[i, 0] cy = filter.particles[i, 1] cv2.circle(frame_data_cv, (int(cx), int(cy)), 1, (0, 0, 255), thickness=1) show_box = False if show_box: n = 0 for i in ind_pos: if n % 5 == 0: cv2.rectangle( frame_data_cv, (int(boxes_train[i, 0]), int(boxes_train[i, 1])), (int(boxes_train[i, 2]), int(boxes_train[i, 3])), (0, 0, 255), 2, 1) n += 1 n = 0 show_particles_init = False if show_particles_init: for i in range(filter.num_particles): cx = filter.particles[i, 0] cy = filter.particles[i, 1] cv2.circle(frame_data_cv, (int(cx), int(cy)), 1, (0, 255, 0), thickness=1) show_frame = False #cv2.circle(frame_data_cv, (int(filter.cur_c[0]), int(filter.cur_c[1])), 2, (0, 0, 255), thickness=1) if show_frame: cv2.imshow(sequence, frame_data_cv) c = cv2.waitKey(1) if c != -1: if chr(c) == 'p': c = cv2.waitKey() #print 'You press: ',chr(c) #if chr(c)=='c': if chr(c) == 'c': cv2.destroyWindow(sequence) #conf_hist=np.array(conf_hist) #iou_hist=np.array(iou_hist) #np.save('conf_hist.npy',conf_hist) #np.save('iou_hist.npy',iou_hist) break else: video_writer.write(frame_data_cv) end_time = time.time() video_writer.release() iou_hist = np.array(iou_hist) pred_hist = np.array(pred_hist).squeeze() print "iou_hist: ", iou_hist.shape print "pred_hist: ", pred_hist.shape print "get_boxes: ", gt_boxes.shape precisions, auc_pre = utils.calc_prec(gt_boxes, pred_hist) print "precision is: %f" % (precisions[19]) suc, auc_iou = utils.calc_success(iou_hist) print "Average IOU is: %f" % (np.mean(iou_hist)) print "Auc of precision is: %f" % (auc_pre) print "Auc of success is: %f" % auc_iou print "Reinit times: %d" % reinit print "Average FPS: %f" % ((id + 1) / (end_time - start_time))
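
# --------------------------------------------------------------------------
# Reference sketch (assumption, not the project's API): the variant above
# keeps a PCA model fitted on conv3_3 ROI features plus a classifier (an SVC
# scored with decision_function here, LogisticRegression with predict_proba
# in the other variants) and turns the classifier output into per-particle
# confidences.  The two helpers below show that pattern in isolation with
# hypothetical names (fit_box_scorer / score_candidate_boxes); feature
# dimensions and hyper-parameters are illustrative only.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler


def fit_box_scorer(pos_feats, neg_feats, n_components=64):
    """Fit a scaler + PCA on pooled positive/negative box features and a
    logistic-regression scorer on the projected features."""
    X = np.vstack([pos_feats, neg_feats])
    y = np.hstack([np.ones(len(pos_feats)), np.zeros(len(neg_feats))])
    scaler = StandardScaler().fit(X)
    pca = PCA(n_components=min(n_components, X.shape[0], X.shape[1]))
    X_red = pca.fit_transform(scaler.transform(X))
    clf = LogisticRegression(fit_intercept=True, solver='liblinear')
    clf.fit(X_red, y)
    return scaler, pca, clf


def score_candidate_boxes(scaler, pca, clf, cand_feats):
    """Confidence of each candidate box: positive-class probability after
    projecting its feature vector with the shared scaler + PCA."""
    return clf.predict_proba(pca.transform(scaler.transform(cand_feats)))[:, 1]


# Example with random features standing in for ROI-pooled conv3_3 activations:
#   scaler, pca, clf = fit_box_scorer(np.random.randn(500, 1024), np.random.randn(1000, 1024))
#   conf = score_candidate_boxes(scaler, pca, clf, np.random.randn(200, 1024))
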
def main(args): vis = args.vis debug = args.debug save = args.save nparticles = args.particles root_path = '/home/ccjiang/Documents/caffe-fast-rcnn/examples/tracker/' dataset_path = args.dataset #"/data/OTB100" dataset100_seq = [ 'Bird2', 'BlurCar1', 'BlurCar3', 'BlurCar4', 'Board', 'Bolt2', 'Boy', 'Car2', 'Car24', 'Coke', 'Coupon', 'Crossing', 'Dancer', 'Dancer2', 'David2', 'David3', 'Dog', 'Dog1', 'Doll', 'FaceOcc1', 'FaceOcc2', 'Fish', 'FleetFace', 'Football1', 'Freeman1', 'Freeman3', 'Girl2', 'Gym', 'Human2', 'Human5', 'Human7', 'Human8', 'Jogging', 'KiteSurf', 'Lemming', 'Man', 'Mhyang', 'MountainBike', 'Rubik', 'Singer1', 'Skater', 'Skater2', 'Subway', 'Suv', 'Tiger1', 'Toy', 'Trans', 'Twinnings', 'Vase' ] dataset50_seq = [ 'Basketball', 'Biker', 'Bird1', 'BlurBody', 'BlurCar2', 'BlurFace', 'BlurOwl', 'Bolt', 'Box', 'Car1', 'Car4', 'CarDark', 'CarScale', 'ClifBar', 'Couple', 'Crowds', 'Deer', 'Diving', 'DragonBaby', 'Dudek', 'Football', 'Freeman4', 'Girl', 'Human3', 'Human4', 'Human6', 'Human9', 'Ironman', 'Jump', 'Jumping', 'Liquor', 'Matrix', 'MotorRolling', 'Panda', 'RedTeam', 'Shaking', 'Singer2', 'Skating1', 'Skating2', 'Skiing', 'Soccer', 'Surfer', 'Sylvester', 'Tiger2', 'Trellis', 'Walking', 'Walking2', 'Woman' ] if "OTB50" in dataset_path: data_seq = dataset50_seq else: data_seq = dataset100_seq log_name = 'log.txt' log_file = open(log_name, 'w') records_success = [] #defaultdict(list) records_precision = [] #defaultdict(list) records_reinit = defaultdict(list) model_def = os.path.join(root_path, args.prototxt) model_weight = os.path.join(root_path, args.caffemodel) vggnet = VGGnet.VGGnet(model_def, model_weight) thre_max_neg = 0.3 # 0.5 test_times = 1 # 0 for t in range(test_times): print 'Test round: %d' % t log_file.write('Test round: %d\n' % t) # sequences = ['Fish'] for sequence in data_seq: for t in os.walk( os.path.join(dataset_path, sequence, sequence, "img")): if t[0] == os.path.join(dataset_path, sequence, sequence, "img"): nFrame = len(t[2]) print 'Processing: %s' % sequence log_file.write('Processing: %s\n' % sequence) print "Total frames are: ", nFrame log_file.write('Total frames are: %d\n' % nFrame) gt_path = os.path.join(dataset_path, sequence, sequence, "groundtruth_rect.txt") gt_boxes = utils.get_boxes_all(gt_path) conf_hist = [] iou_hist = [] area_hist = [] pred_hist = [] # (x1,y1,x2,y2) eig_hist = [] reinit = 0 nFrame = np.minimum(nFrame, gt_boxes.shape[0]) id_shift = 0 init_id = False for id in np.arange(0, nFrame): frame_name = "img/%04d.jpg" % (id + 1) # print "Start processing: %s" % frame_name frame_path = os.path.join(dataset_path, sequence, sequence, frame_name) if os.path.exists(frame_path) == False: id_shift = id_shift + 1 continue id = id - id_shift frame_data = caffe.io.load_image( frame_path) # (432,576,3), in [0,1] gt_box = gt_boxes[id] if init_id == False: h, w, c = frame_data.shape frame_shape = [c, w, h] fail_times = 0 area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1]) ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1] ) # ratio=w/h # set up net.blobs['im_info'] print "Image Size: ", w, h log_file.write('Image Size: %d %d\n' % (w, h)) vggnet.reshape(w=w, h=h, nbox=nparticles) filter = PFfilter.PFfilter( utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h, nparticles) filter.create_particles() filter.restrict_particles(w, h) area_hist.append(filter.cur_a) pred_hist.append(np.array(gt_box).reshape(1, -1)) # pca # test sample_iou num_true = 500 num_false = 1000 boxes_train = [] # boxes_train_neg=[] iou_train = [] try: # 
Q=[[1,0],[0,1]] #for pixel wise Q = 0.05 # box_w,box_h sample_box_true, sample_iou_true = filter.sample_iou_pred_box( gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0) except OverflowError as e: print "too many loops in sample." # print sample_box_true[:10] # print sample_box_true.shape[0] # print sample_iou_true[:10] # print "average iou: ", np.mean(sample_iou_true) boxes_train.append(sample_box_true) iou_train.append(sample_iou_true) try: # Q=[[36,0],[0,36]]#for pixel wise Q = 0.2 # 0.15 sample_box_false, sample_iou_false = filter.sample_iou( gt_box, Q, 0.2, 0.01, num_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." # print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] # print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) try: # Q=[[36,0],[0,36]]#for pixel wise Q = 0.2 sample_box_false, sample_iou_false = filter.sample_iou( gt_box, Q, 0.01, 0.2, num_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." # print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] # print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) boxes_train = np.vstack(boxes_train) iou_train = np.vstack(iou_train) y_train_true = np.ones((num_true, )) y_train_false = np.zeros((num_false, )) y_train = np.hstack([y_train_true, y_train_false]) # permutation ind_perm = np.random.permutation( range(num_false + num_true)) boxes_train = boxes_train[ind_perm, :] iou_train = iou_train[ind_perm] y_train = y_train[ind_perm] ind_pos = np.where(y_train == 1)[0] ind_neg = np.where(y_train == 0)[0] vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0]) features = vggnet.get_features_first_raw( frame_data, boxes_raw=boxes_train, id=id) for k, v in features.iteritems(): # print k,v.shape if k == 'f3': pca3 = utils.skl_pca(v) v_pca3 = pca3.transform(v) pca3_pos = np.zeros((num_true, pca3.n_components_), dtype=np.float32) pca3_neg = np.zeros( (num_false, pca3.n_components_), dtype=np.float32) pca3_pos[...] = v_pca3[ind_pos, :] pca3_neg[...] = v_pca3[ind_neg, :] # utils.vis_as_image(v_pca3) # plt.imshow(v_pca3) # plt.title("PCA features") # plt.show() # plt.close() # logistic regression y_weight = sklearn.utils.class_weight.compute_class_weight( class_weight='balanced', classes=np.array([0, 1]), y=y_train) # print y_weight class_weight = {0: y_weight[0], 1: y_weight[1]} clf3 = linear_model.LogisticRegression( fit_intercept=True, solver='liblinear') clf3.fit(v_pca3, y_train) vis_feature = False if vis_feature: utils.vis_features(features, id) start_time = time.time() else: if fail_times >= 5: # reinitialize reinit += 1 area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1]) ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1]) filter = PFfilter.PFfilter( utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h, nparticles) # filter.reset(utils.bbox_to_states(gt_box, area, ratio), area, ratio) filter.create_particles() filter.restrict_particles(w, h) area_hist.append(filter.cur_a) pred_box = gt_box pred_hist.append(np.array(pred_box).reshape(1, -1)) conf_hist.append(-0.1) boxes_train = [] # boxes_train_neg=[] iou_train = [] try: # Q=[[1,0],[0,1]] #for pixel wise Q = 0.05 # box_w,box_h sample_box_true, sample_iou_true = filter.sample_iou_pred_box( gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0) except OverflowError as e: print "too many loops in sample." 
boxes_train.append(sample_box_true) iou_train.append(sample_iou_true) try: # Q=[[36,0],[0,36]]#for pixel wise Q = 0.2 # 0.15 sample_box_false, sample_iou_false = filter.sample_iou( gt_box, Q, 0.2, 0.01, num_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) try: # Q=[[36,0],[0,36]]#for pixel wise Q = 0.2 sample_box_false, sample_iou_false = filter.sample_iou( gt_box, Q, 0.01, 0.2, num_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." # print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] # print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) boxes_train = np.vstack(boxes_train) iou_train = np.vstack(iou_train) y_train_true = np.ones((num_true, )) y_train_false = np.zeros((num_false, )) y_train = np.hstack([y_train_true, y_train_false]) # permutation ind_perm = np.random.permutation( range(num_false + num_true)) boxes_train = boxes_train[ind_perm, :] iou_train = iou_train[ind_perm] y_train = y_train[ind_perm] ind_pos = np.where(y_train == 1)[0] ind_neg = np.where(y_train == 0)[0] vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0]) features = vggnet.get_features_first_raw( frame_data, boxes_raw=boxes_train, id=id) for k, v in features.iteritems(): # print k, v.shape if k == 'f3': v_pca3 = pca3.transform(v) pca3_pos[...] = v_pca3[ind_pos, :] pca3_neg[...] = v_pca3[ind_neg, :] clf3.fit(v_pca3, y_train) score3 = clf3.score(v_pca3, y_train) # print 'score3: ', score3 # prob=clf3.predict_proba(v_pca3) # print clf3.classes_ fail_times = 0 continue filter.predict_particles(Q=0.2, cr=0.005, ca=0.001) filter.restrict_particles(w, h) area_hist.append(filter.cur_a) # compute conf conf = np.zeros(filter.weights.shape) # np.save('particles.npy',filter.particles) pred_boxes = utils.state_to_bbox(filter.particles, area, ratio) vggnet.reshape(w, h, filter.num_particles) features = vggnet.get_features_first_raw( frame_data, boxes_raw=pred_boxes, id=id) for k, v in features.iteritems(): # print k,v.shape if k == 'f3': v_pca3 = pca3.transform(v) # utils.vis_as_image(v_pca3) # plt.imshow(v_pca3) # plt.title("PCA features") # plt.show() # plt.close() # logistic regression conf = clf3.predict_proba(v_pca3)[:, 1] conf_max = np.max(conf) conf_min = np.min(conf) filter.update_particles(conf) filter.resample() # always resample pred_state, s_particles, r_particles = filter.estimate( k=10) cov_particles = np.dot( filter.particles[:, :2].T, filter.particles[:, :2]) / filter.particles.shape[0] eigval, eigvec = np.linalg.eig(cov_particles) max_val = eigval[0] eig_hist.append(max_val) print 'Max eigvalue: %f' % max_val # print 'conf is: ',conf if conf_max > 0.8: fail_times = 0 else: fail_times += 1 print "conf_max too low, not update particles " pred_box = utils.state_to_bbox(pred_state.reshape((-1, 6)), area, ratio) show_sr = False if show_sr: count, xedge, yedge, tmp_im = plt.hist2d( s_particles, r_particles, bins=10, weights=filter.weights.squeeze(), cmap=plt.cm.gray) top3 = np.argsort(-count, axis=None)[:3] row_ind = top3[:] / count.shape[1] col_ind = top3[:] % count.shape[0] ''' plt.scatter(s_particles,r_particles,c='r',marker='.',linewidths=1) plt.xlabel('Area') plt.ylabel('Aspect ratio') plt.title('Area and Ratio of particles') plt.axis('equal') ''' plt.show() iou = utils.calc_iou(gt_box, pred_box) # print 'iou is: ', iou pred_hist.append(pred_box) 
conf_hist.append(conf_max) iou_hist.append(iou) if conf_max >= 0.9: # 0.8 # update pca3_pos and pca3_neg new_true = 100 # 50 new_false = 200 # 100 boxes_train = [] iou_train = [] Q = 0.02 try: sample_box_true, sample_iou_true = filter.sample_iou_pred_box( pred_box, Q, 0.01, 0.01, new_true, 0.85, 1.0) except OverflowError as e: print "too many loops in sample." # print sample_box_true[:10] # print sample_box_true.shape[0] # print sample_iou_true[:10] # print "average iou: ", np.mean(sample_iou_true) boxes_train.append(sample_box_true) iou_train.append(sample_iou_true) try: Q = 0.2 sample_box_false, sample_iou_false = filter.sample_iou( pred_box, Q, 0.2, 0.01, new_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." # print sample_box_false[:10] # print sample_box_false.shape[0] # print sample_iou_false[:10] # print "average iou: ", np.mean(sample_iou_false) boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) try: Q = 0.2 sample_box_false, sample_iou_false = filter.sample_iou( pred_box, Q, 0.01, 0.2, new_false / 2, 0, thre_max_neg) except OverflowError as e: print "too many loops in sample." boxes_train.append(sample_box_false) iou_train.append(sample_iou_false) boxes_train = np.vstack(boxes_train) iou_train = np.vstack(iou_train) y_train_true = np.ones((new_true, )) y_train_false = np.zeros((new_false, )) y_train = np.hstack([y_train_true, y_train_false]) # permutation ind_perm = np.random.permutation( range(new_false + new_true)) boxes_train = boxes_train[ind_perm, :] y_train = y_train[ind_perm] new_y = np.zeros(y_train.shape) new_y[...] = y_train ind_pos = np.where(y_train == 1)[0] ind_neg = np.where(y_train == 0)[0] vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0]) features = vggnet.get_features_second_raw( boxes_raw=boxes_train, id=id) for k, v in features.iteritems(): # print k, v.shape if k == 'f3': v_pca3 = pca3.transform(v) # random substitude pca3_cur_pos = v_pca3[ind_pos, :] pca3_cur_neg = v_pca3[ind_neg, :] to_subst = random.sample( range(num_true), new_true) pca3_pos[to_subst, :] = pca3_cur_pos to_subst = random.sample( range(num_false), new_false) pca3_neg[to_subst, :] = pca3_cur_neg if conf_max < 1 and fail_times >= 2: pca3_train = np.vstack([pca3_pos, pca3_neg]) y_train_true = np.ones((num_true, )) y_train_false = np.zeros((num_false, )) y_train = np.hstack([y_train_true, y_train_false]) # permutation ind_perm = np.random.permutation( range(num_false + num_true)) pca3_train = pca3_train[ind_perm, :] y_train = y_train[ind_perm] # logistic regression clf3.fit(pca3_train, y_train) # print 'score is: ',clf3.score(pca3_train,y_train) # (B,G,R) frame_data_cv = frame_data * 255 # [0,1]-->[0,255] frame_data_cv = frame_data_cv[:, :, ::-1] # RGB->BGR frame_data_cv = frame_data_cv.astype('uint8') cv2.rectangle(frame_data_cv, (int(gt_box[0]), int(gt_box[1])), (int(gt_box[2]), int(gt_box[3])), (255, 0, 0), 2, 1) if id > 0 and init_id == True: cv2.rectangle(frame_data_cv, (int(pred_box[0, 0]), int(pred_box[0, 1])), (int(pred_box[0, 2]), int(pred_box[0, 3])), (0, 255, 0), 2, 1) if init_id == False: init_id = True show_particles = False if show_particles: for i in range(filter.num_particles): cx = filter.particles[i, 0] cy = filter.particles[i, 1] cv2.circle(frame_data_cv, (int(cx), int(cy)), 1, (0, 0, 255), thickness=1) show_box = False if show_box: n = 0 for i in ind_pos: if n % 5 == 0: cv2.rectangle(frame_data_cv, (int( boxes_train[i, 0]), int(boxes_train[i, 1])), (int(boxes_train[i, 2]), int(boxes_train[i, 3])), (0, 0, 255), 2, 1) 
n += 1 n = 0 ''' for i in ind_neg: if n%15==0: cv2.rectangle(frame_data_cv, (int(boxes_train[i, 0]), int(boxes_train[i, 1])), (int(boxes_train[i, 2]), int(boxes_train[i, 3])), (0, 255,255), 2, 1) n+=1 ''' show_particles_init = False if show_particles_init: for i in range(filter.num_particles): cx = filter.particles[i, 0] cy = filter.particles[i, 1] cv2.circle(frame_data_cv, (int(cx), int(cy)), 1, (0, 255, 0), thickness=1) show_frame = False cv2.circle(frame_data_cv, (int(filter.cur_c[0]), int(filter.cur_c[1])), 2, (0, 0, 255), thickness=1) if show_frame: cv2.imshow(sequence, frame_data_cv) c = cv2.waitKey(1) # print 'You press: ',chr(c) # if chr(c)=='c': if c != -1: cv2.destroyWindow(sequence) # conf_hist=np.array(conf_hist) # iou_hist=np.array(iou_hist) # np.save('conf_hist.npy',conf_hist) # np.save('iou_hist.npy',iou_hist) break end_time = time.time() print "Average FPS: %f" % (nFrame / (end_time - start_time)) log_file.write("Average FPS: %f\n" % (nFrame / (end_time - start_time))) conf_hist = np.array(conf_hist) iou_hist = np.array(iou_hist) area_hist = np.array(area_hist) pred_hist = np.vstack(pred_hist) precisions, auc_pre = utils.calc_prec(gt_boxes, pred_hist) # plt.figure() # plt.subplot(221) # plt.plot(precisions) # plt.gca().invert_xaxis() # plt.title("Precision plot") # plt.xlabel('Location error threshold') # plt.ylabel('Precision') # plt.yticks(np.linspace(0,1,11)) # plt.subplot(222) # plt.show() suc, auc_iou = utils.calc_success(iou_hist) records_precision.append(precisions * nFrame) records_success.append(suc * nFrame) # plt.plot(suc) # plt.gca().invert_xaxis() # plt.title('Success plot') # plt.xlabel('Overlap threshold') # plt.ylabel('Success Rate') # plt.yticks(np.linspace(0,1,11)) # plt.show() # np.save('conf_hist.npy', conf_hist) # np.save('iou_hist.npy', iou_hist) # np.save('area_hist.npy',area_hist) # print 'Average iou is: %f'%(np.mean(iou_hist)) print 'Precision @20 is: %f' % precisions[19] print 'Auc of Precision is: %f' % auc_pre print 'Auc of Success is: %f' % auc_iou print 'Reinit times: %d' % reinit log_file.write("Precision @20 is: %f\n" % precisions[19]) log_file.write('Auc of Precision is: %f\n' % auc_pre) log_file.write('Auc of Success is: %f\n' % auc_iou) log_file.write('Reinit times: %d\n' % reinit) log_file.close() pkl = open( '/home/ccjiang/Documents/caffe-fast-rcnn/examples/tracker/results_100.pkl', 'w') pickle.dump([records_precision, records_success], pkl) pkl.close()
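
# --------------------------------------------------------------------------
# Reference sketch (assumption, not PFfilter's real implementation): all three
# main() variants above drive the same predict / score / reweight / estimate /
# resample cycle through the PFfilter class, whose source is not included in
# this file.  The minimal filter below illustrates that cycle for a (cx, cy)
# state with a caller-supplied scoring function; class name, motion model and
# noise parameters are hypothetical.
import numpy as np


class MinimalParticleFilter(object):
    def __init__(self, init_state, num_particles=300, motion_std=5.0):
        self.particles = np.tile(np.asarray(init_state, dtype=np.float64),
                                 (num_particles, 1))
        self.weights = np.ones(num_particles) / num_particles
        self.motion_std = motion_std

    def predict(self):
        # Random-walk motion model: diffuse every particle with Gaussian noise.
        self.particles += np.random.randn(*self.particles.shape) * self.motion_std

    def update(self, conf):
        # conf: one non-negative score per particle (e.g. classifier probability).
        w = self.weights * (np.asarray(conf, dtype=np.float64) + 1e-12)
        self.weights = w / np.sum(w)

    def estimate(self):
        # Weighted mean of the particle states.
        return np.sum(self.particles * self.weights[:, None], axis=0)

    def resample(self):
        # Systematic resampling: keeps the particle count fixed, resets weights.
        n = len(self.weights)
        positions = (np.arange(n) + np.random.uniform()) / n
        cumsum = np.cumsum(self.weights)
        cumsum[-1] = 1.0  # guard against floating-point round-off
        idx = np.searchsorted(cumsum, positions)
        self.particles = self.particles[idx]
        self.weights = np.ones(n) / n


# Per-frame usage mirroring the tracking loops above:
#   pf.predict(); conf = score(pf.particles); pf.update(conf)
#   state = pf.estimate(); pf.resample()
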