Example No. 1
    def __init__(self, state, area, ratio, img_w, img_h, N=256):
        '''state:u,v,s,r,du,dv'''
        self.state0 = state
        self.area = area
        self.ratio = ratio
        self.width = img_w  # image width, never changes
        self.height = img_h  # image height, never changes
        # prev box position: [[x1,y1,x2,y2]]
        self.prev_pos = utils.state_to_bbox(state, area, ratio)
        self.prev_c = np.array(
            [(self.prev_pos[0, 2] + self.prev_pos[0, 0]) / 2, (self.prev_pos[0, 3] + self.prev_pos[0, 1]) / 2],
            dtype=np.float32)
        # print 'prev_pos: ',self.prev_pos.shape,' ',self.prev_pos.dtype
        # current box position: [[x1,y1,x2,y2]]
        self.cur_pos = np.empty((1, 4), dtype=np.float32)

        # print 'cur_pos ',self.cur_pos.shape,' ',self.cur_pos.dtype
        np.copyto(self.cur_pos, self.prev_pos)
        # current center,[cx,cy]
        self.cur_c = np.array(
            [(self.cur_pos[0, 2] + self.cur_pos[0, 0]) / 2, (self.cur_pos[0, 3] + self.cur_pos[0, 1]) / 2],
            dtype=np.float32)

        self.prev_a = 1.0
        self.cur_a = 1.0
        self.box_w = self.cur_pos[0, 2] - self.cur_pos[0, 0]  # current box width, update by estimate()
        self.box_h = self.cur_pos[0, 3] - self.cur_pos[0, 1]  # current box height, update by estimate()
        self.cur_a = self.box_h * self.box_w / self.area

        # print 'initial state is: ', self.state0
        self.num_particles = N
        self.weights = np.ones((self.num_particles,)) / self.num_particles
        self.dt = 1.0
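The constructor assumes a utils.state_to_bbox helper that converts the (cx, cy, s, r, dcx, dcy) state into a corner-format box using the reference area and aspect ratio. A minimal sketch of such a conversion, assuming s and r act as multiplicative scales on the reference area and w/h ratio (hypothetical; the real utils module may differ):

import numpy as np

def state_to_bbox_sketch(state, area, ratio):
    # state rows: (cx, cy, s, r, dcx, dcy); s scales the reference area,
    # r scales the reference aspect ratio w/h (assumed convention)
    state = np.asarray(state, dtype=np.float32).reshape(-1, 6)
    w = np.sqrt(area * state[:, 2] * ratio * state[:, 3])
    h = np.sqrt(area * state[:, 2] / (ratio * state[:, 3]))
    return np.stack([state[:, 0] - w / 2, state[:, 1] - h / 2,
                     state[:, 0] + w / 2, state[:, 1] + h / 2], axis=1)  # [[x1,y1,x2,y2]]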
Example No. 2
    def reset(self, state, area, ratio):
        '''state:u,v,s,r,du,dv'''
        self.state0 = state
        self.area = area
        self.ratio = ratio

        # cur box position: [[x1,y1,x2,y2]]
        self.cur_pos = utils.state_to_bbox(state, area, ratio)
        # current center, [cx,cy], taken from the freshly reset cur_pos so that
        # dxy below reflects the jump from the previous estimate
        self.cur_c = np.array(
            [(self.cur_pos[0, 2] + self.cur_pos[0, 0]) / 2, (self.cur_pos[0, 3] + self.cur_pos[0, 1]) / 2],
            dtype=np.float32)

        self.box_w = self.cur_pos[0, 2] - self.cur_pos[0, 0]  # current box width, update by estimate()
        self.box_h = self.cur_pos[0, 3] - self.cur_pos[0, 1]  # current box height, update by estimate()
        self.cur_a = self.box_h * self.box_w / self.area
        # self.cur_pos=self.prev_pos
        # print 'cur_pos.shape= ',self.cur_pos.shape

        self.weights = np.ones((self.num_particles,)) / self.num_particles
        D = np.array([[self.box_w, 0], [0, self.box_h]], dtype=np.float32)
        vs = 0.01  # 2
        VL = D * vs
        dxy = self.cur_c.reshape((1, 2))[:, :2] - self.prev_c.reshape((1, 2))[:, :2]

        maxd = self.area * 0.01
        # clamp the displacement: |dx|*|dy| may not exceed maxd (1% of the reference area)
        area_d = np.abs(dxy[:, 0:1]) * np.abs(dxy[:, 1:])
        ind = np.where(area_d > maxd)
        dxy[ind[0], :] = dxy[ind[0], :] / np.sqrt(area_d[ind[0], :]) * np.sqrt(maxd)
        # print 'dxy = ', dxy
        self.particles[:, 4:] = dxy + np.dot(VL, np.random.randn(2, self.num_particles)).transpose()

        Q = 0.02  # 0.02
        QL = D * Q
        # cx,cy:cx=cx+dt*dcx+noise
        self.particles[:, :2] = self.state0[:, :2] + self.dt * self.state0[:, 4:] \
            + np.dot(QL, np.random.randn(2, self.num_particles)).transpose()
        self.mltply = False
        if self.mltply:
            # s
            a = 1.5
            dsn = np.random.randn(self.num_particles)
            ds = np.power(a, dsn)
            self.particles[:, 2] = self.state0[:, 2] * ds
            # r
            R = 0.16
            dr = np.random.randn(self.num_particles) * np.sqrt(R) + 1
            self.particles[:, 3] = self.state0[:, 3] * dr
        else:  # add predict
            ca = 0.0001  # 0.01
            cr = 0.00001
            self.particles[:, 2] = 1 + np.random.randn(self.num_particles) * np.sqrt(ca)
            self.particles[:, 3] = 1 + np.random.randn(self.num_particles) * np.sqrt(cr)
Example No. 3
    def estimate_const(self, conf, k=10):
        '''estimate the current box from externally supplied per-particle confidences'''
        # weighted average of all particles
        cur_pos = np.average(self.particles, weights=conf, axis=0)  # (cx,cy,s,r,dcx,dcy)

        if self.mltply:
            cur_pos = utils.state_to_bbox_m(cur_pos, self.area, self.ratio)
        else:
            cur_pos = utils.state_to_bbox(cur_pos, self.area, self.ratio)
        cur_pos = utils.restrict_box(cur_pos, self.width, self.height)
        return cur_pos
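A minimal usage sketch, mirroring the IoU-based confidence loop in Example No. 7 below (assumes a constructed and initialized filter, i.e. create_particles has been called, and a reference box gt_box):

pred_boxes = utils.state_to_bbox(filter.particles, filter.area, filter.ratio)
conf = utils.calc_iou(gt_box, pred_boxes)  # one confidence value per particle
box = filter.estimate_const(conf)          # weighted-average box, clipped to the image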
Example No. 4
    def restrict_particles_extern(self, states, w, h):
        if self.mltply:
            bboxes = utils.state_to_bbox_m(states, self.area, self.ratio)
        else:
            bboxes = utils.state_to_bbox(states, self.area, self.ratio)
        # restrict x1,y1,x2,y2
        # bboxes[:, 0] = np.minimum(np.maximum(0, bboxes[:, 0]), w)
        # bboxes[:, 2] = np.minimum(np.maximum(0, bboxes[:, 2]), w)
        # bboxes[:, 1] = np.minimum(np.maximum(0, bboxes[:, 1]), h)
        # bboxes[:, 3] = np.minimum(np.maximum(0, bboxes[:, 3]), h)
        if self.mltply:
            bboxes = utils.restrict_box_m(bboxes, w, h)
        else:
            bboxes = utils.restrict_box(bboxes, w, h)
        # prev_particles= self.particles
        if self.mltply:
            state_half = utils.bbox_to_states_m(bboxes, self.area, self.ratio)
        else:
            state_half = utils.bbox_to_states(bboxes, self.area, self.ratio)
        return state_half
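The commented-out lines above show the intended clipping; a sketch of what a restrict_box-style helper presumably does, reconstructed from them (hypothetical; the real utils.restrict_box may differ):

import numpy as np

def restrict_box_sketch(bboxes, w, h):
    # clip x1/x2 to [0, w] and y1/y2 to [0, h]
    bboxes = bboxes.copy()
    bboxes[:, 0] = np.minimum(np.maximum(0, bboxes[:, 0]), w)
    bboxes[:, 2] = np.minimum(np.maximum(0, bboxes[:, 2]), w)
    bboxes[:, 1] = np.minimum(np.maximum(0, bboxes[:, 1]), h)
    bboxes[:, 3] = np.minimum(np.maximum(0, bboxes[:, 3]), h)
    return bboxes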
Example No. 5
    def sample_iou_new(self, gt_box, Q, T, R, N, thre_min=0, thre_max=1):

        sample_boxN = []
        sample_iouN = []
        cur_n = 0
        D = np.array([[self.box_w, 0], [0, self.box_h]])
        QL = D * Q
        # QL = np.linalg.cholesky(Q)
        a = 1.5  # 1.5
        gt_state = utils.bbox_to_states(gt_box, self.area, self.ratio)
        sample_times = 0
        chg_i = 0
        while cur_n < N:
            sample_particles = np.zeros((N, 6), dtype=np.float32)
            QL = D * Q
            sample_particles[:, :2] = gt_state[:, :2] + np.dot(QL,
                                                               np.random.randn(2, N)).transpose()

            if self.mltply:
                dsn = np.random.randn(N) * T
                ds = np.power(a, dsn)
            else:
                ds = 1 + np.random.randn(N) * T
                ds = np.maximum(0.01, ds)  # in case of ds<0
            sample_particles[:, 2] = gt_state[:, 2] * ds

            if self.mltply:
                dr = np.random.randn(N) * R + 1
            else:
                dr = 1 + np.random.randn(N) * R
                dr = np.maximum(0.01, dr)  # in case of dr<0
            sample_particles[:, 3] = gt_state[:, 3] * dr

            # get box
            if self.mltply:
                sample_box = utils.state_to_bbox_m(sample_particles, self.area, self.ratio)
            else:
                sample_box = utils.state_to_bbox(sample_particles, self.area, self.ratio)
            sample_box = utils.restrict_box(sample_box, self.width, self.height)
            # compute iou
            sample_iou = utils.calc_iou(gt_box, sample_box)
            # restrict iou
            ind = np.where((sample_iou >= thre_min) & (sample_iou <= thre_max))
            sample_box = sample_box[ind[0]]
            sample_iou = sample_iou[ind[0]]
            cur_n += sample_box.shape[0]
            sample_boxN.append(sample_box)
            sample_iouN.append(sample_iou.reshape((-1, 1)))
            # too few samples accepted for a high-IoU (positive) request:
            # shrink the noise, cycling over position (Q), scale (T) and ratio (R)
            if ind[0].shape[0] < N / 2 and thre_max >= 0.8:
                if chg_i == 0:
                    Q *= 0.5
                elif chg_i == 1:
                    T = np.minimum(T * 0.5, 0.5)
                else:
                    R = np.minimum(R * 0.5, 0.5)
                chg_i = (chg_i + 1) % 3

            # too few samples accepted for a low-IoU (negative) request: enlarge the noise
            if ind[0].shape[0] < N / 2 and thre_min <= 0.5:
                if chg_i == 0:
                    Q *= 2
                elif chg_i == 1:
                    T = np.minimum(T * 2, 0.5)
                else:
                    R = np.minimum(R * 2, 0.5)
                chg_i = (chg_i + 1) % 3

            sample_times += 1
            if sample_times >= 100:  # and cur_n>N/2.0:#100
                # print "Caution: too many loops in sampling"
                # break
                raise OverflowError()
        if cur_n >= N:
            sample_boxN = np.vstack(sample_boxN)[:N, :]
            sample_iouN = np.vstack(sample_iouN)[:N, :]
        else:
            diff_n = N - cur_n
            sample_iouN = np.vstack(sample_iouN)
            sample_boxN = np.vstack(sample_boxN)
            diff_ind = random.sample(range(cur_n), diff_n)  # need to ensure diff_n<cur_n
            sample_boxN = np.vstack([sample_boxN, sample_boxN[diff_ind]])
            sample_iouN = np.vstack([sample_iouN, sample_iouN[diff_ind]])
        return sample_boxN, sample_iouN
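A usage sketch for generating classifier training data with this sampler; the noise levels, counts and IoU thresholds are taken from the calls in Example No. 8, where sample_iou plays the same role:

# positive samples: tight around the box (IoU in [0.8, 1.0]), small noise
# negative samples: loose around the box (IoU in [0, 0.4]), larger noise
try:
    pos_boxes, pos_iou = filter.sample_iou_new(gt_box, Q=0.05, T=0.01, R=0.01,
                                               N=500, thre_min=0.8, thre_max=1.0)
    neg_boxes, neg_iou = filter.sample_iou_new(gt_box, Q=0.2, T=0.2, R=0.01,
                                               N=500, thre_min=0.0, thre_max=0.4)
except OverflowError:
    print "too many loops in sample."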
Example No. 6
    def estimate(self, k=10):
        '''estimate current position'''
        np.copyto(self.prev_pos, self.cur_pos)
        np.copyto(self.prev_c, self.cur_c)
        cur_pos = np.zeros((6,), dtype=np.float32)
        # there are two ways to estimate cur_pos: weighted average or max weight
        # weighted average over the particles whose weight exceeds half the maximum
        maxw = np.max(self.weights)
        inds = np.where(self.weights > 0.5 * maxw)[0]
        cur_pos = np.average(self.particles[inds], weights=self.weights.squeeze()[inds], axis=0)  # (cx,cy,s,r,dcx,dcy)

        # max
        # cur_pos = self.particles[np.argmax(self.weights)]

        # max k pos
        '''
        #k = 3

        #print type(self.weights), self.weights.shape
        #print 'max: ',np.max(self.weights)
        #print 'min: ',np.min(self.weights)
        sort_ind = np.argsort(-self.weights.squeeze())
        cur_pos = np.average(self.particles[sort_ind[:k]], weights=self.weights[sort_ind[:k]], axis=0)
        '''
        '''
        #hist estimate
        count_xy,edge_x,edge_y=np.histogram2d(self.particles[:,0],self.particles[:,1],bins=40,weights=self.weights.squeeze())
        top3=(-count_xy).argsort(axis=None)[:2]
        ind_x=top3[:]/count_xy.shape[1]
        ind_y=top3[:]%count_xy.shape[1]
        if abs(max(ind_x)-min(ind_x))==1:
            #adjacent
            ind_right=max(ind_x)
            ind_left=min(ind_x)
            edge_x1=edge_x[ind_left]
            edge_x2=edge_x[ind_right+1]
        else:
            edge_x1=edge_x[ind_x[0]]
            edge_x2=edge_x[ind_x[0]+1]
        if abs(max(ind_y)-min(ind_y))==1:
            #adjacent
            ind_right = max(ind_y)
            ind_left = min(ind_y)
            edge_y1=edge_y[ind_left]
            edge_y2=edge_y[ind_right+1]
        else:
            edge_y1=edge_y[ind_y[0]]
            edge_y2=edge_y[ind_y[0]+1]

        cur_pos[0]=(edge_x1+edge_x2)/2.0
        cur_pos[1]=(edge_y1+edge_y2)/2.0
        #area and ratio
        count_sr, edge_s, edge_r = np.histogram2d(self.particles[:, 2], self.particles[:, 3], bins=20,
                                                  weights=self.weights.squeeze())
        top3 = (-count_sr).argsort(axis=None)[:2]
        ind_s = top3[:] / count_sr.shape[1]
        ind_r = top3[:] % count_sr.shape[1]
        if abs(max(ind_s) - min(ind_s)) == 1:
            # adjacent
            ind_right = max(ind_s)
            ind_left = min(ind_s)
            edge_s1 = edge_s[ind_left]
            edge_s2 = edge_s[ind_right + 1]
        else:
            edge_s1 = edge_s[ind_s[0]]
            edge_s2 = edge_s[ind_s[0] + 1]
        if abs(max(ind_r) - min(ind_r)) == 1:
            # adjacent
            ind_right = max(ind_r)
            ind_left = min(ind_r)
            edge_r1 = edge_r[ind_left]
            edge_r2 = edge_r[ind_right + 1]
        else:
            edge_r1 = edge_r[ind_r[0]]
            edge_r2 = edge_r[ind_r[0] + 1]

        cur_pos[2] = (edge_s1 + edge_s2) / 2.0
        cur_pos[3] = (edge_r1 + edge_r2) / 2.0
        '''
        if self.mltply:
            self.cur_pos = utils.state_to_bbox_m(cur_pos, self.area, self.ratio)
        else:
            self.cur_pos = utils.state_to_bbox(cur_pos, self.area, self.ratio)
        self.cur_pos = utils.restrict_box(self.cur_pos, self.width, self.height)
        self.box_h = self.cur_pos[0, 3] - self.cur_pos[0, 1]
        self.box_w = self.cur_pos[0, 2] - self.cur_pos[0, 0]
        self.cur_c[0] = np.minimum(np.maximum(0, cur_pos[0]), self.width)
        self.cur_c[1] = np.minimum(np.maximum(0, cur_pos[1]), self.height)
        # update self.cur_a and self.prev_a
        self.prev_a = self.cur_a
        self.cur_a = self.box_w * self.box_h / self.area
        # self.cur_pos[0, 2] = np.minimum(np.maximum(0, cur_pos[2]), self.width)
        # self.cur_pos[0, 3] = np.minimum(np.maximum(0, cur_pos[3]), self.height)
        # print 'prev_pos = ', self.prev_pos
        # print 'cur_pos = ', self.cur_pos

        # calculate s and r
        s = self.particles[:, 2]
        r = self.particles[:, 3]

        return cur_pos, s, r
Example No. 7
            for i in range(filter.num_particles):
                cx = filter.particles[i, 0]
                cy = filter.particles[i, 1]
                cv2.circle(frame_data, (int(cx), int(cy)), 1, (0, 255, 0), thickness=1)
        filter.predict_particles()
        # np.save('particles.npy',filter.particles)
        filter.restrict_particles(w, h)
        if show_particles:
            for i in range(filter.num_particles):
                cx = filter.particles[i, 0]
                cy = filter.particles[i, 1]
                cv2.circle(frame_data, (int(cx), int(cy)), 1, (0, 0, 255), thickness=1)
        # compute conf
        conf = np.zeros(filter.weights.shape)
        # np.save('particles.npy',filter.particles)
        pred_boxes = utils.state_to_bbox(filter.particles, area, ratio)

        # print 'pred_boxes: ',pred_boxes
        # for i in range(conf.shape[0]):
        #    pred_box=pred_boxes[i,:]
        # print "pred_box is: ",pred_box
        # conf[i]=np.dot(gt_box,pred_box)/np.linalg.norm(gt_box,ord=2)
        #    conf[i]=np.dot(gt_box,pred_box)/np.sum(np.square(gt_box))
        conf = utils.calc_iou(gt_box, pred_boxes)
        # print 'conf is: ',conf
        filter.update_particles(conf)
        if filter.neff() < len(filter.particles):  # 1/2
            filter.resample()
        pred_state = filter.estimate()
        pred_box = utils.state_to_bbox(pred_state.reshape((-1, 6)), area, ratio)
        print 'ground truth bbox is: ', gt_box
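Both this loop and the classifier training in Example No. 8 rely on utils.calc_iou(gt_box, pred_boxes). A minimal sketch of the one-against-many IoU it presumably computes (hypothetical reimplementation):

import numpy as np

def calc_iou_sketch(gt_box, boxes):
    # gt_box: (x1, y1, x2, y2); boxes: (N, 4) in the same corner format
    gt_box = np.asarray(gt_box, dtype=np.float32).reshape(4)
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    ix1 = np.maximum(gt_box[0], boxes[:, 0])
    iy1 = np.maximum(gt_box[1], boxes[:, 1])
    ix2 = np.minimum(gt_box[2], boxes[:, 2])
    iy2 = np.minimum(gt_box[3], boxes[:, 3])
    inter = np.maximum(0, ix2 - ix1) * np.maximum(0, iy2 - iy1)
    area_gt = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
    area_boxes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / np.maximum(area_gt + area_boxes - inter, 1e-10)  # shape (N,)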
Example No. 8
def main(args):

    vis = args.vis
    debug = args.debug
    save = args.save
    nparticles = args.particles

    root_path = '/home/ccjiang/Documents/caffe-fast-rcnn/examples/tracker/'
    dataset_path = "/data/OTB100"
    sequence = args.sequence
    model_def = os.path.join(root_path, args.prototxt)
    model_weight = os.path.join(root_path, args.caffemodel)
    for t in os.walk(os.path.join(dataset_path, sequence, sequence, "img")):
        if t[0] == os.path.join(dataset_path, sequence, sequence, "img"):
            nFrame = len(t[2])
            print "Total frames are: ", nFrame

    gt_path = os.path.join(dataset_path, sequence, sequence,
                           "groundtruth_rect.txt")

    gt_boxes = utils.get_boxes_all(gt_path)

    vggnet = VGGnet.VGGnet(model_def, model_weight)

    thre_min_neg = 0.0
    thre_max_neg = 0.4  #0.5

    thre_min_pos = 0.8
    thre_max_pos = 1.0

    conf_hist = []
    iou_hist = []
    area_hist = []
    eig_hist = []
    pred_hist = []  #(x1,y1,x2,y2)
    reinit = 0
    nFrame = np.minimum(nFrame, gt_boxes.shape[0])

    for id in np.arange(0, nFrame):  #nFrame
        frame_name = "img/%04d.jpg" % (id + 1)
        print "Start processing: %s" % frame_name
        frame_path = os.path.join(dataset_path, sequence, sequence, frame_name)
        frame_data = caffe.io.load_image(frame_path)  # (432,576,3), in [0,1]
        gt_box = gt_boxes[id]

        if id == 0:
            h, w, c = frame_data.shape
            frame_shape = [c, w, h]
            fps = 20
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')

            video_writer = cv2.VideoWriter("res_%s.avi" % sequence, fourcc,
                                           fps, (w, h))
            fail_times = 0
            area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
            ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1])  # ratio = w/h
            # set up net.blobs['im_info']
            print "Image Size: ", w, h
            vggnet.reshape(w=w, h=h, nbox=nparticles)
            filter = PFfilter.PFfilter(
                utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h,
                nparticles)
            filter.create_particles()
            filter.restrict_particles(w, h)
            area_hist.append(filter.cur_a)
            pred_hist.append(np.array(gt_box).reshape(1, -1))
            #pca
            # test sample_iou
            num_true = 500
            num_false = 1000
            boxes_train = []
            #boxes_train_neg=[]
            iou_train = []
            try:
                #Q=[[1,0],[0,1]] #for pixel wise
                Q = 0.05  #box_w,box_h
                sample_box_true, sample_iou_true = filter.sample_iou(
                    gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0)
            except OverflowError as e:
                print "too many loops in sample."
            # print sample_box_true[:10]
            # print sample_box_true.shape[0]
            # print sample_iou_true[:10]
            print "average iou: ", np.mean(sample_iou_true)
            boxes_train.append(sample_box_true)
            iou_train.append(sample_iou_true)
            try:
                #Q=[[36,0],[0,36]]#for pixel wise
                Q = 0.2  #0.15
                sample_box_false, sample_iou_false = filter.sample_iou(
                    gt_box, Q, 0.2, 0.01, num_false / 2, 0, thre_max_neg)
            except OverflowError as e:
                print "too many loops in sample."
            # print sample_box_false[:10]
            # print sample_box_false.shape[0]
            # print sample_iou_false[:10]
            print "average iou: ", np.mean(sample_iou_false)
            boxes_train.append(sample_box_false)
            iou_train.append(sample_iou_false)
            try:
                #Q=[[36,0],[0,36]]#for pixel wise
                Q = 0.2
                sample_box_false, sample_iou_false = filter.sample_iou(
                    gt_box, Q, 0.01, 0.2, num_false / 2, 0, thre_max_neg)
            except OverflowError as e:
                print "too many loops in sample."
            # print sample_box_false[:10]
            # print sample_box_false.shape[0]
            # print sample_iou_false[:10]
            print "average iou: ", np.mean(sample_iou_false)
            boxes_train.append(sample_box_false)
            iou_train.append(sample_iou_false)

            boxes_train = np.vstack(boxes_train)

            iou_train = np.vstack(iou_train)
            y_train_true = np.ones((num_true, ))
            y_train_false = np.zeros((num_false, ))

            y_train = np.hstack([y_train_true, y_train_false])

            #permutation
            ind_perm = np.random.permutation(range(num_false + num_true))
            boxes_train = boxes_train[ind_perm, :]
            iou_train = iou_train[ind_perm]
            y_train = y_train[ind_perm]
            ind_pos = np.where(y_train == 1)[0]
            ind_neg = np.where(y_train == 0)[0]

            vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
            features = vggnet.get_features_first_raw(frame_data,
                                                     boxes_raw=boxes_train,
                                                     id=id)

            for k, v in features.iteritems():
                print k, v.shape
                if k == 'f3':
                    pca3 = utils.skl_pca(v)
                    v_pca3 = pca3.transform(v)
                    pca3_pos = np.zeros((num_true, pca3.n_components_),
                                        dtype=np.float32)
                    pca3_neg = np.zeros((num_false, pca3.n_components_),
                                        dtype=np.float32)
                    pca3_pos[...] = v_pca3[ind_pos, :]
                    pca3_neg[...] = v_pca3[ind_neg, :]
                    #utils.vis_as_image(v_pca3)
                    #plt.imshow(v_pca3)
                    #plt.title("PCA features")
                    #plt.show()
                    #plt.close()
                    #logistic regression
                    y_weight = sklearn.utils.class_weight.compute_class_weight(
                        class_weight='balanced',
                        classes=np.array([0, 1]),
                        y=y_train)
                    #print y_weight
                    class_weight = {0: y_weight[0], 1: y_weight[1]}
                    clf3 = SVC(kernel="linear")
                    #clf3=linear_model.LogisticRegression(fit_intercept=True,solver='liblinear')
                    clf3.fit(v_pca3, y_train)
                    score3 = clf3.score(v_pca3, y_train)
                    print 'score3: ', score3
                    #prob=clf3.predict_proba(v_pca3)
                    print clf3.classes_
                    #print prob

            vis_feature = False
            if vis_feature:
                utils.vis_features(features, id)

            start_time = time.time()
        else:
            if fail_times >= 5:
                #reinitialize
                reinit += 1
                area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
                ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1])
                filter = PFfilter.PFfilter(
                    utils.bbox_to_states(gt_box, area, ratio), area, ratio, w,
                    h, nparticles)
                #filter.reset(utils.bbox_to_states(gt_box, area, ratio), area, ratio)
                filter.create_particles()
                filter.restrict_particles(w, h)
                area_hist.append(filter.cur_a)
                pred_box = gt_box
                boxes_train = []
                pred_hist.append(np.array(gt_box).reshape(1, -1))
                #pred_hist.append(pred_box)
                conf_hist.append(-0.1)

                # boxes_train_neg=[]
                iou_train = []
                try:
                    # Q=[[1,0],[0,1]] #for pixel wise
                    Q = 0.05  # box_w,box_h
                    sample_box_true, sample_iou_true = filter.sample_iou(
                        gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0)
                except OverflowError as e:
                    print "too many loops in sample."
                # print sample_box_true[:10]
                # print sample_box_true.shape[0]
                # print sample_iou_true[:10]
                print "average iou: ", np.mean(sample_iou_true)
                boxes_train.append(sample_box_true)
                iou_train.append(sample_iou_true)
                try:
                    # Q=[[36,0],[0,36]]#for pixel wise
                    Q = 0.2  #0.15
                    sample_box_false, sample_iou_false = filter.sample_iou(
                        gt_box, Q, 0.2, 0.01, num_false / 2, 0, thre_max_neg)
                except OverflowError as e:
                    print "too many loops in sample."
                # print sample_box_false[:10]
                # print sample_box_false.shape[0]
                # print sample_iou_false[:10]
                print "average iou: ", np.mean(sample_iou_false)
                boxes_train.append(sample_box_false)
                iou_train.append(sample_iou_false)
                try:
                    # Q=[[36,0],[0,36]]#for pixel wise
                    Q = 0.2
                    sample_box_false, sample_iou_false = filter.sample_iou(
                        gt_box, Q, 0.01, 0.2, num_false / 2, 0, thre_max_neg)
                except OverflowError as e:
                    print "too many loops in sample."
                # print sample_box_false[:10]
                # print sample_box_false.shape[0]
                # print sample_iou_false[:10]
                print "average iou: ", np.mean(sample_iou_false)
                boxes_train.append(sample_box_false)
                iou_train.append(sample_iou_false)

                boxes_train = np.vstack(boxes_train)
                iou_train = np.vstack(iou_train)
                y_train_true = np.ones((num_true, ))
                y_train_false = np.zeros((num_false, ))

                y_train = np.hstack([y_train_true, y_train_false])
                # permutation
                ind_perm = np.random.permutation(range(num_false + num_true))
                boxes_train = boxes_train[ind_perm, :]
                iou_train = iou_train[ind_perm]
                y_train = y_train[ind_perm]
                ind_pos = np.where(y_train == 1)[0]
                ind_neg = np.where(y_train == 0)[0]

                vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                features = vggnet.get_features_first_raw(frame_data,
                                                         boxes_raw=boxes_train,
                                                         id=id)

                for k, v in features.iteritems():
                    print k, v.shape
                    if k == 'f3':

                        v_pca3 = pca3.transform(v)

                        pca3_pos[...] = v_pca3[ind_pos, :]
                        pca3_neg[...] = v_pca3[ind_neg, :]
                        clf3.fit(v_pca3, y_train)
                        score3 = clf3.score(v_pca3, y_train)
                        print 'score3: ', score3
                        # prob=clf3.predict_proba(v_pca3)
                        print clf3.classes_
                fail_times = 0
                continue

            filter.predict_particles(Q=0.02, cr=0.05,
                                     ca=0.05)  #0.02,0.0005,0.005
            filter.restrict_particles(w, h)
            area_hist.append(filter.cur_a)
            #compute conf
            conf = np.zeros(filter.weights.shape)
            #np.save('particles.npy',filter.particles)
            pred_boxes = utils.state_to_bbox(filter.particles, area, ratio)
            vggnet.reshape(w, h, filter.num_particles)
            features = vggnet.get_features_first_raw(frame_data,
                                                     boxes_raw=pred_boxes,
                                                     id=id)
            for k, v in features.iteritems():
                print k, v.shape
                if k == 'f3':

                    v_pca3 = pca3.transform(v)
                    #utils.vis_as_image(v_pca3)
                    #plt.imshow(v_pca3)
                    #plt.title("PCA features")
                    #plt.show()
                    #plt.close()
                    #logistic regression
                    #conf=clf3.predict_proba(v_pca3)[:,1]
                    conf = -clf3.decision_function(v_pca3)

            conf_max = np.max(conf)
            conf_min = np.min(conf)
            print 'conf_max: ', conf_max
            print 'conf_min: ', conf_min
            filter.update_particles(conf)
            # pred_state = filter.estimate()
            print filter.weights
            filter.resample()  # always resample
            pred_state, s_particles, r_particles = filter.estimate(k=10)

            cov_particles = np.dot(
                filter.particles[:, :4].T,
                filter.particles[:, :4]) / filter.particles.shape[0]

            eigval, eigvec = np.linalg.eig(cov_particles)
            max_val = eigval[0]
            eig_hist.append(max_val)
            print 'Max eigvalue: %f' % max_val

            #print 'conf is: ',conf
            if conf_max > 0 and max_val < 200000:
                fail_times = 0

            else:
                fail_times += 1
                #filter.update_particles(conf)
                #pred_state=filter.estimate()
                #filter.resample()
                #pred_state, s_particles, r_particles = filter.estimate(k=10)
                print "conf_max too low, not update particles "
            pred_box = utils.state_to_bbox(pred_state.reshape((-1, 6)), area,
                                           ratio)
            print 'ground truth bbox is: ', gt_box
            print "pred_box is: ", pred_box
            show_sr = False
            if show_sr:
                plt.hist2d(s_particles,
                           r_particles,
                           bins=50,
                           weights=filter.weights.squeeze())
                '''
                plt.scatter(s_particles,r_particles,c='r',marker='.',linewidths=1)
                plt.xlabel('Area')
                plt.ylabel('Aspect ratio')
                plt.title('Area and Ratio of particles')
                plt.axis('equal')
                '''
                plt.show()
            iou = utils.calc_iou(gt_box, pred_box)
            print 'iou is: ', iou
            pred_hist.append(pred_box)
            conf_hist.append(conf_max)
            iou_hist.append(iou)

            if conf_max >= 0.1:  #0.5
                #update pca3_pos and pca3_neg
                new_true = 100  #50
                new_false = 200  #100
                boxes_train = []

                iou_train = []
                Q = 0.02
                try:
                    sample_box_true, sample_iou_true = filter.sample_iou(
                        pred_box, Q, 0.01, 0.01, new_true, 0.85, 1.0)
                except OverflowError as e:
                    print "too many loops in sample."
                # print sample_box_true[:10]
                # print sample_box_true.shape[0]
                # print sample_iou_true[:10]
                print "average iou: ", np.mean(sample_iou_true)
                boxes_train.append(sample_box_true)
                iou_train.append(sample_iou_true)
                try:
                    Q = 0.2
                    sample_box_false, sample_iou_false = filter.sample_iou(
                        pred_box, Q, 0.2, 0.01, new_false / 2, 0, thre_max_neg)
                except OverflowError as e:
                    print "too many loops in sample."
                # print sample_box_false[:10]
                # print sample_box_false.shape[0]
                # print sample_iou_false[:10]
                print "average iou: ", np.mean(sample_iou_false)
                boxes_train.append(sample_box_false)
                iou_train.append(sample_iou_false)
                try:
                    Q = 0.2
                    sample_box_false, sample_iou_false = filter.sample_iou(
                        pred_box, Q, 0.01, 0.2, new_false / 2, 0, thre_max_neg)
                except OverflowError as e:
                    print "too many loops in sample."
                # print sample_box_false[:10]
                # print sample_box_false.shape[0]
                # print sample_iou_false[:10]
                print "average iou: ", np.mean(sample_iou_false)
                boxes_train.append(sample_box_false)
                iou_train.append(sample_iou_false)

                boxes_train = np.vstack(boxes_train)

                iou_train = np.vstack(iou_train)
                y_train_true = np.ones((new_true, ))
                y_train_false = np.zeros((new_false, ))
                y_train = np.hstack([y_train_true, y_train_false])

                # permutation
                ind_perm = np.random.permutation(range(new_false + new_true))
                boxes_train = boxes_train[ind_perm, :]

                y_train = y_train[ind_perm]
                new_y = np.zeros(y_train.shape)
                new_y[...] = y_train
                ind_pos = np.where(y_train == 1)[0]
                ind_neg = np.where(y_train == 0)[0]

                vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                features = vggnet.get_features_first_raw(frame_data,
                                                         boxes_raw=boxes_train,
                                                         id=id)
                for k, v in features.iteritems():
                    print k, v.shape
                    if k == 'f3':
                        v_pca3 = pca3.transform(v)

                        #random substitude
                        pca3_cur_pos = v_pca3[ind_pos, :]
                        pca3_cur_neg = v_pca3[ind_neg, :]
                        to_subst = random.sample(range(num_true), new_true)
                        pca3_pos[to_subst, :] = pca3_cur_pos
                        to_subst = random.sample(range(num_false), new_false)
                        pca3_neg[to_subst, :] = pca3_cur_neg

            if conf_max < 0.1 and fail_times >= 2:  #0.99
                #if conf_max<0.95 and conf_max>0.5:
                #update classification model
                print 'updating model...'
                pca3_train = np.vstack([pca3_pos, pca3_neg])

                y_train_true = np.ones((num_true, ))
                y_train_false = np.zeros((num_false, ))
                y_train = np.hstack([y_train_true, y_train_false])

                # permutation
                ind_perm = np.random.permutation(range(num_false + num_true))
                pca3_train = pca3_train[ind_perm, :]

                y_train = y_train[ind_perm]

                #logistic regression

                clf3.fit(pca3_train, y_train)
                print 'score is: ', clf3.score(pca3_train, y_train)
                #fail_times=0

        # (B,G,R)
        frame_data_cv = frame_data * 255  # [0,1]-->[0,255]
        frame_data_cv = frame_data_cv[:, :, ::-1]  # RGB->BGR
        frame_data_cv = frame_data_cv.astype('uint8')
        #cv2.rectangle(frame_data_cv, (int(gt_box[0]), int(gt_box[1])), (int(gt_box[2]), int(gt_box[3])),
        #                         (255, 0, 0), 2, 1)
        if id > 0:
            cv2.rectangle(frame_data_cv,
                          (int(pred_box[0, 0]), int(pred_box[0, 1])),
                          (int(pred_box[0, 2]), int(pred_box[0, 3])),
                          (0, 255, 0), 2, 1)
        show_particles = False
        if show_particles:
            for i in range(filter.num_particles):
                cx = filter.particles[i, 0]
                cy = filter.particles[i, 1]
                cv2.circle(frame_data_cv, (int(cx), int(cy)),
                           1, (0, 0, 255),
                           thickness=1)
        show_box = False
        if show_box:
            n = 0
            for i in ind_pos:
                if n % 5 == 0:
                    cv2.rectangle(
                        frame_data_cv,
                        (int(boxes_train[i, 0]), int(boxes_train[i, 1])),
                        (int(boxes_train[i, 2]), int(boxes_train[i, 3])),
                        (0, 0, 255), 2, 1)
                n += 1
            n = 0

        show_particles_init = False
        if show_particles_init:
            for i in range(filter.num_particles):
                cx = filter.particles[i, 0]
                cy = filter.particles[i, 1]
                cv2.circle(frame_data_cv, (int(cx), int(cy)),
                           1, (0, 255, 0),
                           thickness=1)
        show_frame = False
        #cv2.circle(frame_data_cv, (int(filter.cur_c[0]), int(filter.cur_c[1])), 2, (0, 0, 255), thickness=1)

        if show_frame:
            cv2.imshow(sequence, frame_data_cv)

            c = cv2.waitKey(1)
            if c != -1:
                if chr(c) == 'p':
                    c = cv2.waitKey()
                #print 'You press: ',chr(c)
                #if chr(c)=='c':
                if chr(c) == 'c':
                    cv2.destroyWindow(sequence)
                    #conf_hist=np.array(conf_hist)
                    #iou_hist=np.array(iou_hist)
                    #np.save('conf_hist.npy',conf_hist)
                    #np.save('iou_hist.npy',iou_hist)
                    break
        else:
            video_writer.write(frame_data_cv)
    end_time = time.time()
    video_writer.release()
    iou_hist = np.array(iou_hist)
    pred_hist = np.array(pred_hist).squeeze()
    print "iou_hist: ", iou_hist.shape
    print "pred_hist: ", pred_hist.shape
    print "get_boxes: ", gt_boxes.shape
    precisions, auc_pre = utils.calc_prec(gt_boxes, pred_hist)
    print "precision is: %f" % (precisions[19])
    suc, auc_iou = utils.calc_success(iou_hist)
    print "Average IOU is: %f" % (np.mean(iou_hist))
    print "Auc of precision is: %f" % (auc_pre)
    print "Auc of success is: %f" % auc_iou
    print "Reinit times: %d" % reinit
    print "Average FPS: %f" % ((id + 1) / (end_time - start_time))
def main(args):
    vis = args.vis
    debug = args.debug
    save = args.save
    nparticles = args.particles
    root_path = '/home/ccjiang/Documents/py-faster-rcnn/caffe-fast-rcnn/examples/tracker/'
    dataset_path = args.dataset  # "/data/OTB100"
    dataset100_seq = ['Bird2', 'BlurCar1', 'BlurCar3', 'BlurCar4', 'Board', 'Bolt2', 'Boy', 'Car2',
                      'Car24', 'Coke', 'Coupon', 'Crossing', 'Dancer', 'Dancer2', 'David2', 'David3',
                      'Dog', 'Dog1', 'Doll', 'FaceOcc1', 'FaceOcc2', 'Fish', 'FleetFace', 'Football1',
                      'Freeman1', 'Freeman3', 'Girl2', 'Gym', 'Human2', 'Human5', 'Human7', 'Human8',
                      'Jogging', 'KiteSurf', 'Lemming', 'Man', 'Mhyang', 'MountainBike', 'Rubik',
                      'Singer1', 'Skater', 'Skater2', 'Subway', 'Suv', 'Tiger1', 'Toy', 'Trans',
                      'Twinnings', 'Vase']
    dataset50_seq = ['Basketball',  'Bird1', 'BlurBody', 'BlurCar2', 'BlurFace', 'BlurOwl',
                     'Bolt', 'Box', 'Car1', 'Car4', 'CarDark', 'CarScale', 'ClifBar', 'Couple', 'Crowds','David',
                     'Deer', 'Diving', 'DragonBaby', 'Dudek', 'Football', 'Freeman4', 'Girl',
                     'Human3', 'Human4', 'Human6', 'Human9', 'Ironman', 'Jump', 'Jumping', 'Liquor',
                     'Matrix', 'MotorRolling', 'Panda', 'RedTeam', 'Shaking', 'Singer2', 'Skating1',
                     'Skating2', 'Skiing', 'Soccer', 'Surfer', 'Sylvester', 'Tiger2', 'Trellis',
                     'Walking', 'Walking2', 'Woman']
    datafull_seq = dataset100_seq + dataset50_seq
    if "OTB50" in dataset_path:
        data_seq = dataset50_seq
    else:
        data_seq = dataset100_seq

    log_name = 'log_1119.txt'
    log_file = open(log_name, 'w')
    records_success = []  # defaultdict(list)
    records_precision = []  # defaultdict(list)
    records_reinit = defaultdict(list)
    model_def = os.path.join(root_path, args.prototxt)
    model_weight = os.path.join(root_path, args.caffemodel)
    vggnet = VGGnet.VGGnet(model_def, model_weight)

    thre_max_neg = 0.3  # 0.5
    test_times = 1  # 0
    for t in range(test_times):
        print 'Test round: %d' % t
        log_file.write('Test round: %d\n' % t)
        # sequences = ['Fish']
        for sequence in datafull_seq:  # datafull_seq
            if sequence in dataset50_seq:
                dataset_path = "/data/OTB50"
            else:
                dataset_path = "/data/OTB100"
            for t in os.walk(os.path.join(dataset_path, sequence, sequence, "img")):
                if t[0] == os.path.join(dataset_path, sequence, sequence, "img"):
                    nFrame = len(t[2])
                    print 'Processing: %s' % sequence
                    log_file.write('Processing: %s\n' % sequence)
                    print "Total frames are: ", nFrame
                    log_file.write('Total frames are: %d\n' % nFrame)
            gt_path = os.path.join(dataset_path, sequence, sequence, "groundtruth_rect.txt")

            gt_boxes = utils.get_boxes_all(gt_path)

            conf_hist = []
            iou_hist = []
            area_hist = []
            pred_hist = []  # (x1,y1,x2,y2)
            eig_hist = []
            reinit = 0
            nFrame = np.minimum(nFrame, gt_boxes.shape[0])

            id_shift = 0
            init_id = False
            update_recent = False
            for id in np.arange(0, nFrame):
                frame_name = "img/%04d.jpg" % (id + 1)
                # print "Start processing: %s" % frame_name
                frame_path = os.path.join(dataset_path, sequence, sequence, frame_name)
                if os.path.exists(frame_path) == False:
                    id_shift = id_shift + 1
                    continue
                id = id - id_shift

                frame_data = caffe.io.load_image(frame_path)  # (432,576,3), in [0,1]
                gt_box = gt_boxes[id]

                if init_id == False:
                    h, w, c = frame_data.shape
                    frame_shape = [c, w, h]
                    fps = 20
                    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
                    video_writer = cv2.VideoWriter("res_%s.avi"%sequence,fourcc,fps,(w,h))
                    fail_times = 0
                    box_w = gt_box[2] - gt_box[0]
                    box_h = gt_box[3] - gt_box[1]
                    area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
                    ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1])  # ratio=w/h
                    # set up net.blobs['im_info']
                    print "Image Size: ", w, h
                    log_file.write('Image Size: %d %d\n' % (w, h))
                    b = gt_box[np.newaxis, :]
                    vggnet.reshape(w=w, h=h, nbox=b.shape[0])
                    features0 = vggnet.get_features("conv3_3", frame_data,
                                                    boxes_raw=b)  # shape:(256,hs,ws),conv3_3,res3b3
                    features0 = np.squeeze(features0)
                    pca_f, scaler_f = featmap_pca2(features0,ncompnents=128)#128

                    box_w = gt_box[2] - gt_box[0]
                    box_h = gt_box[3] - gt_box[1]

                    vggnet.reshape(w=w, h=h, nbox=nparticles)
                    pfilter = PFfilter.PFfilter(utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h,
                                                nparticles)
                    pfilter.create_particles()
                    pfilter.restrict_particles(w, h)
                    area_hist.append(pfilter.cur_a)
                    pred_hist.append(np.array(gt_box).reshape(1, -1))
                    # pca
                    # test sample_iou
                    num_true = 500
                    num_false = 1000  # 1000
                    # data augmentation
                    gt_box_otb = gt_box.copy()
                    gt_box_otb[2] -= gt_box_otb[0]
                    gt_box_otb[3] -= gt_box_otb[1]
                    boxes_train = []
                    ids = np.zeros(num_false + num_true)


                    imgs = []
                    for i in np.arange(4):
                        if i == 0:
                            img1,gt1,img2,gt2,img3,gt3 = DataAugment(frame_data,gt_box_otb,True)
                            gt1[2] += gt1[0]
                            gt1[3] += gt1[1]
                            gt2[2] += gt2[0]
                            gt2[3] += gt2[1]
                            gt3[2] += gt3[0]
                            gt3[3] += gt3[1]
                            box_true1, iou_true = pfilter.sample_iou_pred_box(gt1, 0.05, 0.01, 0.01, 20, 0.8, 1.0)
                            box_true2, iou_true = pfilter.sample_iou_pred_box(gt2, 0.05, 0.01, 0.01, 40, 0.8, 1.0)
                            box_true3, iou_true = pfilter.sample_iou_pred_box(gt3, 0.05, 0.01, 0.01, 20, 0.8, 1.0)
                            box_true1[0, ...] = gt1
                            box_true2[0, ...] = gt2
                            box_true3[0, ...] = gt3

                            boxes_train.append(box_true1)
                            boxes_train.append(box_true2)
                            boxes_train.append(box_true3)
                            imgs.append(img1)
                            imgs.append(img2)
                            imgs.append(img3)
                            ids[20:60] = 1
                            ids[60:80] = 2

                        else:
                            img1, gt1, img2, gt2 = DataAugment(frame_data, gt_box_otb,False)
                            gt1[2] += gt1[0]
                            gt1[3] += gt1[1]
                            gt2[2] += gt2[0]
                            gt2[3] += gt2[1]

                            box_true1, iou_true = pfilter.sample_iou_pred_box(gt1, 0.05, 0.01, 0.01, 20, 0.8, 1.0)
                            box_true2, iou_true = pfilter.sample_iou_pred_box(gt2, 0.05, 0.01, 0.01, 20, 0.8, 1.0)

                            box_true1[0, ...] = gt1
                            box_true2[0, ...] = gt2

                            boxes_train.append(box_true1)
                            boxes_train.append(box_true2)
                            imgs.append(img1)
                            imgs.append(img2)
                            cur_i = 80+(i-1)*40
                            ids[cur_i:(cur_i+20)] = 3+(i-1)*2
                            ids[(cur_i+20):(cur_i+40)] = 3+(i-1)*2+1
                    # boxes_train_neg=[]

                    try:
                        # Q=[[1,0],[0,1]] #for pixel wise
                        Q = 0.05  # box_w,box_h,0.05
                        sample_box_true, sample_iou_true = pfilter.sample_iou_pred_box(gt_box, Q, 0.01, 0.01,
                                                                                       num_true-200, 0.8,
                                                                                       1.0)#0.8
                    except OverflowError as e:
                        print "too many loops in sample in Initialize--TRUE."

                    boxes_train.append(sample_box_true)

                    try:
                        # Q=[[36,0],[0,36]]#for pixel wise
                        Q = 0.2  # 0.2
                        sample_box_false, sample_iou_false = pfilter.sample_iou(gt_box, Q, 0.2, 0.01, num_false / 2, 0,
                                                                                thre_max_neg)  # 0.2,0.01
                    except OverflowError as e:
                        print "too many loops in sample in Initialize--FALSE."
                    # print sample_box_false[:10]
                    # print sample_box_false.shape[0]
                    # print sample_iou_false[:10]
                    # print "average iou: ", np.mean(sample_iou_false)
                    boxes_train.append(sample_box_false)

                    try:
                        # Q=[[36,0],[0,36]]#for pixel wise
                        Q = 0.2  # 0.2
                        sample_box_false, sample_iou_false = pfilter.sample_iou(gt_box, Q, 0.01, 0.2, num_false / 2, 0,
                                                                                thre_max_neg)  # 0.01,0.2
                    except OverflowError as e:
                        print "too many loops in sample in Initialize--FALSE."

                    boxes_train.append(sample_box_false)

                    boxes_train = np.vstack(boxes_train)

                    imgs.append(frame_data)
                    imgs = np.stack(imgs,axis=0)#(10,h,w,c)
                    ids[200:] = 9
                    y_train_true = np.ones((num_true,))
                    y_train_false = np.zeros((num_false,))
                    ids_save = np.ones((num_true+num_false))
                    ids_save[num_true:] = 0
                    ids_save[20:60] = 2
                    y_train = np.hstack([y_train_true, y_train_false])

                    # permutation
                    ind_perm = np.random.permutation(range(num_false + num_true))
                    boxes_train = boxes_train[ind_perm, :]
                    ids_save = ids_save[ind_perm]
                    y_train = y_train[ind_perm]
                    ids = ids[ind_perm]
                    ind_pos = np.where(y_train == 1)[0]
                    ind_neg = np.where(y_train == 0)[0]

                    vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0],batch_size=10)
                    #features = vggnet.get_features_first_raw(frame_data, boxes_raw=boxes_train, id=id)
                    features = vggnet.get_features_first_id(imgs,boxes_raw=boxes_train,id=ids)
                    #features = vggnet.get_features_first_sel(frame_data, boxes_raw=boxes_train, id=id, sel=f_inds)
                    for k, v in features.iteritems():
                        # print k,v.shape
                        if k == 'f3':
                            #pca3, scaler1, nPCA = utils.skl_pca2(v)
                            v = feat_transformpca(pca_f, scaler_f, v)  # (N,128,7,7)
                            #pca3,scaler,nPCA=utils.skl_pca2(v)
                            #v_pca3 = pca3.transform(scaler.transform(v))
                            #np.save("pca_results/testpca_%s.npy"%sequence,v_pca3)
                            #np.save('labelpca.npy',y_train)
                            #np.save("pca_results/label_pca_%s"%sequence,ids_save)
                            pca3_pos = np.zeros((num_true, pca_f.n_components_*49), dtype=np.float32)
                            pca3_neg = np.zeros((num_false, pca_f.n_components_*49), dtype=np.float32)
                            pca3_pos[...] = v[ind_pos, :]
                            pca3_neg[...] = v[ind_neg, :]
                            # utils.vis_as_image(v_pca3)
                            # plt.imshow(v_pca3)
                            # plt.title("PCA features")
                            # plt.show()
                            # plt.close()
                            # logistic regression
                            y_weight = sklearn.utils.class_weight.compute_class_weight(class_weight='balanced',
                                                                                       classes=np.array([0, 1]),
                                                                                       y=y_train)
                            # print y_weight
                            class_weight = {0: y_weight[0], 1: y_weight[1]}
                            clf3 = linear_model.LogisticRegression(fit_intercept=True, solver='liblinear')
                            clf3.fit(v, y_train)

                    vis_feature = False
                    if vis_feature:
                        utils.vis_features(features, id)

                    start_time = time.time()
                else:
                    if fail_times >= 5:
                        # reinitialize
                        update_recent = False
                        reinit += 1
                        area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
                        ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1])
                        pfilter = PFfilter.PFfilter(utils.bbox_to_states(gt_box, area, ratio), area, ratio, w, h,
                                                    nparticles)
                        # filter.reset(utils.bbox_to_states(gt_box, area, ratio), area, ratio)
                        pfilter.create_particles()
                        pfilter.restrict_particles(w, h)
                        area_hist.append(pfilter.cur_a)
                        pred_box = gt_box
                        pred_hist.append(np.array(pred_box).reshape(1, -1))
                        conf_hist.append(-0.1)
                        boxes_train = []
                        # boxes_train_neg=[]
                        iou_train = []
                        try:
                            # Q=[[1,0],[0,1]] #for pixel wise
                            Q = 0.05  # box_w,box_h,0.05
                            sample_box_true, sample_iou_true = pfilter.sample_iou_pred_box(gt_box, Q, 0.01, 0.01,
                                                                                           num_true, 0.8,
                                                                                           1.0)
                        except OverflowError as e:
                            print "too many loops in sample in Reinitialize--TRUE."

                        boxes_train.append(sample_box_true)
                        iou_train.append(sample_iou_true)
                        try:
                            # Q=[[36,0],[0,36]]#for pixel wise
                            Q = 0.2  # 0.2
                            sample_box_false, sample_iou_false = pfilter.sample_iou(gt_box, Q, 0.01, 0.2, num_false / 2,
                                                                                    0,
                                                                                    thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample in Reinitialize--FALSE."

                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)
                        try:
                            # Q=[[36,0],[0,36]]#for pixel wise
                            Q = 0.2  # 0.2
                            sample_box_false, sample_iou_false = pfilter.sample_iou(gt_box, Q, 0.2, 0.01, num_false / 2,
                                                                                    0,
                                                                                    thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample in Reinitialize--FALSE."
                        # print sample_box_false[:10]
                        # print sample_box_false.shape[0]
                        # print sample_iou_false[:10]
                        # print "average iou: ", np.mean(sample_iou_false)
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)

                        boxes_train = np.vstack(boxes_train)
                        iou_train = np.vstack(iou_train)
                        y_train_true = np.ones((num_true,))
                        y_train_false = np.zeros((num_false,))

                        y_train = np.hstack([y_train_true, y_train_false])
                        # permutation
                        ind_perm = np.random.permutation(range(num_false + num_true))
                        boxes_train = boxes_train[ind_perm, :]
                        iou_train = iou_train[ind_perm]
                        y_train = y_train[ind_perm]
                        ind_pos = np.where(y_train == 1)[0]
                        ind_neg = np.where(y_train == 0)[0]

                        vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                        features = vggnet.get_features_first_raw(frame_data, boxes_raw=boxes_train, id=id)
                        #features=feat_transformpca(pca_f,scaler_f,features)
                        #features = vggnet.get_features_first_sel(frame_data, boxes_raw=boxes_train, id=id, sel=f_inds)
                        for k, v in features.iteritems():
                            # print k, v.shape
                            if k == 'f3':
                                v = feat_transformpca(pca_f, scaler_f, v)  # (N,128,7,7)
                                #v_pca3 = pca3.transform(scaler.transform(v))

                                pca3_pos[...] = v[ind_pos, :]
                                pca3_neg[...] = v[ind_neg, :]
                                clf3 = linear_model.LogisticRegression(fit_intercept=True, solver='liblinear')
                                clf3.fit(v, y_train)
                                # score3 = clf3.score(v_pca3, y_train)

                                # print 'score3: ', score3
                                # prob=clf3.predict_proba(v_pca3)
                                # print clf3.classes_
                        fail_times = 0
                        continue

                    pfilter.predict_particles(Q=0.2, cr=0.01, ca=0.01)  # 0.2,0.01
                    pfilter.restrict_particles(w, h)
                    area_hist.append(pfilter.cur_a)
                    # compute conf
                    # conf = np.zeros(pfilter.weights.shape)
                    # np.save('particles.npy',filter.particles)
                    pred_boxes = utils.state_to_bbox(pfilter.particles, area, ratio)
                    # Gaussian regularization: down-weight particles whose centers drift far
                    # from the previous estimate, with the spread scaled by the box size.
                    if id > 1:
                        gauss_sig = 0.5
                        dx = (pfilter.particles[:, 0] - pred_state[0]) / (gauss_sig * box_w) / 2.0
                        dy = (pfilter.particles[:, 1] - pred_state[1]) / (gauss_sig * box_h) / 2.0
                        gauss_w = np.exp(-np.square(dx) - np.square(dy))
                        pfilter.update_particles(gauss_w)
                        # print gauss_w
                    vggnet.reshape(w, h, pfilter.num_particles)
                    features = vggnet.get_features_first_raw(frame_data, boxes_raw=pred_boxes, id=id)
                    #features=feat_transformpca(pca_f,scaler_f,features)
                    #features = vggnet.get_features_first_sel(frame_data, boxes_raw=pred_boxes, id=id, sel=f_inds)
                    for k, v in features.iteritems():
                        # print k,v.shape
                        if k == 'f3':
                            v = feat_transformpca(pca_f, scaler_f, v)  # (N,128,7,7)
                            vf = v
                            #v_pca3 = pca3.transform(scaler.transform(v))
                            conf = clf3.predict_proba(v)[:, 1]

                    # process preds to find out pred_box in terms of confm
                    conf = np.array(conf)

                    conf_max = np.max(conf)
                    conf_min = np.min(conf)
                    pfilter.update_particles(conf)
                    # do resample first or estimate first?
                    # filter.resample()  # always resample
                    pred_state, s_particles, r_particles = pfilter.estimate(k=10)
                    pfilter.resample()
                    pred_box = utils.state_to_bbox(pred_state.reshape((-1, 6)), area, ratio)

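                    # Hard-negative mining (assumption: nms_pred returns the features of
                    # high-confidence particle boxes that overlap poorly with the prediction);
                    # these rows are appended as extra negatives when clf3 is updated below.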
                    hard, hard_negv = nms_pred(pred_box, pred_boxes, vf, conf)
                    if hard:
                        hard_negvN = hard_negv.shape[0]
                        #print hard_negv.shape
                    else:
                        hard_negvN = 0

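                    # Particle-spread diagnostic: covariance of particle centers normalized by
                    # the box size; its largest eigenvalue is logged in eig_hist.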
                    avg_pos = np.mean(pfilter.particles[:, :2], axis=0, keepdims=True)
                    # avg_pos[:,0]/=w
                    # avg_pos[:,1]/=h
                    ptls_avg = (pfilter.particles[:, :2] - avg_pos) / np.array([[box_w, box_h]])
                    cov_particles = np.dot(ptls_avg.T, ptls_avg) / pfilter.particles.shape[0]

                    eigval, eigvec = np.linalg.eig(cov_particles)
                    max_val = np.max(eigval)
                    eig_hist.append(max_val)
                    print 'Max eigenvalue: %f' % max_val

                    # print 'conf is: ',conf
                    if conf_max > 0.5:  # 0.8
                        fail_times = 0
                        update_recent = False
                    else:
                        fail_times += 1

                    show_sr = False
                    if show_sr:
                        count, xedge, yedge, tmp_im = plt.hist2d(s_particles, r_particles, bins=10,
                                                                 weights=pfilter.weights.squeeze(), cmap=plt.cm.gray)
                        top3 = np.argsort(-count, axis=None)[:3]
                        row_ind = top3 // count.shape[1]
                        col_ind = top3 % count.shape[1]

                        plt.show()
                    print pred_box
                    iou = utils.calc_iou(gt_box, pred_box)
                    # print 'iou is: ', iou
                    pred_hist.append(pred_box)
                    conf_hist.append(conf_max)
                    iou_hist.append(iou)

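                    # High-confidence frame: draw fresh positives tightly around the prediction
                    # and fresh negatives with perturbed position/scale, then overwrite a random
                    # subset of the stored feature pools (pca3_pos / pca3_neg) with them.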
                    if conf_max >= 0.7:  # 0.5
                        # update pca3_pos and pca3_neg
                        new_true = 100  # 100
                        new_false = 400  # 200
                        boxes_train = []

                        iou_train = []
                        Q = 0.05  # 0.02
                        try:
                            sample_box_true, sample_iou_true = pfilter.sample_iou_pred_box(pred_box, Q, 0.01, 0.01,
                                                                                           new_true,
                                                                                           0.85,
                                                                                           1.0)
                        except OverflowError as e:
                            print "too many loops in sample in Update--TRUE."
                        # print sample_box_true[:10]
                        # print sample_box_true.shape[0]
                        # print sample_iou_true[:10]
                        # print "average iou: ", np.mean(sample_iou_true)
                        boxes_train.append(sample_box_true)

                        iou_train.append(sample_iou_true)
                        # part_iou=utils.calc_iou(pred_box,pred_boxes)

                        # ind_iou=np.where(part_iou<0.3)[0]

                        # ind_n=np.minimum(new_false/2,ind_iou.shape[0])
                        # boxes_train.append(pred_boxes[ind_iou[:ind_n],:])
                        # iou_train.append(part_iou[ind_iou])
                        new_false_left = new_false - hard_negvN  # -ind_n
                        try:
                            Q = 0.2  # 0.2
                            sample_box_false, sample_iou_false = pfilter.sample_iou_pred_box(pred_box, Q, 0.2, 0.01,
                                                                                             (new_false_left + 1) / 2,
                                                                                             0, thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample in Update--FALSE."
                        # print sample_box_false[:10]
                        # print sample_box_false.shape[0]
                        # print sample_iou_false[:10]
                        # print "average iou: ", np.mean(sample_iou_false)
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)
                        try:
                            Q = 0.2  # 0.2
                            sample_box_false, sample_iou_false = pfilter.sample_iou_pred_box(pred_box, Q, 0.01, 0.2,
                                                                                             new_false_left / 2, 0,
                                                                                             thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample in Update--FALSE."
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)

                        boxes_train = np.vstack(boxes_train)

                        # iou_train = np.vstack(iou_train)

                        vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                        features = vggnet.get_features_second_raw(boxes_raw=boxes_train, id=id)
                        #features = feat_transformpca(pca_f,scaler_f,features)
                        #features = vggnet.get_features_second_sel(boxes_raw=boxes_train, id=id, sel=f_inds)
                        for k, v in features.iteritems():
                            # print k, v.shape
                            if k == 'f3':
                                v = feat_transformpca(pca_f, scaler_f, v)  # (N,128,7,7)
                                #v_pca3 = pca3.transform(scaler.transform(v))
                                if hard:
                                    print v.shape
                                    print hard_negv.shape
                                    v = np.vstack([v,hard_negv])
                                y_train_true = np.ones((new_true,))
                                y_train_false = np.zeros((new_false,))
                                y_train = np.hstack([y_train_true, y_train_false])
                                # permutation
                                ind_perm = np.random.permutation(range(new_false + new_true))
                                #boxes_train = boxes_train[ind_perm, :]
                                v = v[ind_perm,:]
                                y_train = y_train[ind_perm]
                                new_y = np.zeros(y_train.shape)
                                new_y[...] = y_train
                                ind_pos = np.where(y_train == 1)[0]
                                ind_neg = np.where(y_train == 0)[0]

                                # randomly substitute part of the stored feature pools
                                pca3_cur_pos = v[ind_pos, :]
                                pca3_cur_neg = v[ind_neg, :]
                                to_subst = random.sample(range(num_true), new_true)
                                pca3_pos[to_subst, :] = pca3_cur_pos
                                to_subst = random.sample(range(num_false), new_false)
                                pca3_neg[to_subst, :] = pca3_cur_neg


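                    # If confidence has dropped and the tracker has failed on two consecutive
                    # frames, retrain the logistic regression on the pooled features.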
                    if conf_max < 1 and fail_times >= 2 and not update_recent:
                        # if id%10==0:
                        update_recent = True
                        pca3_train = np.vstack([pca3_pos, pca3_neg])

                        y_train_true = np.ones((num_true,))
                        y_train_false = np.zeros((num_false,))
                        y_train = np.hstack([y_train_true, y_train_false])

                        # permutation
                        ind_perm = np.random.permutation(range(num_false + num_true))
                        pca3_train = pca3_train[ind_perm, :]

                        y_train = y_train[ind_perm]

                        # logistic regression
                        clf3 = linear_model.LogisticRegression(fit_intercept=True, solver='liblinear')
                        clf3.fit(pca3_train, y_train)

                        # print 'score is: ',clf3.score(pca3_train,y_train)

                # (B,G,R)
                frame_data_cv = frame_data * 255  # [0,1]-->[0,255]
                frame_data_cv = frame_data_cv[:, :, ::-1]  # RGB->BGR
                frame_data_cv = frame_data_cv.astype('uint8')
                cv2.rectangle(frame_data_cv, (int(gt_box[0]), int(gt_box[1])), (int(gt_box[2]), int(gt_box[3])),
                              (255, 0, 0), 2, 1)
                if id > 0 and init_id == True:
                    cv2.rectangle(frame_data_cv, (int(pred_box[0, 0]), int(pred_box[0, 1])),
                                  (int(pred_box[0, 2]), int(pred_box[0, 3])),
                                  (0, 255, 0), 2, 1)
                if init_id == False:
                    init_id = True
                show_particles = False
                if show_particles:
                    for i in range(pfilter.num_particles):
                        cx = pfilter.particles[i, 0]
                        cy = pfilter.particles[i, 1]
                        cv2.circle(frame_data_cv, (int(cx), int(cy)), 1, (0, 0, 255), thickness=1)
                show_box = False
                if show_box:
                    n = 0
                    for i in ind_pos:
                        if n % 5 == 0:
                            cv2.rectangle(frame_data_cv, (int(boxes_train[i, 0]), int(boxes_train[i, 1])),
                                          (int(boxes_train[i, 2]), int(boxes_train[i, 3])), (0, 0, 255), 2, 1)
                        n += 1
                    n = 0

                show_particles_init = False
                if show_particles_init:
                    for i in range(pfilter.num_particles):
                        cx = pfilter.particles[i, 0]
                        cy = pfilter.particles[i, 1]
                        cv2.circle(frame_data_cv, (int(cx), int(cy)), 1, (0, 255, 0), thickness=1)
                show_frame = False
                cv2.circle(frame_data_cv, (int(pfilter.cur_c[0]), int(pfilter.cur_c[1])), 2, (0, 0, 255), thickness=1)
                if show_frame:
                    cv2.imshow(sequence, frame_data_cv)
                    c = cv2.waitKey(1)

                    if c != -1:
                        cv2.destroyWindow(sequence)

                        break
                else:
                    video_writer.write(frame_data_cv)
            end_time = time.time()
            video_writer.release()
            print "Average FPS: %f" % (nFrame / (end_time - start_time))
            log_file.write("Average FPS: %f\n" % (nFrame / (end_time - start_time)))
            conf_hist = np.array(conf_hist)
            iou_hist = np.array(iou_hist)
            area_hist = np.array(area_hist)
            pred_hist = np.vstack(pred_hist)
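            # OTB-style metrics: precision is the fraction of frames whose center error stays
            # below a pixel threshold (precisions[19] ~ 20 px); success is the fraction of
            # frames whose IoU exceeds a threshold; both curves are summarized by their AUC.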
            precisions, auc_pre = utils.calc_prec(gt_boxes, pred_hist)

            suc, auc_iou = utils.calc_success(iou_hist)
            records_precision.append(precisions * nFrame)
            records_success.append(suc * nFrame)

            print 'Precision @20 is: %f' % precisions[19]
            print 'Auc of Precision is: %f' % auc_pre
            print 'Auc of Success is: %f' % auc_iou
            print 'Reinit times: %d' % reinit
            log_file.write("Precision @20 is: %f\n" % precisions[19])
            log_file.write('Auc of Precision is: %f\n' % auc_pre)
            log_file.write('Auc of Success is: %f\n' % auc_iou)
            log_file.write('Reinit times: %d\n' % reinit)
            #log_file.write('Selected feature maps: %d\n' % f_inds.shape[0])
            #log_file.write('PCA components: %d\n' % nPCA)
            #res_f = open('results11/%s.txt'%sequence,'w')
            #pred_hist[:,2:] = pred_hist[:,2:] - pred_hist[:,:2]
            #res_f = write_res(pred_hist,res_f)
            #res_f.close()
    log_file.close()
    pkl = open('results_1031.pkl', 'wb')
    pickle.dump([records_precision, records_success], pkl)
    pkl.close()
Exemplo n.º 10
0
def main(args):
    vis = args.vis
    debug = args.debug
    save = args.save
    nparticles = args.particles
    root_path = '/home/ccjiang/Documents/caffe-fast-rcnn/examples/tracker/'
    dataset_path = args.dataset  #"/data/OTB100"
    dataset100_seq = [
        'Bird2', 'BlurCar1', 'BlurCar3', 'BlurCar4', 'Board', 'Bolt2', 'Boy',
        'Car2', 'Car24', 'Coke', 'Coupon', 'Crossing', 'Dancer', 'Dancer2',
        'David2', 'David3', 'Dog', 'Dog1', 'Doll', 'FaceOcc1', 'FaceOcc2',
        'Fish', 'FleetFace', 'Football1', 'Freeman1', 'Freeman3', 'Girl2',
        'Gym', 'Human2', 'Human5', 'Human7', 'Human8', 'Jogging', 'KiteSurf',
        'Lemming', 'Man', 'Mhyang', 'MountainBike', 'Rubik', 'Singer1',
        'Skater', 'Skater2', 'Subway', 'Suv', 'Tiger1', 'Toy', 'Trans',
        'Twinnings', 'Vase'
    ]
    dataset50_seq = [
        'Basketball', 'Biker', 'Bird1', 'BlurBody', 'BlurCar2', 'BlurFace',
        'BlurOwl', 'Bolt', 'Box', 'Car1', 'Car4', 'CarDark', 'CarScale',
        'ClifBar', 'Couple', 'Crowds', 'Deer', 'Diving', 'DragonBaby', 'Dudek',
        'Football', 'Freeman4', 'Girl', 'Human3', 'Human4', 'Human6', 'Human9',
        'Ironman', 'Jump', 'Jumping', 'Liquor', 'Matrix', 'MotorRolling',
        'Panda', 'RedTeam', 'Shaking', 'Singer2', 'Skating1', 'Skating2',
        'Skiing', 'Soccer', 'Surfer', 'Sylvester', 'Tiger2', 'Trellis',
        'Walking', 'Walking2', 'Woman'
    ]
    if "OTB50" in dataset_path:
        data_seq = dataset50_seq
    else:
        data_seq = dataset100_seq

    log_name = 'log.txt'
    log_file = open(log_name, 'w')
    records_success = []  #defaultdict(list)
    records_precision = []  #defaultdict(list)
    records_reinit = defaultdict(list)
    model_def = os.path.join(root_path, args.prototxt)
    model_weight = os.path.join(root_path, args.caffemodel)
    vggnet = VGGnet.VGGnet(model_def, model_weight)

    thre_max_neg = 0.3  # 0.5
    test_times = 1  # 0
    for t in range(test_times):
        print 'Test round: %d' % t
        log_file.write('Test round: %d\n' % t)
        # sequences = ['Fish']
        for sequence in data_seq:
            # count the frames in this sequence's image directory
            for walk_rec in os.walk(
                    os.path.join(dataset_path, sequence, sequence, "img")):
                if walk_rec[0] == os.path.join(dataset_path, sequence, sequence,
                                               "img"):
                    nFrame = len(walk_rec[2])
                    print 'Processing: %s' % sequence
                    log_file.write('Processing: %s\n' % sequence)
                    print "Total frames are: ", nFrame
                    log_file.write('Total frames are: %d\n' % nFrame)
            gt_path = os.path.join(dataset_path, sequence, sequence,
                                   "groundtruth_rect.txt")

            gt_boxes = utils.get_boxes_all(gt_path)

            conf_hist = []
            iou_hist = []
            area_hist = []
            pred_hist = []  # (x1,y1,x2,y2)
            eig_hist = []
            reinit = 0
            nFrame = np.minimum(nFrame, gt_boxes.shape[0])
            id_shift = 0
            init_id = False
            for id in np.arange(0, nFrame):
                frame_name = "img/%04d.jpg" % (id + 1)
                # print "Start processing: %s" % frame_name
                frame_path = os.path.join(dataset_path, sequence, sequence,
                                          frame_name)
                if os.path.exists(frame_path) == False:
                    id_shift = id_shift + 1
                    continue
                id = id - id_shift

                frame_data = caffe.io.load_image(
                    frame_path)  # (432,576,3), in [0,1]
                gt_box = gt_boxes[id]

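                # First frame: initialize the particle filter from the ground-truth box,
                # sample positive/negative boxes by IoU, extract conv features, fit a PCA,
                # and train the logistic-regression scorer clf3 on the projected features.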
                if init_id == False:
                    h, w, c = frame_data.shape
                    frame_shape = [c, w, h]

                    fail_times = 0
                    area = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
                    ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] - gt_box[1])  # ratio = w/h
                    # set up net.blobs['im_info']
                    print "Image Size: ", w, h
                    log_file.write('Image Size: %d %d\n' % (w, h))
                    vggnet.reshape(w=w, h=h, nbox=nparticles)
                    filter = PFfilter.PFfilter(
                        utils.bbox_to_states(gt_box, area, ratio), area, ratio,
                        w, h, nparticles)
                    filter.create_particles()
                    filter.restrict_particles(w, h)
                    area_hist.append(filter.cur_a)
                    pred_hist.append(np.array(gt_box).reshape(1, -1))
                    # pca
                    # test sample_iou
                    num_true = 500
                    num_false = 1000
                    boxes_train = []
                    # boxes_train_neg=[]
                    iou_train = []
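                    # Positives: num_true boxes jittered around gt_box with IoU in [0.8, 1.0].
                    # Negatives: two batches of num_false/2 with IoU in [0, thre_max_neg]
                    # (assumption: the 0.2 arguments enlarge the ratio / area noise respectively).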
                    try:
                        # Q=[[1,0],[0,1]] #for pixel wise
                        Q = 0.05  # box_w,box_h
                        sample_box_true, sample_iou_true = filter.sample_iou_pred_box(
                            gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0)
                    except OverflowError as e:
                        print "too many loops in sample."
                    # print sample_box_true[:10]
                    # print sample_box_true.shape[0]
                    # print sample_iou_true[:10]
                    # print "average iou: ", np.mean(sample_iou_true)
                    boxes_train.append(sample_box_true)
                    iou_train.append(sample_iou_true)
                    try:
                        # Q=[[36,0],[0,36]]#for pixel wise
                        Q = 0.2  # 0.15
                        sample_box_false, sample_iou_false = filter.sample_iou(
                            gt_box, Q, 0.2, 0.01, num_false / 2, 0,
                            thre_max_neg)
                    except OverflowError as e:
                        print "too many loops in sample."
                    # print sample_box_false[:10]
                    # print sample_box_false.shape[0]
                    # print sample_iou_false[:10]
                    # print "average iou: ", np.mean(sample_iou_false)
                    boxes_train.append(sample_box_false)
                    iou_train.append(sample_iou_false)
                    try:
                        # Q=[[36,0],[0,36]]#for pixel wise
                        Q = 0.2
                        sample_box_false, sample_iou_false = filter.sample_iou(
                            gt_box, Q, 0.01, 0.2, num_false / 2, 0,
                            thre_max_neg)
                    except OverflowError as e:
                        print "too many loops in sample."
                    # print sample_box_false[:10]
                    # print sample_box_false.shape[0]
                    # print sample_iou_false[:10]
                    # print "average iou: ", np.mean(sample_iou_false)
                    boxes_train.append(sample_box_false)
                    iou_train.append(sample_iou_false)

                    boxes_train = np.vstack(boxes_train)

                    iou_train = np.vstack(iou_train)
                    y_train_true = np.ones((num_true, ))
                    y_train_false = np.zeros((num_false, ))

                    y_train = np.hstack([y_train_true, y_train_false])

                    # permutation
                    ind_perm = np.random.permutation(
                        range(num_false + num_true))
                    boxes_train = boxes_train[ind_perm, :]
                    iou_train = iou_train[ind_perm]
                    y_train = y_train[ind_perm]
                    ind_pos = np.where(y_train == 1)[0]
                    ind_neg = np.where(y_train == 0)[0]

                    vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                    features = vggnet.get_features_first_raw(
                        frame_data, boxes_raw=boxes_train, id=id)

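                    # Fit PCA on the first-frame 'f3' features and train the logistic
                    # regression clf3 on the PCA-projected positives/negatives.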
                    for k, v in features.iteritems():
                        # print k,v.shape
                        if k == 'f3':
                            pca3 = utils.skl_pca(v)
                            v_pca3 = pca3.transform(v)
                            pca3_pos = np.zeros((num_true, pca3.n_components_),
                                                dtype=np.float32)
                            pca3_neg = np.zeros(
                                (num_false, pca3.n_components_),
                                dtype=np.float32)
                            pca3_pos[...] = v_pca3[ind_pos, :]
                            pca3_neg[...] = v_pca3[ind_neg, :]
                            # utils.vis_as_image(v_pca3)
                            # plt.imshow(v_pca3)
                            # plt.title("PCA features")
                            # plt.show()
                            # plt.close()
                            # logistic regression
                            y_weight = sklearn.utils.class_weight.compute_class_weight(
                                class_weight='balanced',
                                classes=np.array([0, 1]),
                                y=y_train)
                            # print y_weight
                            class_weight = {0: y_weight[0], 1: y_weight[1]}
                            clf3 = linear_model.LogisticRegression(
                                fit_intercept=True, solver='liblinear')
                            clf3.fit(v_pca3, y_train)

                    vis_feature = False
                    if vis_feature:
                        utils.vis_features(features, id)

                    start_time = time.time()
                else:
                    if fail_times >= 5:
                        # reinitialize
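                        # After 5 consecutive low-confidence frames, rebuild the particle
                        # filter from the current ground-truth box, resample training boxes,
                        # and refit clf3 (counted as one re-initialization).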
                        reinit += 1
                        area = (gt_box[2] - gt_box[0]) * (gt_box[3] -
                                                          gt_box[1])
                        ratio = (gt_box[2] - gt_box[0]) / (gt_box[3] -
                                                           gt_box[1])
                        filter = PFfilter.PFfilter(
                            utils.bbox_to_states(gt_box, area, ratio), area,
                            ratio, w, h, nparticles)
                        # filter.reset(utils.bbox_to_states(gt_box, area, ratio), area, ratio)
                        filter.create_particles()
                        filter.restrict_particles(w, h)
                        area_hist.append(filter.cur_a)
                        pred_box = gt_box
                        pred_hist.append(np.array(pred_box).reshape(1, -1))
                        conf_hist.append(-0.1)
                        boxes_train = []
                        # boxes_train_neg=[]
                        iou_train = []
                        try:
                            # Q=[[1,0],[0,1]] #for pixel wise
                            Q = 0.05  # box_w,box_h
                            sample_box_true, sample_iou_true = filter.sample_iou_pred_box(
                                gt_box, Q, 0.01, 0.01, num_true, 0.8, 1.0)
                        except OverflowError as e:
                            print "too many loops in sample."

                        boxes_train.append(sample_box_true)
                        iou_train.append(sample_iou_true)
                        try:
                            # Q=[[36,0],[0,36]]#for pixel wise
                            Q = 0.2  # 0.15
                            sample_box_false, sample_iou_false = filter.sample_iou(
                                gt_box, Q, 0.2, 0.01, num_false / 2, 0,
                                thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample."

                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)
                        try:
                            # Q=[[36,0],[0,36]]#for pixel wise
                            Q = 0.2
                            sample_box_false, sample_iou_false = filter.sample_iou(
                                gt_box, Q, 0.01, 0.2, num_false / 2, 0,
                                thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample."
                        # print sample_box_false[:10]
                        # print sample_box_false.shape[0]
                        # print sample_iou_false[:10]
                        # print "average iou: ", np.mean(sample_iou_false)
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)

                        boxes_train = np.vstack(boxes_train)
                        iou_train = np.vstack(iou_train)
                        y_train_true = np.ones((num_true, ))
                        y_train_false = np.zeros((num_false, ))

                        y_train = np.hstack([y_train_true, y_train_false])
                        # permutation
                        ind_perm = np.random.permutation(
                            range(num_false + num_true))
                        boxes_train = boxes_train[ind_perm, :]
                        iou_train = iou_train[ind_perm]
                        y_train = y_train[ind_perm]
                        ind_pos = np.where(y_train == 1)[0]
                        ind_neg = np.where(y_train == 0)[0]

                        vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                        features = vggnet.get_features_first_raw(
                            frame_data, boxes_raw=boxes_train, id=id)

                        for k, v in features.iteritems():
                            # print k, v.shape
                            if k == 'f3':
                                v_pca3 = pca3.transform(v)

                                pca3_pos[...] = v_pca3[ind_pos, :]
                                pca3_neg[...] = v_pca3[ind_neg, :]
                                clf3.fit(v_pca3, y_train)
                                score3 = clf3.score(v_pca3, y_train)
                                # print 'score3: ', score3
                                # prob=clf3.predict_proba(v_pca3)
                                # print clf3.classes_
                        fail_times = 0
                        continue

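                    # Tracking step (simpler variant): predict with smaller scale/ratio noise,
                    # score particles with clf3, resample, then estimate the state.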
                    filter.predict_particles(Q=0.2, cr=0.005, ca=0.001)
                    filter.restrict_particles(w, h)
                    area_hist.append(filter.cur_a)
                    # compute conf
                    conf = np.zeros(filter.weights.shape)
                    # np.save('particles.npy',filter.particles)
                    pred_boxes = utils.state_to_bbox(filter.particles, area,
                                                     ratio)
                    vggnet.reshape(w, h, filter.num_particles)
                    features = vggnet.get_features_first_raw(
                        frame_data, boxes_raw=pred_boxes, id=id)
                    for k, v in features.iteritems():
                        # print k,v.shape
                        if k == 'f3':
                            v_pca3 = pca3.transform(v)
                            # utils.vis_as_image(v_pca3)
                            # plt.imshow(v_pca3)
                            # plt.title("PCA features")
                            # plt.show()
                            # plt.close()
                            # logistic regression
                            conf = clf3.predict_proba(v_pca3)[:, 1]

                    conf_max = np.max(conf)
                    conf_min = np.min(conf)
                    filter.update_particles(conf)
                    filter.resample()  # always resample
                    pred_state, s_particles, r_particles = filter.estimate(
                        k=10)

                    cov_particles = np.dot(
                        filter.particles[:, :2].T,
                        filter.particles[:, :2]) / filter.particles.shape[0]

                    eigval, eigvec = np.linalg.eig(cov_particles)
                    max_val = np.max(eigval)
                    eig_hist.append(max_val)
                    print 'Max eigenvalue: %f' % max_val

                    # print 'conf is: ',conf
                    if conf_max > 0.8:
                        fail_times = 0

                    else:
                        fail_times += 1

                        print "conf_max too low, not update particles "
                    pred_box = utils.state_to_bbox(pred_state.reshape((-1, 6)),
                                                   area, ratio)
                    show_sr = False
                    if show_sr:
                        count, xedge, yedge, tmp_im = plt.hist2d(
                            s_particles,
                            r_particles,
                            bins=10,
                            weights=filter.weights.squeeze(),
                            cmap=plt.cm.gray)
                        top3 = np.argsort(-count, axis=None)[:3]
                        row_ind = top3 // count.shape[1]
                        col_ind = top3 % count.shape[1]
                        '''
                        plt.scatter(s_particles,r_particles,c='r',marker='.',linewidths=1)
                        plt.xlabel('Area')
                        plt.ylabel('Aspect ratio')
                        plt.title('Area and Ratio of particles')
                        plt.axis('equal')
                        '''
                        plt.show()
                    iou = utils.calc_iou(gt_box, pred_box)
                    # print 'iou is: ', iou
                    pred_hist.append(pred_box)
                    conf_hist.append(conf_max)
                    iou_hist.append(iou)

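                    # Confident frame: refresh part of the positive/negative feature pools with
                    # new samples drawn around the current prediction.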
                    if conf_max >= 0.9:  # 0.8
                        # update pca3_pos and pca3_neg
                        new_true = 100  # 50
                        new_false = 200  # 100
                        boxes_train = []

                        iou_train = []
                        Q = 0.02
                        try:
                            sample_box_true, sample_iou_true = filter.sample_iou_pred_box(
                                pred_box, Q, 0.01, 0.01, new_true, 0.85, 1.0)
                        except OverflowError as e:
                            print "too many loops in sample."
                        # print sample_box_true[:10]
                        # print sample_box_true.shape[0]
                        # print sample_iou_true[:10]
                        # print "average iou: ", np.mean(sample_iou_true)
                        boxes_train.append(sample_box_true)
                        iou_train.append(sample_iou_true)
                        try:
                            Q = 0.2
                            sample_box_false, sample_iou_false = filter.sample_iou(
                                pred_box, Q, 0.2, 0.01, new_false / 2, 0,
                                thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample."
                        # print sample_box_false[:10]
                        # print sample_box_false.shape[0]
                        # print sample_iou_false[:10]
                        # print "average iou: ", np.mean(sample_iou_false)
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)
                        try:
                            Q = 0.2
                            sample_box_false, sample_iou_false = filter.sample_iou(
                                pred_box, Q, 0.01, 0.2, new_false / 2, 0,
                                thre_max_neg)
                        except OverflowError as e:
                            print "too many loops in sample."
                        boxes_train.append(sample_box_false)
                        iou_train.append(sample_iou_false)

                        boxes_train = np.vstack(boxes_train)

                        iou_train = np.vstack(iou_train)
                        y_train_true = np.ones((new_true, ))
                        y_train_false = np.zeros((new_false, ))
                        y_train = np.hstack([y_train_true, y_train_false])

                        # permutation
                        ind_perm = np.random.permutation(
                            range(new_false + new_true))
                        boxes_train = boxes_train[ind_perm, :]

                        y_train = y_train[ind_perm]
                        new_y = np.zeros(y_train.shape)
                        new_y[...] = y_train
                        ind_pos = np.where(y_train == 1)[0]
                        ind_neg = np.where(y_train == 0)[0]

                        vggnet.reshape(w=w, h=h, nbox=boxes_train.shape[0])
                        features = vggnet.get_features_second_raw(
                            boxes_raw=boxes_train, id=id)
                        for k, v in features.iteritems():
                            # print k, v.shape
                            if k == 'f3':
                                v_pca3 = pca3.transform(v)

                                # randomly substitute part of the stored feature pools
                                pca3_cur_pos = v_pca3[ind_pos, :]
                                pca3_cur_neg = v_pca3[ind_neg, :]
                                to_subst = random.sample(
                                    range(num_true), new_true)
                                pca3_pos[to_subst, :] = pca3_cur_pos
                                to_subst = random.sample(
                                    range(num_false), new_false)
                                pca3_neg[to_subst, :] = pca3_cur_neg

                    if conf_max < 1 and fail_times >= 2:
                        pca3_train = np.vstack([pca3_pos, pca3_neg])

                        y_train_true = np.ones((num_true, ))
                        y_train_false = np.zeros((num_false, ))
                        y_train = np.hstack([y_train_true, y_train_false])

                        # permutation
                        ind_perm = np.random.permutation(
                            range(num_false + num_true))
                        pca3_train = pca3_train[ind_perm, :]

                        y_train = y_train[ind_perm]

                        # logistic regression

                        clf3.fit(pca3_train, y_train)
                        # print 'score is: ',clf3.score(pca3_train,y_train)

                # (B,G,R)
                frame_data_cv = frame_data * 255  # [0,1]-->[0,255]
                frame_data_cv = frame_data_cv[:, :, ::-1]  # RGB->BGR
                frame_data_cv = frame_data_cv.astype('uint8')
                cv2.rectangle(frame_data_cv, (int(gt_box[0]), int(gt_box[1])),
                              (int(gt_box[2]), int(gt_box[3])), (255, 0, 0), 2,
                              1)
                if id > 0 and init_id == True:
                    cv2.rectangle(frame_data_cv,
                                  (int(pred_box[0, 0]), int(pred_box[0, 1])),
                                  (int(pred_box[0, 2]), int(pred_box[0, 3])),
                                  (0, 255, 0), 2, 1)
                if init_id == False:
                    init_id = True
                show_particles = False
                if show_particles:
                    for i in range(filter.num_particles):
                        cx = filter.particles[i, 0]
                        cy = filter.particles[i, 1]
                        cv2.circle(frame_data_cv, (int(cx), int(cy)),
                                   1, (0, 0, 255),
                                   thickness=1)
                show_box = False
                if show_box:
                    n = 0
                    for i in ind_pos:
                        if n % 5 == 0:
                            cv2.rectangle(frame_data_cv, (int(
                                boxes_train[i, 0]), int(boxes_train[i, 1])),
                                          (int(boxes_train[i, 2]),
                                           int(boxes_train[i, 3])),
                                          (0, 0, 255), 2, 1)
                        n += 1
                    n = 0
                    '''
                    for i in ind_neg:
                        if n%15==0:
                            cv2.rectangle(frame_data_cv, (int(boxes_train[i, 0]), int(boxes_train[i, 1])),
                                          (int(boxes_train[i, 2]), int(boxes_train[i, 3])), (0, 255,255), 2, 1)
                        n+=1
                    '''
                show_particles_init = False
                if show_particles_init:
                    for i in range(filter.num_particles):
                        cx = filter.particles[i, 0]
                        cy = filter.particles[i, 1]
                        cv2.circle(frame_data_cv, (int(cx), int(cy)),
                                   1, (0, 255, 0),
                                   thickness=1)
                show_frame = False
                cv2.circle(frame_data_cv,
                           (int(filter.cur_c[0]), int(filter.cur_c[1])),
                           2, (0, 0, 255),
                           thickness=1)
                if show_frame:
                    cv2.imshow(sequence, frame_data_cv)
                    c = cv2.waitKey(1)
                    # print 'You press: ',chr(c)
                    # if chr(c)=='c':
                    if c != -1:
                        cv2.destroyWindow(sequence)
                        # conf_hist=np.array(conf_hist)
                        # iou_hist=np.array(iou_hist)
                        # np.save('conf_hist.npy',conf_hist)
                        # np.save('iou_hist.npy',iou_hist)
                        break
            end_time = time.time()
            print "Average FPS: %f" % (nFrame / (end_time - start_time))
            log_file.write("Average FPS: %f\n" % (nFrame /
                                                  (end_time - start_time)))
            conf_hist = np.array(conf_hist)
            iou_hist = np.array(iou_hist)
            area_hist = np.array(area_hist)
            pred_hist = np.vstack(pred_hist)
            precisions, auc_pre = utils.calc_prec(gt_boxes, pred_hist)
            # plt.figure()
            # plt.subplot(221)
            # plt.plot(precisions)
            # plt.gca().invert_xaxis()
            # plt.title("Precision plot")
            # plt.xlabel('Location error threshold')
            # plt.ylabel('Precision')
            # plt.yticks(np.linspace(0,1,11))
            # plt.subplot(222)
            # plt.show()
            suc, auc_iou = utils.calc_success(iou_hist)
            records_precision.append(precisions * nFrame)
            records_success.append(suc * nFrame)
            # plt.plot(suc)
            # plt.gca().invert_xaxis()
            # plt.title('Success plot')
            # plt.xlabel('Overlap threshold')
            # plt.ylabel('Success Rate')
            # plt.yticks(np.linspace(0,1,11))
            # plt.show()

            # np.save('conf_hist.npy', conf_hist)
            # np.save('iou_hist.npy', iou_hist)
            # np.save('area_hist.npy',area_hist)
            # print 'Average iou is: %f'%(np.mean(iou_hist))
            print 'Precision @20 is: %f' % precisions[19]
            print 'Auc of Precision is: %f' % auc_pre
            print 'Auc of Success is: %f' % auc_iou
            print 'Reinit times: %d' % reinit
            log_file.write("Precision @20 is: %f\n" % precisions[19])
            log_file.write('Auc of Precision is: %f\n' % auc_pre)
            log_file.write('Auc of Success is: %f\n' % auc_iou)
            log_file.write('Reinit times: %d\n' % reinit)
    log_file.close()
    pkl = open(
        '/home/ccjiang/Documents/caffe-fast-rcnn/examples/tracker/results_100.pkl',
        'wb')
    pickle.dump([records_precision, records_success], pkl)
    pkl.close()