Example #1
def detect_track(model, temp_image, det_image, temp_boxes, roidb=None):
    """Run joint detection and tracking on a template/detection frame pair.

    Relies on the module-level globals bound, templates, det_anchors and
    configs, plus the helpers check_boxes, get_detect_output and
    get_track_output defined elsewhere in this script.
    """
    if roidb is None:
        roidb = {}
    roidb['bound'] = bound
    roidb['temp_image'] = temp_image[np.newaxis, :, :, :].astype(np.float32)
    roidb['det_image'] = det_image[np.newaxis, :, :, :].astype(np.float32)
    roidb['good_inds'] = check_boxes(temp_boxes)
    roidb['temp_boxes'] = temp_boxes

    # Pick the best search region for each template box.
    search_boxes = []
    for temp_box in temp_boxes:
        _, best_template = butil.best_search_box_test(templates, temp_box, bound)
        search_boxes.append(best_template)

    search_boxes = np.array(search_boxes)
    roidb['det_boxes'] = None
    roidb['det_classes'] = None
    roidb['temp_classes'] = None
    roidb['search_boxes'] = search_boxes
    roidb['anchors'] = det_anchors

    output_dict = model(roidb, task='all')
    configs['search_boxes'] = search_boxes
    configs['temp_boxes'] = temp_boxes

    det_rets = get_detect_output(output_dict, batch_size=1)
    track_ret = get_track_output(output_dict, configs)
    det_ret = det_rets[0]
    # Both return values are dicts.
    return det_ret, track_ret
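
A minimal, hypothetical call sketch for detect_track follows. It assumes the module-level globals the function reads (bound, templates, det_anchors, configs) and the helpers check_boxes, get_detect_output and get_track_output are already initialized by the surrounding script, that model is the loaded network, and that both frames are preprocessed to the network input size; file names and box values are placeholders.

import cv2
import numpy as np

# Hypothetical inputs: a template (previous) frame, the current frame, and
# (N, 4) float32 template boxes in [x1, y1, x2, y2] order.
temp_image = cv2.imread('frame_0000.jpg')
det_image = cv2.imread('frame_0001.jpg')
temp_boxes = np.array([[34.0, 50.0, 180.0, 240.0]], dtype=np.float32)

det_ret, track_ret = detect_track(model, temp_image, det_image, temp_boxes)
# As in Example #5, det_ret carries 'cls_bboxes' and track_ret carries 'bboxes_list'.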
Example #2
    def get_minibatch_inter_img(self):
        if self.iter_stop:
            self.iter_stop = False
            return None

        roidbs = []
        index = self.permute_inds[self.index]
        while self.index_per_seq[index] > self.upper_bound_per_seq[index]:
            self.index += 1
            if self.index == self.num_sequences:
                self.index = 0
            index = self.permute_inds[self.index]

        anno_file = op.join(self.anno_dir, self.anno_files[index])
        img_dir = op.join(self.data_dir, self.img_dirs[index])

        img_files = sorted(os.listdir(img_dir))
        tree = ET.parse(anno_file)
        root = tree.getroot()

        frames = root.findall('frame')

        cur_image_index = self.index_per_seq[index]
        ref_inds = np.arange(cur_image_index,
                             cur_image_index + self.batch_size)

        real_num_samples = ref_inds.size
        interval = np.random.randint(1,
                                     self.max_interval + 1,
                                     size=real_num_samples)
        det_inds = ref_inds + interval
        det_inds = np.minimum(det_inds, self.images_per_seq[index] - 1)

        for inds in zip(ref_inds, det_inds):
            roidb = {}

            ref_boxes = {}
            det_boxes = {}

            temp_image = None
            det_image = None

            temp_gt_classes = {}
            det_gt_classes = {}

            for ind in inds:
                img_file = img_files[ind]
                frame = frames[ind]

                image = cv2.imread(op.join(img_dir, img_file))

                #                image, (xstart, ystart), scale=util.resize_and_pad_image(image, self.im_w, self.im_h)
                h, w = image.shape[0:2]
                image = cv2.resize(image, (self.im_w, self.im_h),
                                   interpolation=cv2.INTER_LINEAR)
                nh, nw = image.shape[0:2]

                yscale = 1.0 * nh / h
                xscale = 1.0 * nw / w

                target_list = frame.find('target_list')
                targets = target_list.findall('target')

                for obj in targets:
                    attribs = obj.attrib
                    obj_id = int(attribs['id'])

                    bbox = np.zeros(4, dtype=np.float32)

                    bbox_attribs = obj.find('box').attrib
                    attribute_attribs = obj.find('attribute').attrib

                    left = float(bbox_attribs['left'])
                    top = float(bbox_attribs['top'])
                    width = float(bbox_attribs['width'])
                    height = float(bbox_attribs['height'])

                    bbox[0] = left
                    bbox[1] = top
                    bbox[2] = left + width - 1
                    bbox[3] = top + height - 1
                    '''
                    bbox*=scale
                    bbox[[0,2]]+=xstart
                    bbox[[1,3]]+=ystart
                    '''
                    bbox[[0, 2]] *= xscale
                    bbox[[1, 3]] *= yscale

                    bbox = butil.clip_bboxes(bbox[np.newaxis, :], self.im_w,
                                             self.im_h).squeeze()

                    cat = attribute_attribs['vehicle_type']
                    if cat == 'others':
                        cat = 'truck'
                    cat_ind = CAT_IND_MAP[cat]

                    if ind == inds[0]:
                        ref_boxes[obj_id] = bbox
                        temp_gt_classes[obj_id] = cat_ind
                    else:
                        det_boxes[obj_id] = bbox
                        det_gt_classes[obj_id] = cat_ind

                if ind == inds[0] and len(ref_boxes.keys()) > 0:
                    temp_image = image[np.newaxis, :, :, :].astype(np.float32)
                elif ind == inds[1] and len(ref_boxes.keys()) > 0:
                    det_image = image[np.newaxis, :, :, :].astype(np.float32)

            ref_boxes_align, det_boxes_align, ref_classes_align, det_classes_align = butil.align_boxes(
                ref_boxes, det_boxes, temp_gt_classes, det_gt_classes)
            good_inds = []
            if len(ref_boxes_align) > 0:
                roidb['raw_temp_boxes'] = ref_boxes_align
                #                temp_boxes=util.compute_template_boxes(ref_boxes_align, (temp_image.shape[2], temp_image.shape[1]), gain=self.margin_gain, shape='same')
                roidb['temp_image'] = temp_image
                roidb['det_image'] = det_image
                #                roidb['temp_boxes']=temp_boxes
                roidb['temp_boxes'] = ref_boxes_align
                roidb['det_boxes'] = det_boxes_align
                roidb['temp_classes'] = ref_classes_align
                roidb['det_classes'] = det_classes_align
                '''NHWC'''
                bound = (det_image.shape[2], det_image.shape[1])

                search_boxes = np.zeros((0, 4), dtype=np.float32)

                num_fg_anchors = -1
                best_track_anchors = None
                best_ind = 0

                for i, box in enumerate(ref_boxes_align):
                    if (box[2] - box[0] < MAX_TEMPLATE_SIZE - 1
                            and box[3] - box[1] < MAX_TEMPLATE_SIZE - 1):
                        if cfg.PHASE == 'TEST':
                            _, search_box = butil.best_search_box_test(
                                self.templates, box, bound)
                        else:
                            search_box, max_overlap_num, best_anchors = butil.best_search_box_train(
                                box, det_boxes_align[i], self.templates,
                                self.track_raw_anchors, bound, self.TK,
                                (self.rpn_conv_size, self.rpn_conv_size),
                                cfg.TRAIN.TRACK_RPN_POSITIVE_THRESH)

                            if max_overlap_num > num_fg_anchors:
                                num_fg_anchors = max_overlap_num
                                best_track_anchors = best_anchors
                                best_ind = i

                        search_boxes = np.append(search_boxes,
                                                 search_box.reshape(1, -1), 0)
                        good_inds.append(i)
                    else:
                        search_boxes = np.append(search_boxes,
                                                 np.array([[0, 0, 0, 0]]), 0)

                if len(good_inds) > 0:
                    roidb['search_boxes'] = search_boxes
                    roidb['bound'] = bound
                    roidb['good_inds'] = np.array(good_inds, dtype=np.int32)

                    roidb['best_anchors'] = best_track_anchors
                    roidb['best_ind'] = best_ind
                    '''detection anchors'''
                    roidb['anchors'] = self.det_anchors
                    roidbs.append(roidb)

        self.index_per_seq[index] += self.batch_size
        index_res = self.index_per_seq - self.upper_bound_per_seq
        index_res = index_res[self.permute_inds]
        valid_seq_inds = np.where(index_res <= 0)[0]
        if valid_seq_inds.size == 0:
            self.index_per_seq = np.zeros(self.num_sequences, dtype=np.int32)
            self.round_per_seq = np.zeros(self.num_sequences, dtype=np.int32)
            self.permute_inds = np.random.permutation(
                np.arange(self.num_sequences))
            self.index = 0
            self.iter_stop = True
        else:
            self.index += 1
            if self.index == self.num_sequences:
                self.index = 0
        if len(roidbs) > 0 and len(roidbs) < self.batch_size:
            top_n = len(roidbs)
            print('Pad roidbs with previous elements. From {} to {}'.format(
                top_n, self.batch_size))
            m = self.batch_size // len(roidbs) - 1
            n = self.batch_size % len(roidbs)
            for i in range(m):
                roidbs.extend(roidbs[:top_n])
            if n > 0:
                roidbs.extend(roidbs[:n])
            assert len(roidbs) == self.batch_size, \
                'roidbs length is not valid: {}/{}'.format(len(roidbs), self.batch_size)
        return roidbs
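
A sketch of how get_minibatch_inter_img might drive one training epoch. The loader construction and the train_step callback are hypothetical placeholders; the only contract used is the one visible above: each call returns a (possibly empty) list of roidb dicts, and None once every sequence has been consumed.

# Hypothetical epoch loop; DataLoaderInterImg, its arguments and train_step are placeholders.
loader = DataLoaderInterImg(anno_dir, data_dir, batch_size=4, max_interval=3)
while True:
    roidbs = loader.get_minibatch_inter_img()
    if roidbs is None:      # iter_stop was reached: one full pass over all sequences
        break
    if len(roidbs) == 0:    # no usable frame pairs in this minibatch
        continue
    train_step(roidbs)      # consume the list of roidb dicts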
Example #3
def main(dataset_obj, model=None):
    loader = DataLoader(dataset_obj)

    temp_boxes = None
    temp_image = None
    det_image = None

    started = False
    track_started = False
    num_instances = 0
    track_ids = None

    VIS_DATASET = False
    TRACK_LAST_FRAME = False

    #    writer=cv2.VideoWriter('./track.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30.0, (im_width, im_height))

    frame_ind = 0
    for _, (image, gt_boxes) in enumerate(loader):
        canvas = image.copy()
        if VIS_DATASET:
            if gt_boxes is not None:
                temp_boxes = np.asarray(gt_boxes)[:, :4].reshape(-1, 4)
            for i, box in enumerate(temp_boxes):
                box = box.astype(np.int32)
                cv2.rectangle(canvas, (box[0], box[1]), (box[2], box[3]),
                              tuple(colors[i]), 2)
        else:
            image = image.astype(np.float32)
            if len(gt_boxes) == 0 and not started:
                print('Waiting for a frame with gt_box')
                continue
            started = True
            if not track_started:
                temp_boxes = np.asarray(gt_boxes[:, :4]).reshape(-1, 4).astype(
                    np.float32)
                temp_image = image.copy()
                for i, box in enumerate(gt_boxes):
                    box = box.astype(np.int32)
                    cv2.rectangle(canvas, (box[0], box[1]), (box[2], box[3]),
                                  tuple(colors[i]), 2)
                track_started = True
                num_instances = temp_boxes.shape[0]
                track_ids = np.ones(num_instances, dtype=np.int32)
            else:
                '''replace with detection'''
                if not TRACK_LAST_FRAME and len(gt_boxes) > num_instances:
                    track_ids, temp_boxes = add_new_targets(
                        track_ids, temp_boxes,
                        np.arange(num_instances, len(gt_boxes)),
                        gt_boxes[num_instances:])
                    print('add {} new targets'.format(
                        len(gt_boxes) - num_instances))
                    num_instances = len(track_ids)

                if TRACK_LAST_FRAME:
                    num_instances = len(temp_boxes)

                det_image = image.copy()
                roidb = dict()
                bound = (temp_image.shape[1], temp_image.shape[0])
                roidb['bound'] = bound
                roidb['temp_image'] = temp_image[np.newaxis, :, :, :].astype(
                    np.float32)
                roidb['det_image'] = det_image[np.newaxis, :, :, :].astype(
                    np.float32)
                roidb['good_inds'] = check_boxes(temp_boxes)
                roidb['temp_boxes'] = temp_boxes
                search_boxes = []
                for temp_box in temp_boxes:
                    _, best_template = butil.best_search_box_test(
                        templates, temp_box, bound)
                    search_boxes.append(best_template)

                search_boxes = np.array(search_boxes)
                #                print(search_boxes)
                roidb['det_boxes'] = None
                roidb['det_classes'] = None
                roidb['temp_classes'] = None
                roidb['search_boxes'] = search_boxes

                #                anchors=G.gen_region_anchors(track_raw_anchors, search_boxes, bound, K=TK, size=(rpn_conv_size, rpn_conv_size))[0]
                #                roidb['track_anchors']=anchors
                dummy_search_box = np.array(
                    [[0, 0, im_width - 1, im_height - 1]])
                anchors = G.gen_region_anchors(det_raw_anchors,
                                               dummy_search_box,
                                               bound,
                                               K=K,
                                               size=out_size)[0]
                roidb['anchors'] = anchors

                bboxes_list, _ = inference_track(model, roidb)
                last_temp_boxes = temp_boxes.copy()

                if not TRACK_LAST_FRAME and len(bboxes_list) > 0:
                    valid_boxes = np.zeros((0, 4), dtype=np.float32)
                    for i, bboxes in enumerate(bboxes_list):
                        if len(bboxes) > 0:
                            if len(bboxes.shape) > 0:
                                bboxes = bboxes.squeeze()
                            bbox = bboxes[:4]
                            valid_boxes = np.append(valid_boxes,
                                                    bbox.reshape(1, -1), 0)
                    if valid_boxes.shape[0] > 0:
                        temp_boxes = valid_boxes.copy()

                index = 0
                for i in range(num_instances):
                    if not track_ids[i]:
                        continue
                    boxes_one_target = bboxes_list[index]
                    if len(boxes_one_target) == 0:
                        track_ids[i] = 0
                        print('target {} disappears'.format(i))
                    else:
                        box_inst = bboxes_list[index][0].astype(np.int32)
                        cv2.rectangle(canvas, (box_inst[0], box_inst[1]),
                                      (box_inst[2], box_inst[3]),
                                      tuple(colors[i % len(colors)]), 2)
                    index += 1
                num_exist_instances = len(np.where(track_ids != 0)[0])
                for i in range(num_exist_instances):
                    temp_box = last_temp_boxes[i].astype(np.int32)
                    cv2.rectangle(canvas, (temp_box[0], temp_box[1]),
                                  (temp_box[2], temp_box[3]), (0, 0, 255), 1)
                if TRACK_LAST_FRAME:
                    temp_boxes = np.asarray(gt_boxes[:, :4]).reshape(
                        -1, 4).astype(np.float32)
                temp_image = image.copy()

        frame_ind += 1
        print('Frame {}'.format(frame_ind))
        cv2.imshow('benchmark', canvas)
        #        writer.write(canvas)
        if cv2.waitKey(1) == 27:
            break
Example #4
    def get_minibatch(self):
        if self.iter_stop:
            self.iter_stop = False
            return None

        roidbs = []
        index = self.inds[self.index]

        while self.index_per_seq[index] > self.upper_bound_per_seq[index]:
            self.index += 1
            if self.index == self.num_sequences:
                self.index = 0
            index = self.inds[self.index]

        img_dir = op.join(self.mot_dir, MOT_SUBDIRS[index], 'img1')
        annotation = self.gt_annotations[MOT_SUBDIRS[index]]

        img_files = sorted(os.listdir(img_dir))
        '''
        if MAX_SAMPLES>0:
            max_samples=min(MAX_SAMPLES, len(img_files))
            img_files=img_files[:max_samples]
        '''
        cur_image_index = self.index_per_seq[index]
        temp_inds = np.arange(cur_image_index,
                              cur_image_index + self.batch_size)
        real_num_samples = temp_inds.size

        interval = np.random.randint(1,
                                     self.max_interval + 1,
                                     size=real_num_samples)
        det_inds = temp_inds + interval
        det_inds = np.minimum(det_inds, self.images_per_seq[index] - 1)
        #        non_empty_batch=False

        for inds in zip(temp_inds, det_inds):
            roidb = {}
            temp_boxes = {}
            det_boxes = {}

            temp_image = None
            det_image = None

            temp_gt_classes = {}
            det_gt_classes = {}

            for ind in inds:
                image = cv2.imread(op.join(img_dir, img_files[ind]))

                h, w = image.shape[0:2]
                image = cv2.resize(image, (self.im_w, self.im_h),
                                   interpolation=cv2.INTER_LINEAR)
                nh, nw = image.shape[0:2]

                yscale = 1.0 * nh / h
                xscale = 1.0 * nw / w

                objects = annotation[ind]
                #                obj_boxes=np.zeros((0,4),dtype=np.float32)
                if len(objects) == 0:
                    print('{} has no targets'.format(
                        op.join(img_dir, img_files[ind])))
                    if DEBUG and self.vis_index < self.num_visualize:
                        cv2.imwrite(op.join('mot_no_target', img_files[ind]),
                                    image)

                for obj in objects:
                    obj_id = obj['id']
                    bbox = obj['bbox'].copy()

                    bbox[[0, 2]] *= xscale
                    bbox[[1, 3]] *= yscale
                    self.clip_boxes(bbox)

                    bbox = bbox.astype(np.int32)
                    #                    obj_boxes=np.append(obj_boxes, bbox[np.newaxis,:], 0)
                    if ind == inds[0]:
                        temp_boxes[obj_id] = bbox
                        temp_gt_classes[obj_id] = 1
                    else:
                        det_boxes[obj_id] = bbox
                        det_gt_classes[obj_id] = 1

#                    if self.vis_index < self.num_visualize:
#                        cv2.rectangle(image_cpy, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)

                if ind == inds[0] and len(temp_boxes.keys()) > 0:
                    temp_image = image[np.newaxis, :, :, :].astype(np.float32)
                    #                    non_empty_batch=True
                elif ind == inds[1] and len(temp_boxes.keys()) > 0:
                    det_image = image[np.newaxis, :, :, :].astype(np.float32)

            ref_boxes_align, det_boxes_align, ref_classes_align, det_classes_align = butil.align_boxes(
                temp_boxes, det_boxes, temp_gt_classes, det_gt_classes)
            #            print(ref_boxes_align)
            #            print(det_boxes_align)
            good_inds = []

            if len(ref_boxes_align) > 0:
                roidb['raw_temp_boxes'] = ref_boxes_align
                roidb['temp_image'] = temp_image
                roidb['det_image'] = det_image
                roidb['temp_boxes'] = ref_boxes_align
                roidb['det_boxes'] = det_boxes_align
                roidb['temp_classes'] = ref_classes_align
                roidb['det_classes'] = det_classes_align
                '''NHWC'''
                bound = (det_image.shape[2], det_image.shape[1])
                search_boxes = np.zeros((0, 4), dtype=np.float32)

                num_fg_anchors = -1
                best_track_anchors = None
                best_ind = 0

                for i, box in enumerate(ref_boxes_align):
                    if cfg.PHASE == 'TEST':
                        _, search_box = butil.best_search_box_test(
                            self.templates, box, bound)
                    else:
                        search_box, max_overlap_num, best_anchors = butil.best_search_box_train(
                            box, det_boxes_align[i], self.templates,
                            self.track_raw_anchors, bound, self.TK,
                            (self.rpn_conv_size, self.rpn_conv_size),
                            cfg.TRAIN.TRACK_RPN_POSITIVE_THRESH)

                        if max_overlap_num > num_fg_anchors:
                            num_fg_anchors = max_overlap_num
                            best_track_anchors = best_anchors
                            best_ind = i

                    search_boxes = np.append(search_boxes,
                                             search_box.reshape(1, -1), 0)
                    good_inds.append(i)
                roidb['search_boxes'] = search_boxes
                roidb['bound'] = bound
                roidb['good_inds'] = good_inds

                if cfg.PHASE == 'TRAIN':
                    roidb['best_anchors'] = best_track_anchors
                    roidb['best_ind'] = best_ind

                dummy_search_box = np.array(
                    [[0, 0, self.im_w - 1, self.im_h - 1]])
                anchors = G.gen_region_anchors(self.raw_anchors,
                                               dummy_search_box,
                                               bound,
                                               K=self.K,
                                               size=self.out_size)
                '''detection anchors'''
                roidb['anchors'] = anchors[0]
                roidbs.append(roidb)
        '''
        [NHWC,NHWC]
        '''
        self.index_per_seq[index] += self.batch_size
        index_res = self.index_per_seq[self.inds] - self.upper_bound_per_seq[
            self.inds]
        self.valid_seq_inds = np.nonzero(index_res <= 0)[0]
        if self.valid_seq_inds.size == 0:
            self.index_per_seq = np.zeros(self.num_sequences, dtype=np.int32)
            self.round_per_seq = np.zeros(self.num_sequences, dtype=np.int32)
            self.inds = np.random.permutation(np.arange(self.num_sequences))
            self.index = 0
            self.iter_stop = True
        else:
            self.index += 1
            if self.index == self.num_sequences:
                self.index = 0

        if self.batch_size == 1:
            if len(roidbs) == 0:
                return {}
            else:
                return roidbs[0]
        else:
            return roidbs
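
get_minibatch has a slightly different return contract: None once every sequence has been consumed, a single roidb dict (possibly empty) when batch_size is 1, and a list of roidb dicts otherwise. A hypothetical consumer that normalizes both shapes is sketched below; loader and train_step are placeholders.

# Hypothetical consumer of get_minibatch; loader and train_step are placeholders.
while True:
    batch = loader.get_minibatch()
    if batch is None:                            # epoch finished
        break
    roidbs = [batch] if isinstance(batch, dict) else batch
    roidbs = [r for r in roidbs if len(r) > 0]   # drop the empty-dict case
    if roidbs:
        train_step(roidbs)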
Example #5
def main(dataset_obj, model):
    loader = DataLoader(dataset_obj)

    temp_boxes = None
    det_boxes = None
    track_started = False
    num_instances = 0
    track_ids = None

    frame_ind = 0
    det_interval = 30  # re-run the detector every det_interval frames

    # bound and det_anchors are module-level globals.
    roidb = {}
    roidb['bound'] = bound
    roidb['temp_classes'] = None
    roidb['det_classes'] = None
    roidb['temp_boxes'] = None
    roidb['det_boxes'] = None
    roidb['anchors'] = det_anchors
    #    writer=cv2.VideoWriter('./self_video.avi', cv2.VideoWriter_fourcc('M','J','P','G'),30.0,(im_width, im_height))
    for idx, image in enumerate(loader):
        canvas = image.copy()
        canvas_det = image.copy()
        if not track_started or temp_boxes.shape[0] == 0:
            # (Re)initialize the tracker from a fresh detection pass.
            ret = detect(model, image, roidb)
            cls_bboxes = ret['cls_bboxes']
            temp_boxes = cls_bboxes_to_boxes(cls_bboxes)
            '''init temp_boxes'''
            roidb['temp_boxes'] = temp_boxes
            '''init num_instances'''
            num_instances = temp_boxes.shape[0]
            track_ids = np.ones(num_instances, dtype=np.int32)
            track_started = True
        else:
            temp_boxes = roidb['temp_boxes']
            #            last_temp_boxes=temp_boxes.copy()

            # The previous detection image becomes the new template image.
            temp_image = roidb['det_image'].squeeze(0)
            det_image = image.copy()
            roidb['temp_image'] = temp_image
            roidb['det_image'] = det_image
            roidb['good_inds'] = check_boxes(temp_boxes)

            # Pick the best search region for each template box.
            search_boxes = []
            for temp_box in temp_boxes:
                _, best_template = butil.best_search_box_test(
                    templates, temp_box, bound)
                search_boxes.append(best_template)

            search_boxes = np.array(search_boxes)
            roidb['det_boxes'] = None
            roidb['det_classes'] = None
            roidb['temp_classes'] = None
            roidb['search_boxes'] = search_boxes

            det_ret, track_ret = detect_track(model, temp_image, det_image,
                                              temp_boxes, roidb)
            '''do something with det_ret'''
            bboxes_list = track_ret['bboxes_list']
            valid_boxes = np.zeros((0, 4), dtype=np.float32)

            # Gather tracked boxes for live targets; drop targets that have
            # disappeared or grown beyond the template size.
            index = 0
            for i in range(num_instances):
                if not track_ids[i]:
                    continue
                bboxes = np.array(bboxes_list[index])
                if len(bboxes.shape) > 1:
                    bboxes = bboxes.squeeze()
                if bboxes.size == 0:
                    track_ids[i] = 0
                else:
                    bbox = bboxes[:4]
                    w, h = bbox[2] - bbox[0] + 1, bbox[3] - bbox[1] + 1
                    if w >= MAX_TEMPLATE_SIZE or h >= MAX_TEMPLATE_SIZE:
                        track_ids[i] = 0
                    else:
                        valid_boxes = np.append(valid_boxes, bbox.reshape(1, -1), 0)
                index += 1

            if (valid_boxes.shape[0] != temp_boxes.shape[0]
                    or frame_ind == det_interval):
                if frame_ind != det_interval:
                    print('Target changed, use det_ret!')
                else:
                    print('Det interval arrived, use det_ret!')
                    frame_ind = 0
                cls_bboxes = det_ret['cls_bboxes']
                det_boxes = cls_bboxes_to_boxes(cls_bboxes)
                if valid_boxes.shape[0] > 0 and det_boxes.shape[0] > 0:
                    det_boxes = sort_boxes(valid_boxes, det_boxes)
                #                track_ids, temp_boxes=add_new_targets(track_ids, valid_boxes, det_boxes[num_instances:])
                track_ids, temp_boxes = add_new_targets(
                    track_ids, valid_boxes, det_boxes[valid_boxes.shape[0]:])
                num_instances = len(track_ids)
            else:
                temp_boxes = valid_boxes.copy()

            roidb['temp_boxes'] = temp_boxes
        '''visualize'''
        index = 0
        #        print(track_ids)
        #        print(temp_boxes.shape[0])
        for i in range(num_instances):
            if not track_ids[i]:
                continue
            bbox = temp_boxes[index].astype(np.int32)
            cv2.rectangle(canvas, (bbox[0], bbox[1]), (bbox[2], bbox[3]),
                          tuple(colors[i % len(colors)]), 2)
            index += 1
        if det_boxes is not None:
            # Use the per-detection index for the color rather than the stale
            # loop variable left over from the tracking loop above.
            for j, bbox in enumerate(det_boxes):
                bbox = bbox.astype(np.int32)
                cv2.rectangle(canvas_det, (bbox[0], bbox[1]), (bbox[2], bbox[3]),
                              tuple(colors[j % len(colors)]), 2)
        frame_ind += 1
        print('Frame {}'.format(idx))
        cv2.imshow('track', canvas)
        cv2.imshow('det', canvas_det)
        #        writer.write(canvas)
        if cv2.waitKey(1) == 27:
            break