import os

import imageio
import numpy as np
import pandas as pd

def extract_bbox(frame, refbbox, fa):
    # Detector expects BGR; keep the detection that best overlaps refbbox
    bboxes = fa.face_detector.detect_from_image(frame[..., ::-1])
    if len(bboxes) != 0:
        bbox = max([(bb_intersection_over_union(bbox, refbbox), tuple(bbox))
                    for bbox in bboxes])[1]
    else:
        bbox = np.array([0, 0, 0, 0, 0])
    # Clip negative coordinates to the image border
    return np.maximum(np.array(bbox), 0)
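`bb_intersection_over_union` is used throughout these examples but not shown. A minimal sketch of the standard IoU computation it presumably implements, assuming boxes are (x1, y1, x2, y2, ...) arrays:

def bb_intersection_over_union(boxA, boxB):
    # Corners of the intersection rectangle
    xA, yA = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    xB, yB = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    inter = max(0, xB - xA) * max(0, yB - yA)
    areaA = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    areaB = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    union = areaA + areaB - inter
    # Guard against degenerate boxes such as the all-zero fallback above
    return inter / union if union > 0 else 0.0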
def crop_video(person_id, video_id, video_path, args):
    # Filenames are expected to look like 'person#utterance#start-end.ext'
    utterance = video_path.split('#')[1]
    bbox_path = os.path.join(args.bbox_folder,
                             os.path.basename(video_path)[:-4] + '.txt')
    reader = imageio.get_reader(video_path)

    chunk_start = float(video_path.split('#')[2].split('-')[0])

    d = pd.read_csv(bbox_path)  # one bbox row per frame
    video_count = 0
    initial_bbox = None
    start = 0
    tube_bbox = None
    frame_list = []
    chunks_data = []

    try:
        for i, frame in enumerate(reader):
            bbox = np.array(d.iloc[i])

            if initial_bbox is None:
                initial_bbox = bbox
                start = i
                tube_bbox = bbox

            # Start a new chunk when the face drifts too far from the chunk's
            # initial bbox or the chunk reaches its maximal length
            iou = bb_intersection_over_union(initial_bbox, bbox)
            if iou < args.iou_with_initial or len(frame_list) >= args.max_frames:
                chunks_data += store(frame_list, tube_bbox, video_id,
                                     utterance, person_id, start, i,
                                     video_count, chunk_start, args)
                video_count += 1
                initial_bbox = bbox
                start = i
                tube_bbox = bbox
                frame_list = []
            tube_bbox = join(tube_bbox, bbox)
            frame_list.append(frame)
    except IndexError:
        # The bbox file may have fewer rows than the video has frames
        pass

    # Flush the last, still-open chunk
    chunks_data += store(frame_list, tube_bbox, video_id, utterance, person_id,
                         start, i + 1, video_count, chunk_start, args)

    return chunks_data
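`join` is also external. Since `tube_bbox` accumulates the area the face covers over a chunk, it is presumably the bounding union of two boxes; a hedged sketch (ignoring any trailing confidence value):

def join(boxA, boxB):
    # Smallest box containing both inputs; used to grow the tube bbox
    return np.array([min(boxA[0], boxB[0]), min(boxA[1], boxB[1]),
                     max(boxA[2], boxB[2]), max(boxA[3], boxB[3])])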
def crop_video_neighbor(person_id, video_id, video_path, args):
    # Variant of crop_video that compares each bbox with its predecessor
    # instead of the chunk's initial bbox
    bbox_path = os.path.join(args.bbox_folder,
                             os.path.basename(video_path)[:-4] + '.txt')
    reader = imageio.get_reader(video_path)

    d = pd.read_csv(bbox_path)
    video_count = 0
    prev_bbox = None
    start = 0
    tube_bbox = None
    frame_list = []
    chunks_data = []

    try:
        for i, frame in enumerate(reader):
            bbox = np.array(d.iloc[i])

            if prev_bbox is None:
                prev_bbox = bbox
                start = i
                tube_bbox = bbox

            # Only an abrupt frame-to-frame jump (or a full chunk) starts a
            # new chunk here
            iou = bb_intersection_over_union(prev_bbox, bbox)
            if iou < args.iou_with_initial or len(frame_list) >= args.max_frames:
                chunks_data += store(frame_list, tube_bbox, video_id,
                                     person_id, start, i, video_count, args)
                video_count += 1
                start = i
                tube_bbox = bbox
                frame_list = []
            prev_bbox = bbox
            tube_bbox = join(tube_bbox, bbox)
            frame_list.append(frame)
    except IndexError:
        # The bbox file may have fewer rows than the video has frames
        pass

    # Flush the last, still-open chunk
    chunks_data += store(frame_list, tube_bbox, video_id, person_id, start,
                         i + 1, video_count, args)

    return chunks_data
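A hypothetical invocation of crop_video, assuming VoxCeleb-style filenames of the form 'person#utterance#start-end.mp4'. The Namespace fields mirror the attributes read above; the values are illustrative, and `store` may consume further fields not shown here:

from argparse import Namespace

args = Namespace(bbox_folder='bboxes', iou_with_initial=0.25, max_frames=1024)
chunks = crop_video('id00017', 'vid001',
                    'id00017#utterance01#000120-000480.mp4', args)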
Example #4
import imageio
import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize

def process_video(video_path, detector, args):
    video = imageio.get_reader(video_path)
    fps = video.get_meta_data()['fps']
    trajectories = []
    previous_frame = None
    chunks_data = []
    try:
        for i, frame in enumerate(video):
            # Skip videos that are too small to be useful
            if args.minimal_video_size > min(frame.shape[0], frame.shape[1]):
                return chunks_data
            # Process only every sample_rate-th frame
            if i % args.sample_rate != 0:
                continue
            # The detector expects BGR input
            predictions = detector.compute_prediction(frame[:, :, ::-1])
            keypoints = predictions.get_field('keypoints').keypoints
            scores = predictions.get_field("scores")
            keypoint_scores = predictions.get_field('keypoints').get_field(
                "logits")
            bboxes = predictions.bbox[:, :4]

            ## Check whether each detected bbox contains a valid person

            height_criterion = (
                (bboxes[:, 3] - bboxes[:, 1]) >
                args.mimial_person_size * frame.shape[1]).numpy()
            score_criterion = (scores > args.bbox_confidence_th).numpy()
            full_person_criterion = np.array(
                [check_full_person(kps) for kps in keypoint_scores])

            criterion = np.logical_and(height_criterion, score_criterion)
            bboxes_distractor = bboxes.numpy()[criterion]
            criterion = np.logical_and(full_person_criterion, criterion)
            bboxes_valid = bboxes.numpy()[criterion]

            ## Check whether the frame itself is valid
            if previous_frame is None:
                previous_frame = rgb2gray(
                    resize(frame, (256, 256),
                           preserve_range=True,
                           anti_aliasing=True,
                           mode='constant'))

                current_frame = previous_frame
                previous_intensity = np.median(frame.reshape(
                    (-1, frame.shape[-1])),
                                               axis=0)
                current_intensity = previous_intensity
            else:
                current_frame = rgb2gray(
                    resize(frame, (256, 256),
                           preserve_range=True,
                           anti_aliasing=True,
                           mode='constant'))
                current_intensity = np.median(frame.reshape(
                    (-1, frame.shape[-1])),
                                              axis=0)

            flow_quantiles = check_camera_motion(current_frame, previous_frame)
            camera_criterion = flow_quantiles[1] > args.camera_change_threshold
            previous_frame = current_frame
            intensity_criterion = np.max(
                np.abs(previous_intensity -
                       current_intensity)) > args.intensity_change_threshold
            previous_intensity = current_intensity
            # A camera cut, a global intensity jump, or a frame with no people
            # terminates all current trajectories
            no_person_criterion = len(bboxes) == 0
            criterion = no_person_criterion or camera_criterion or intensity_criterion

            if criterion:
                store(video_path, trajectories, i, args, chunks_data, fps)
                trajectories = []

            ## Check the validity criteria for each trajectory
            not_valid_trajectories = []
            valid_trajectories = []

            for trajectory in trajectories:
                # A trajectory is a triple [tube_bbox, start_frame, frames]
                tube_bbox = trajectory[0]
                number_of_intersections = 0
                current_bbox = None
                for bbox in bboxes_valid:
                    intersect = bb_intersection_over_union(tube_bbox, bbox) > 0
                    if intersect:
                        current_bbox = bbox

                for bbox in bboxes_distractor:
                    intersect = bb_intersection_over_union(tube_bbox, bbox) > 0
                    if intersect:
                        number_of_intersections += 1

                if current_bbox is None:
                    not_valid_trajectories.append(trajectory)
                    continue

                if number_of_intersections > 1:
                    not_valid_trajectories.append(trajectory)
                    continue

                if not one_box_inside_other(trajectory[0], current_bbox):
                    not_valid_trajectories.append(trajectory)
                    continue

                if len(trajectory[2]) >= args.max_frames:
                    not_valid_trajectories.append(trajectory)
                    continue

                valid_trajectories.append(trajectory)

            store(video_path, not_valid_trajectories, i, args, chunks_data,
                  fps)
            trajectories = valid_trajectories

            ## Assign bbox to trajectories, create new trajectories
            for bbox in bboxes_valid:
                intersect = False
                for trajectory in trajectories:
                    tube_bbox = trajectory[0]
                    intersect = bb_intersection_over_union(tube_bbox, bbox) > 0
                    if intersect:
                        #trajectory[1] = join(tube_bbox, bbox)
                        trajectory[2].append(frame)
                        break

                ## Create new trajectory
                if not intersect:
                    trajectories.append([
                        compute_aspect_preserved_bbox(bbox, args.increase), i,
                        [frame]
                    ])

            if len(chunks_data) > args.max_crops:
                break

    except IndexError:
        pass

    # Flush the trajectories that are still open at the end of the video
    store(video_path, trajectories, i + 1, args, chunks_data, fps)
    return chunks_data
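`one_box_inside_other` is another external helper. One plausible reading, consistent with its use above (the tracked person must stay inside the trajectory's tube bbox), is a containment test in either direction:

def one_box_inside_other(boxA, boxB):
    # True if either box fully contains the other
    a_in_b = (boxA[0] >= boxB[0] and boxA[1] >= boxB[1]
              and boxA[2] <= boxB[2] and boxA[3] <= boxB[3])
    b_in_a = (boxB[0] >= boxA[0] and boxB[1] >= boxA[1]
              and boxB[2] <= boxA[2] and boxB[3] <= boxA[3])
    return a_in_b or b_in_a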
Example #5
    def _assign_detections_to_tracks(self, detections, frame_id, save=False):
        # if there are no tracks yet, all detections are new tracks
        if len(self.tracks) == 0:
            for det in detections:
                t = Track()
                t.add_to_track(det)
                self.tracks.append(t)
            return True
        # assign detections to existing tracks
        for track in self.tracks:
            track.has_match = False
            predicted_next_bb = track.get_predicted_next_bb()
            for det in detections:
                # singular tracks search radially
                if track.is_singular():
                    iou = util.bb_intersection_over_union(
                        predicted_next_bb, det.bbox)
                    dist = util.dist_btwn_bb_centroids(predicted_next_bb,
                                                       det.bbox)
                    if dist < const.MAX_PIXELS_DIST_TRACK_START and iou > const.MIN_IOU_TRACK_START:
                        track.add_to_track(det)
                        track.has_match = True
                        track.num_misses = 0
                        break
                # established tracks search in predicted location
                elif track.is_established():
                    # TODO: get distance, iou to det
                    iou = util.bb_intersection_over_union(
                        predicted_next_bb, det.bbox)
                    dist = util.dist_btwn_bb_centroids(predicted_next_bb,
                                                       det.bbox)
                    if dist < const.MAX_PIXELS_DIST_TRACK and iou > const.MIN_IOU_TRACK:
                        # print 'pred_n=', predicted_next_bb
                        # print 'iou={},dist={}'.format(iou, dist)
                        sim = self.face_rec_engine.compute_similarity(
                            det.fvec, track.get_latest_fvec())
                        # print 'sim={}'.format(sim)
                        track.add_to_track(det)
                        track.has_match = True
                        track.num_misses = 0
                        # TODO: handle case where decision is tough (2 detections very close)
                        break
            # if no detection was matched to this track, penalize the track
            if not track.has_match:
                # delete singular tracks that didn't get assigned (probably false detection)
                if track.num_misses > 0:
                    if track.is_singular():
                        track.delete_me = True
                        # print 'delete singular track'
                    else:
                        # TODO: continue track using predicted state
                        track.propagate_track(frame_id=frame_id)
                track.num_misses += 1
            else:
                # reset match flag
                track.has_match = False

        for i, det in enumerate(detections):
            # if the detection wasn't matched to any track, start a new one
            if det.num_matches == 0:
                # print 'new track created. len(tracks)={}, num_det={}'.format(len(tracks),len(detections))
                t = Track()
                t.add_to_track(det)
                self.tracks.append(t)
            elif det.num_matches > 1:
                # print 'multiple assignment!! (num_matches({})={})'.format(i, det.num_matches)
                pass
                # TODO: resolve detections with multiple matches

        # print 'num_tracks = {}, num dets = {}'.format(len(tracks), len(detections))
        # cleanup any duplicate tracks that have formed (TODO: how do they form?)
        self._delete_duplicate_tracks()
        # save dead tracks before deletion
        if save:
            self._save_tracks_to_json()
        # remove dead tracks
        self.tracks = [
            t for t in self.tracks
            if (t.is_dead() is False and t.delete_me is False)
        ]
        # for i, track in enumerate(tracks):
        #     print '{}: {}'.format(i, track.get_latest_bb())
        return True
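`Track`, `util`, and `const` live elsewhere in this project. A skeleton of the interface the method above relies on, with everything beyond the called names assumed:

class Track(object):
    """Hypothetical sketch; the real class is defined elsewhere."""

    def __init__(self):
        self.detections = []
        self.has_match = False
        self.num_misses = 0
        self.delete_me = False

    def add_to_track(self, det):
        det.num_matches += 1  # lets the caller detect double assignment
        self.detections.append(det)

    def is_singular(self):
        return len(self.detections) == 1

    def is_established(self):
        return len(self.detections) > 1

    def get_predicted_next_bb(self):
        return self.detections[-1].bbox  # naive: assume no motion

    def get_latest_fvec(self):
        return self.detections[-1].fvec

    def propagate_track(self, frame_id):
        pass  # a real implementation would extend the track via a motion model

    def is_dead(self):
        return self.num_misses > 10  # placeholder threshold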