Example #1
def male_fly_patch(img, mask, body_center, rotate_angle, crop_size=400):
    # clamp body_center so that it lies inside the image
    body_center = bound_point((body_center[0], body_center[1]), img)

    # create a patch centered on the fly
    fly_patch = ImagePatch(img, mask)
    fly_patch = fly_patch.recenter(crop_size, crop_size, body_center)

    # extract contours of the patch
    contours = find_contours(fly_patch.img, fly_patch.mask, type='wings')

    # mask out everything except the contour containing the fly;
    # contours are checked from fewest to most points, so the first
    # (smallest) contour containing the fly center is used
    fly_center = (crop_size // 2, crop_size // 2)
    for contour in sorted(contours, key=lambda x: len(x)):
        if in_contour(fly_center, contour):
            # create new mask for fly region (still keeping circular ROI)
            fly_mask = mask_from_contour(fly_patch.img, contour)
            fly_patch.mask = cv2.bitwise_and(fly_patch.mask, fly_mask)

            # apply mask to the fly_patch image
            fly_patch.img = cv2.bitwise_and(fly_patch.img,
                                            fly_patch.img,
                                            mask=fly_patch.mask)
            break
    else:
        # return None if no contour contains the fly center
        return None

    # orient fly vertically
    fly_patch = fly_patch.orient('vertical', rotate_angle=rotate_angle)

    # return the resulting fly patch
    return fly_patch
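
The helper bound_point is used above but not defined in these examples. A minimal sketch, assuming it clamps an (x, y) point to the image bounds and returns integer pixel coordinates:

import numpy as np

def bound_point(point, img):
    # hypothetical sketch: clamp a point so it lies inside the image
    h, w = img.shape[:2]
    x = int(np.clip(round(point[0]), 0, w - 1))
    y = int(np.clip(round(point[1]), 0, h - 1))
    return (x, y)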
Example #2
def load_data():
    X = []
    y = []

    for anno in tqdm(get_annotations()):
        img = cv2.imread(anno.image_path, 0)

        mask = img_to_mask(img)
        contours = find_contours(img, mask=mask, type='core')

        for contour in contours:
            label = contour_label(anno, contour)
            # collapse 'male' and 'female' into a single 'one fly' class
            if label in ['male', 'female']:
                label = 'one'
            if label is not None:
                X.append(make_features(contour))
                y.append(CATEGORIES.index(label))

    # assemble features
    X = np.array(X, dtype=float)

    # assemble labels
    y = np.array(y, dtype=int)

    report_labels_classification(y, CATEGORIES)

    return X, y
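
report_labels_classification is also not defined here. A plausible minimal sketch that prints per-class sample counts (the real helper may format or plot differently):

import numpy as np

def report_labels_classification(y, categories):
    # hypothetical sketch: print how many samples fall into each class
    counts = np.bincount(np.asarray(y, dtype=int), minlength=len(categories))
    for name, count in zip(categories, counts):
        print('{}: {} samples'.format(name, count))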
Example #3
def load_data():
    X = []
    y = []

    img_count = 0

    for anno in tqdm(get_annotations()):
        img = cv2.imread(anno.image_path, 0)

        mask = img_to_mask(img)
        contours = find_contours(img, mask=mask, type='core')

        male = None
        female = None
        ok = True

        # sift through contours to find one corresponding to male and the other to female
        for contour in contours:
            label = contour_label(anno, contour)
            if label == 'male':
                if male is None:
                    male = contour
                else:
                    ok = False
                    anno.warn('Found two males, skipping...')
                    break
            elif label == 'female':
                if female is None:
                    female = contour
                else:
                    ok = False
                    anno.warn('Found two females, skipping...')
                    break

        if (male is not None) and (female is not None) and ok:
            male_patch = crop_to_contour(img, male)
            female_patch = crop_to_contour(img, female)

            X.append(make_features(male, female, male_patch, female_patch))
            y.append(CATEGORIES.index('mf'))

            X.append(make_features(female, male, female_patch, male_patch))
            y.append(CATEGORIES.index('fm'))

            img_count += 1

    # assemble features
    X = np.array(X, dtype=float)

    # assemble labels
    y = np.array(y, dtype=int)

    print('Used {} annotated images.'.format(img_count))
    report_labels_classification(y, CATEGORIES)

    return X, y
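
crop_to_contour appears throughout these examples without a definition. A minimal sketch of the cropping step, assuming a padded bounding-box crop of a grayscale image; the real helper evidently returns a patch object with methods such as estimate_angle(), estimate_center(), and rotate():

import cv2

def crop_to_contour(img, contour, pad=10):
    # hypothetical sketch: crop the padded bounding box of a contour
    h, w = img.shape[:2]
    x, y, bw, bh = cv2.boundingRect(contour)
    x0, y0 = max(x - pad, 0), max(y - pad, 0)
    x1, y1 = min(x + bw + pad, w), min(y + bh + pad, h)
    return img[y0:y1, x0:x1]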
Example #4
def main():
    # get first annotation
    anno = next(get_annotations())
    image_path = anno.image_path

    # read the image
    img = cv2.imread(image_path, 0)

    # extract contours
    mask = img_to_mask(img)
    contours = find_contours(img, mask=mask, type='core')

    # pick out the largest contour
    contour = largest_contour(contours)

    # crop and rotate
    patch = crop_to_contour(img, contour)
    patch = patch.rotate(patch.estimate_angle() + np.pi / 2)

    # display result
    plt.imshow(patch.img)
    plt.show()
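
largest_contour is not defined here; a one-line sketch, assuming it picks the contour enclosing the most area:

import cv2

def largest_contour(contours):
    # hypothetical sketch: return the contour with the largest enclosed area
    return max(contours, key=cv2.contourArea)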
Example #5
def main():
    # parse command-line arguments
    parser = ArgumentParser()
    parser.add_argument('--no_display', action='store_true')
    parser.add_argument('--write_video', action='store_true')
    parser.add_argument('-i', '--input', type=str, default='test4.mp4')
    parser.add_argument('-o', '--output', type=str, default='output.avi')
    args = parser.parse_args()

    # prepare video
    cap, props = read_video(args.input)
    _, img = cap.read()
    img = img[:, :, 0]
    mask = img_to_mask(img)

    if args.write_video:
        video_writer = write_video(args.output, props)

    # load predictors
    is_fly_predictor = IsFlyPredictor()
    id_predictor = IdPredictor()
    pose_predictor = {type: PosePredictor(type) for type in ['male', 'female']}
    wing_predictor = WingPredictor()

    # display-specific actions
    if not args.no_display:
        open_window()
        colors = {
            'female': (255, 0, 0),
            'male': (0, 0, 255),
            'both': (255, 0, 255),
            'neither': None
        }

    frames = 0
    tick = perf_counter()
    prof = Profiler()

    while True:
        frame_start = perf_counter()

        # read frame
        prof.tick('I/O')
        ok, img = cap.read()
        prof.tock('I/O')

        if not ok:
            break

        if not args.no_display:
            out = img.copy()

        img = img[:, :, 0]

        frames += 1

        # extract contours
        prof.tick('Find fly contours')
        contours = find_contours(img, mask=mask, type='core')

        # draw contours with a color associated with the class
        contours_by_label = {'neither': [], 'one': [], 'both': []}

        # sort contours into bins: zero flies, one fly and two flies
        for contour in contours:
            label = is_fly_predictor.predict(contour)
            contours_by_label[label].append(contour)

        prof.tock('Find fly contours')

        results = {}
        if (len(contours_by_label['one']) == 2
                and len(contours_by_label['both']) == 0):
            prof.tick('ID as male/female')

            contour_1 = contours_by_label['one'][0]
            contour_2 = contours_by_label['one'][1]

            patch_1 = crop_to_contour(img, contour_1)
            patch_2 = crop_to_contour(img, contour_2)

            label = id_predictor.predict(contour_1, contour_2, patch_1,
                                         patch_2)

            if label == 'mf':
                results['male'] = dict(contour=contour_1, patch=patch_1)
                results['female'] = dict(contour=contour_2, patch=patch_2)
            elif label == 'fm':
                results['female'] = dict(contour=contour_1, patch=patch_1)
                results['male'] = dict(contour=contour_2, patch=patch_2)

            prof.tock('ID as male/female')

            prof.tick('Determine orientation')

            for type in ['male', 'female']:
                result = results[type]
                (cx, cy), angle = pose_predictor[type].predict(result['patch'])
                result.update(dict(cx=cx, cy=cy, angle=angle))

            prof.tock('Determine orientation')

            # predict wing angle
            prof.tick('Determine wing angles')
            patch_m = male_fly_patch(
                img, mask, (results['male']['cx'], results['male']['cy']),
                results['male']['angle'])

            if patch_m is not None:
                wing_angle_right, wing_angle_left = wing_predictor.predict(
                    patch_m)
                if wing_angle_right is not None:
                    results['male']['wing_angle_right'] = wing_angle_right
                if wing_angle_left is not None:
                    results['male']['wing_angle_left'] = wing_angle_left

            prof.tock('Determine wing angles')
        elif (len(contours_by_label['one']) == 0
              and len(contours_by_label['both']) == 1):
            both = dict(contour=contours_by_label['both'][0])
            both['patch'] = crop_to_contour(img, both['contour'])
            both['cx'], both['cy'] = both['patch'].estimate_center(
                absolute=True)
            results['both'] = both
        else:
            print('Unexpected contour configuration, skipping frame.')
            continue

        # when profiling, skip the drawing steps
        if args.no_display:
            continue

        # illustrate the results
        for label, result in results.items():
            # draw outline
            cv2.drawContours(out, [result['contour']], -1, colors[label], 3)

            if label not in ['male', 'female']:
                continue

            # draw center
            center = bound_point((result['cx'], result['cy']), out)
            cv2.circle(out, center, 5, colors[label], -1)

            # draw arrow in direction of orientation
            MA, ma = result['patch'].estimate_axes()
            arrow_from_point(out, center, 0.3 * MA, result['angle'],
                             colors[label])

            if label == 'male':
                if 'wing_angle_right' in result:
                    arrow_angle = result['angle'] + result[
                        'wing_angle_right'] - np.pi
                    arrow_from_point(out, center, 0.3 * MA, arrow_angle,
                                     (255, 255, 0))
                if 'wing_angle_left' in result:
                    arrow_angle = result['angle'] - result[
                        'wing_angle_left'] - np.pi
                    arrow_from_point(out, center, 0.3 * MA, arrow_angle,
                                     (255, 255, 0))

        # display image
        show_image(out, downsamp=2)

        if args.write_video:
            video_writer.write(out)

        # figure out how much extra time the GUI should wait before proceeding
        t_frame_ms = 1e3 * (perf_counter() - frame_start)
        t_extra_ms = int(round(props.t_ms - t_frame_ms))

        # handle GUI tasks
        key = cv2.waitKey(max(t_extra_ms, 1))

        # process input keys
        if key == ord('q'):
            break

    tock = perf_counter()

    prof.stop()

    if args.write_video:
        video_writer.release()

    print('Total frames: {}'.format(frames))
    print('Elapsed time: {}'.format(tock - tick))
    print('Throughput: {:0.3f}'.format(frames / (tock - tick)))
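
The Profiler used above (tick/tock/stop) is not shown. A minimal sketch, assuming it accumulates wall-clock time per label and prints the totals on stop():

from collections import defaultdict
from time import perf_counter

class Profiler:
    # hypothetical sketch of a label-based tick/tock profiler
    def __init__(self):
        self.totals = defaultdict(float)
        self.starts = {}

    def tick(self, label):
        # start (or restart) the timer for this label
        self.starts[label] = perf_counter()

    def tock(self, label):
        # accumulate the elapsed time since the matching tick()
        self.totals[label] += perf_counter() - self.starts.pop(label)

    def stop(self):
        # print the accumulated time per label
        for label, total in sorted(self.totals.items()):
            print('{}: {:0.3f} s'.format(label, total))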
Example #6
def load_data(tol_radians=0.1):
    X = {'male': [], 'female': []}
    y = {'male': [], 'female': []}

    hog = make_hog()

    img_count = 0
    hand_labeled_count = 0

    for anno in tqdm(get_annotations()):
        # keep track of whether any data is actually used from this file
        used_file = False

        img = cv2.imread(anno.image_path, 0)

        mask = img_to_mask(img)
        contours = find_contours(img, mask=mask, type='core')

        for contour in contours:
            type = contour_label(anno, contour)

            if type == 'male' and anno.has('ma') and anno.has('mh'):
                head = anno.get('mh')[0]
                abdomen = anno.get('ma')[0]
            elif type == 'female' and anno.has('fa') and anno.has('fh'):
                head = anno.get('fh')[0]
                abdomen = anno.get('fa')[0]
            else:
                continue

            # make patch, which will compute the angle from the image
            patch = crop_to_contour(img, contour)

            # compute angle from labels
            label_angle = np.arctan2(abdomen[1] - head[1],
                                     head[0] - abdomen[0])

            # find out if the image is flipped or not
            diff = abs(angle_diff(patch.estimate_angle(), label_angle))

            if diff <= tol_radians:
                label = 'normal'
            elif np.pi - tol_radians <= diff <= np.pi + tol_radians:
                label = 'flipped'
            else:
                anno.warn(
                    'Could not properly determine whether image is flipped (diff={:0.1f} degrees)'
                    .format(degrees(diff)))
                continue

            # make a vertically-oriented HOG patch from the body patch
            hog_patch = make_hog_patch(patch)

            # add original data
            X[type].append(patch_to_features(hog_patch, hog))
            y[type].append(CATEGORIES.index(label))

            hand_labeled_count += 1
            used_file = True

            # augment data by flipping the image and inverting the label
            # (CATEGORIES holds exactly two classes, so 1 - index flips it)
            hog_patch_flipped = hog_patch.rotate180()
            label_flipped = CATEGORIES[1 - CATEGORIES.index(label)]

            # add this additional feature
            X[type].append(patch_to_features(hog_patch_flipped, hog))
            y[type].append(CATEGORIES.index(label_flipped))

        if used_file:
            img_count += 1

    # assemble features
    X = {k: np.array(v, dtype=float) for k, v in X.items()}

    # assemble labels
    y = {k: np.array(v, dtype=int) for k, v in y.items()}

    print('Used {} annotated images.'.format(img_count))
    print('Used {} hand-labeled flies.'.format(hand_labeled_count))
    print()

    print('Male classifier:')
    report_labels_classification(y['male'], CATEGORIES)
    print()

    print('Female classifier:')
    report_labels_classification(y['female'], CATEGORIES)

    return X, y
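
angle_diff is used above to compare estimated and hand-labeled orientations. A minimal sketch, assuming it returns the signed angular difference wrapped to [-pi, pi):

import numpy as np

def angle_diff(a, b):
    # hypothetical sketch: signed difference between two angles,
    # wrapped into the interval [-pi, pi)
    return (a - b + np.pi) % (2 * np.pi) - np.pi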
Example #7
def load_data(tol_radians=0.1):
    hog = make_hog()

    X = []
    y = []

    img_count = 0
    hand_labeled_count = 0

    for anno in tqdm(get_annotations()):
        img = cv2.imread(anno.image_path, 0)

        mask = img_to_mask(img)
        contours = find_contours(img, mask=mask, type='core')

        for contour in contours:
            type = contour_label(anno, contour)
            if type == 'male':
                break
        else:
            continue

        if (anno.count('mw') == 2 and anno.has('mh') and anno.has('ma')
                and anno.has('mp2')):
            mh = anno.get('mh')[0]
            ma = anno.get('ma')[0]
            mp2 = anno.get('mp2')[0]
        else:
            continue

        # make patch, which will compute the angle from the image
        body_patch = crop_to_contour(img, contour)

        # compute angle from labels
        label_angle = np.arctan2(ma[1] - mh[1], mh[0] - ma[0])

        # find out if the image is flipped or not
        rotate_angle = body_patch.estimate_angle()
        diff = abs(angle_diff(rotate_angle, label_angle))

        if diff <= tol_radians:
            pass
        elif np.pi - tol_radians <= diff <= np.pi + tol_radians:
            rotate_angle = rotate_angle + np.pi
        else:
            anno.warn(
                'Could not properly determine whether image is flipped (diff={:0.1f} degrees)'
                .format(degrees(diff)))
            continue

        # find center of fly
        body_center = body_patch.estimate_center(absolute=True)

        # create patch centered on fly
        fly_patch = male_fly_patch(img, mask, body_center, rotate_angle)
        if fly_patch is None:
            continue

        origin = bound_point((body_center[0], body_center[1]), img)
        mp2_rel = [mp2[0] - origin[0], origin[1] - mp2[1]]

        rot_mat = get_rotation_matrix(np.pi / 2 - rotate_angle)
        mp2_rot = rot_mat.dot(mp2_rel)

        wings = []
        for mw in anno.get('mw'):
            wing_rel = [mw[0] - origin[0], origin[1] - mw[1]]
            wing_rot = rot_mat.dot(wing_rel) - mp2_rot
            # arctan2 avoids a division by zero when the wing point
            # lies on the body axis
            angle = np.arctan2(abs(wing_rot[0]), abs(wing_rot[1]))

            wings.append({'x': wing_rot[0], 'angle': angle})
        if len(wings) != 2:
            anno.warn('Length of wings is not 2 for some reason, skipping...')
            continue

        if wings[0]['x'] > wings[1]['x']:
            # wing 0 is right, wing 1 is left
            right_wing_angle = wings[0]['angle']
            left_wing_angle = wings[1]['angle']
        else:
            # wing 1 is right, wing 0 is left
            right_wing_angle = wings[1]['angle']
            left_wing_angle = wings[0]['angle']

        # create a HOG patch for each wing; the left wing is handled by
        # mirroring the patch horizontally
        data = []
        data.append({
            'hog_patch': make_hog_patch(fly_patch),
            'angle': right_wing_angle
        })
        data.append({
            'hog_patch': make_hog_patch(fly_patch.flip('horizontal')),
            'angle': left_wing_angle
        })

        # add data to X and y
        for datum in data:
            X.append(patch_to_features(datum['hog_patch'], hog))
            y.append(datum['angle'])
            hand_labeled_count += 1

            if DEBUG:
                plt.imshow(datum['hog_patch'].img)
                plt.show()

        # increment img_count to indicate that this file was actually used
        img_count += 1

    # assemble features
    X = np.array(X, dtype=float)

    # assemble labels
    y = np.array(y, dtype=float)

    print('Used {} annotated images.'.format(img_count))
    print('Used {} hand-labeled wings.'.format(hand_labeled_count))

    report_labels_regression(np.degrees(y),
                             filename=get_file('output', 'graphs',
                                               'labels_wing.eps'),
                             units='Wing Angle (degrees)')

    return X, y
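
get_rotation_matrix is assumed above to rotate label coordinates into the fly's frame. A minimal sketch, assuming a standard 2x2 counterclockwise rotation matrix:

import numpy as np

def get_rotation_matrix(theta):
    # hypothetical sketch: 2x2 counterclockwise rotation by theta radians
    return np.array([[np.cos(theta), -np.sin(theta)],
                     [np.sin(theta), np.cos(theta)]])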