def _stitch_images(image_stitching_config):
    for data in image_stitching_config.data:
        # Load the corner locations
        img1_corners = np.load(data.corner_locs_1)["corner_locations"]
        img2_corners = np.load(data.corner_locs_2)["corner_locations"]

        # Read in the input images
        img1 = etai.read(data.image_1)
        img2 = etai.read(data.image_2)

        # Compute HOG feature vectors for every detected corner
        hog_features_1 = _get_HOG_descriptors(img1_corners, img1)
        hog_features_2 = _get_HOG_descriptors(img2_corners, img2)

        # Match the feature vectors
        img_1_pts, img_2_pts = _match_keypoints(hog_features_1, hog_features_2,
                                                img1_corners, img2_corners,
                                                img1, img2)

        # Tune this parameter in "requests/image_stitching_request.json"
        # to specify the number of corresponding points to use when computing
        # the homography matrix
        no_correspondence = image_stitching_config.parameters.no_correspondence

        # Compute the homography matrix that relates image 1 and image 2
        H = _get_homography(img_1_pts[1:no_correspondence + 1],
                            img_2_pts[1:no_correspondence + 1])

        # Stitch the images by applying the homography matrix to image 2
        final_img = _overlap(img1, img2, H)

        # Write the final stitched image
        etai.write(final_img, data.stitched_image)
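The `_get_homography` helper is not shown above. For reference, a minimal least-squares sketch of such a helper (an assumption, not the author's implementation: Nx2 point arrays in, fixing H[2, 2] = 1) might look like:

import numpy as np

def estimate_homography(pts1, pts2):
    # build the standard DLT system with h33 fixed to 1
    n = pts1.shape[0]
    A = np.zeros((2 * n, 8))
    b = np.zeros(2 * n)
    for i, ((x, y), (u, v)) in enumerate(zip(pts1, pts2)):
        A[2 * i] = [x, y, 1, 0, 0, 0, -u * x, -u * y]
        A[2 * i + 1] = [0, 0, 0, x, y, 1, -v * x, -v * y]
        b[2 * i] = u
        b[2 * i + 1] = v
    h, _, _, _ = np.linalg.lstsq(A, b, rcond=None)
    return np.append(h, 1.0).reshape(3, 3)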
Example #2
def draw_labeled_image(sample, label_fields, outpath, annotation_config=None):
    """Draws an annotated version of the image sample with its label field(s)
    overlaid to disk.

    Args:
        sample: a :class:`fiftyone.core.sample.Sample` instance
        label_fields: the list of :class:`fiftyone.core.labels.ImageLabel`
            fields to render
        outpath: the path to write the annotated image
        annotation_config (None): an :class:`AnnotationConfig` specifying how
            to render the annotations
    """
    if annotation_config is None:
        annotation_config = _DEFAULT_ANNOTATION_CONFIG

    image_labels = etai.ImageLabels()
    for label_field in label_fields:
        label = sample[label_field]
        if label is None:
            continue

        image_labels.merge_labels(label.to_image_labels(name=label_field))

    img = etai.read(sample.filepath)

    anno_img = etaa.annotate_image(img,
                                   image_labels,
                                   annotation_config=annotation_config)

    etai.write(anno_img, outpath)
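A hypothetical call, assuming a loaded FiftyOne dataset whose samples carry a "ground_truth" label field (both the dataset name and the field name are placeholders):

import fiftyone as fo

dataset = fo.load_dataset("my-dataset")  # assumed dataset name
sample = dataset.first()
draw_labeled_image(sample, ["ground_truth"], "/tmp/annotated.jpg")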
Example #3
def _perform_canny_edge_detection(canny_edge_config):
    for data in canny_edge_config.data:
        in_img = etai.read(data.input_image)
        sobel_horiz = np.load(data.sobel_horizontal_result)["filtered_matrix"]
        sobel_vert = np.load(data.sobel_vertical_result)["filtered_matrix"]
        (g_intensity, orientation) = _create_intensity_orientation_matrices(
            sobel_horiz, sobel_vert)
        if data.gradient_intensity is not None:
            etai.write(g_intensity, data.gradient_intensity)
        if data.gradient_orientation is not None:
            etai.write(orientation, data.gradient_orientation)
        np.save('out/gradient_orientation.npy', orientation)
        etai.write(g_intensity, 'out/g_intensity.jpg')
        g_suppressed = _non_maximum_suppression(g_intensity, orientation,
                                                in_img)
        etai.write(g_suppressed, 'out/g_suppressed.jpg')
        g_thresholded = _double_thresholding(
            g_suppressed, canny_edge_config.parameters.low_threshold,
            canny_edge_config.parameters.high_threshold)
        g_strong = _hysteresis(g_thresholded,
                               canny_edge_config.parameters.low_threshold,
                               canny_edge_config.parameters.high_threshold)
        g_strong = g_strong.astype(int)
        etai.write(g_strong, data.image_edges)
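`_double_thresholding` is referenced but not defined here. A common formulation (an assumption, not necessarily this module's) marks pixels above the high threshold as strong edges and pixels between the two thresholds as weak edges:

import numpy as np

def double_threshold_sketch(g, low, high, strong=255, weak=75):
    # the strong/weak intensity values are arbitrary placeholders
    out = np.zeros_like(g)
    out[g >= high] = strong
    out[(g >= low) & (g < high)] = weak
    return out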
Example #4
def q1():
    img = etai.read('veggie-stand.jpg')
    #first compute the superpixels on the image we loaded
    S, C = get_superpixel(img, 230)
    display_save(img, S, 'q1')
    # compute histograms on a superpixel

    v = histvec(img, S == 115, 10)

    # show mask section
    img2 = img.copy()
    img2[S != 115] = 0
    plt.figure()
    plt.imshow(img2)
    plt.savefig('q1_mask.png')
    plt.close()

    # plot and compare
    plt.figure()
    plt.subplot(131)
    plt.bar(np.arange(len(v)), v)

    plt.title("Student_output")
    solution_hist = np.load('q1_histogram.npy')
    plt.subplot(132)
    plt.bar(np.arange(len(solution_hist)), solution_hist)

    plt.title("Solution_output")
    plt.subplot(133)
    plt.plot(np.arange(len(v)), abs(v - solution_hist))
    plt.title("Error")
    #plt.show()
    plt.savefig('q1_result.png')
    plt.close()
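`histvec` is not shown above. A plausible masked-histogram helper (assumed behavior: b bins per RGB channel over the masked pixels, concatenated and normalized) could be:

import numpy as np

def histvec_sketch(img, mask, b):
    # histogram each channel over the masked pixels only
    chans = [np.histogram(img[..., c][mask], bins=b, range=(0, 255))[0]
             for c in range(3)]
    v = np.concatenate(chans).astype(float)
    return v / v.sum()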
Example #5
def test_SVHN(train_PCA, train_mean, train_PCA_score, train_label, test_box,
              test_data_path, num_neighbor, num_to_test):
    print("\n****** Start testing SVHN dataset with " + str(num_neighbor) +
          " nearest neighbors ******")
    Img_data_all = test_box.getAllDigitStructure_ByDigit()
    print("****** Loaded " + str(len(Img_data_all)) +
          " images from SVHN dataset ******")
    #initialize some values for analysis
    num_total_test = 0
    num_false_class = 0
    test_histogram = []
    test_false_histogram = []
    if num_to_test == -1:
        num_to_test = len(Img_data_all)

    for cur_img_data in Img_data_all[:num_to_test]:
        cur_img_gray = etai.read(test_data_path + "/" +
                                 cur_img_data['filename'],
                                 flag=cv2.IMREAD_GRAYSCALE)
        height, width = cur_img_gray.shape
        cur_num_bbox = len(cur_img_data['boxes'])
        num_total_test += cur_num_bbox
        for j in range(cur_num_bbox):
            # check that the bounding box does not lie outside the image
            cur_box = cur_img_data['boxes'][j]
            x_idx_1 = int(max(cur_box['left'], 0))
            x_idx_2 = int(min(cur_box['left'] + cur_box['width'], width))
            y_idx_1 = int(max(cur_box['top'], 0))
            y_idx_2 = int(min(cur_box['top'] + cur_box['height'], height))
            #extract bounded image and resize to 28*28
            box_img = cur_img_gray[y_idx_1:y_idx_2, x_idx_1:x_idx_2]
            box_img_resize = resize_SVHN_img(box_img)
            #test with knn
            box_class = kNN(box_img_resize, train_PCA, train_PCA_score,
                            train_mean, train_label, num_neighbor)
            if box_class == 0:
                box_class = 10
            #save histogram
            cur_box_hist = cur_box
            cur_box_hist['filename'] = cur_img_data['filename']
            cur_box_hist['classify'] = box_class
            test_histogram.append(cur_box_hist)
            if box_class != cur_box['label']:
                num_false_class += 1
                test_false_histogram.append(cur_box_hist)

    Error_rate = num_false_class / num_total_test
    print("****** Finish testing SVHN dataset. Totoal " + str(num_total_test) +
          " images tested. " + str(num_total_test - num_false_class) +
          " images are labeled correctly. Error rate = " +
          str(Error_rate * 100) + "% ******")
    return Error_rate, test_histogram, test_false_histogram
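The `kNN` helper is referenced but not defined here. A sketch consistent with the arguments above (assumptions: `pca_basis` is (n_pixels, n_components), `labels` is a numpy array, and classification is a majority vote over the k nearest training scores):

import numpy as np
from collections import Counter

def knn_sketch(img28, pca_basis, train_scores, train_mean, labels, k):
    # project the flattened query onto the PCA basis
    q = (img28.flatten().astype(float) - train_mean) @ pca_basis
    dists = np.linalg.norm(train_scores - q, axis=1)
    nearest = labels[np.argsort(dists)[:k]]
    return Counter(nearest.tolist()).most_common(1)[0][0]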
Example #6
def test_SVHN_sample(train_PCA, train_mean, train_PCA_score, train_label,
                     test_box, test_data_path, num_neighbor):
    print("\n****** Start testing SVHN dataset with " + str(num_neighbor) +
          " nearest neighbors ******")
    Img_data_all = test_box.getAllDigitStructure_ByDigit()
    print("****** Loaded " + str(len(Img_data_all)) +
          " images from SVHN dataset ******")
    false_sample = False
    correct_sample = False
    for cur_img_data in Img_data_all:
        cur_img_gray = etai.read(test_data_path + "/" +
                                 cur_img_data['filename'],
                                 flag=cv2.IMREAD_GRAYSCALE)
        height, width = cur_img_gray.shape
        cur_num_bbox = len(cur_img_data['boxes'])
        for j in range(cur_num_bbox):
            # check that the bounding box does not lie outside the image
            cur_box = cur_img_data['boxes'][j]
            x_idx_1 = int(max(cur_box['left'], 0))
            x_idx_2 = int(min(cur_box['left'] + cur_box['width'], width))
            y_idx_1 = int(max(cur_box['top'], 0))
            y_idx_2 = int(min(cur_box['top'] + cur_box['height'], height))
            #extract bounded image and resize to 28*28
            box_img = cur_img_gray[y_idx_1:y_idx_2, x_idx_1:x_idx_2]
            box_img_resize = resize_SVHN_img(box_img)
            #test with knn
            cur_class = kNN(box_img_resize, train_PCA, train_PCA_score,
                            train_mean, train_label, num_neighbor)
            if cur_class == 0:
                cur_class = 10
            if cur_class != cur_box['label'] and not false_sample:
                F_img = box_img
                F_label = [cur_class, int(cur_box['label'])]
                false_sample = True
            elif cur_class == cur_box['label'] and not correct_sample:
                T_img = box_img
                T_label = [cur_class, int(cur_box['label'])]
                correct_sample = True
            if correct_sample and false_sample:
                break
        if correct_sample and false_sample:
            break

    _, axs = plt.subplots(ncols=2, nrows=1)
    axs[0].imshow(T_img, cmap='gray')
    axs[0].set_title("True Class: " + str(T_label[1]) + " ,Classified as " +
                     str(T_label[0]))
    axs[1].imshow(F_img, cmap='gray')
    axs[1].set_title("True Class: " + str(F_label[1]) + " ,Classified as " +
                     str(F_label[0]))
    plt.show()
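`resize_SVHN_img` presumably resizes each crop to the 28x28 training size, per the comment above; an assumed one-liner:

import cv2

def resize_svhn_img_sketch(box_img):
    # resize the cropped digit to 28x28 to match the training data
    return cv2.resize(box_img, (28, 28), interpolation=cv2.INTER_AREA)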
Example #7
def _find_line_segments(find_segments_config):
    for data in find_segments_config.data:
        in_img = etai.read(data.input_image)
        gradient_orientation = np.load("out/gradient_orientation.npy")
        full_segment = _hough_line_seg(in_img, gradient_orientation)
        temp_dict_list = []
        for i in range(full_segment.shape[0]):
            temp = defaultdict()
            temp["No"] = i + 1
            temp["coordinates"] = [(full_segment[i, 0], full_segment[i, 1]),
                                   (full_segment[i, 2], full_segment[i, 3])]
            temp_dict_list.append(temp)
        segment_out = defaultdict(lambda: defaultdict())
        segment_out["Line_segments"] = temp_dict_list

        etas.write_json(segment_out, data.line_segments)
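For reference, the JSON written above has this shape (the coordinate values are illustrative placeholders):

{
    "Line_segments": [
        {"No": 1, "coordinates": [[x1, y1], [x2, y2]]},
        {"No": 2, "coordinates": [[x1, y1], [x2, y2]]}
    ]
}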
Example #8
def _perform_convolution(convolution_config):
    '''Performs convolution of an input image with a kernel specified
    by the configuration parameters, and writes the result to the
    path specified by "filtered_image".

    Args:
        convolution_config: the configuration file for the module
    '''
    kernel_type = convolution_config.parameters.kernel_type
    if kernel_type == "x_derivative":
        kernel = _create_x_derivative_kernel()
    elif kernel_type == "y_derivative":
        kernel = _create_y_derivative_kernel()
    elif kernel_type == "sobel_vertical":
        kernel = _create_sobel_vertical_kernel()
    elif kernel_type == "sobel_horizontal":
        kernel = _create_sobel_horizontal_kernel()
    else:
        # otherwise, use the Gaussian kernel
        kernel = _create_gaussian_kernel(
            convolution_config.parameters.gaussian_sigma)

    for data in convolution_config.data:
        in_img = etai.read(data.input_image)
        if convolution_config.parameters.image_type == "grayscale":
            in_img = etai.rgb_to_gray(in_img)
        else:
            # if the image should be a color image, convert the grayscale
            # image to color (simply converts the image into a 3-channel
            # image)
            if etai.is_gray(in_img):
                in_img = etai.gray_to_rgb(in_img)
        if convolution_config.parameters.image_max_range == 1:
            in_img = (in_img.astype(float)) / 255.0

        filtered_image = _convolve(kernel, in_img)
        if data.filtered_matrix:
            etau.ensure_basedir(data.filtered_matrix)
            np.savez(data.filtered_matrix, filtered_matrix=filtered_image)
        if data.filtered_image:
            etau.ensure_basedir(data.filtered_image)
            etai.write(filtered_image, data.filtered_image)
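`_create_gaussian_kernel` is not shown. A standard construction (assumptions: kernel half-size of about 3*sigma, normalized to sum to 1):

import numpy as np

def gaussian_kernel_sketch(sigma):
    half = int(np.ceil(3 * sigma))
    x = np.arange(-half, half + 1)
    xx, yy = np.meshgrid(x, x)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))
    return kernel / kernel.sum()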
Example #9
def embed_image(impath):
    '''Embeds the image using VGG-16 and stores the embedding as an .npz file
    on disk.

    Args:
        impath: path to an image to embed
    '''
    img = etai.read(impath)
    rimg = etai.resize(img, 224, 224)

    vgg16 = etav.VGG16()
    embedded_vector = vgg16.evaluate([rimg], layer=vgg16.fc2l)[0]

    logger.info("Image embedded to vector of length %d", len(embedded_vector))
    logger.info("%s", embedded_vector)

    outpath = _abspath("out/result_embed_image.npz")
    etau.ensure_basedir(outpath)
    np.savez_compressed(outpath, v=embedded_vector)
    logger.info("Result saved to '%s'", outpath)
Example #10
def embed_image(impath):
    '''Embeds the image using VGG-16 and stores the embedding as an .npz file
    on disk, using VGG16Featurizer to handle I/O.

    Args:
        impath: path to an image to embed
    '''
    img = etai.read(impath)

    # Invoke the Featurizer using the "with" syntax to automatically handle
    # calling the start() and stop() methods
    with VGG16Featurizer() as vfeaturizer:
        embedding = vfeaturizer.featurize(img)

    logger.info("Image embedded to vector of length %d", len(embedding))
    logger.info("%s", embedding)

    outpath = _abspath("out/result_embed_image.npz")
    etau.ensure_basedir(outpath)
    np.savez_compressed(outpath, v=embedding)
    logger.info("Result saved to '%s'", outpath)
Example #11
def example(file_name):
    img = etai.read(file_name)[:, :, :3]
    file_name_root = file_name.split('.')[0]  # file name without the extension

    # first compute the superpixels on the image we loaded
    S, C = get_superpixel(img, 180)
    display_save(img, S, file_name_root)
    # next compute the feature reduction for the segmentation (histograms)
    hist_values = img_reduce(img, S, C)
    print('Please click on the superpixel to use as the key\n'
          'on which to base the foreground extraction.\n')
    select_keyindex = True
    if select_keyindex:

        key_coordinates = get_correspondences(img)[0]
        x = int(key_coordinates[0])
        y = int(key_coordinates[1])
        keyindex = S[y, x]

    # Perform graph-cut
    output_student = graphcut(S, C, hist_values, keyindex)
    plt.figure()
    plt.subplot(131)
    img2 = img.copy()
    img2[S != keyindex] = 0
    plt.imshow(img2)
    plt.title("keyindex")
    plt.subplot(132)
    plt.imshow(output_student)
    plt.title("graphcut mask")
    plt.subplot(133)
    out_img = img.copy()
    out_img[output_student != 1] = 255
    plt.imshow(mark_boundaries(out_img, S))
    plt.title("Superpixels and fg")
    #plt.show()
    plt.savefig(file_name_root + '_result.png')
    plt.close()
Example #12
def q2():
    img = etai.read('porch1.png')[:, :, :3]
    #first compute the superpixels on the image we loaded
    S, C = get_superpixel(img, 300)
    display_save(img, S, 'q2')
    #compute adjacency matrix
    student_A = seg_neighbor(S)
    print(ave_deg(student_A))
    solution_A = np.load("Adjacency.npy")
    # plot and compare
    plt.figure()
    plt.subplot(131)
    plt.imshow(student_A)
    plt.title("student output")
    plt.subplot(132)
    plt.imshow(solution_A)
    plt.title("solution output")
    plt.subplot(133)
    plt.imshow(student_A - solution_A)
    plt.title("Error")
    #plt.show()
    plt.savefig('q2_result.png')
    plt.close()
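`seg_neighbor` computes the superpixel adjacency matrix. A sketch under the assumption that labels are contiguous integers and that two superpixels are adjacent when they touch horizontally or vertically:

import numpy as np

def seg_neighbor_sketch(S):
    n = S.max() + 1
    A = np.zeros((n, n), dtype=int)
    horiz = S[:, :-1] != S[:, 1:]   # horizontally touching, different labels
    vert = S[:-1, :] != S[1:, :]    # vertically touching, different labels
    A[S[:, :-1][horiz], S[:, 1:][horiz]] = 1
    A[S[:-1, :][vert], S[1:, :][vert]] = 1
    return np.maximum(A, A.T)       # symmetrize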
Example #13
def _find_corners(harris_corner_config):
    for data in harris_corner_config.data:
        sobel_horiz = np.load(data.sobel_horizontal_result)["filtered_matrix"]
        sobel_vert = np.load(data.sobel_vertical_result)["filtered_matrix"]
        corner_response, corner_locations = _get_harris_corner(
            sobel_horiz, sobel_vert,
            harris_corner_config.parameters.window_half_size,
            harris_corner_config.parameters.threshold)
        corner_locs_after_sup = non_max_suppression(
            corner_response, harris_corner_config.parameters.non_max_radius)
        if data.corner_locations:
            etau.ensure_basedir(data.corner_locations)
            np.savez(data.corner_locations,
                     corner_locations=corner_locs_after_sup)
        if data.corners_img_before_sup or data.corners_img_after_sup:
            in_img = etai.read(data.input_image)
            if data.corners_img_before_sup:
                corners_viz_before_sup = _visualize_corners(
                    in_img, corner_locations)
                etai.write(corners_viz_before_sup, data.corners_img_before_sup)
            if data.corners_img_after_sup:
                corners_viz_after_sup = _visualize_corners(
                    in_img, corner_locs_after_sup)
                etai.write(corners_viz_after_sup, data.corners_img_after_sup)
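`non_max_suppression` is referenced but not defined here. A sketch using a sliding maximum filter (an assumption: keep responses that equal the local maximum within the given radius, and return their (row, col) locations):

import numpy as np
from scipy.ndimage import maximum_filter

def nms_sketch(response, radius):
    local_max = maximum_filter(response, size=2 * radius + 1)
    keep = (response == local_max) & (response > 0)
    return np.argwhere(keep)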
Example #14
def draw_labeled_image(sample,
                       outpath,
                       label_fields=None,
                       annotation_config=None):
    """Draws an annotated version of the image sample with its label field(s)
    overlaid to disk.

    Args:
        sample: a :class:`fiftyone.core.sample.Sample` instance
        outpath: the path to write the annotated image
        label_fields (None): a list of :class:`fiftyone.core.labels.ImageLabel`
            fields to render. If omitted, all compatible fields are rendered
        annotation_config (None): an :class:`AnnotationConfig` specifying how
            to render the annotations
    """
    if label_fields is None:
        label_fields = _get_image_label_fields(sample)

    if annotation_config is None:
        annotation_config = AnnotationConfig.default()

    image_labels = etai.ImageLabels()
    for label_field in label_fields:
        label = sample[label_field]
        if label is None:
            continue

        image_labels.merge_labels(label.to_image_labels(name=label_field))

    img = etai.read(sample.filepath)

    anno_img = etaa.annotate_image(img,
                                   image_labels,
                                   annotation_config=annotation_config)

    etai.write(anno_img, outpath)
Example #15
def q3():
    img = etai.read('flower1.jpg')
    # first compute the superpixels on the image we loaded
    S, C = get_superpixel(img, 180)
    display_save(img, S, 'q3')
    # next compute the feature reduction for the segmentation (histograms)
    hist_values = img_reduce(img, S, C)
    print('Please click on the superpixel to use as the key\n'
          'on which to base the foreground extraction.\n')
    select_keyindex = True
    if select_keyindex:

        key_coordinates = get_correspondences(img)[0]
        x = int(key_coordinates[0])
        y = int(key_coordinates[1])
        keyindex = S[y, x]

    # Perform graph-cut
    output_student = graphcut(S, C, hist_values, keyindex, False)
    # plot and compare
    solution_output = np.load("solution_q3_mask.npy")

    plt.figure()
    plt.subplot(131)
    plt.imshow(output_student)
    plt.title("student output")
    plt.subplot(132)
    plt.imshow(solution_output)
    plt.title("solution output")
    plt.subplot(133)
    plt.imshow(output_student - solution_output)
    plt.title("Error")
    #plt.show()
    plt.savefig('q3_result.png')
    plt.close()
Example #16
def main(n):
    '''
    Finds the homography matrix and uses it to draw the corresponding
    marker in football image 2.
    '''
    # reading the images
    img1 = etai.read('football1.jpg')
    img2 = etai.read('football2.jpg')

    filepath = 'football_pts_' + str(n) + '.npy'
    # get n corresponding points
    if not os.path.exists(filepath):
        get_correspondences(img1, img2, n)

    correspondence_pts = np.load(filepath)

    XY1 = correspondence_pts[0]
    XY2 = correspondence_pts[1]
    # plot football image 1 with marker 33
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.imshow(img1)
    u = [1210, 1701]
    v = [126, 939]
    ax.plot(u, v, color='yellow')
    ax.set(title='Football image 1')
    plt.xlim(0, img1.shape[1])
    plt.ylim(0, img1.shape[0])
    plt.gca().invert_yaxis()
    plt.show()
    #------------------------------------------------
    # FILL YOUR CODE HERE
    # Your code should estimate the homography and draw the
    # corresponding yellow line in the second image.

    yVec = np.ones((3 * n))  # vector y
    aMat = np.zeros((3 * n, 9))  # Matrix A
    for i in range(n):
        # form matrix a and y for LR
        yVec[3 * i] = XY2[i, 0]
        yVec[3 * i + 1] = XY2[i, 1]
        aMat[3 * i, 0:2] = XY1[i, :]
        aMat[3 * i, 2] = 1
        aMat[3 * i + 1, 3:5] = XY1[i, :]
        aMat[3 * i + 1, 5] = 1
        aMat[3 * i + 2, 6:8] = XY1[i, :]
        aMat[3 * i + 2, 8] = 1

    #solve for x*=argmin||Ax-y||_2
    xVec = np.matmul(
        np.matmul(np.linalg.inv((np.matmul(aMat.T, aMat))), aMat.T), yVec)

    # form the homogeneous transformation matrix
    tMat = np.zeros((3, 3))
    tMat[0, :] = xVec[0:3]
    tMat[1, :] = xVec[3:6]
    tMat[2, :] = xVec[6:9]

    line1MatHomo = np.array([[u[0], u[1]], [v[0], v[1]], [1, 1]], dtype=float)

    # calculate corresponding points
    line2MatHomo = np.matmul(tMat, line1MatHomo)

    # check whether the new line goes out of the image
    '''
    if line2MatHomo[1,1] > img2.shape[0]:
        y_diff_all = line2MatHomo[1,1]-line2MatHomo[1,0]
        x_diff_all = line2MatHomo[0,1]-line2MatHomo[0,0]
        y_diff = line2MatHomo[1,1]-img2.shape[0]
        x_diff = (x_diff_all/y_diff_all)*y_diff
        #print(x_diff)
        line2MatHomo[1,1] = img2.shape[0]
        line2MatHomo[0,1] = line2MatHomo[0,1]-x_diff
    '''
    u2Vec = line2MatHomo[0, :]
    v2Vec = line2MatHomo[1, :]

    # plot football image 2 with the corresponding marker 33
    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111)
    ax2.imshow(img2)
    ax2.plot(u2Vec, v2Vec, color='yellow')
    ax2.set(title='Football image 2')
    ax2.set_adjustable('box')  # 'box-forced' was removed in modern Matplotlib
    plt.xlim(0, img2.shape[1])
    plt.ylim(0, img2.shape[0])
    plt.gca().invert_yaxis()
    plt.show()
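A note on the normal-equations solve above: inverting aMat.T @ aMat explicitly can be numerically fragile; np.linalg.lstsq solves the same least-squares problem more stably:

xVec, _, _, _ = np.linalg.lstsq(aMat, yVec, rcond=None)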
Example #17
    def _parse_image(self, image_or_path):
        if etau.is_str(image_or_path):
            return etai.read(image_or_path)

        return np.asarray(image_or_path)
Example #18
    def get_image(self):
        image_or_path = self.current_sample
        if etau.is_str(image_or_path):
            return etai.read(image_or_path)

        return np.asarray(image_or_path)
Example #19
 def get_image(self):
     return etai.read(self.current_sample.filepath)
Example #20
# This needs to be rewritten.
# Note that the last argument, (2, 0, 1), just swaps axes so that HWC --> CHW
transform = BaseTransform(net.size, rgb_means, rgb_std, (2, 0, 1))
object_detector = ObjectDetector(net, detector, transform, num_classes, True,
                                 300, 0.1)

print('Loading image..')
# Note that the mean in this code is given in the [0, 255] range, but the
# stdev is (1, 1, 1), which doesn't make much sense to me.
# There is no documentation about the expected manipulation of the data...
#
# etai.read returns values in [0, 255], but etai.to_double maps them to [0, 1]
#
# the BaseTransform in this library does all the resizing and datatype
# conversion
image = etai.read('http://images.cocodataset.org/val2017/000000252219.jpg')
#image = etai.read('http://images.cocodataset.org/val2017/000000397133.jpg')
#image = etai.read('http://images.cocodataset.org/val2017/000000037777.jpg')
# I tried the following to check whether the persons in the above images went
# undetected mainly because of a data format issue. That did not seem to be
# the case.
#image = etai.rgb_to_bgr(image)
# Let's get a VOC image
#image = etai.read('/scratch/jason-data/voc2007/VOCdevkit/VOC2007/JPEGImages/000001.jpg')

image = etai.to_float(image)

print('running the prediction')
with torch.no_grad():
    detect_bboxes = object_detector.predict(image)
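The (2, 0, 1) tuple passed to BaseTransform above is the HWC --> CHW axis swap mentioned earlier; on a raw numpy array it amounts to:

import numpy as np

chw = np.transpose(image, (2, 0, 1))  # (H, W, C) -> (C, H, W)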
Example #21
from collections import defaultdict
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import

import cv2
import gzip
import matplotlib.pyplot as plt
import numpy as np
import os
import struct
import sys

from dig_struct import *

from eta.core.config import Config, ConfigError
import eta.core.image as etai
import eta.core.module as etam
import eta.core.serial as etas
import scipy.io as sio


base_svhn_path = "../data/test"
dsf = DigitStructFile(base_svhn_path + "/digitStruct.mat")
Img_data_all = dsf.getAllDigitStructure_ByDigit()
cur_img_data = Img_data_all[345]
cur_img_gray = etai.read(base_svhn_path + "/" + cur_img_data['filename'],
                         flag=cv2.IMREAD_GRAYSCALE)
cv2.imshow('img1', cur_img_gray)
cv2.waitKey()
Example #22
from builtins import *
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import

import os

import cv2

import eta.core.image as etai


def plot(img):
    cv2.imshow("*** Press any key to exit ***", etai.rgb_to_bgr(img))
    cv2.waitKey(0)
    cv2.destroyAllWindows()


here = os.path.dirname(__file__)
path1 = os.path.join(here, "data/water.jpg")
path2 = os.path.join(here, "data/logo.png")

img1 = etai.resize(etai.read(path1), width=1024)
img2 = etai.resize(etai.read(path2), width=400)

x0 = etai.Width("30%").render(img=img1)
y0 = etai.Height("15%").render(img=img1)
img3 = etai.overlay(img1, img2, x0=x0, y0=y0)

plot(img3)
Example #23
# This needs to be rewritten.
# Note that the last argument, (2, 0, 1), just swaps axes so that HWC --> CHW
transform = BaseTransform(net.size, rgb_means, rgb_std, (2, 0, 1))
object_detector = ObjectDetector(net, detector, transform, num_classes, True,
                                 300, 0.1)

dataset = etad.LabeledImageDataset(
    "/scratch/jason-data/blitz_20200309/vehicle-10/manifest.json")

imagesetlabels = etai.ImageSetLabels()

with torch.no_grad():
    for (image_path, labels_path) in dataset.iter_paths():
        print("working with image %s" % image_path)

        image = etai.to_float(etai.read(image_path))
        detect_bboxes = object_detector.predict(image)

        for class_id, class_collection in enumerate(detect_bboxes):
            print("[%02d] %s --> %s" %
                  (class_id, labelmap[class_id], len(class_collection)))

        # hack to only keep cars
        cars = detect_bboxes[7]
        objects = [to_detected_object("vehicle", v, image) for v in cars]
        doc = etao.DetectedObjectContainer(objects=objects)
        imagesetlabels[os.path.basename(image_path)].add_objects(doc)

imagesetlabels.write_json('/tmp/detections.json')

kill_me_now()