Example no. 1
def resc(patch):
    """
    :param patch:  [image, mask]
    :return: random rescaling of the pair [image, mask]

    --- Rescaling reinforces axon size diversity ---
    """

    # pick a single random scale factor for the pair
    scale = random.choice([0.5, 0.75, 1.0, 1.5, 2.0])

    image_rescale = rescale(patch[0], scale)
    mask_rescale = rescale(patch[1], scale)
    s_r = mask_rescale.shape[0]
    q_h, r_h = divmod(256 - s_r, 2)

    if q_h > 0:
        # smaller than 256: pad the pair symmetrically back to 256x256
        image_rescale = np.pad(image_rescale, (q_h, q_h + r_h), mode="reflect")
        mask_rescale = np.pad(mask_rescale, (q_h, q_h + r_h), mode="reflect")
    else:
        # larger than 256: crop a random 256x256 patch
        patches = extract_patch(image_rescale, mask_rescale, 256)
        i = np.random.randint(len(patches))
        image_rescale, mask_rescale = patches[i]

    mask_rescale = preprocessing.binarize(np.array(mask_rescale),
                                          threshold=0.001)

    return [image_rescale, mask_rescale]
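The pad-or-crop arithmetic in resc is easy to sanity-check in isolation. A minimal sketch, assuming only numpy and skimage.transform.rescale (the original's extract_patch helper is replaced here by a plain corner crop for illustration):

import numpy as np
from skimage.transform import rescale

img = np.random.rand(256, 256)
small = rescale(img, 0.75)                  # (192, 192)
q, r = divmod(256 - small.shape[0], 2)
if q > 0:
    # pad symmetrically back to 256x256 (one extra pixel on one side if odd)
    small = np.pad(small, (q, q + r), mode="reflect")
else:
    small = small[:256, :256]               # stand-in for extract_patch
assert small.shape == (256, 256)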
def test_iradon_angles():
    """
    Test with different number of projections
    """
    size = 100
    # Synthetic data
    image = np.tri(size) + np.tri(size)[::-1]
    # Large number of projections: a good quality is expected
    nb_angles = 200
    radon_image_200 = radon(image, theta=np.linspace(0, 180, nb_angles,
                                                     endpoint=False))
    reconstructed = iradon(radon_image_200)
    # (`rescale` here is assumed to be a test-local [0, 1] normaliser,
    # not skimage.transform.rescale)
    delta_200 = np.mean(abs(rescale(image) - rescale(reconstructed)))
    assert delta_200 < 0.03
    # Lower number of projections
    nb_angles = 80
    radon_image_80 = radon(image, theta=np.linspace(0, 180, nb_angles,
                                                    endpoint=False))
    # Test whether the sum of all projections is approximately the same
    s = radon_image_80.sum(axis=0)
    assert np.allclose(s, s[0], rtol=0.01)
    reconstructed = iradon(radon_image_80)
    delta_80 = np.mean(abs(image / np.max(image) -
                           reconstructed / np.max(reconstructed)))
    # Loss of quality when the number of projections is reduced
    assert delta_80 > delta_200
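The sum check above works because every Radon projection integrates the whole image, so each sinogram column carries (approximately) the same total mass. A standalone sketch of that invariant, assuming skimage.transform.radon and passing circle=False so nothing is clipped:

import numpy as np
from skimage.transform import radon

image = np.tri(100) + np.tri(100)[::-1]
sinogram = radon(image, theta=np.linspace(0, 180, 80, endpoint=False),
                 circle=False)
col_sums = sinogram.sum(axis=0)            # one total per projection angle
print(col_sums.std() / col_sums.mean())    # ~0: mass is conserved per angle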
Example no. 3
def read_image(name, size=None, debug=False):
    """ read image and segmentation, returns RGB + alpha composite """
    image = imread(name) / 255.

    if image.shape[2] == 4:
        alpha = image[...,3]
        image = image[...,:3]
    else:
        segmentation_name = os.path.splitext(name)[0][:-6] + '-label.png'
        segmentation = imread(segmentation_name)
        alpha = np.logical_or(segmentation[...,0], segmentation[...,1]) * 1.

    if size is not None:
        scale_x = float(size[0]) / image.shape[1]
        scale_y = float(size[1]) / image.shape[0]
        scale = min(scale_x, scale_y)

        if debug:
            print(name, size[0], size[1], image.shape[1], image.shape[0],
                  scale, image.shape[1] * scale, image.shape[0] * scale)

        if scale > 1.0:
            print('Image %s smaller than requested size' % name)

        if scale != 1.0:
            image = rescale(image, scale, order=3)
            alpha = rescale(alpha, scale, order=0)

    return np.dstack((image, alpha))
 def scale_images(self, data, scaleRatio=-1):
     if scaleRatio <= 0:
         scaleRatio = self.scaleRatio
     if data.ndim == 3:
         for i in range(data.shape[0]):
             # coin flip: squash along the first or the second dimension
             scale_dim = self.rng.randint(2)
             if scale_dim:
                 #Scale in first dimension
                 img_scaled = st.rescale(data[i], scale=(scaleRatio, 1))
                 scaled_shape = img_scaled.T.shape
                 I = numpy.eye(scaled_shape[0], scaled_shape[1])
                 data[i] = numpy.dot(I, img_scaled)
             else:
                 #Scale in the second dimension
                 img_scaled = st.rescale(data[i], scale=(1, scaleRatio))
                 scaled_shape = img_scaled.T.shape
                 I = numpy.eye(scaled_shape[0], scaled_shape[1])
                 data[i] = numpy.dot(img_scaled, I)
     else:
         # coin flip: squash along the first or the second dimension
         scale_dim = self.rng.randint(2)
         if scale_dim:
             #Scale in first dimension
             img_scaled = st.rescale(data, scale=(scaleRatio, 1))
             scaled_shape = img_scaled.T.shape
             I = numpy.eye(scaled_shape[0], scaled_shape[1])
             data = numpy.dot(I, img_scaled)
         else:
             #Scale in the second dimension
             img_scaled = st.rescale(data, scale=(1, scaleRatio))
             scaled_shape = img_scaled.T.shape
             I = numpy.eye(scaled_shape[0], scaled_shape[1])
             data = numpy.dot(img_scaled, I)
     return data
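The non-square numpy.eye products in scale_images are a terse way of cropping (or zero-padding) the rescaled array back to its original size; as written they appear to assume square images. A small illustration of the trick, assuming only numpy:

import numpy as np

img = np.arange(12, dtype=float).reshape(3, 4)
tall = np.vstack([img, img])        # (6, 4): pretend this came out of rescale
short = img[:2]                     # (2, 4)
print(np.dot(np.eye(3, 6), tall).shape)    # (3, 4): extra rows cropped
print(np.dot(np.eye(3, 2), short).shape)   # (3, 4): missing rows zero-padded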
def standardize_roi_height(roi, standard_height=20, rel_height=0.666):
    h = standard_height * 5

    y = roi.sum(1)
    yp = y[y >= y.max() * rel_height]

    pw = yp.shape[0]
    sf = standard_height / pw

    sh = int(np.ceil(float(roi.shape[0]) * sf))
    if sh <= h:                 # if the scale factor estimation isn't off try to rescale according to the central part
        res = rescale(roi, sf)
    else:
        if h < roi.shape[0]:    # if the thing is too big, squeeze it down
            sf = h / roi.shape[0]
            res = rescale(roi, sf)
        else:                   # if the scale factor estimation is off,
            res = roi           # but the image is still smaller than the standard, just center.

    # TODO: the centering should depend on the symmetry of the word (4 cases: are, gone, to, for)
    # w = res.shape[1]
    # c = int(h / 2)
    # os_p = int(np.floor(res.shape[0] / 2))
    # os_m = int(np.ceil(res.shape[0] / 2))
    # uni = np.zeros((h, w))
    # uni[c - os_m: c + os_p, :] = res

    # Pad
    zer = np.zeros((1, res.shape[1]))
    uni = np.append(zer, res, axis=0)
    uni = np.append(uni, zer, axis=0)
    uni = uni / uni.max()

    return uni
Example no. 6
def test_rescale_invalid_scale():
    x = np.zeros((10, 10, 3))
    with testing.raises(ValueError):
        rescale(x, (2, 2),
                multichannel=False, anti_aliasing=False, mode='constant')
    with testing.raises(ValueError):
        rescale(x, (2, 2, 2),
                multichannel=True, anti_aliasing=False, mode='constant')
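For contrast, the valid counterparts of the two failing calls above: with multichannel=False a 3-D array needs one factor per axis, and with multichannel=True the channel axis takes no factor. A quick sketch under the same older skimage API that still accepts multichannel:

import numpy as np
from skimage.transform import rescale

x = np.zeros((10, 10, 3))
a = rescale(x, (2, 2, 2), multichannel=False,
            anti_aliasing=False, mode='constant')   # (20, 20, 6)
b = rescale(x, (2, 2), multichannel=True,
            anti_aliasing=False, mode='constant')   # (20, 20, 3)
print(a.shape, b.shape)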
Example no. 7
def test_rescale_multichannel_defaults():
    # multichannel should always default to False as of 0.16
    x = np.zeros((8, 3), dtype=np.double)
    scaled = rescale(x, 2, order=0, anti_aliasing=False, mode='constant')
    assert_equal(scaled.shape, (16, 6))

    x = np.zeros((8, 8, 3), dtype=np.double)
    scaled = rescale(x, 2, order=0, anti_aliasing=False, mode='constant')
    assert_equal(scaled.shape, (16, 16, 6))
Example no. 8
def pyramid(I1, I2):
	if (I1.shape[0]<=500 and I1.shape[1]<=500):
		return findBestRoll(I1, I2, range(-15, 15), range(-15, 15))
	else:
		I1R = rescale(I1, 0.5)
		I2R = rescale(I2, 0.5)
		rangeValues = pyramid(I1R, I2R)
		print(rangeValues)
		return findBestRoll(I1, I2, range(rangeValues[0]*2-2, rangeValues[0]*2+3), range(rangeValues[1]*2-2, rangeValues[1]*2+3))
Example no. 9
def rescale_images(im1, im2, pts):
    p1, p2, p3, p4 = pts
    len1 = np.sqrt((p2[1] - p1[1])**2 + (p2[0] - p1[0])**2)
    len2 = np.sqrt((p4[1] - p3[1])**2 + (p4[0] - p3[0])**2)
    dscale = len2/len1
    if dscale < 1:
        im1 = sktr.rescale(im1, dscale)
    else:
        im2 = sktr.rescale(im2, 1./dscale)
    return im1, im2
Example no. 10
def test_warp_clip():
    x = np.zeros((5, 5), dtype=np.double)
    x[2, 2] = 1

    outx = rescale(x, 3, order=3, clip=False)
    assert outx.min() < 0

    outx = rescale(x, 3, order=3, clip=True)
    assert_almost_equal(outx.min(), 0)
    assert_almost_equal(outx.max(), 1)
Example no. 11
def scale_lesion(lesion, size):
    """ scale segmented lesion to uniform size """
    image = lesion[...,:3]
    alpha = lesion[...,3]

    scale = float(size) / max(*alpha.shape)
    if scale != 1.0:
        image = rescale(image, scale, order=3)
        alpha = rescale(alpha, scale, order=0)

    return np.dstack((image, alpha))
Example no. 12
def fit_in_box(s, s_mask, s_max, t_max):
    # Resize foreground and mask so area fits in box
    y_ratio = float(t_max[0])/s_max[0]
    x_ratio = float(t_max[1])/s_max[1]
    if y_ratio > x_ratio:
        s = rescale(s, x_ratio)
        s_mask = rescale(s_mask, x_ratio)
    else:
        s = rescale(s, y_ratio)
        s_mask = rescale(s_mask, y_ratio)
    return s, s_mask
Example no. 13
def test_warp_clip():
    x = np.zeros((5, 5), dtype=np.double)
    x[2, 2] = 1

    with expected_warnings(['The default mode', 'The default multichannel']):
        outx = rescale(x, 3, order=3, clip=False)
    assert outx.min() < 0

    with expected_warnings(['The default mode', 'The default multichannel']):
        outx = rescale(x, 3, order=3, clip=True)
    assert_almost_equal(outx.min(), 0)
    assert_almost_equal(outx.max(), 1)
Example no. 14
def test_warp_clip():
    x = np.zeros((5, 5), dtype=np.double)
    x[2, 2] = 1

    outx = rescale(x, 3, order=3, clip=False,
                   multichannel=False, anti_aliasing=False, mode='constant')
    assert outx.min() < 0

    outx = rescale(x, 3, order=3, clip=True,
                   multichannel=False, anti_aliasing=False, mode='constant')
    assert_almost_equal(outx.min(), 0)
    assert_almost_equal(outx.max(), 1)
Example no. 15
def random_backgrond(weights=[2, 8, 4, 3, 0.2, 0.2, 0.5], start_level=1, end_level=None):
    img = np.random.normal(0, 1, (start_level, start_level)) * weights[0]
    i = start_level - 1
    for i, w in enumerate(weights[1:], start_level):
        r = np.random.normal(0, 1, (2**i, 2**i))
        img = rescale(img, 2) + w*r

    if end_level and end_level != i:
        img = rescale(img, 2**(end_level - i))

    img = (img - img.min())
    img /= img.max()
    return img
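Each pass doubles the image with rescale(img, 2) and adds one octave of noise, so the default seven weights produce a 64x64 texture normalised to [0, 1]. A usage sketch, assuming numpy and skimage.transform.rescale are imported as in the function above:

bg = random_backgrond()                 # seven octaves -> 2**6 = 64
print(bg.shape, bg.min(), bg.max())     # (64, 64) 0.0 1.0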
Example no. 16
def test_rescale_multichannel_defaults():
    # ensure multichannel=None matches the previous default behaviour

    # 2D: multichannel should default to False
    x = np.zeros((8, 3), dtype=np.double)
    with expected_warnings(['The default mode', 'The default multichannel']):
        scaled = rescale(x, 2, order=0)
    assert_equal(scaled.shape, (16, 6))

    # 3D: multichannel should default to True
    x = np.zeros((8, 8, 3), dtype=np.double)
    with expected_warnings(['The default mode', 'The default multichannel']):
        scaled = rescale(x, 2, order=0,)
    assert_equal(scaled.shape, (16, 16, 3))
def agrandissement(img):
    img_bilinear = transform.rescale(img, scale=2, mode='wrap', preserve_range=True, order=1)
    # order=1 <=> bi-linear interpolation in skimage.transform.rescale
    img_nearNeighbor = transform.rescale(image=img, scale=2, mode='wrap', preserve_range=True, order=0)
    # order=0 <=> nearest-neighbour interpolation in skimage.transform.rescale

    print(quadratique(img_nearNeighbor, img_bilinear))  # debug

    fig = plt.figure()
    fig.add_subplot(1, 2, 1)
    plt.imshow(img_bilinear, cmap='gray')
    fig.add_subplot(1, 2, 2)
    plt.imshow(img_nearNeighbor, cmap='gray')
    plt.show()
Example no. 18
def test_keep_range():
    image = np.linspace(0, 2, 25).reshape(5, 5)

    out = rescale(image, 2, preserve_range=False, clip=True, order=0)
    assert out.min() == 0
    assert out.max() == 2

    out = rescale(image, 2, preserve_range=True, clip=True, order=0)
    assert out.min() == 0
    assert out.max() == 2

    out = rescale(image.astype(np.uint8), 2, preserve_range=False,
                  clip=True, order=0)
    assert out.min() == 0
    assert out.max() == 2 / 255.0
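The behaviour the test pins down: without preserve_range, integer input is converted to float and divided by the dtype maximum; with it, values pass through untouched. A minimal sketch, assuming numpy and skimage.transform.rescale:

import numpy as np
from skimage.transform import rescale

img = np.arange(25, dtype=np.uint8).reshape(5, 5)
a = rescale(img, 2, order=0, preserve_range=False)  # floats in [0, 1]
b = rescale(img, 2, order=0, preserve_range=True)   # original value range
print(a.max(), b.max())   # 24/255 ~ 0.094, 24.0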
Example no. 19
def random_scaling(image, normal, size, a, b):
    scale = random.uniform(a, b)
    # resize images
    img_r = transform.rescale(image, scale)
    norm_r = transform.rescale(normal, scale)
    img_r, norm_r = random_crop(img_r, norm_r, size)
    # TODO modify depth: divide by scale
    # modify normals
    # for line in range(norm_r.shape[0]):
    #     for col in range(norm_r.shape[1]):
    #         norm_r[line, col, 2] = norm_r[line, col, 2] * scale
    #         norm = np.linalg.norm(norm_r[line, col])
    #         norm_r[line, col] = norm_r[line, col] / norm
    return img_r, norm_r
def rescale(root_new, root_old, img_path, ann_path, out_shape):
  try:
    img = io.imread(root_old+"/"+img_path)
  except Exception as E:
    print(E)
    return
  h, w, _ = img.shape
  f_h, f_w = float(out_shape)/h, float(out_shape)/w
  trans_img = transform.rescale(img, (f_h, f_w))
  num_objs = 0
  with open(root_old+"/"+ann_path, 'r') as f:
    ann = f.readline()
    ann = ann.rstrip()
    ann = ann.split(' ')
    ann = [float(i) for i in ann]
    num_objs = len(ann) // 5
    for idx in range(num_objs):
      ann[idx * 5 + 0] = int(f_w * ann[idx * 5 + 0])
      ann[idx * 5 + 1] = int(f_h * ann[idx * 5 + 1])
      ann[idx * 5 + 2] = int(f_w * ann[idx * 5 + 2])
      ann[idx * 5 + 3] = int(f_h * ann[idx * 5 + 3])
    # Write the new annotations to file
    with open(root_new+"/"+ann_path, 'w') as f_new:
      for val in ann:
        f_new.write(str(val)+' ')
  # Save the new image
  io.imsave(root_new+"/"+img_path, trans_img)
def read_image(filename, as_float, scale=1):
    I = io.imread(filename)
    if scale != 1:
        I = rescale(I, (scale, scale))
    if as_float:
        I = img_as_float(I)
    return I
def img_rescale(img, scale):
    original_y, original_x = img.shape
    if scale > 1:
        img = tf.rescale(img, scale, clip=True)
        scaled_y, scaled_x = img.shape
        dx = (scaled_x - original_x) // 2
        dy = (scaled_y - original_y) // 2
        img = img[dy: (dy + original_y), dx: (dx + original_x)]
        return img
    else:
        tmp_img = np.zeros(img.shape)
        img = tf.rescale(img, scale)
        scaled_y, scaled_x = img.shape
        tmp_img[((original_y - scaled_y) // 2):((original_y - scaled_y) // 2 + scaled_y),
                ((original_x - scaled_x) // 2):((original_x - scaled_x) // 2 + scaled_x)] = img
        return tmp_img
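img_rescale zooms about the image centre while keeping the output shape fixed: upscaling crops the middle, downscaling embeds the result in a zero canvas. A usage sketch, assuming skimage.transform is imported as tf as in the function above:

import numpy as np

img = np.random.rand(100, 100)
zoom_in = img_rescale(img, 1.2)    # rescaled, then centre-cropped
zoom_out = img_rescale(img, 0.8)   # rescaled, then zero-padded
print(zoom_in.shape, zoom_out.shape)   # (100, 100) (100, 100)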
Example no. 23
def resize_photo(photo, keep_ratio=False):
    if keep_ratio:
        if photo.shape[1] >= photo.shape[2]:
            #print photo.shape
            st_photo = rescale(photo_to_skim(photo),
                               (224./float(photo.shape[1]),
                                224./float(photo.shape[1])))
        else:
            st_photo = rescale(photo_to_skim(photo),
                               (224./float(photo.shape[2]),
                                224./float(photo.shape[2])))
    else:
        st_photo = rescale(photo_to_skim(photo),
                           (224./float(photo.shape[1]),
                            224./float(photo.shape[2])))
    return skim_to_photo(st_photo)
Example no. 24
def test_iradon_sart():
    debug = False

    image = rescale(PHANTOM, 0.8)
    theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
    theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
    for theta, error_factor in ((theta_ordered, 1.),
                                (theta_missing_wedge, 2.)):
        sinogram = radon(image, theta, circle=True)
        reconstructed = iradon_sart(sinogram, theta)

        if debug:
            from matplotlib import pyplot as plt
            plt.figure()
            plt.subplot(221)
            plt.imshow(image, interpolation='nearest')
            plt.subplot(222)
            plt.imshow(sinogram, interpolation='nearest')
            plt.subplot(223)
            plt.imshow(reconstructed, interpolation='nearest')
            plt.subplot(224)
            plt.imshow(reconstructed - image, interpolation='nearest')
            plt.show()

        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration) =', delta)
        assert delta < 0.02 * error_factor
        reconstructed = iradon_sart(sinogram, theta, reconstructed)
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (2 iterations) =', delta)
        assert delta < 0.014 * error_factor
        reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration, clip) =', delta)
        assert delta < 0.018 * error_factor

        np.random.seed(1239867)
        shifts = np.random.uniform(-3, 3, sinogram.shape[1])
        x = np.arange(sinogram.shape[0])
        sinogram_shifted = np.vstack([np.interp(x + shifts[i], x,
                                                sinogram[:, i])
                                      for i in range(sinogram.shape[1])]).T
        reconstructed = iradon_sart(sinogram_shifted, theta,
                                    projection_shifts=shifts)
        if debug:
            from matplotlib import pyplot as plt
            plt.figure()
            plt.subplot(221)
            plt.imshow(image, interpolation='nearest')
            plt.subplot(222)
            plt.imshow(sinogram_shifted, interpolation='nearest')
            plt.subplot(223)
            plt.imshow(reconstructed, interpolation='nearest')
            plt.subplot(224)
            plt.imshow(reconstructed - image, interpolation='nearest')
            plt.show()

        delta = np.mean(np.abs(reconstructed - image))
        print('delta (1 iteration, shifted sinogram) =', delta)
        assert delta < 0.022 * error_factor
def normalize_dataset():
    max_pixels = config.getint("image_processing", "max_number_normalised_pixels")
    images_per_category = config.getint("classifiers", "max_number_images_per_category")
    max_side_size = config.getint("image_processing", "max_side_size")
    all_categories = mongo_driver.get_categories_values()
    i = 0
    for category in all_categories:
        print("adding category : {}".format(category))
        all_edges_cursor = mongo_driver.get_edges_from_category(category, images_per_category)

        for row in all_edges_cursor:

            # Scale the image edges to normalize it
            edges = np.array(row["edges_data"])
            edges = np.asfarray(edges)
            max_dim = np.max(edges.shape)
            scale = max_side_size / max_dim
            edges_scaled = rescale(edges, scale)

            # Flatten 2D edges to 1D vector
            pixels_vector = np.array(edges_scaled).flatten()

            if pixels_vector.size < max_pixels:
                diff = max_pixels - pixels_vector.size
                # Fill up vector with false values to normalise images
                pixels_vector = np.concatenate([pixels_vector, [False] * diff])

            # Save it to DB
            mongo_driver.save_normalized_data(np.array(pixels_vector).tolist(), i)
        i += 1
Example no. 26
def my_read_image2(data_path, dataset_str):

    classes = os.listdir(data_path)
    classes = [c for c in classes if not c.startswith('.')]

    imgs = []
    labels = []
    names = []

    # np.savetxt('/Users/Sara/Desktop/data/pkl/all25/path_folders_'+ dataset_str +'.csv', classes, delimiter=',', fmt='%s')
    for index, item in enumerate(classes):
        path = data_path + classes[index]
        # if os.path.isdir(path):
        if 1:
            print(path)
            files = os.listdir(path)
            files = [f for f in files if not f.startswith('.')]
            # np.savetxt('/Users/Sara/Desktop/data/pkl/all/path_files_' + dataset_str + '.csv', files, delimiter=',', fmt='%s')
            for f in files:
                if dataset_str in f:
                    test = io.imread(os.path.join(path, f))
                    test = test[66:532, 105:671, :]
                    # plt.imshow(test)
                    test = rgb2gray(test)
                    test = rescale(test, [0.1, 0.1])
                    # test = resize(test, (50, 50))
                    # num_feature = test.shape
                    dim = int(reduce(lambda x, y: x * y, test.shape))
                    imgs.append(numpy.reshape(test, (dim)))  # vectorizing
                    names.append(f[0:13])  # its name
                    labels.append(index)
                    # labels.append(1)
    np.savetxt('class2idx.csv', classes, delimiter=',', fmt='%s')
    return [imgs, labels, names, classes]
Example no. 27
def detectSpots(image, spot_size, scaledown=1):
	print('detectSpots')
	if(scaledown <= 1):
		smallImg = rescale(image, scaledown)
	else:
		# Scaling up would not make sense.
		scaledown = 1
		smallImg = image
	
	seg = _segment_watershed(smallImg)
	#seg = _segment_threshold(smallImg)
	spots = _detect_spots_from_segmentation(seg)

	#spots = _detect_spots_localmax(smallImg, spot_size*scaledown)
	#spots = _detect_spots_hough_circle(smallImg, spot_size*scaledown)
	#spots = _detect_spots_blob_log(smallImg, spot_size*scaledown)
	spots = spots/scaledown
	spots = np.round(spots)
	spots = spots.astype(int)
	
	if(len(spots) < 25):
		raise Exception('Did not find all spots. %i instead %i'%(len(spots), 25))
	
	spots = _sortPoints(spots)
	
	return spots
Example no. 28
def load_augmented_image(path):
  """Return a image given path 
  .33 proba to be original image
  .33 proba to be flipped image
  .33 proba to be shrinked 
    --> (image between 200&100px)
  """
  proba = rand()
  if proba < .33:
    img = load_image(path, resize=True)
    return img
  elif proba < .66:
    img = load_image(path, resize=True)
    return np.fliplr(img)
  else:
    # Load the background and the original image
    img_back = np.ones([224,224,3]) * rand(3)
    img_orig = load_image(path, resize=False)

    # Maybe flip the image
    if rand()>.5:
      img_orig = np.fliplr(img_orig)

    # Reshape original image (to fit to a max size of 200px)
    max_new_shape = max(img_orig.shape)
    downscale_factor =  round(rand()*100+100) / max_new_shape
    img_orig = rescale(img_orig, downscale_factor)

    # Put img_orig on the background
    yy,xx,_ = img_orig.shape
    y, x ,_ = img_back.shape
    y = int(rand()*(y-yy))
    x = int(rand()*(x-xx))
    img_back[y:yy+y,x:xx+x] = img_orig
    return img_back
Example no. 29
def test_rescale():
    # same scale factor
    x = np.zeros((5, 5), dtype=np.double)
    x[1, 1] = 1
    scaled = rescale(x, 2, order=0)
    ref = np.zeros((10, 10))
    ref[2:4, 2:4] = 1
    assert_almost_equal(scaled, ref)

    # different scale factors
    x = np.zeros((5, 5), dtype=np.double)
    x[1, 1] = 1
    scaled = rescale(x, (2, 1), order=0)
    ref = np.zeros((10, 5))
    ref[2:4, 1] = 1
    assert_almost_equal(scaled, ref)
Example no. 30
def addArtificialData():
    baseName = os.path.basename(leftEyePath)
    print(baseName)
    data_dir = os.path.join(projectPath, baseName)
    print(data_dir)
    files = os.listdir(data_dir)
    files = [f for f in files if f.split('.')[-1] == 'txt']
    print(files)
    data = []
    for f in files:
        label = f.split('.')[0]
        filePath = os.path.join(data_dir, f)
        with open(filePath, 'r') as r:
            for image in r:
                data.append(image.strip())
    # print(data)
    for f in data:
        parentDir = os.path.dirname(f)
        image_name = f.split('/')[-1].split('.')[0]
        scale_image_name = os.path.join(parentDir, image_name + '_s.jpg')
        rotate_image_name = os.path.join(parentDir, image_name + '_r.jpg')
        print(image_name)
        img = io.imread(f, as_grey=True)
        scale_image = rescale(img, 0.9)
        rotated_image = rotate(img, 5, resize=False)
        print(img.shape, scale_image.shape, rotated_image.shape)
        io.imsave(scale_image_name, scale_image)
        io.imsave(rotate_image_name, rotated_image)
        input()  # pause so each generated pair can be inspected
Example no. 31
def start_ransac(img1, img2, brief=True, common_factor=0.25):

    img1 = transform.rescale(img1, common_factor, multichannel=False)
    img2 = transform.rescale(img2, common_factor, multichannel=False)

    print(img1.shape)
    print(img2.shape)

    if brief:
        #BRIEF
        keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
        keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)

        extractor = BRIEF()

        extractor.extract(img1, keypoints1)
        keypoints1 = keypoints1[extractor.mask]
        descriptors1 = extractor.descriptors

        extractor.extract(img2, keypoints2)
        keypoints2 = keypoints2[extractor.mask]
        descriptors2 = extractor.descriptors

        matches12 = match_descriptors(descriptors1,
                                      descriptors2,
                                      cross_check=True)
    else:
        #ORB
        orb = ORB(n_keypoints=1000, fast_threshold=0.05)

        orb.detect_and_extract(img1)
        keypoints1 = orb.keypoints
        descriptors1 = orb.descriptors

        orb.detect_and_extract(img2)
        keypoints2 = orb.keypoints
        descriptors2 = orb.descriptors

        matches12 = match_descriptors(descriptors1,
                                      descriptors2,
                                      cross_check=True)

    src = keypoints2[matches12[:, 1]][:, ::-1]
    dst = keypoints1[matches12[:, 0]][:, ::-1]

    model_robust, inliers = \
        ransac((src, dst), transform.SimilarityTransform, min_samples=4, residual_threshold=2)

    model_robust_tmatrix = np.copy(model_robust.params)
    model_robust_tmatrix[0, 2] = model_robust_tmatrix[0, 2] / common_factor
    model_robust_tmatrix[1, 2] = model_robust_tmatrix[1, 2] / common_factor

    img1_ = img1
    img2_ = warp(img2, model_robust.inverse)

    if False:

        fig = plt.figure(constrained_layout=True)
        gs = fig.add_gridspec(3, 2)
        f_ax1 = fig.add_subplot(gs[0, :])
        plot_matches(f_ax1, img1, img2, keypoints1, keypoints2, matches12)
        f_ax1.axis('off')
        f_ax2 = fig.add_subplot(gs[1, 0])
        f_ax2.imshow(img1)
        f_ax2.axis('off')
        f_ax2.set_title("img1")
        f_ax3 = fig.add_subplot(gs[1, 1])
        f_ax3.imshow(img1_)
        f_ax3.axis('off')
        f_ax3.set_title("img1_")
        #f_ax4 = fig.add_subplot(gs[1, 2])
        #f_ax4.imshow(img3_)
        #f_ax4.axis('off')
        #f_ax4.set_title("img3_")
        f_ax5 = fig.add_subplot(gs[2, 0])
        f_ax5.imshow(img2)
        f_ax5.axis('off')
        f_ax5.set_title("img2")
        f_ax6 = fig.add_subplot(gs[2, 1])
        f_ax6.imshow(img2_)
        f_ax6.axis('off')
        f_ax6.set_title("img2_")
        #f_ax7 = fig.add_subplot(gs[2, 2])
        #f_ax7.imshow(img4_)
        #f_ax7.axis('off')
        #f_ax7.set_title("img4_")
        plt.show()

    return model_robust_tmatrix
Example no. 32
def align(im, ref, method="scharr", **kargs):
    """Use one of a variety of algroithms to align two images.

    Args:
        im (ndarray): image to align
        ref (ndarray): reference array

    Keyword Args:
        method (str or None):
            If given specifies which module to try and use.
            Options: 'scharr', 'chi2_shift', 'imreg_dft', 'cv2'
        box (integer, float, tuple of images or floats):
            Used with ImageArray.crop to select a subset of the image to use for the aligning process.
        oversample (int):
            Rescale the image and reference image by constant factor before finding the translation vector.
        **kargs (various): All other keyword arguments are passed to the specific algorithm.


    Returns:
        (ImageArray or ndarray): aligned image

    Notes:
        Currently three algorithms are supported:
            - image_registration module's chi^2 shift: This uses a DFT with an automatic
              up-sampling of the Fourier transform for sub-pixel alignment. The metadata
              key *chi2_shift* contains the translation vector and errors.
            - imreg_dft module's similarity function. This implements a full scale, rotation, translation
              algorithm (by default constrained to translation only). It's unclear how much sub-pixel translation
              is accommodated.
            - cv2 module based affine transform on a gray scale image.
              from: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/
    """
    # To be consistent with x-y co-ordinate systems
    align_methods = {
        "scharr": (_align_scharr, imreg_dft),
        "chi2_shift": (_align_chi2_shift, chi2_shift),
        "imreg_dft": (_align_imreg_dft, imreg_dft),
        "cv2": (_align_cv2, cv2),
    }
    for meth in list(align_methods.keys()):
        mod = align_methods[meth][1]
        if mod is None:
            del align_methods[meth]
    method = method.lower()
    new_type = im.dtype
    if not len(align_methods):
        raise ImportError(
            "align requires one of imreg_dft, chi2_shift or cv2 modules to be available."
        )
    if method not in align_methods:
        raise ValueError(
            f"{method} is not available either because it is not recognised or there is a missing module"
        )

    if "box" in kargs:
        box = kargs.pop("box")
        if not isIterable(box):
            box = [box]
        working = im.crop(*box, copy=True)
        if ref.shape != working.shape:
            ref = ref.view(ImageArray).crop(*box, copy=True)
    else:
        working = im

    scale = kargs.pop("scale", None)

    if scale:
        working = working.rescale(scale, order=3)
        ref = transform.rescale(ref, scale, order=3)

    prefilter = kargs.pop("prefilter", True)

    tvec, data = align_methods[method][0](working, ref, **kargs)

    if scale:
        tvec /= scale
    new_im = im.shift((tvec[1], tvec[0]), prefilter=prefilter).astype(new_type)
    for k, v in data.items():
        new_im[k] = v
    new_im["tvec"] = tuple(tvec)
    new_im["translation_limits"] = new_im.translate_limits("tvec")
    return new_im
Example no. 33
                print(textBox3)
                print(textBox4)
                print(
                    "--------------------------------------------------------")

                # SCALE
                # img = transform.rescale(img, 1.0/4.0)
                # img = util.img_as_ubyte(img)
                # print(img[0][0][0])
                img = util.img_as_ubyte(
                    transform.resize(crop_image(img), (imgHeight, imgWidth)))

                # DOWN RESOLUTION
                img = util.img_as_ubyte(
                    transform.resize(
                        transform.rescale(img, random.uniform(0.65, 1.0)),
                        (imgHeight, imgWidth)))

                # VERTICAL STRIP LINES
                strip_delta = getRandom(math.ceil(imgWidth / 30),
                                        math.ceil(imgWidth / 20))
                strip_vert_variation = 3
                strip_height = 0
                strip_x = strip_delta
                while (strip_x < (imgWidth - EPS)):
                    insert_strip_line(img, strip_x)
                    var_min = max(-strip_vert_variation,
                                  -min(textBox1[1], textBox2[1]))
                    var_max = min(
                        strip_vert_variation,
                        imgHeight - 1 - max(textBox3[1], textBox4[1]))
import numpy as np

from maad import sound, features
from maad.util import power2dB, plot2d
from skimage import transform
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import NMF

#%%
# Load audio from disk
# --------------------
# Load the audio file and compute the spectrogram.
s, fs = sound.load('../../data/spinetail.wav')
Sxx, tn, fn, ext = sound.spectrogram(s, fs, nperseg=1024, noverlap=512)

Sxx_db = power2dB(Sxx, db_range=70)
Sxx_db = transform.rescale(
    Sxx_db, 0.5, anti_aliasing=True,
    multichannel=False)  # rescale for faster computation
plot2d(Sxx_db, figsize=(4, 10), extent=ext)

#%%
# Filter the spectrogram with 2D wavelets
# ---------------------------------------
# Compute feature with ``shape_features_raw`` to get the raw output of the
# spectrogram filtered by the filterbank composed of 2D Gabor wavelets. This
# raw output can be fed to the NMF algorithm to decompose the spectrogram into
# elementary basis spectrograms.

shape_im, params = features.shape_features_raw(Sxx_db, resolution='low')

# Format the output as an array for decomposition
X = np.array(shape_im).reshape([len(shape_im), Sxx_db.size]).transpose()
Example no. 35
import torch
from skimage import io, transform
from time import time
from torch import nn
from torch.nn.functional import upsample
img = io.imread('data/britney.png')
def timereps(reps, func):
    start = time()
    [func() for _ in range(0, reps)]
    end = time()
    return (end - start) / reps

average_duration = timereps(10, lambda : transform.rescale(img, [2,2], mode='constant'))
print(f'transform.rescale : {average_duration}')


tensor_img = torch.Tensor(img)
tensor_img = tensor_img.cuda().unsqueeze(0)
tensor_img = tensor_img.expand(1, -1, -1, -1).permute(0, 3, 1, 2)
up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)

average_duration = timereps(1000, lambda : up(tensor_img))
print(f'nn.Upsample (GPU): {average_duration}')

average_duration = timereps(1000, lambda : upsample(tensor_img, scale_factor=2, mode='bilinear', align_corners=False))
print(f'nn.functional.upsample (GPU) : {average_duration}')

tensor_img = tensor_img.cpu()
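One caveat with this harness: CUDA kernels launch asynchronously, so timing up(tensor_img) without synchronisation mostly measures launch overhead. A fairer variant for the GPU cases, a sketch assuming the same imports and a CUDA device:

def timereps_cuda(reps, func):
    torch.cuda.synchronize()          # drain pending work before timing
    start = time()
    for _ in range(reps):
        func()
    torch.cuda.synchronize()          # wait for the launched kernels to finish
    return (time() - start) / reps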
Example no. 36
def video_image(image_or_file,
                duration=None,
                zoom=None,
                opacity=None,
                **kwargs):
    """
    Creates a :epkg:`ImageClip`.
    Creates a video from an image.

    @param      image_or_file   image or file
    @param      duration        duration or None if not known
    @param      zoom            applies a zoom on the image
    @param      opacity         opacity of the image (0 for transparent, 255 for opaque)
    @param      kwargs          additional parameters for :epkg:`ImageClip`
    @return                     :epkg:`ImageClip`

    If *duration* is None, it will be fixed when the image is
    composed with another one. The image remains wherever it is placed.
    """
    if isinstance(image_or_file, str):
        img = Image.open(image_or_file)
        return video_image(img,
                           duration=duration,
                           zoom=zoom,
                           opacity=opacity,
                           **kwargs)
    elif isinstance(image_or_file, numpy.ndarray):
        if zoom is not None:
            from skimage.transform import rescale
            img = rescale(image_or_file, zoom)
            return video_image(img,
                               duration=duration,
                               opacity=opacity,
                               **kwargs)
        else:
            img = image_or_file
            if len(img.shape) != 3:
                raise ValueError("Image is not RGB or RGBA shape={0}".format(
                    img.shape))
            if img.shape[2] == 3:
                from skimage.io._plugins.pil_plugin import pil_to_ndarray
                pilimg = Image.fromarray(img).convert('RGBA')
                img = pil_to_ndarray(pilimg)
                if opacity is None:
                    opacity = 255
            if isinstance(opacity, int):
                img[:, :, 3] = opacity
            elif isinstance(opacity, float):
                img[:, :, 3] = int(opacity * 255)
            elif opacity is not None:
                raise TypeError("opacity should be int or float or None")
            return ImageClip(img,
                             duration=duration,
                             transparent=True,
                             **kwargs)
    elif isinstance(image_or_file, Image.Image):
        from skimage.io._plugins.pil_plugin import pil_to_ndarray
        if image_or_file.mode != 'RGBA':
            image_or_file = image_or_file.convert('RGBA')
        if zoom is not None:
            image_or_file = image_or_file.resize(zoom)
        img = pil_to_ndarray(image_or_file)
        return video_image(img, duration=duration, opacity=opacity, **kwargs)
    else:
        raise TypeError("Unable to create a video from type {0}".format(
            type(image_or_file)))
Example no. 37
 def downsample(img):
     rescaled_img = rescale(img,
                            1.0 / scale_down_factor,
                            preserve_range=True,
                            mode='constant')
     return rescaled_img
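scale_down_factor is captured from the enclosing scope here, so the snippet is not runnable on its own; a self-contained variant simply takes the factor as a parameter:

import numpy as np
from skimage.transform import rescale

def downsample(img, scale_down_factor=4.0):
    # preserve_range keeps the original intensity scale instead of
    # rescaling to floats in [0, 1]
    return rescale(img, 1.0 / scale_down_factor,
                   preserve_range=True, mode='constant')

print(downsample(np.random.rand(64, 64)).shape)   # (16, 16)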
Example no. 38
def calculate_image():
    """
    ==================================
    ``Viewer for BOFO project`` with some plugin
    ==================================
    A viewer for BOFO project for viewing collections of images with the
    `detecting algorithm` and a slider plugin.
    """

    from skimage import io
    from skimage.viewer import ImageViewer
    from skimage.transform import (rescale, resize)
    from math import (sin, cos, pi)
    import numpy as np
    from skimage.draw import line

    #################
    # configuration #
    #################

    file_name = "C:/Users/502640129/Desktop/Boo/bofo_algorithm/image_small/test20/test03_31.jpg"
    # file_name = "pizero1.jpg"
    rescale_ratio = 1
    circle_center_y = 200
    circle_center_x = 227
    maxValue = 16
    start_angle = 51  # degree
    end_angle = 319  # degree
    diameter = 180

    valuePerAngle = maxValue / (end_angle - start_angle)
    start_angle = start_angle / 180 * pi  # degree
    end_angle = end_angle / 180 * pi  # degree

    ID = 10
    OD = 180

    #############
    # main loop #
    #############

    # read the image
    img = io.imread(file_name, True)  # 'True' means gray
    img = rescale(img, rescale_ratio, mode='reflect')

    # calculate the gray
    # img = (img[:, :, 0] + img[:, :, 1] + img[:, :, 2]) / 3
    # img[:, :] = 1

    grayList = []

    for angle in range(51 * 2, 319 * 2, 1):
        # compute the weight-compensation value for this angle
        weightAngle = angle / 2
        if (int(weightAngle / 45) % 2) == 0:

            weightAngle = weightAngle - int(weightAngle / 45) * 45
        else:
            weightAngle = (weightAngle - (int(weightAngle / 45) - 1) *
                           45) - (weightAngle - int(weightAngle / 45) * 45) * 2

        # sum the gray values along the sampled line segment
        angle = float(angle / 180 * pi) / 2
        xx = int(circle_center_x + ID * cos(angle))
        yy = int(circle_center_y - ID * sin(angle))
        xxx = int(circle_center_x + OD * cos(angle))
        yyy = int(circle_center_y - OD * sin(angle))
        rr, cc = line(xx, yy, xxx, yyy)
        temp = np.sum(img[rr, cc])

        # apply the weight compensation
        temp = temp * (1 / cos(weightAngle / 180 * pi))
        grayList = grayList + [temp]

    # print(grayList)
    # print(grayList[0])
    # print(np.where(np.min(grayList)))
    # print(grayList[535])

    re = np.where(grayList == np.min(grayList))
    # print(re)
    re = re[0][0] / 2 + 51
    # print(re[0][0])
    angle = re / 180 * pi
Example no. 39
                    file_name, '*' + string_in_file_name + str(num_roi) +
                    '-*-labels.tif'):
                list_labels_names.append(file_name)
        list_file_names.append(max(list_files_tif, key=len))

for element in list_file_names:

    os.makedirs(
        "/Users/alessandropasqui/Desktop/my_data_256_factor2_black-padding_original/cells/"
        + str(element[:-49]) + "/images")
    #os.rename(str(cwd)+"/"+str(element),"/Users/alessandropasqui/Desktop/my_data/cells/"+str(element[:-4])+"/images/"+str(element[:-4])+".png")
    im = io.imread(element)
    im = rescale_intensity(im, out_range=(0, 255))
    im = np.uint8(im)
    imRGB = color.gray2rgb(im)
    imRGB = rescale(imRGB, 64.0 / 200.0, anti_aliasing=False)
    #imRGB = rescale(imRGB, 1.0 / 2.0 , anti_aliasing=False)
    #print(imRGB.shape)
    """
    black_matrix = np.zeros(shape=(256,256,3))
    for x in range(64,(64+128)):
        for y in range(64,((64+128))):
            black_matrix[y][x]=imRGB[y-64][x-64]
    """

    imRGB = np.pad(imRGB, ((64, 64), (64, 64), (0, 0)),
                   'constant',
                   constant_values=0)
    #imRGB = np.pad(imRGB, ((64, 64), (64, 64), (0, 0)), 'reflect')

    print(imRGB.shape)
Example no. 40
#     c = np.where(tt.reshape(tt.shape[0]*tt.shape[1],)==0)[0]
#     x = xv.reshape(tt.shape[0]*tt.shape[1])
#     y = yv.reshape(tt.shape[0]*tt.shape[1])
#     c = random.shuffle(c)
#     x = x[c]
#     y = y[c]
#     Contours2[stack,y[500::],x[50,:,:]] = 10
#Contours[Contours2==0] =10
from skimage.transform import rescale
Labels_t = Labels
Labels = Labels / 8
Labels_2 = np.zeros(
    (Labels.shape[0], Labels.shape[1] // 2, Labels.shape[2] // 2),
    dtype='uint8')
for stack in range(Labels.shape[0]):
    Labels_2[stack, :, :] = np.uint(rescale(Labels[stack, :, :], 0.5) * 8)
Labels_4 = np.zeros(
    (Labels_2.shape[0], Labels_2.shape[1] // 2, Labels_2.shape[2] // 2),
    dtype='uint8')
for stack in range(Labels_2.shape[0]):
    Labels_4[stack, :, :] = np.uint(rescale(Labels_2[stack, :, :], 0.5) * 8)
Labels_8 = np.zeros(
    (Labels_4.shape[0], Labels_4.shape[1] // 2, Labels_4.shape[2] // 2),
    dtype='uint8')
for stack in range(Labels_4.shape[0]):
    Labels_8[stack, :, :] = np.uint(rescale(Labels_4[stack, :, :], 0.5) * 8)
Labels_16 = np.zeros(
    (Labels_8.shape[0], Labels_8.shape[1] // 2, Labels_8.shape[2] // 2),
    dtype='uint8')
for stack in range(Labels_8.shape[0]):
    Labels_16[stack, :, :] = np.uint(rescale(Labels_8[stack, :, :], 0.5) * 8)
Example no. 41
 def process_image(img):
     return 2 * color.rgb2gray(transform.rescale(img[34:194], 0.5)) - 1
Example no. 42
def plot_episodes_tracks(exp_batch, experiment, checkpoint, town_name,
                         exp_suite):

    # We build the measurement file used for the benchmarks.
    meas_file = os.path.join(
        '_benchmarks_results',
        exp_batch + '_' + experiment + '_' + str(checkpoint) +
        '_drive_control_output_' + exp_suite + '_' + town_name,
        'measurements.csv')
    # We build the summary file used for the benchmarks.
    summary_file = os.path.join(
        '_benchmarks_results',
        exp_batch + '_' + experiment + '_' + str(checkpoint) +
        '_drive_control_output_' + exp_suite + '_' + town_name, 'summary.csv')

    image_location = map.__file__[:-7]
    carla_map = map.CarlaMap(town_name, 0.164, 50)

    # Split the measurements for each of the episodes
    episodes_positions, travelled_distances = split_episodes(meas_file)

    # Get causes of end
    end_cause = get_causes_of_end(summary_file)

    print("End casues ", len(end_cause))
    print(end_cause)

    # Prepare the folder where the results are going to be written
    root_folder = "_logs"
    paths_dir = os.path.join(root_folder, exp_batch, experiment,
                             'drive_' + exp_suite + '_' + town_name + '_paths')

    # Create the paths just in case they don't exist.
    if not os.path.exists(paths_dir):
        os.makedirs(paths_dir)

    if not os.path.exists(os.path.join(paths_dir, str(checkpoint))):
        os.mkdir(os.path.join(paths_dir, str(checkpoint)))

    # For each position vec in all episodes
    count = 0  # To count the number

    # Color palette for the causes of episode termination
    end_color_palete = [
        [255, 0, 0, 255],  # Red for timeout
        [0, 255, 0, 255],  # Green for success
        [0, 0, 255, 255],  # Blue for End pedestrian
        [255, 255, 0, 255],  # Yellow for end car
        [255, 0, 255, 255],  # Magenta for end other
    ]
    print("Number of episodes ", len(episodes_positions))

    # We instance an image that is going to have all the final position plots
    map_image_dots = Image.open(
        os.path.join(image_location, town_name + '.png'))
    map_image_dots.load()
    map_image_dots = np.asarray(map_image_dots, dtype="int32")

    for episode_vec in episodes_positions:

        map_image = Image.open(os.path.join(image_location,
                                            town_name + '.png'))
        map_image.load()
        map_image = np.asarray(map_image, dtype="int32")

        travel_this_episode = 0
        previous_pos = episode_vec[0]
        # This is for plotting the path driven by the car.
        for point in episode_vec[1:]:

            travel_this_episode += sldist(point, previous_pos)
            previous_pos = point
            point[1] = point[1] - 3
            point[0] = point[0] - 2
            value = travel_this_episode / travelled_distances[count]

            color_palate_inst = [0 + (value * x) for x in [255, 0, 0]]
            color_palate_inst.append(255)

            point.append(0.0)

            plot_on_map(map_image, carla_map.convert_to_pixel(point),
                        color_palate_inst, 8)

        # Plot the end point on the path map
        plot_on_map(map_image, carla_map.convert_to_pixel(point),
                    end_color_palete[int(end_cause[count])], 16)
        # Plot the end point on the map just showing the dots
        plot_on_map(map_image_dots, carla_map.convert_to_pixel(point),
                    end_color_palete[int(end_cause[count])], 16)

        count += 1
        map_image = rescale(map_image.astype('float'), 1.0 / 4.0)
        plot_test_image(
            map_image,
            os.path.join(paths_dir, str(checkpoint),
                         str(count) + '.png'))

    map_image_dots = rescale(map_image_dots.astype('float'), 1.0 / 4.0)
    plot_test_image(map_image_dots,
                    os.path.join(paths_dir, str(checkpoint), 'all_dots.png'))
Example no. 43
    def transfer_style(self,
                       img_style,
                       img_content,
                       length=512,
                       ratio=1e5,
                       n_iter=512,
                       init="-1",
                       verbose=False):
        """
            Transfers the style of the artwork to the input image.

            :param numpy.ndarray img_style:
                A style image with the desired target style.

            :param numpy.ndarray img_content:
                A content image in floating point, RGB format.
        """
        # assume that convnet input is square
        orig_dim = min(self.net.blobs["data"].shape[2:])
        # rescale the images
        scale = max(length / float(max(img_style.shape[:2])),
                    orig_dim / float(min(img_style.shape[:2])))
        img_style = rescale(img_style, STYLE_SCALE * scale)
        scale = max(length / float(max(img_content.shape[:2])),
                    orig_dim / float(min(img_content.shape[:2])))
        img_content = rescale(img_content, scale)
        # compute style representations
        self._rescale_net(img_style)
        layers = self.weights["style"].keys()
        net_in = self.transformer.preprocess("data", img_style)
        gram_scale = float(img_content.size) / img_style.size
        G_style = _compute_reprs(net_in, self.net, layers, [], gram_scale=1)[0]
        # compute content representations
        self._rescale_net(img_content)
        layers = self.weights["content"].keys()
        net_in = self.transformer.preprocess("data", img_content)
        F_content = _compute_reprs(net_in, self.net, [], layers)[1]
        # generate initial net input
        # "content" = content image, see kaishengtai/neuralart
        img0 = self.transformer.preprocess("data", img_content)
        # compute data bounds
        data_min = -self.transformer.mean["data"][:, 0, 0]
        data_max = data_min + self.transformer.raw_scale["data"]
        data_bounds = [(data_min[0], data_max[0])] * (img0.size // 3) + \
                      [(data_min[1], data_max[1])] * (img0.size // 3) + \
                      [(data_min[2], data_max[2])] * (img0.size // 3)
        # optimization params
        grad_method = "L-BFGS-B"
        reprs = (G_style, F_content)
        minfn_args = {
            "args": (self.net, self.weights, self.layers, reprs, ratio),
            "method": grad_method,
            "jac": True,
            "bounds": data_bounds,
            "options": {
                "maxcor": 8,
                "maxiter": n_iter,
                "disp": verbose
            }
        }

        # optimize
        # set the callback function
        def callback(xk):
            self.grad_iter += 1
            print("Iteration: " + str(self.grad_iter) + '/' + str(n_iter))

        minfn_args["callback"] = callback
        res = minimize(style_optfn, img0.flatten(), **minfn_args).nit
        return res
Example no. 44
same as the number of pixels there are across the object (to see why this
is so, consider how many unknown pixel values must be determined in the
reconstruction process and compare this to the number of measurements
provided by the projections), and we follow that rule here. Below is the
original image and its Radon transform, often known as its *sinogram*:
"""

import numpy as np
import matplotlib.pyplot as plt

from skimage.io import imread
from skimage import data_dir
from skimage.transform import radon, rescale

image = imread(data_dir + "/phantom.png", as_gray=True)
image = rescale(image, scale=0.4, mode='reflect', multichannel=False)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))

ax1.set_title("Original")
ax1.imshow(image, cmap=plt.cm.Greys_r)

theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=True)
ax2.set_title("Radon transform\n(Sinogram)")
ax2.set_xlabel("Projection angle (deg)")
ax2.set_ylabel("Projection position (pixels)")
ax2.imshow(sinogram,
           cmap=plt.cm.Greys_r,
           extent=(0, 180, 0, sinogram.shape[0]),
           aspect='auto')
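To close the loop on the sampling-rule discussion above, the sinogram can be inverted with filtered back-projection; a short continuation of the example, assuming additionally skimage.transform.iradon:

from skimage.transform import iradon

reconstruction = iradon(sinogram, theta=theta, circle=True)
error = np.mean(np.abs(reconstruction - image))
print("mean reconstruction error:", error)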
Example no. 45
def test_rescale_invalid_scale():
    x = np.zeros((10, 10, 3))
    with pytest.raises(ValueError):
        rescale(x, (2, 2), multichannel=False)
    with pytest.raises(ValueError):
        rescale(x, (2, 2, 2), multichannel=True)
Example no. 46
def find_half_hist(img):
    hist = ske.histogram(img, nbins=256)[0]
    i, s = 0, 0
    while (s < img.shape[0] * img.shape[1] / 2):
        s += hist[i]
        i += 1
    return i


plt.close('all')
plt.rcParams['image.cmap'] = 'gray'

ticket = skio.imread(image_path, as_gray=True)
if args.downscale:
    ticket = skt.rescale(ticket, 1024 / ticket.shape[1], preserve_range=True)

## Canny
ticket_contours = canny(ticket,
                        sigma=2 * (ticket.shape[0] * ticket.shape[1])**.5 /
                        1500)
ticket_contours = morpho.dilation(ticket_contours, morpho.disk(3))

## Hough transform
hspace, angles, distances = skt.hough_line(ticket_contours)
hspacep, angles, distances = skt.hough_line_peaks(hspace, angles, distances)
normalized_hspacep = (hspacep - np.min(hspacep)) / (np.max(hspacep) -
                                                    np.min(hspacep))

## Rotation
angles_candidats = []
Example no. 47
def get_small_simple_map_data(obs, show_original=False, show_rescaled=False):
    use_small_map = P.use_small_map

    map_width = 64
    small_map_width = 32

    resize_type = np.uint8
    save_type = np.float16

    m_height_origin = obs.observation["minimap"][C._M_HEIGHT]
    if show_original:
        imgplot = plt.imshow(m_height_origin)
        plt.show()
    m_height_rescaled = rescale(m_height_origin,
                                0.5,
                                preserve_range=True,
                                anti_aliasing=False)
    if show_rescaled:
        imgplot = plt.imshow(m_height_rescaled)
        plt.show()

    m_visible_origin = obs.observation["minimap"][C._M_VISIBILITY]
    if show_original:
        imgplot = plt.imshow(m_visible_origin)
        plt.show()
    m_visible_rescaled = rescale(m_visible_origin,
                                 0.5,
                                 order=0,
                                 preserve_range=True,
                                 anti_aliasing=False).astype(resize_type)
    if show_rescaled:
        imgplot = plt.imshow(m_visible_rescaled)
        plt.show()

    m_relative_origin = obs.observation["minimap"][C._M_RELATIVE]
    if show_original:
        imgplot = plt.imshow(m_relative_origin)
        plt.show()
    m_relative_rescaled = rescale(m_relative_origin,
                                  0.5,
                                  order=0,
                                  preserve_range=True,
                                  anti_aliasing=False).astype(resize_type)
    if show_rescaled:
        imgplot = plt.imshow(m_relative_rescaled)
        plt.savefig('fig/m_relative_rescaled.pdf')
        plt.show()

    if use_small_map:
        m_height = np.expand_dims(
            m_height_rescaled.reshape(-1, small_map_width, small_map_width),
            -1).astype(save_type) / 255.0
        m_visible = get_one_hot(
            m_visible_rescaled.reshape(-1, small_map_width, small_map_width),
            4).astype(save_type)
        m_relative = get_one_hot(
            m_relative_rescaled.reshape(-1, small_map_width, small_map_width),
            5).astype(save_type)
    else:
        m_height = np.expand_dims(
            obs.observation["minimap"][C._M_HEIGHT].reshape(
                -1, map_width, map_width), -1).astype(save_type) / 255.0
        m_visible = get_one_hot(
            obs.observation["minimap"][C._M_VISIBILITY].reshape(
                -1, map_width, map_width), 4).astype(save_type)
        m_relative = get_one_hot(
            obs.observation["minimap"][C._M_RELATIVE].reshape(
                -1, map_width, map_width), 5).astype(save_type)

    #do not use screen information
    #s_relative = obs.observation["screen"][C._S_RELATIVE].reshape(-1, map_width, map_width)

    out_channels = 1 + 4 + 5

    simple_map_data = np.concatenate([m_height, m_visible, m_relative], axis=3)
    #out_map_data = np.transpose(simple_map_data, [0, 2, 3, 1])

    out_data = np.squeeze(simple_map_data, axis=0)

    #print('out_data.shape:', out_data.shape)

    return out_data
Example no. 48
def mls_rigid_deformation_inv(image, p, q, alpha=1.0, density=1.0):
    ''' Rigid inverse deformation
    ### Params:
        * image - ndarray: original image
        * p - ndarray: an array with size [n, 2], original control points
        * q - ndarray: an array with size [n, 2], final control points
        * alpha - float: parameter used by weights
        * density - float: density of the grids
    ### Return:
        A deformed image.
    '''
    height = image.shape[0]
    width = image.shape[1]
    # Change (x, y) to (row, col)
    q = q[:, [1, 0]]
    p = p[:, [1, 0]]

    # Make grids on the original image
    gridX = np.linspace(0, width, num=int(width * density), endpoint=False)
    gridY = np.linspace(0, height, num=int(height * density), endpoint=False)
    vy, vx = np.meshgrid(gridX, gridY)
    grow = vx.shape[0]  # grid rows
    gcol = vx.shape[1]  # grid cols
    ctrls = p.shape[0]  # control points

    # Compute
    reshaped_p = p.reshape(ctrls, 2, 1, 1)  # [ctrls, 2, 1, 1]
    reshaped_q = q.reshape((ctrls, 2, 1, 1))  # [ctrls, 2, 1, 1]
    reshaped_v = np.vstack(
        (vx.reshape(1, grow, gcol), vy.reshape(1, grow,
                                               gcol)))  # [2, grow, gcol]

    w = 1.0 / np.sum(
        (reshaped_p - reshaped_v)**2, axis=1)**alpha  # [ctrls, grow, gcol]
    w[w == np.inf] = 2**31 - 1
    pstar = np.sum(w * reshaped_p.transpose(1, 0, 2, 3), axis=1) / np.sum(
        w, axis=0)  # [2, grow, gcol]
    phat = reshaped_p - pstar  # [ctrls, 2, grow, gcol]
    qstar = np.sum(w * reshaped_q.transpose(1, 0, 2, 3), axis=1) / np.sum(
        w, axis=0)  # [2, grow, gcol]
    qhat = reshaped_q - qstar  # [ctrls, 2, grow, gcol]
    reshaped_phat1 = phat.reshape(ctrls, 1, 2, grow,
                                  gcol)  # [ctrls, 1, 2, grow, gcol]
    reshaped_phat2 = phat.reshape(ctrls, 2, 1, grow,
                                  gcol)  # [ctrls, 2, 1, grow, gcol]
    reshaped_qhat = qhat.reshape(ctrls, 1, 2, grow,
                                 gcol)  # [ctrls, 1, 2, grow, gcol]
    reshaped_w = w.reshape(ctrls, 1, 1, grow,
                           gcol)  # [ctrls, 1, 1, grow, gcol]

    mu = np.sum(np.matmul(
        reshaped_w.transpose(0, 3, 4, 1, 2) *
        reshaped_phat1.transpose(0, 3, 4, 1, 2),
        reshaped_phat2.transpose(0, 3, 4, 1, 2)),
                axis=0)  # [grow, gcol, 1, 1]
    reshaped_mu = mu.reshape(1, grow, gcol)  # [1, grow, gcol]
    neg_phat_verti = phat[:, [1, 0], ...]  # [ctrls, 2, grow, gcol]
    neg_phat_verti[:, 1, ...] = -neg_phat_verti[:, 1, ...]
    reshaped_neg_phat_verti = neg_phat_verti.reshape(
        ctrls, 1, 2, grow, gcol)  # [ctrls, 1, 2, grow, gcol]
    mul_right = np.concatenate((reshaped_phat1, reshaped_neg_phat_verti),
                               axis=1)  # [ctrls, 2, 2, grow, gcol]
    mul_left = reshaped_qhat * reshaped_w  # [ctrls, 1, 2, grow, gcol]
    Delta = np.sum(np.matmul(mul_left.transpose(0, 3, 4, 1, 2),
                             mul_right.transpose(0, 3, 4, 1, 2)),
                   axis=0).transpose(0, 1, 3, 2)  # [grow, gcol, 2, 1]
    Delta_verti = Delta[..., [1, 0], :]  # [grow, gcol, 2, 1]
    Delta_verti[..., 0, :] = -Delta_verti[..., 0, :]
    B = np.concatenate((Delta, Delta_verti), axis=3)  # [grow, gcol, 2, 2]
    try:
        inv_B = np.linalg.inv(B)  # [grow, gcol, 2, 2]
        flag = False
    except np.linalg.LinAlgError:
        flag = True
        det = np.linalg.det(B)  # [grow, gcol]
        det[det < 1e-8] = np.inf
        reshaped_det = det.reshape(grow, gcol, 1, 1)  # [grow, gcol, 1, 1]
        adjoint = B[:, :, [[1, 0], [1, 0]], [[1, 1],
                                             [0, 0]]]  # [grow, gcol, 2, 2]
        adjoint[:, :, [0, 1],
                [1, 0]] = -adjoint[:, :, [0, 1], [1, 0]]  # [grow, gcol, 2, 2]
        # Keep the [grow, gcol, 2, 2] layout expected by the matmul below.
        inv_B = adjoint / reshaped_det  # [grow, gcol, 2, 2]

    vqstar = reshaped_v - qstar  # [2, grow, gcol]
    reshaped_vqstar = vqstar.reshape(1, 2, grow, gcol)  # [1, 2, grow, gcol]

    # Get final image transformer -- 3-D array
    temp = np.matmul(reshaped_vqstar.transpose(2, 3, 0, 1),
                     inv_B).reshape(grow, gcol,
                                    2).transpose(2, 0, 1)  # [2, grow, gcol]
    norm_temp = np.linalg.norm(temp, axis=0, keepdims=True)  # [1, grow, gcol]
    norm_vqstar = np.linalg.norm(vqstar, axis=0,
                                 keepdims=True)  # [1, grow, gcol]
    transformers = temp / norm_temp * norm_vqstar + pstar  # [2, grow, gcol]

    # Correct the points where pTwp is singular
    if flag:
        blidx = det == np.inf  # bool index
        transformers[0][blidx] = vx[blidx] + qstar[0][blidx] - pstar[0][blidx]
        transformers[1][blidx] = vy[blidx] + qstar[1][blidx] - pstar[1][blidx]

    # Map points that fall outside the border to index 0
    transformers[transformers < 0] = 0
    transformers[0][transformers[0] > height - 1] = 0
    transformers[1][transformers[1] > width - 1] = 0

    # Mapping original image
    transformed_image = image[tuple(transformers.astype(
        np.int16))]  # [grow, gcol]

    # Rescale image
    transformed_image = rescale(transformed_image,
                                scale=1.0 / density,
                                mode='reflect')

    return transformed_image
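# A hypothetical usage sketch for the routine above; the image and the control
# points p/q are made-up values, not taken from the original project.
import numpy as np
from skimage import data

image = data.camera()  # any 2-D grayscale image
p = np.array([[155, 30], [155, 125], [155, 225]])  # original control points (x, y)
q = np.array([[110, 30], [155, 125], [200, 225]])  # displaced control points (x, y)
deformed = mls_rigid_deformation_inv(image, p, q, alpha=1.0, density=1.0)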
Esempio n. 49
0
def main():
    """
    Reads in the data for the selected image pair (e.g., NotreDame1) and runs
    the full feature matching pipeline.

    Command line usage: python main.py -p | --pair <image pair name>

    -p | --pair - flag - required. Specifies which image pair to match

    """

    # create the command line parser
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-p",
        "--pair",
        required=True,
        help=
        "Either notre_dame, mt_rushmore, or e_gaudi. Specifies which image pair to match"
    )

    args = parser.parse_args()

    # (1) Load in the data
    image1_color, image2_color, eval_file = load_data(args.pair)

    # You don't have to work with grayscale images. Matching with color
    # information might be helpful. If you choose to work with RGB images,
    # comment out these two lines.

    image1 = rgb2gray(image1_color)
    # Our own rgb2gray coefficients, which match the Rec. ITU-R BT.601-7 (NTSC)
    # luminance conversion, give only minor performance improvements and could
    # be confusing to students:
    # image1 = image1[:,:,0] * 0.2989 + image1[:,:,1] * 0.5870 + image1[:,:,2] * 0.1140
    image2 = rgb2gray(image2_color)
    # image2 = image2[:,:,0] * 0.2989 + image2[:,:,1] * 0.5870 + image2[:,:,2] * 0.1140

    # make images smaller to speed up the algorithm. This parameter
    # gets passed into the evaluation code, so don't resize the images
    # except for changing this parameter - We will evaluate your code using
    # scale_factor = 0.5, so be aware of this
    scale_factor = 0.5

    # Bilinear rescaling
    image1 = np.float32(rescale(image1, scale_factor))
    image2 = np.float32(rescale(image2, scale_factor))

    # width and height of each local feature, in pixels
    feature_width = 16

    # (2) Find distinctive points in each image. See Szeliski 4.1.1
    # !!! You will need to implement get_interest_points. !!!

    # print(image1)

    print("Getting interest points...")

    # (x1, y1) = student.get_interest_points(image1, feature_width)
    # (x2, y2) = student.get_interest_points(image2, feature_width)

    # While developing and debugging get_features and match_features, you will
    # likely want to use the TA ground-truth points: keep the two student calls
    # above commented out and use the cheat_interest_points call below. Note
    # that the ground-truth points for Mt. Rushmore will not produce good
    # results, so you'll have to use your own function for that image pair.

    (x1, y1, x2, y2) = cheat_interest_points(eval_file, scale_factor)

    # if you want to view your corners uncomment these next lines!

    # plt.imshow(image1, cmap="gray")
    # plt.scatter(x1, y1, alpha=0.9, s=3)
    # plt.show()

    # plt.imshow(image2, cmap="gray")
    # plt.scatter(x2, y2, alpha=0.9, s=3)
    # plt.show()

    print("Done!")

    # (3) Create feature vectors at each interest point. Szeliski 4.1.2
    # !!! You will need to implement get_features. !!!

    print("Getting features...")

    image1_features = student.get_features(image1, x1, y1, feature_width)
    image2_features = student.get_features(image2, x2, y2, feature_width)

    print("Done!")

    # (4) Match features. Szeliski 4.1.3
    # !!! You will need to implement match_features !!!

    print("Matching features...")

    matches, confidences = student.match_features(image1_features,
                                                  image2_features)

    print("Done!")

    # (5) Evaluation and visualization

    # The last thing to do is to check how your code performs on the image pairs
    # we've provided. The evaluate_correspondence function below will print out
    # the accuracy of your feature matching for your 50 most confident matches,
    # 100 most confident matches, and all your matches. It will then visualize
    # the matches by drawing green lines between points for correct matches and
    # red lines for incorrect matches. The visualizer will show the top
    # num_pts_to_visualize most confident matches, so feel free to change the
    # parameter to whatever you like.

    print("Matches: " + str(matches.shape[0]))

    num_pts_to_visualize = 50

    evaluate_correspondence(image1_color, image2_color, eval_file,
                            scale_factor, x1, y1, x2, y2, matches, confidences,
                            num_pts_to_visualize, args.pair + '_matches.jpg')
Esempio n. 50
0
def process_image(fname):
    img = rescale(imread(fname),
                  SCALE_COEFF,
                  preserve_range=True,
                  mode='reflect')
    return img.astype(DTYPE)
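# `SCALE_COEFF` and `DTYPE` are module-level constants that this fragment does
# not show. A hypothetical setup under which process_image runs as written:
import numpy as np
from skimage.io import imread
from skimage.transform import rescale

SCALE_COEFF = 0.5   # assumed downscale factor
DTYPE = np.float32  # assumed output dtype
img = process_image('frame_000.png')  # hypothetical file name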
Esempio n. 51
0
path_in = '/home/rendus/modelnet/test/'
path_out = '/home/rendus/modelnet/out/test/'

norm_size = [55.0, 74.0]
input_size = [228.0, 304.0]
mid_size = [228.0 * 2, 304.0 * 2]

#file=files[i]

dir_name, img_name = path.split(file)

#read and resize normal map
normal = io.imread(path_in + 'normal/' + img_name)
#normal_cv = cv2.imread(path_in+'normal/'+img_name)
normal_r = transform.rescale(
    normal, max(mid_size[0] / normal.shape[0], mid_size[1] / normal.shape[1]))
normal_r = central_crop(normal_r, mid_size)
#read and resize depth map
depth = io.imread(path_in + 'depth/' + img_name)
depth_r = transform.rescale(
    depth, max(mid_size[0] / depth.shape[0], mid_size[1] / depth.shape[1]))
depth_r = central_crop(depth_r, mid_size)

#compute line drawing
#normal_r = cv2.bilateralFilter(normal_cv, d=5, sigmaColor=100, sigmaSpace=100)

depth_cont = feature.canny(depth_r,
                           sigma=1,
                           low_threshold=0.05,
                           high_threshold=0.1)
low_thresh = 0.30
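# `central_crop` is not defined in this fragment. A plausible sketch matching
# its use above (an assumption -- the original helper may differ): crop a
# window of shape `size` from the centre of the image.
def central_crop(image, size):
    h, w = image.shape[:2]
    top = (h - int(size[0])) // 2
    left = (w - int(size[1])) // 2
    return image[top:top + int(size[0]), left:left + int(size[1])]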
Esempio n. 52
0
 vs=np.random.randint(im_height-100,size=n_obj_in_a_scene)  
 us=np.random.randint(im_width-100,size=n_obj_in_a_scene)  
 
 for order in range(n_obj_in_a_scene):
     obj_id = obj_order[order%obj_order.shape[0]]
     idx = model_idx[obj_id,obj_idxes[obj_id][ids[obj_id]]]
     patch = inout.load_im(crop_fns[idx])        
     patch_mask = inout.load_im(crop_masks[idx])        
     ids[obj_id]+=1
     if ids[obj_id] >= model_maxinst[obj_id] or ids[obj_id] + 1 >= len(obj_idxes[obj_id]):
         np.random.shuffle(obj_idxes[obj_id])
         ids[obj_id]=0
         
     r_scale = min(max(0.1,random.gauss(mean_scale,mean_sigma)),1.5)
     r_rotate = random.random()*360-180
     patch = rescale(patch.astype(np.float32)/255,scale=r_scale)
     patch = rotate(patch,angle=r_rotate,resize=True)
     
     patch_mask = rescale(patch_mask.astype(np.float32)/255,scale=r_scale)
     patch_mask = rotate(patch_mask,angle=r_rotate,resize=True)
     patch_mask = patch_mask>0.5
     #random occlusion
     vu_mask = np.where(patch_mask)
     if len(vu_mask[0]) > 0:
         bbox = np.array([np.min(vu_mask[0]), np.min(vu_mask[1]), np.max(vu_mask[0]), np.max(vu_mask[1])])
         h = bbox[2] - bbox[0]
         w = bbox[3] - bbox[1]
         h_aug = int(random.random() * 0.5 * h)
         w_aug = int(random.random() * 0.5 * w)
         bbox_ct_v_t = int((bbox[0]+bbox[2])/2 )
         bbox_ct_u_t = int((bbox[1]+bbox[3])/2 )
Esempio n. 53
0
def mls_affine_deformation_inv(image, p, q, alpha=1.0, density=1.0):
    ''' Affine inverse deformation
    ### Params:
        * image - ndarray: original image
        * p - ndarray: an array with size [n, 2], original control points
        * q - ndarray: an array with size [n, 2], final control points
        * alpha - float: parameter used by weights
        * density - float: density of the grids
    ### Return:
        A deformed image.
    '''
    height = image.shape[0]
    width = image.shape[1]
    # Change (x, y) to (row, col)
    q = q[:, [1, 0]]
    p = p[:, [1, 0]]

    # Make grids on the original image
    gridX = np.linspace(0, width, num=int(width * density), endpoint=False)
    gridY = np.linspace(0, height, num=int(height * density), endpoint=False)
    vy, vx = np.meshgrid(gridX, gridY)
    grow = vx.shape[0]  # grid rows
    gcol = vx.shape[1]  # grid cols
    ctrls = p.shape[0]  # control points

    # Compute
    reshaped_p = p.reshape(ctrls, 2, 1, 1)  # [ctrls, 2, 1, 1]
    reshaped_q = q.reshape((ctrls, 2, 1, 1))  # [ctrls, 2, 1, 1]
    reshaped_v = np.vstack(
        (vx.reshape(1, grow, gcol), vy.reshape(1, grow,
                                               gcol)))  # [2, grow, gcol]

    w = 1.0 / np.sum(
        (reshaped_p - reshaped_v)**2, axis=1)**alpha  # [ctrls, grow, gcol]
    w[w == np.inf] = 2**31 - 1
    pstar = np.sum(w * reshaped_p.transpose(1, 0, 2, 3), axis=1) / np.sum(
        w, axis=0)  # [2, grow, gcol]
    phat = reshaped_p - pstar  # [ctrls, 2, grow, gcol]
    qstar = np.sum(w * reshaped_q.transpose(1, 0, 2, 3), axis=1) / np.sum(
        w, axis=0)  # [2, grow, gcol]
    qhat = reshaped_q - qstar  # [ctrls, 2, grow, gcol]

    reshaped_phat = phat.reshape(ctrls, 2, 1, grow,
                                 gcol)  # [ctrls, 2, 1, grow, gcol]
    reshaped_phat2 = phat.reshape(ctrls, 1, 2, grow,
                                  gcol)  # [ctrls, 1, 2, grow, gcol]
    reshaped_qhat = qhat.reshape(ctrls, 1, 2, grow,
                                 gcol)  # [ctrls, 1, 2, grow, gcol]
    reshaped_w = w.reshape(ctrls, 1, 1, grow,
                           gcol)  # [ctrls, 1, 1, grow, gcol]
    pTwq = np.sum(reshaped_phat * reshaped_w * reshaped_qhat,
                  axis=0)  # [2, 2, grow, gcol]
    try:
        inv_pTwq = np.linalg.inv(pTwq.transpose(2, 3, 0,
                                                1))  # [grow, gcol, 2, 2]
        flag = False
    except np.linalg.LinAlgError:
        flag = True
        det = np.linalg.det(pTwq.transpose(2, 3, 0, 1))  # [grow, gcol]
        det[det < 1e-8] = np.inf
        reshaped_det = det.reshape(1, 1, grow, gcol)  # [1, 1, grow, gcol]
        adjoint = pTwq[[[1, 0], [1, 0]], [[1, 1],
                                          [0, 0]], :, :]  # [2, 2, grow, gcol]
        adjoint[[0, 1],
                [1, 0], :, :] = -adjoint[[0, 1],
                                         [1, 0], :, :]  # [2, 2, grow, gcol]
        inv_pTwq = (adjoint / reshaped_det).transpose(2, 3, 0,
                                                      1)  # [grow, gcol, 2, 2]
    mul_left = reshaped_v - qstar  # [2, grow, gcol]
    reshaped_mul_left = mul_left.reshape(1, 2, grow, gcol).transpose(
        2, 3, 0, 1)  # [grow, gcol, 1, 2]
    mul_right = np.sum(reshaped_phat * reshaped_w * reshaped_phat2,
                       axis=0)  # [2, 2, grow, gcol]
    reshaped_mul_right = mul_right.transpose(2, 3, 0, 1)  # [grow, gcol, 2, 2]
    temp = np.matmul(np.matmul(reshaped_mul_left, inv_pTwq),
                     reshaped_mul_right)  # [grow, gcol, 1, 2]
    reshaped_temp = temp.reshape(grow, gcol, 2).transpose(2, 0,
                                                          1)  # [2, grow, gcol]

    # Get final image transformer -- 3-D array
    transformers = reshaped_temp + pstar  # [2, grow, gcol]

    # Correct the points where pTwp is singular
    if flag:
        blidx = det == np.inf  # bool index
        transformers[0][blidx] = vx[blidx] + qstar[0][blidx] - pstar[0][blidx]
        transformers[1][blidx] = vy[blidx] + qstar[1][blidx] - pstar[1][blidx]

    # Map points that fall outside the border to index 0
    transformers[transformers < 0] = 0
    transformers[0][transformers[0] > height - 1] = 0
    transformers[1][transformers[1] > width - 1] = 0

    # Mapping original image
    transformed_image = image[tuple(transformers.astype(
        np.int16))]  # [grow, gcol]

    # Rescale image
    transformed_image = rescale(transformed_image,
                                scale=1.0 / density,
                                mode='reflect')

    return transformed_image
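# Both MLS routines above fall back to an adjugate-based inverse when
# np.linalg.inv raises: for a 2x2 matrix, inv(A) = adj(A) / det(A). A quick
# self-contained check of that identity:
import numpy as np

A = np.array([[4.0, 7.0], [2.0, 6.0]])
adj = np.array([[A[1, 1], -A[0, 1]],
                [-A[1, 0], A[0, 0]]])  # adjugate: swap diagonal, negate off-diagonal
assert np.allclose(adj / np.linalg.det(A), np.linalg.inv(A))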
Esempio n. 54
0
"""
Corner detection
================

Detect corner points using the Harris corner detector and determine the
subpixel position of corners ([1]_, [2]_).

.. [1] https://en.wikipedia.org/wiki/Corner_detection
.. [2] https://en.wikipedia.org/wiki/Interest_point_detection

"""

from matplotlib import pyplot as plt

from skimage import io, color, transform
from skimage.feature import corner_harris, peak_local_max

# load in different images to see where the Harris Corner Detector finds corners
image = transform.rescale(
    color.rgb2gray(
        io.imread("/Users/qingyuan/Desktop/大三下/CV/homework_set_2/data/matching"
                  "/RISHLibrary2.jpg")), 0.25)

harris_response = corner_harris(image)
# Note: Feel free to play with these parameters to investigate their effects
coords = peak_local_max(harris_response, min_distance=5, threshold_rel=0.005)

plt.imshow(image, cmap=plt.cm.gray)
plt.plot(coords[:, 1], coords[:, 0], '+r', markersize=15)
plt.axis((-100, image.shape[1] + 100, image.shape[0] + 100, -100))
plt.show()
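# The docstring above promises subpixel corner positions, but the snippet stops
# at peak_local_max. A sketch of the refinement step using
# skimage.feature.corner_subpix (the window size is a tunable assumption):
from skimage.feature import corner_subpix

coords_subpix = corner_subpix(image, coords, window_size=13)
plt.imshow(image, cmap=plt.cm.gray)
plt.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
plt.show()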
Esempio n. 55
0
# Recover rotation and scaling differences with log-polar transform
# =================================================================
#
# In this second example, the images differ by both rotation and scaling (note
# the axis tick values). By remapping these images into log-polar space,
# we can recover rotation as before, and now also scaling, by phase
# correlation.

# radius must be large enough to capture useful info in larger image
radius = 1500
angle = 53.7
scale = 2.2
image = data.retina()
image = img_as_float(image)
rotated = rotate(image, angle)
rescaled = rescale(rotated, scale, channel_axis=-1)
image_polar = warp_polar(image, radius=radius, scaling='log', channel_axis=-1)
rescaled_polar = warp_polar(rescaled,
                            radius=radius,
                            scaling='log',
                            channel_axis=-1)

fig, axes = plt.subplots(2, 2, figsize=(8, 8))
ax = axes.ravel()
ax[0].set_title("Original")
ax[0].imshow(image)
ax[1].set_title("Rotated and Rescaled")
ax[1].imshow(rescaled)
ax[2].set_title("Log-Polar-Transformed Original")
ax[2].imshow(image_polar)
ax[3].set_title("Log-Polar-Transformed Rotated and Rescaled")
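# Complete the truncated plotting block:
ax[3].imshow(rescaled_polar)
plt.show()

# The example is cut off here. A sketch of how the scikit-image gallery
# proceeds, assuming skimage.registration.phase_cross_correlation is available:
# phase correlation between the log-polar images yields a translation whose row
# component maps to a rotation angle and whose column component maps to a
# log-scale factor.
import numpy as np
from skimage.registration import phase_cross_correlation

shifts, error, phasediff = phase_cross_correlation(image_polar, rescaled_polar,
                                                   upsample_factor=20)
shiftr, shiftc = shifts[:2]
recovered_angle = (360 / image_polar.shape[0]) * shiftr
klog = image_polar.shape[1] / np.log(radius)
recovered_scale = np.exp(shiftc / klog)
print(f"Recovered angle: {recovered_angle:.2f}, scale: {recovered_scale:.2f}")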
Esempio n. 56
0
            feature_vector = np.fromfile(open(os.environ['ROOT_DIR']+grid_fp+fn, 'br'))
            feature_vector = feature_vector.reshape(1, -1)
            xtest = xgb.DMatrix(feature_vector)
            score = bst.predict(xtest, output_margin=True, ntree_limit=bst.best_ntree_limit)
            origin = scoremap[y:y+stride, x:x+stride]
            comp = np.absolute(origin) - np.absolute(score)
            scoremap[y:y+stride, x:x+stride] = origin * (comp > 0) + score * (comp < 0)
        except Exception:
            continue

    scoremap = 1 / (1 + np.exp(-scoremap))
    # scoremap = (scoremap - scoremap.min()) / (scoremap.max() - scoremap.min())
    gray = scoremap * 255
    gray = gray.astype(np.uint8)

    gray32 = rescale(gray, 1. / 32, anti_aliasing=True)
    gray32 = gray32 * 255
    gray32 = gray32.astype(np.uint8)

    if structure in valid_structure:
        polygon = valid_structure[structure]
        rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
        com = cv2.polylines(rgb.copy(), [polygon.astype(np.int32)], True, [0, 255, 0], 15, lineType=8)

        polygon = polygon / 32
        rgb32 = cv2.cvtColor(gray32, cv2.COLOR_GRAY2RGB)
        com32 = cv2.polylines(rgb32.copy(), [polygon.astype(np.int32)], True, [0, 255, 0], 2, lineType=8)
        filename = subpath + structure + '_' + str(section) + '_contour.tif'
        filename32 = downsubpath + structure + '_' + str(section) + '_contour.tif'
    else:
        com = gray
Esempio n. 57
0
import numpy as np
from skimage.transform import rescale, resize

def low_resolution(image, sz):
    # Downscale by a random factor in [0.5, 1.0), then resize back to sz.
    image = rescale(image, np.random.rand() / 2.0 + 0.5)
    image = resize(image, sz)
    return image
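# A hypothetical call to the degradation helper above, preserving the input shape:
from skimage import data

img = data.camera()
degraded = low_resolution(img, img.shape)  # blurrier image with the same shape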
Esempio n. 58
0
def calibrate(depth_data, lidar_scan):
    global avg_heatmap, i
    rospy.loginfo("IN PROCESSING LOOP")
    intensities = lidar_scan.intensities
    ranges = lidar_scan.ranges

    bridge = CvBridge()
    try:
        depth_image = bridge.imgmsg_to_cv2(depth_data, depth_data.encoding
                ).astype(np.float32)
    except CvBridgeError as e:
        rospy.logerr(e)
        return  # depth_image is undefined past this point

    #cv2.imwrite("matching_image.png", depth_image)

    li_in_px = 2520
    #depth_image = cv2.imread(str(depth_image))[:,:,0].astype(np.float32)
    sns.heatmap(depth_image)
    plt.show()
    scan_width = 91

    imY, imX = depth_image.shape[:2] # image resolution
    filtered_image = filters.gaussian_filter(depth_image, 2, 0)
    #sns.heatmap(filtered_image)
    #plt.show()
    
    for m in range(imY):
        filtered_image[m, :] = np.divide(filtered_image[m, :], 
                np.nanmax(filtered_image[m, :]) + np.finfo(float).eps)
    #sns.heatmap(filtered_image, cmap="jet")
    #plt.show()

    #lidar_scan = lidar_scan.reshape(1, imX)
    #avg_lidar_scan = []
    #for l in range(scan_width):
        #avg_lidar_scan.append(np.mean(lidar_scan[0][(l*7):(l*7+7)]))

    #np.savetxt("ranges.txt", lidar_scan.ranges, delimiter=",")
    #np.savetxt("intensities.txt", lidar_scan.intensities, delimiter=",")
    franges = filters.gaussian_filter(ranges, 2, 0)
    fintensities = filters.gaussian_filter(intensities, 2, 0)
    scaled_ranges = franges * fintensities
    intenWrap = np.append(scaled_ranges, scaled_ranges)
    print(intenWrap.shape)
    scaled_lidar = rescale(np.array(scaled_ranges), (imX)/(scan_width), 
            multichannel=False, anti_aliasing=True, mode='constant')
    for n in range(li_in_px - len(scaled_lidar)):
       scaled_lidar = np.append(scaled_lidar, [0.0])
    scaled_lidar = scaled_lidar.reshape(1,li_in_px)
    scaled_lidar = filters.gaussian_filter(scaled_lidar, 2, 0)

    #full_scan = np.concatenate((np.random.rand(1,imX),
        #normalized_lidar, np.random.rand(1,imX)), axis=1)

    result = np.zeros((scaled_lidar.shape[1] - imX, imY))
    for j in range(scaled_lidar.shape[1] - imX):
        for k in range(imY):
            narrow_scan = scaled_lidar[0][j:imX+j].astype(np.float32)
            normalized_scan = narrow_scan/np.max(narrow_scan)
            filtered_img_row = filtered_image[k, :].astype(np.float32)
            result[j][k] = np.nansum((normalized_scan - filtered_img_row)**2, axis=0)

    avg_heatmap = (avg_heatmap + result) / 2.0
    i += 1
    rospy.loginfo("Iteration %s finished." % i)
Esempio n. 59
0
well = "B02"
for image_name in image_names:
    if well in image_name:
        list_slice.append(image_name)
    else:
        continue
len(list_slice)
# img_montage = make_montage(image_name=list_slice, row_num=3, col_num=3, image_size=512)


pixels = tfi.imread(list_slice[unique_rand(1, len(list_slice), 1)[0]])
pixels_float = pixels.astype('float64')
pixels_float = pixels_float / 65535.000
ch = pixels_float[:,:,0]

ch_r = rescale(ch, 0.25)



img = np.array(img)
ch = img[0,:,:]
img = skimage.transform.resize(ch, (512, 512), anti_aliasing=False)


img_montage = make_montage(image_name=image_name, row_num=3, col_num=3, image_size=401)

# PIL_image = Image.fromarray(np.uint8(img_montage))
# PIL_image.show()

# save montages to temp
for i in range(5):
Esempio n. 60
0
# y scale factor
if l_y > y_max:
    y_sf = y_max / l_y
else:
    y_sf = 1

# determine worst case scale factor
if x_sf < y_sf:
    sf = x_sf
else:
    sf = y_sf

# Scale image
if sf < 1:
    input_image = img_as_ubyte(rescale(input_image, sf, anti_aliasing=True, multichannel=True))
else:
    input_image = img_as_ubyte(input_image)

# get new x/y size
l_x, l_y = input_image.shape[0], input_image.shape[1]

# Convert the image matrix to an Excel worksheet
wb = Workbook()
ws = wb.active
ws.title = "converted_image"

set_col_height = False

# Output an Excel workbook containing the cell-pixelated image
for row in range(0, l_x):