Example #1
def loadImage(path, basename='', color='rgb'):
  ''' Load JPEG image from file.

      Returns
      --------
      IM : 2D or 3D array, size H x W x nColors
           dtype will be float64, with each pixel in range (0,1)
  '''
  path = str(path)
  if len(basename) > 0:
    path = os.path.join(path, basename)
  if color == 'gray' or color == 'grey':
    IM = imread(path, as_grey=True)
    assert IM.ndim == 2
  else:
    IM = imread(path, as_grey=False)
    if not IM.ndim == 3:
      raise ValueError('Color image not available.')

  if IM.dtype == np.float64:
    MaxVal = 1.0
  elif IM.dtype == np.uint8:
    MaxVal = 255
  else:
    raise ValueError("Unrecognized dtype: %s" % (IM.dtype))
  assert IM.min() >= 0.0
  assert IM.max() <= MaxVal

  IM = np.asarray(IM, dtype=np.float64)
  if MaxVal > 1:
    IM /= MaxVal
  return IM
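
For reference, a minimal usage sketch of the loader above (the path is a hypothetical placeholder, not part of the original example):

# hypothetical usage of loadImage; 'photos/cat.jpg' is a placeholder path
IM = loadImage('photos', 'cat.jpg', color='gray')
print(IM.shape, IM.dtype)                 # (H, W), float64
print(IM.min() >= 0.0, IM.max() <= 1.0)   # True True -- values rescaled into [0, 1]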
Example #2
File: na3.py Project: ja999/sem5
def main():
  imgs = MultiImage(data_dir + '/multipage.tif')

  for a, i in zip(range(0, 4), [1, 9, 7, 8]):
    fig = plt.figure()
    ax = fig.add_axes([-0.1, -0.1, 1.2, 1.2])
    # ax.set_axis_off()
    im = data.imread('samolot0' + str(i) + '.jpg', as_grey = True)
    im = invert(im)
    im = process(im)
    out = np.ones_like(im)
    io.imshow(out)
    contours = measure.find_contours(im, 0.9)
    for n, contour in enumerate(contours):
      plt.plot(contour[:, 1], contour[:, 0], linewidth=2, color = 'white')
    plt.savefig(str(a) + '.jpg', bbox_inches = 0, frameon = False)

  fig = plt.figure()
  grid = AxesGrid(fig, rect = (1, 1, 1), nrows_ncols = (2, 2), axes_pad = 0.1)

  for i in range(0, 4):
    frame = data.imread(str(i) + '.jpg')
    grid[i].imshow(frame)
    grid[i].set_xticks([])
    grid[i].set_yticks([])

  plt.savefig('na3.jpg')
Example #3
def load(pos_dir, neg_dir, hard_examples=[]):
    gc.disable()
    images, labels = [], []
    files = listdir(pos_dir)
    for i, f in enumerate(files):
        if not is_image(f):
            continue
        if "hard_example" in f and (
            hard_examples is None or not any([f.startswith(prefix) for prefix in hard_examples])
        ):
            continue
        if i > 0 and i % 1000 == 0:
            print "loaded {0}/{1} positive".format(i, len(files))
        images.append(data.imread(join(pos_dir, f))[:, :, :3])
        labels.append(1)
    files = listdir(neg_dir)
    for i, f in enumerate(files):
        if not is_image(f):
            continue
        if "hard_example" in f and (
            hard_examples is None or not any([f.startswith(prefix) for prefix in hard_examples])
        ):
            continue
        if i > 0 and i % 1000 == 0:
            print "loaded {0}/{1} negative".format(i, len(files))
        images.append(data.imread(join(neg_dir, f))[:, :, :3])
        labels.append(0)
    gc.enable()
    return images, labels
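
The load() helper above relies on an is_image predicate that is not shown. A plausible sketch of such a filter, offered only as an assumption about what the missing helper does:

# assumed helper -- the example's actual is_image is not included above
def is_image(filename):
    return filename.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))
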
def read_files(dir, dataset):
    """
    Yields (label, image) pairs, where image is a numpy.ndarray('uint8')
    """
    dir_path = os.path.join(dir,dataset)
    if dataset=='train':
        for(root, dirs, files) in os.walk(dir_path):
            for file in files:
                if not '.txt' in file:
                    label = file.split("_")[0]
                    img_filepath = os.path.join(root,file)
                    yield label, data.imread(img_filepath)
    elif dataset=='val':
        for(root, dirs, files) in os.walk(dir_path):
            for file in files:
                if '.txt' in file:
                    # this is val_annotations.txt
                    f = open(os.path.join(root,file), 'r')
                    while 1:
                        line = f.readline()
                        if not line: break
                        line_seg = line.split()
                        img_filepath = os.path.join(root,'images',line_seg[0])
                        label = line_seg[1]
                        yield label, data.imread(img_filepath)
                    f.close()
def main():
    for file_path in glob.glob("/home/lucas/Downloads/Lucas/GSK 10uM/*.JPG"):

        img = data.imread(file_path, as_grey=True)

        img = transform.resize(img, [600, 600])
        img_color = transform.resize(data.imread(file_path), [600, 600])

        img[img >img.mean()-0.1] = 0

        # io.imshow(img)
        # io.show()
        #
        edges = canny(img)
        bordas_fechadas = closing(img > 0.1, square(15)) # closing gaps
        fill_cells = ndi.binary_fill_holes(bordas_fechadas)
        # io.imshow(fill_cells)
        # io.show()
        img_label = label(fill_cells, background=0)
        n= 0
        for  x in regionprops(img_label):
            if x.area < 2000 and x.area > 300:
                n +=1
                print x.area
                minr, minc, maxr, maxc = x.bbox
                try:
                    out_path_name = file_path.split("/")[-1].rstrip(".JPG")
                    io.imsave("out/cell_{}_pic_{}_area_{}.png".format(n, out_path_name, str(round(x.area))),img_color[minr-3: maxr+3, minc-3: maxc+3])
                    #io.show()
                except:
                    pass
Example #6
File: Zad_2.py Project: gracz21/KCK
def main():
    plt.figure(figsize=(25, 24))
    planes = ['samolot00.jpg', 'samolot01.jpg', 'samolot03.jpg', 'samolot04.jpg', 'samolot05.jpg','samolot07.jpg',
              'samolot08.jpg', 'samolot09.jpg', 'samolot10.jpg', 'samolot11.jpg', 'samolot12.jpg', 'samolot13.jpg',
              'samolot14.jpg', 'samolot15.jpg', 'samolot16.jpg', 'samolot17.jpg', 'samolot18.jpg', 'samolot20.jpg']
    i = 1
    for file in planes:
        img = data.imread(file, as_grey=True)
        img2 = data.imread(file)
        ax = plt.subplot(6, 3, i)
        ax.axis('off')
        img **= 0.4
        img = filter.canny(img, sigma=3.0)
        img = morphology.dilation(img, morphology.disk(4))
        img = ndimage.binary_fill_holes(img)
        img = morphology.remove_small_objects(img, 1000)
        contours = measure.find_contours(img, 0.8)
        ax.imshow(img2, aspect='auto')
        for n, contour in enumerate(contours):
            ax.plot(contour[:, 1], contour[:, 0], linewidth=1.5)
            center = (sum(contour[:, 1])/len(contour[:, 1]), sum(contour[:, 0])/len(contour[:, 0]))
            ax.scatter(center[0], center[1], color='white')
        i += 1

    plt.savefig('zad2.pdf')
Example #7
def Comparer_image_plot(NumImg):
    
    liste1 = os.listdir("images")
    for i in liste1:
        if str(NumImg) in i:
            ImgPolyp = i
            break
    liste2 = os.listdir("graph_images12")
    for j in liste2:
        if str(NumImg) in j:
            PlotPolyp = j
            break
    
    polDia = data.imread("images\\"+ ImgPolyp)
    
    polDia_plot = data.imread("graph_images12\\"+ PlotPolyp)
    
    plt.subplot(1,2,1)
    plt.title("Image initiale:" + ImgPolyp)
    plt.imshow(polDia)
    
    plt.subplot(1,2,2)
    plt.title("Graph image apres analyse:"+ PlotPolyp)
    plt.imshow(polDia_plot)
    plt.show()
Example #8
def read_image(name, size=None, debug=False):
    """ read image and segmentation, returns RGB + alpha composite """
    image = imread(name) / 255.

    if image.shape[2] == 4:
        alpha = image[...,3]
        image = image[...,:3]
    else:
        segmentation_name = os.path.splitext(name)[0][:-6] + '-label.png'
        segmentation = imread(segmentation_name)
        alpha = np.logical_or(segmentation[...,0], segmentation[...,1]) * 1.

    if size is not None:
        scale_x = float(size[0]) / image.shape[1]
        scale_y = float(size[1]) / image.shape[0]
        scale = min(scale_x, scale_y)

        if debug:
            print name, size[0], size[1], image.shape[1], image.shape[0], scale, image.shape[1]*scale, image.shape[0]*scale

        if scale > 1.0:
            print 'Image %s smaller than requested size' % name

        if scale != 1.0:
            image = rescale(image, scale, order=3)
            alpha = rescale(alpha, scale, order=0)

    return np.dstack((image, alpha))
def process():
    data = {
            0: 0,
            1: 0,
            2: 0,
            3: 0,
            4: 0,
            5: 0,
            6: 0,
            7: 0,
            8: 0,
            }
    sobel_v_image = imread('sobel_v2.jpg')
    sobel_h_image = imread('sobel_h2.jpg')
    canny_image = imread('canny2.jpg')
    row, col = canny_image.shape
    for r in xrange(row):
        for c in xrange(col):
            if sobel_v_image[r][c] < 0 or sobel_h_image[r][c] < 0:
                print sobel_v_image[r][c], sobel_h_image[r][c]
            if canny_image[r][c] != 255:
                continue
            interval = which_interval(sobel_v_image[r][c], sobel_h_image[r][c])
            data[interval] += 1

    print data
Example #10
def xformImage(filename, doPlot=False):
    
    #config area
    
    sigma=4
    mean_multiplier=2
    
    #end config area
    
#     img_gray = misc.imread(filename, True)
    img_gray = imread(filename, as_grey=True)
    img_color = imread(filename)
#     print(img_color.shape)
#     img_gray = resize(skimage.img_as_float(img_gray), (400,400))
    
    img_color_masked = copy.deepcopy(img_color)
#     img_color_masked = resize(img_color_masked, (400,400))
#     img_color_resized = resize(img_color, (200,200))
#     img_color = misc.imread(filename, False)
    
    img_gray = ndimage.gaussian_filter(img_gray, sigma=sigma)
    
    m,n = img_gray.shape
    
#     sx = ndimage.sobel(img_gray, axis=0, mode='constant')
#     sy = ndimage.sobel(img_gray, axis=1, mode='constant')
#     sob = np.hypot(sx, sy)
    
    mask = (img_gray > img_gray.mean()*mean_multiplier).astype(np.float64)
    
    labels = morphology.label(mask)
    
    center_label = labels[m // 2, n // 2]  # label of the connected component at the image center
    
    labels[labels != center_label] = 0
    labels[labels == center_label] = 1
    
#     img_test = copy.deepcopy(img_color)
    
#     img_test[ labels == 0, : ] = 0
#     sob [ labels == 0] = 0
    img_color_masked [labels == 0, :] = 0
#     img_test = ndimage.gaussian_filter(img_test, 3)
    if doPlot:
        f, (ax_gray, ax_color, ax_sob) = plt.subplots(ncols=3)
        ax_gray.imshow(img_color_masked, cmap=plt.cm.get_cmap('gray'))
        ax_gray.axis('off')
#         ax_sob.imshow(sob, cmap=plt.cm.get_cmap('gray'))
        ax_sob.axis('off')
        ax_color.imshow(img_color, cmap=plt.cm.get_cmap('gray'))
        ax_color.axis('off')
        plt.show()    
    
    
    return np.reshape(img_color_masked, -1)
Example #11
    def load_image(path, as_grey=False, to_float=True):

        if DEBUG:
            im = imread(path, as_grey)
            im = (im - np.amin(im) * 1.0) / (np.amax(im) - np.amin(im))
            return im

        # Load image
        image = imread(path, as_grey)

        if to_float:
            # Convert to floating point matrix
            image = image.astype(np.float32)

        return image
Example #12
def pestFeatureExtraction(filename):
	selem = disk(8)
	image = data.imread(filename,as_grey=True)
	thresh = threshold_otsu(image)
	elevation_map = sobel(image)
	markers = np.zeros_like(image)

	if ((image<thresh).sum() > (image>thresh).sum()):
		markers[image < thresh] = 1
		markers[image > thresh] = 2
	else:
		markers[image < thresh] = 2
		markers[image > thresh] = 1

	segmentation = morphology.watershed(elevation_map, markers)
	segmentation = dilation(segmentation-1, selem)
	segmentation = ndimage.binary_fill_holes(segmentation)

	segmentation = np.logical_not(segmentation)
	image[segmentation]=0;

	hist = np.histogram(image.ravel(),256,[0,1])

	hist = list(hist[0])
	hist[:] = [float(x) / (sum(hist) - hist[0]) for x in hist]
	hist.pop(0)

	features = np.empty( (1, len(hist)), 'float' )
	
	a = np.array(list(hist))
	f = a.astype('float')
	features[0,:]=f[:]

	return features
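
The returned feature vector is the normalized intensity histogram of the segmented foreground: bin 0 (dominated by the pixels zeroed out by the mask) is dropped, and the remaining 255 bins are divided by the non-background pixel count so they sum to 1. A quick sanity-check sketch, assuming the function is importable and using a placeholder input file:

features = pestFeatureExtraction('pest.jpg')   # hypothetical input image
assert features.shape == (1, 255)
assert abs(features.sum() - 1.0) < 1e-6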
Example #13
def test_iterations(n_steps, l, n_detectors, filename, labelname, filtr):
    image = data.imread("os.png", as_grey=True)
    image /= np.max(image)
    per_steps = np.arange(0, 361, 20)
    per_steps[len(per_steps) - 1] = 360
    result = np.zeros(len(per_steps))
    for i, j in enumerate(per_steps):
        if j == 0:
            j += 1
        sin = createSinogram(image, j, l, n_detectors)
        sin2 = sin.copy()
        sin2 /= np.max(sin2)
        if filtr == True:
            sin2 = sinogram_filter(sin2)
            invImage = createImage(sin2, j, l, n_detectors, 200)
        else:
            invImage = createImage(sin2, j, l, n_detectors, 200)

        cpy = invImage.copy()
        if filtr == True:
            for x in range(len(cpy)):
                for y in range(len(cpy[x])):
                    if cpy[x][y] != 0:
                        cpy[x][y] = abs(cpy[x][y])
        cpy /= np.max(cpy)
        result[i] = ret_dif_bet_2_inputs(image, cpy)

        savetest(per_steps, result, filename, labelname)
Example #14
    def loadFile(self):
        self.path = filedialog.askopenfilename()
        if self.path == () or self.path == "":
            self.path = ""
            return
        self.LoadLabel.config(text=self.path)
        fileExtension = self.path.split(".")
        if len(fileExtension) < 2 or not fileExtension[1] == "dcm":
            newImage = data.imread(self.path, as_grey=True)
            self.images.changePlot((2, 3, 1), newImage)
            return

        lastNamez, firstNamez, id, birthdayz, sexz, datez, timez, newImage = readDicomData(
            self.path)
        self.images.changePlot((2, 3, 1), newImage)
        self.lastNameDicom.delete("1.0", tk.END)
        self.lastNameDicom.insert("1.0", lastNamez)
        self.NameDicom.delete("1.0", tk.END)
        self.NameDicom.insert("1.0", firstNamez)
        self.idDicom.delete("1.0", tk.END)
        self.idDicom.insert("1.0", id)
        self.dateDicom.config(text=datez)
        self.timeDicom.config(text=timez)
        self.birthh.delete("1.0", tk.END)
        self.birthh.insert("1.0", birthdayz)
        sex = sexz.upper()
        if sex == "M":
            self.sexx.current(0)
        elif sex == "F":
            self.sexx.current(1)
def main():
    inData = data.imread("resources/in.png", as_grey=True)

    plt.subplot(2, 3, 1)
    plt.title("Original image")
    plt.imshow(inData, cmap='gray')

    plt.subplot(2, 3, 2)
    plt.xlabel("Emiter/detector rotation")
    plt.ylabel("Number of receiver")
    plt.title("Radon transform image")

    radonImage = radonTransform(inData,
                                stepSize=step,
                                detectorsNumber=detectorsNumber,
                                detectorsWidth=detectorWidth)
    plt.imshow(radonImage,
               cmap='gray',
               extent=[0, 180, len(radonImage), 0],
               interpolation=None)
    inverseRadonImage = inverseRadonTransform(radonImage,
                                              stepSize=step,
                                              detectorsWidth=detectorWidth)
    saveDicomFile("out.dcm", "Andrzej", "1234", "male", "20070304",
                  inverseRadonImage)
    #write_dicom(inverseRadonImage,'pretty.dcm')
    plt.subplot(2, 3, 4)
    plt.title("Inverse Radon transform image")
    plt.imshow(inverseRadonImage, cmap='gray')

    plt.show()
Example #16
def segment_slic(path, n_segments=N_SEGMENTS):
    img = img_as_float(imread(path))
    segments = slic(img, n_segments=n_segments, compactness=10, sigma=1)
    # segments = watershed(img, markers=250, compactness=0.1)
    # segments = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)
    # segments = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
    return img, segments
def main(argv):

    # reading the image
    image1 = img_as_float(data.imread("data/" + argv[0]))

    channels = 'gray'
    # checking the number of channels
    if len(image1.shape) == 3:
        channels = None

    # image blurred with a Gaussian filter
    img_borrada = filters.gaussian_filter(image1, sigma=3.0)

    # difference between the original and the blurred image
    g_mascara = np.subtract(image1, img_borrada)

    # high boost
    g_image = np.add(np.multiply(2, g_mascara), image1)

    # saving the image
    plt.imsave(argv[1], g_image, cmap=channels)  #saving

    # plotting
    plt.figure().suptitle("filtro da mediana")
    plt.subplot(1, 2, 2)
    plt.imshow(g_image, cmap=channels)
    plt.title('Depois')
    plt.axis('off')
    # Split the plotting area: 1 row, 2 columns.
    plt.subplot(1, 2, 1)  # the active area is panel 1
    plt.imshow(image1, cmap=channels)
    plt.title('Antes')
    plt.axis('off')
    plt.show()
Example #18
    def __init__(self, im_path):
        """Accepts the path to a tif or tiff file,
           then attempts to output a numpy uint8 array
           that has the shape (z, y, x, color)"""
        if not path.isfile(im_path):
            raise IOError("File does not exist")
        if not im_path.lower().endswith(('.tif', '.tiff')):
            raise IOError("File type incorrect")

        self.path = im_path
        self.im = data.imread(im_path)
        self.im_shape = self.im.shape
        self.new_im = self.im
        color_axis = np.argmin(self.im_shape)
        if color_axis == 1:
            self.new_im = []
            print("Restacking array... current size: {}".format(str(self.im_shape)))
            for z_slice in range(self.im_shape[0]):
                for channel in range(self.im_shape[color_axis]):
                    s_data = self.im[z_slice]
                    self.new_im.append(np.dstack((s_data[0], s_data[1], s_data[2])))
            self.new_im = np.array(self.new_im, dtype="uint8")
            print("Restack completed. current size: {}".format(str(self.new_im.shape)))
        if color_axis != 1 and color_axis != 3:
            raise ValueError("File format not recognized")
Example #19
def getseg(gt_prefix, gt_ext, imname, fg_cat):
    path_name, ext = os.path.splitext(imname)
    gt_name = os.path.join(gt_prefix, '%s_gt.%s'%(path_name,gt_ext))
    #pdb.set_trace()
    seg = imread(gt_name)
    seg = [seg == fg_cat]
    return seg
Example #20
def images_loader_mini(input_path, scale):
    hr_images_train = []
    lr_images_train = []
    hr_images_test = []
    lr_images_test = []
    count = 0
    file_names = \
        [input_path + f for f in listdir(input_path) if (isfile(join(input_path, f)) and not f.startswith('.'))]
    print('Loading images')
    for file in tqdm(file_names):
        image = data.imread(file)
        image = set_image_alignment(image, scale)
        image_batch = get_split_images(image)
        image = resize_image(image, 1 / scale)
        image = set_image_alignment(image, scale)
        input_image = resize_image(image, 1 / scale)
        hr_images_test.append(image)
        lr_images_test.append(input_image)
        for i in range(0, image_batch.shape[0]):
            for j in range(0, image_batch.shape[1]):
                input_image = resize_image(image_batch[i][j], 1 / scale)
                hr_images_train.append(image_batch[i][j])
                lr_images_train.append(input_image)
                count += 1
    print(str(count) + ' images loaded')
    return hr_images_test, lr_images_test, hr_images_train, lr_images_train
def main(argv):
    # reading the image
    image1 = img_as_float(data.imread("data/" + argv[0]))
    channels = 'gray'
    
    # checking the number of channels
    if len(image1.shape) == 3:
        channels = None
    
    # contrast stretching
    final = exposure.rescale_intensity(image1)
    
    # saving the image
    plt.imsave(argv[1], final, cmap=channels)
    
    # plotting
    plt.figure().suptitle("Transformação negativa")
    plt.subplot(1,2,2) 
    plt.imshow(final, cmap=channels)
    plt.title('Depois')
    plt.axis('off')
    # Split the plotting area: 1 row, 2 columns.
    plt.subplot(1,2,1) # the active area is panel 1
    plt.imshow(image1, cmap=channels)
    plt.title('Antes')
    plt.axis('off')
    plt.show()
Example #22
def load_data(data_directory):
    ## Read all subdirectories in the data directory
    directories = [
        d for d in os.listdir(data_directory)
        if os.path.isdir(os.path.join(data_directory, d))
    ]

    ## Initialize image and label arrays
    images = []
    labels = []

    ## For every subdirectory in the data directory,
    ## read all jpg images name and append it to the images array,
    ## and append the subdirectory name to the labels array.
    for d in directories:
        plant_directory = os.path.join(data_directory, d)
        file_names = [
            os.path.join(plant_directory, filename)
            for filename in os.listdir(plant_directory)
            if filename.endswith(".jpg")
        ]

        for filename in file_names:
            images.append(data.imread(filename))
            labels.append(int(d))

    return images, labels
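
A short usage sketch for the loader above, assuming the hypothetical layout <data_directory>/<class_id>/*.jpg where each <class_id> is an integer:

images, labels = load_data('plants/train')   # placeholder directory
print(len(images), len(labels))              # one integer label per loaded image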
Example #23
def  WeinerFilter():
  e1 = cv2.getTickCount()
  astro = color.rgb2gray(data.imread(imagePath))
  psf = np.ones((5, 5)) / 25
  astro = conv2(astro, psf, 'same')
  astro += 0.1 * astro.std() * np.random.standard_normal(astro.shape)

  deconvolved, _ = restoration.unsupervised_wiener(astro, psf)

  fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})

  plt.gray()

  ax[0].imshow(astro, vmin=deconvolved.min(), vmax=deconvolved.max())
  ax[0].axis('off')
  ax[0].set_title('Original Picture')

  ax[1].imshow(deconvolved)
  ax[1].axis('off')
  ax[1].set_title('Self tuned restoration')

  fig.subplots_adjust(wspace=0.02, hspace=0.2,
                      top=0.9, bottom=0.05, left=0, right=1)
  e2 = cv2.getTickCount()
  time = (e2 - e1)/ cv2.getTickFrequency() + (numberThreads) + numberThreadsPerCore + numberCores
  msgbox(msg= str(time) +" seconds", title="Execution Time", ok_button="OK")
  plt.show()
Example #24
 def parse(train_gt, train_img_dir, info=False, fast_train=False):
     from skimage.data import imread
     from scipy.ndimage import zoom
     if fast_train:
         train_X = np.zeros((500, im_size, im_size, 1))
         train_Y = np.zeros((500, coords_size))
     else:
         train_X = np.zeros((len(train_gt), im_size, im_size, 1))
         train_Y = np.zeros((len(train_gt), coords_size))
     for i, img_name in enumerate(train_gt):
         if i == 500 and fast_train:
             break
         img = imread(train_img_dir+'/'+img_name, as_grey=True)
         train_Y[i] = train_gt[img_name]
         for j in range(1, coords_size, 2):
             train_Y[i][j] *= im_size/img.shape[0]
         for j in range(0, coords_size, 2):
             train_Y[i][j] *= im_size/img.shape[1]
         img = zoom(img, [im_size/img.shape[0], im_size/img.shape[1]])
         img = (img / 255)
         train_X[i,:,:,0] = img
         del(img)
         if info and (i+1)%100 == 0:
             print('Image: ', i+1, end='\r')
     return train_X, train_Y
Example #25
def detect(model, test_img_dir):
    from os import listdir
    from skimage.data import imread
    from scipy.ndimage import zoom
    img_list = listdir(test_img_dir)
    data = np.zeros((100, im_size, im_size, 1))
    sizes = []
    k = 0
    ans = {}
    for i, img_name in enumerate(img_list):
        img = imread(test_img_dir+'/'+img_name, as_grey=True)
        sizes.append([img_name, img.shape])
        img = zoom(img, [im_size/img.shape[0], im_size/img.shape[1]])
        img = (img / 255)
        data[k,:,:,0] = img
        k += 1
        del(img)
        if (i+1)%100 == 0:
            print((i+1), ' images')
            points = model.predict(data, verbose=1)
            for i in range(len(points)):
                points[i][1::2] *= sizes[i][1][0]/im_size
                points[i][::2] *= sizes[i][1][1]/im_size
                ans[sizes[i][0]] = points[i]
            sizes = []
            k = 0
            data = np.zeros((100, im_size, im_size, 1))
    if k != 0:
        data = data[:k,:,:,:]
        points = model.predict(data, verbose=1)
        for i in range(len(points)):
            points[i][1::2] *= sizes[i][1][0]/im_size
            points[i][::2] *= sizes[i][1][1]/im_size
            ans[sizes[i][0]] = points[i]
    return ans
def process(q, iolock):
    global product_count
    global category_count
    global picture_count
    while True:
        d = q.get()
        if d is None:
            break

        product_count += 1
        product_id = str(d['_id'])
        category_id = str(d['category_id'])

        category_dir = os.path.join(images_dir, category_id)
        if not os.path.exists(category_dir):
            category_count += 1
            try:
                os.makedirs(category_dir)
            except:
                pass

        for e, pic in enumerate(d['imgs']):
            picture_count += 1
            picture = imread(io.BytesIO(pic['picture']))
            picture_file = os.path.join(category_dir,
                                        product_id + '_' + str(e) + '.jpg')
            if not os.path.isfile(picture_file):
                plt.imsave(picture_file, picture)
def main(argv):
    # reading the image
    image1 = data.imread("data/" + argv[0])

    # plotting the histogram
    plt.hist((image1).flatten(), density=True, color='purple', bins=25)
    plt.show()
Example #28
def blob_counter(img, min_sigma=2, max_sigma=8, treshold=0.0001, overlap=0.6):
    img = imread(img, as_grey=True)
    bw = img  # as_grey=True already yields a 2D array, so no channel averaging is needed
    blobs_dog = [(x[0], x[1], x[2]) for x in feature.blob_dog(
        -bw, min_sigma=min_sigma, max_sigma=max_sigma, threshold=treshold, overlap=overlap)]
    blobs_dog = set(blobs_dog)
    return str(len(blobs_dog))
Example #29
File: image.py Project: gracz21/KCK
def load_scenes(filename):
    zipped_scenes = []
    print 'Working on: ' + filename
    img = data.imread('scenes/' + filename, as_grey=True)
    tmp = img
    tmp = filter.canny(tmp, sigma=2.0)
    tmp = ndimage.binary_fill_holes(tmp)
    #tmp = morphology.dilation(tmp, morphology.disk(2))
    tmp = morphology.remove_small_objects(tmp, 2000)
    contours = measure.find_contours(tmp, 0.8)
    ymin, xmin = contours[0].min(axis=0)
    ymax, xmax = contours[0].max(axis=0)
    if xmax - xmin > ymax - ymin:
        xdest = 1000
        ydest = 670
    else:
        xdest = 670
        ydest = 1000
    src = np.array(((0, 0), (0, ydest), (xdest, ydest), (xdest, 0)))
    dst = np.array(((xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)))
    tform3 = tf.ProjectiveTransform()
    tform3.estimate(src, dst)
    warped = tf.warp(img, tform3, output_shape=(ydest, xdest))
    tmp = filter.canny(warped, sigma=2.0)
    tmp = morphology.dilation(tmp, morphology.disk(2))
    descriptor_extractor.detect_and_extract(tmp)
    obj_key = descriptor_extractor.keypoints
    scen_desc = descriptor_extractor.descriptors
    zipped_scenes.append([warped, scen_desc, obj_key, filename])
    return zipped_scenes
Example #30
def read_image(image_path, image_shape=None, preserve_range=True):
    image = imread(image_path)
    if image_shape:
        image = resize(image, image_shape, preserve_range=preserve_range)
    image = image[None]

    return image
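
A brief usage sketch of read_image above (the file name is a placeholder and an RGB input is assumed):

img = read_image('frame.png', image_shape=(224, 224))
print(img.shape)   # (1, 224, 224, 3) -- image[None] prepends a batch axis
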
def show_train_data():
    """
    Plots original image, mask and image + mask
    """
    masks = train
    for ImageId in os.listdir('train_positives/dev/class1'):
        img = imread('train_positives/dev/class1/' + ImageId)
        img_masks = masks.loc[masks['ImageId'] == ImageId,
                              'EncodedPixels'].tolist()
        # Take the individual ship masks and create a single mask array for all ships
        if type(img_masks[0]) == str:
            all_masks = np.zeros((768, 768))
            for mask in img_masks:
                all_masks += rle_decode(mask)

            fig, axarr = plt.subplots(1, 3, figsize=(15, 40))
            axarr[0].axis('off')
            axarr[1].axis('off')
            axarr[2].axis('off')
            axarr[0].imshow(img)
            axarr[1].imshow(all_masks)
            axarr[2].imshow(img)
            axarr[2].imshow(all_masks, alpha=0.4)
            plt.tight_layout(h_pad=0.1, w_pad=0.1)
            #savefig(ImageId)
            plt.show()
        else:
            print('NaN')
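
show_train_data depends on an rle_decode helper that is not shown here. A typical run-length decoder for 768x768 masks looks like the sketch below; this is an assumption about the missing helper, not code from the example itself:

def rle_decode(mask_rle, shape=(768, 768)):
    # mask_rle: space-separated 'start length' pairs, 1-indexed, column-major order
    s = mask_rle.split()
    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0::2], s[1::2])]
    starts -= 1
    ends = starts + lengths
    mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for lo, hi in zip(starts, ends):
        mask[lo:hi] = 1
    return mask.reshape(shape).T   # transpose because the RLE is stored column-major
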
def main():
    def shutdown(signal, widget):
        exit()
    signal.signal(signal.SIGINT, shutdown)
    app = QtGui.QApplication(sys.argv)

    timer = QtCore.QTimer()
    timer.start(500)
    timer.timeout.connect(lambda: None)

    loaded = lh.load_files(str(DATA_PATH), filter_unfinished=False)
    for l in loaded:
        jpg_io = io.BytesIO(l['jpg_image'])
        l['image'] = imread(jpg_io)
        l['save_path'] = os.path.join(DATA_PATH, l['filename'])
        if 'edited' not in l:
            l['edited'] = False

    dh.triangulation(
        loaded,
        do_triangulation=tr.triangulation,
        do_import=partial(dh.import_bone, SEGMENTATION_METHODS)
    )

    sys.exit(app.exec_())
Example #33
def show_result(eye, manual_segmentation):
    plt.imshow(data.imread(sys.argv[1]), cmap=plt.gray())
    plt.show()
    plt.imshow(eye, cmap=plt.gray())
    plt.show()
    plt.imshow(manual_segmentation, cmap=plt.gray())
    plt.show()
def ucf101():
    data_path = '/home/sl/Resource/UCF101/'
    store_path = './list/UCF101/'
    random_num = 4

    # make train.txt and val.txt
    data_dirs = sorted(os.listdir(data_path))
    train_list = []
    val_list = []
    label = -1
    count_for_train = 0
    count_for_val = 0
    mean_list = []
    label_list = []

    for data_dir in data_dirs:
        video_path = os.path.join(data_path, data_dir)
        video_dirs = sorted(os.listdir(video_path))
        label += 1
        label_list.append(data_dir)
        for video_dir in video_dirs:
            print("now for data dir %s\n" % video_dir)
            img_path = os.path.join(video_path, video_dir)
            if not random.randint(0, random_num) == random_num:
                train_list.append((img_path, label))
                count_for_train += 1
            else:
                val_list.append((img_path, label))
                count_for_val += 1
            # calculate mean of img
            imgs = sorted(os.listdir(img_path))
            for img in imgs:
                pic_path = os.path.join(img_path, img)
                pic = ski.imread(pic_path)
                mean_list.append(np.mean(pic, (0, 1)))

    os.chdir(store_path)
    train_file = open('train.txt', 'w')
    for item in train_list:
        train_file.write("%s %s \n" % (item[0], item[1]))
    train_file.close()

    val_file = open('val.txt', 'w')
    for item in val_list:
        val_file.write("%s %s \n" % (item[0], item[1]))
    val_file.close()

    label_file = open('label.txt', 'w')
    for item in label_list:
        label_file.write("%s \n" % item)
    label_file.close()

    mean = np.mean(np.array(mean_list), 0)
    others_file = open('others.txt', 'w')
    others_file.write("mean : %.2f %.2f %.2f\n" % (mean[0], mean[1], mean[2]))
    others_file.write("train num: %d\n" % count_for_train)
    others_file.write("val num: %d\n" % count_for_val)
    print("train num: ", count_for_train)
    print("val num: ", count_for_val)
    print("mean val: ", mean)
Example #35
def find_alpha(base_img, img, model_robust):
    # what type of interpolation
    # 0: nearest-neighbor
    # 1: bi-linear
    warp_order = 1

    output_shape, corner_min = find_output_shape(base_img, model_robust, channel)
    #print("output_shape", output_shape, corner_min)
    #print(model_robust.scale, model_robust.translation, model_robust.rotation)

    # This in-plane offset is the only necessary transformation for the base image
    offset = SimilarityTransform(translation= -corner_min)
    base_warped = warp(base_img[:,:,channel], offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    base_color = warp(base_img, offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    # warp image corners to new position in mosaic
    transform = (model_robust + offset).inverse

    img_warped = warp(img[:,:,channel], transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    img_color = warp(img, transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    base_mask = (base_warped != -1)
    base_warped[~base_mask] = 0

    img_mask = (img_warped != -1)
    #img_warped[~img_mask] = 0

    #convert to rgb
    base_alpha = add_alpha(base_color, base_mask)
    img_alpha = np.dstack((img_color, img_mask))
    #base_alpha = np.dstack((base_color, base_mask))

    #plt.imsave(tmp_base, base_alpha )
    #plt.imsave(tmp_img, img_alpha )
    #cmd = [path_to_enblend, tmp_base, tmp_img, '-o', tmp_out]

    #p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    #output, err = p.communicate(b"input data that is passed to subprocess' stdin")
    #rc = p.returncode
    # remove alpha channel

    if os.path.exists(tmp_out):
        out = imread(tmp_out)[:,:,:3]
    else:
        print("couldnt find out image")
        print(rc, output, err)
        plt.figure()
        plt.imshow(base_alpha)
        plt.figure()#

        plt.imshow(img_alpha)
        plt.show()
        out = base_alpha[:,:,:3]
    #if you don't have enblend, you can use one of these
    #merged_img = simple_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_img = minimum_cost_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_edges = remove_empty_edges(merged_img)
    return tmp_alpha
def main(argv):
    # defining a Laplacian mask with -4 at the center
    lap_4_ = np.array([[0., 1., 0.], [1., -4., 1.], [0., 1., 0.]], dtype=float)

    image1 = img_as_float(data.imread("data/" + argv[0]))
    channels = 'gray'

    # checking the number of channels
    if len(image1.shape) == 3:
        channels = None
        lap_4_ = lap_4_[:, :, None]

    # applying the convolution
    final = filters.convolve(image1, lap_4_)

    # intensity correction
    final_corr = final + image1

    # saving the image
    plt.imsave(argv[1], final, cmap=channels)

    # plotting the results
    plt.figure().suptitle("Aguçamento laplaciano")
    plt.subplot(1, 2, 2)
    plt.imshow(final_corr, cmap=channels)
    plt.title('Depois')
    plt.axis('off')

    plt.subplot(1, 2, 1)
    plt.imshow(final, cmap=channels)
    plt.title('Antes')
    plt.axis('off')
    plt.show()
def load_data(data_directory):
    print("Inside Load_Data")
    directories = [
        d for d in os.listdir(data_directory)
        if os.path.isdir(os.path.join(data_directory, d))
    ]
    print("directories\n:", directories)
    labels = []
    images = []
    pdb.set_trace()
    for d in directories:
        label_directory = os.path.join(data_directory, d)
        #print("label_directory:", label_directory)
        file_names = [
            os.path.join(label_directory, f)
            for f in os.listdir(label_directory) if f.endswith(".ppm")
        ]
        #pdb.set_trace()

        for f in file_names:
            #images.append(skimage.data.imread(f))
            images.append(data.imread(f))
            labels.append(int(d))
            #pdb.set_trace()

    return images, labels
Example #38
def just_do_it(limit_cont):
	fig = plt.figure(facecolor='black')
	plt.gray()
	print("Rozpoczynam przetwarzanie obrazow...")
    
	for i in range(20):
		img = data.imread(images[i])

		gray_img = to_gray(images[i])				# samoloty1.pdf
		#gray_img = to_gray2(images[i],  1001, 0.2, 5, 9, 12) 	# samoloty2.pdf
		#gray_img = to_gray2(images[i],  641, 0.2, 5, 20, 5)	# samoloty3.pdf
		conts = find_contours(gray_img, limit_cont)
		centrs = [find_centroid(cont) for cont in conts]

		ax = fig.add_subplot(4,5,i+1)
		ax.set_yticks([])
		ax.set_xticks([])
		io.imshow(img)
		print("Przetworzono: " + images[i])
        
		for n, cont in enumerate(conts):
			ax.plot(cont[:, 1], cont[:, 0], linewidth=2)
            
		for centr in centrs:
			ax.add_artist(plt.Circle(centr, 5, color='white'))
            
	fig.tight_layout()
	#plt.show()
	plt.savefig('samoloty3.pdf')
Example #39
def process(filename):
    img = data.imread(filename + '.png')
    f = open(filename + '.txt', 'w')
    for x in range(len(img)):
        for y in range(len(img[x])):
            if img[x][y] == 0:
                f.write(str(y) + ',' + str(x) + '\n')
Example #40
def make_prediction():
    import sys
    import numpy as np
    import pandas as pd

    from skimage.data import imread
    from skimage.filters import threshold_adaptive
    from skimage.restoration import denoise_tv_bregman

    from sklearn.cross_validation import train_test_split
    from sklearn.grid_search import GridSearchCV

    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier

    from model_design import model_design
    classifier = model_design()

    X, IDs = [], range(6284, 12504)
    for ID in IDs:
        original = imread('../data/testResized/' + str(ID) +'.Bmp', as_grey=True)
        denoised = denoise_tv_bregman(original, 3)
        binarilized = threshold_adaptive(denoised, block_size=13, method='gaussian')
        feature = binarilized.reshape(1,400)[0]
        X.append(feature)
    X = np.array(X)

    y = classifier.predict(X)
    result = pd.DataFrame({'Id': IDs, 'Class': y})
    result.to_csv('../result/06-09-2015_AdaBoostXTC.csv', sep=',', index=None, columns=['Id', 'Class'])
def main(argv):
    # reading the image
    image1 = img_as_float(data.imread("data/" + argv[0]))
    channels = 'gray'

    # checking the number of channels
    if len(image1.shape) == 3:
        channels = None

    # applying a Gaussian filter
    final = filters.gaussian_filter(image1, sigma=float(argv[2]))

    # saving the image
    plt.imsave(argv[1], final, cmap=channels)

    # plotting
    plt.figure().suptitle("filtro gaussiano")
    plt.subplot(1, 2, 2)
    plt.imshow(final, cmap=channels)
    plt.title('Depois')
    plt.axis('off')
    # Split the plotting area: 1 row, 2 columns.
    plt.subplot(1, 2, 1)  # the active area is panel 1
    plt.imshow(image1, cmap=channels)
    plt.title('Antes')
    plt.axis('off')
    plt.show()
Example #42
def main(argv):
    # reading the image
    image1 = img_as_float(data.imread("data/" + argv[0]))
    channels = 'gray'
    
    # checking the number of channels
    if len(image1.shape) == 3:
        channels = None
    # power-law (gamma) transform
    final = np.power(image1, float(argv[2]))
    
    # saving the image
    plt.imsave(argv[1], final, cmap=channels)
    
    # plotting
    plt.figure().suptitle("Transformação gama")
    plt.subplot(1,2,2) 
    plt.imshow(final, cmap=channels)
    plt.title('Depois')
    plt.axis('off')
    # Split the plotting area: 1 row, 2 columns.
    plt.subplot(1,2,1) # the active area is panel 1
    plt.imshow(image1, cmap=channels)
    plt.title('Antes')
    plt.axis('off')
    plt.show()
Example #43
File: gui.py Project: M-Lewinski/CT
    def chooseFile(self):
        # self.images.config(state="readonly")
        self.path = filedialog.askopenfilename()
        if self.path == () or self.path == "":
            self.path = ""
            return
        self.pathLabel.config(text=self.path)
        # print(self.path)
        fileExtension = self.path.split(".")
        if len(fileExtension) < 2 or not fileExtension[1] == "dcm":
            # self.images["values"] = fileExtension[0].split("/")[-1]
            newImage = data.imread(self.path, as_grey=True)
            self.graph.changePlot((2, 2, 1), newImage)
            return

        lastName, firstName, id, birthday, gender, date, time, newImage = file.readDicomFileToNumpyArray(
            self.path)
        self.graph.changePlot((2, 2, 1), newImage)
        self.lastNameDicom.delete("1.0", tk.END)
        self.lastNameDicom.insert("1.0", lastName)
        self.firstNameDicom.delete("1.0", tk.END)
        self.firstNameDicom.insert("1.0", firstName)
        self.idDicom.delete("1.0", tk.END)
        self.idDicom.insert("1.0", id)
        self.dateDicom.config(text=date)
        self.timeDicom.config(text=time)
        self.birthDicom.delete("1.0", tk.END)
        self.birthDicom.insert("1.0", birthday)
        gender = gender.upper()
        if gender == "M":
            self.genderDicom.current(0)
        elif gender == "F":
            self.genderDicom.current(1)
Example #44
def test_save_buttons():
    viewer = get_image_viewer()
    sv = SaveButtons()
    viewer.plugins[0] += sv

    import tempfile
    fid, filename = tempfile.mkstemp(suffix='.png')
    os.close(fid)

    timer = QtCore.QTimer()
    timer.singleShot(100, QtGui.QApplication.quit)

    # exercise the button clicks
    sv.save_stack.click()
    sv.save_file.click()

    # call the save functions directly
    sv.save_to_stack()
    with expected_warnings(['precision loss']):
        sv.save_to_file(filename)

    img = data.imread(filename)

    with expected_warnings(['precision loss']):
        assert_almost_equal(img, img_as_uint(viewer.image))

    img = io.pop()
    assert_almost_equal(img, viewer.image)

    os.remove(filename)
Example #45
def load_data(data_dir):
    sub_dirs = [
        d for d in os.listdir(data_dir)
        if os.path.isdir(os.path.join(data_dir, d))
    ]

    # labels and images
    labels = []
    images = []

    # extract image files and labels
    for d in sub_dirs:
        label_dir = os.path.join(data_dir, d)  # extract directory
        filenames = [
            os.path.join(label_dir, f) for f in os.listdir(label_dir)
            if f.endswith(".ppm")
        ]

        # create list of file names and labels
        for f in filenames:
            images.append(data.imread(f))
            labels.append(int(d))

    # return images with their labels
    return labels, images
Example #46
def make_images(input_file, output_dir):
    data = bson.decode_file_iter(open(input_file, 'rb'))
    with warnings.catch_warnings():
        for item in tqdm(data):
            product_id = item['_id']
            for ix, pic in enumerate(item['imgs']):
                warnings.filterwarnings('error')
                try:
                    img = imread(io.BytesIO(pic['picture']))
                    imsave(f"{output_dir}/{product_id}_{ix}.jpg", img)
                except Warning:
                    warnings.filterwarnings('ignore')
                    img = imread(io.BytesIO(pic['picture']))
                    imsave(f'{output_dir}/low_contrast/{product_id}_{ix}.jpg',
                           img)
                    imsave(f"{output_dir}/{product_id}_{ix}.jpg", img)
Example #47
def load_data(path):
    folder = []
    images = []
    labels = []
    for a, b, c in os.walk(ROOT_PATH):
        if a.split('/')[-1][0:5] == 'train':
            folder.append(a)
    for i in range(len(folder)):
        #print(folder[i])
        one_img = []
        one_csv = []
        for a1, b1, c1 in os.walk(folder[i]):
            for f in c1:
                if f.split('.')[-1] == 'jpg':
                    one_img.append(os.path.join(a1, f))
                elif f.split('.')[-1] == 'csv':
                    one_csv.append(os.path.join(a1, f))
        for j in range(len(one_img)):
            images.append(data.imread(one_img[j]))
        csv_file = one_csv[0]
        ooo = pd.read_csv(csv_file, sep=';')
        #print(ooo.shape[0])
        for i in range(ooo.shape[0]):
            labels.append(ooo['Label'][i])
    print('Load Succeeded')
    return images, labels
Example #49
def testGenerator():
    # test all images in the directory
    assert os.path.exists(test_dir), "local image path doesnt exist"
    imgs = []
    for p in getPaths(test_dir):
        # read and scale inputs
        img = data.imread(p, as_grey=False)
        img = trans.resize(img, im_shape)
        img = np.expand_dims(img, axis=0)
        # inference
        out_img = model.predict(img)
        # thresholding
        out_img[out_img>0.5] = 1.
        out_img[out_img<=0.5] = 0.
        print ("tested: {0}".format(p))
        # get filename
        img_name = ntpath.basename(p)
        img_name = img_name.split('.')[0]
        # save individual output masks
        ROs = np.reshape(out_img[0,:,:,0], (im_h, im_w))
        FVs = np.reshape(out_img[0,:,:,1], (im_h, im_w))
        HDs = np.reshape(out_img[0,:,:,2], (im_h, im_w))
        RIs = np.reshape(out_img[0,:,:,3], (im_h, im_w))
        WRs = np.reshape(out_img[0,:,:,4], (im_h, im_w))
        misc.imsave(RO_dir+img_name+'.bmp', ROs)
        misc.imsave(FB_dir+img_name+'.bmp', FVs)
        misc.imsave(HD_dir+img_name+'.bmp', HDs)
        misc.imsave(RI_dir+img_name+'.bmp', RIs)
        misc.imsave(WR_dir+img_name+'.bmp', WRs)
        # combine the masks in a single RGB and save
        mask_rgb = get_rgb_from_masks(HDs, ROs, WRs, RIs, FVs)
        misc.imsave(samples_dir+img_name+'.bmp', mask_rgb)
def main(argv):
    # reading the image
    image1 = img_as_float(rgb2gray(data.imread("data/" + argv[0])))
    channels = 'gray'

    # checking the number of channels
    if len(image1.shape) == 3:
        channels = None

    # averaging (mean) mask
    mascara = np.ones([int(argv[2]), int(argv[2])], dtype='float')
    mascara_final = np.divide(mascara, int(argv[2])**2)
    # smoothing step
    final = fil.convolve(image1, mascara_final, mode='constant', cval=0)

    # applying Otsu thresholding
    otsu = filters.threshold_otsu(image1)
    final_image = final < otsu

    # saving the image
    plt.imsave(argv[1], final_image, cmap=channels)

    # plotting
    plt.figure().suptitle("Suavização + otsu")
    plt.subplot(1, 2, 2)
    plt.imshow(final_image, cmap=channels)
    plt.title('Depois')
    plt.axis('off')
    # Split the plotting area: 1 row, 2 columns.
    plt.subplot(1, 2, 1)  # the active area is panel 1
    plt.imshow(image1, cmap=channels)
    plt.title('Antes')
    plt.axis('off')
    plt.show()
Example #51
def transform_pcf(output_dir="",
                  patch_size=32,
                  rotations=4,
                  limit=None):
    training = pd.read_csv("training.csv", 
                           header=None,
                           names=['name', 'fga'],
                           dtype={'name': object, 'fga': float})
    if limit:
        training = training.head(limit)
    from skimage.io import imsave
    for i, row in training.iterrows():
        row_patches = []
        row_output = []
        name = row['name']
        fga = row['fga']
        print("Transforming image %s" % name)
        import sys
        sys.stdout.flush()
        for kind in ["DX", "TS"]:
            img = imread("images/%s/%s-%s.png" % (kind, name, kind))
            img_patches = transform_img(img, name, patch_size=patch_size)
            for img_patch in img_patches:
                for rot_i in range(rotations):
                    row_patches.append(np.rot90(img_patch, k=rot_i))
                    row_output.append(fga)
        X_row = np.asarray(row_patches)
        y_row = np.asarray(row_output)
        with open(path.join(output_dir, "X_file_%s" % name), 'wb') as f:
            np.save(f, X_row)
        with open(path.join(output_dir, "y_file_%s" % name), 'wb') as f:
            np.save(f, y_row)
Example #52
    def hash(self, files):
        for path in files:
            try:
                p = imread(path)
            except IOError as e:
                print "oups", e
                continue
            yield path, dhash(p)
Example #53
def read_stack(path):
    """Load and return stack as normalized unsigned 8bit ints"""
    assert psutil.virtual_memory()[1] > 1000*1024*1024, \
        'Less than 1GiB of memory available'
    stack = data.imread(path)
    stack -= stack.min()
    np.true_divide(stack, stack.max() / 255, out=stack, casting='unsafe')
    return stack.astype(np.uint8)
Example #54
def check_train_dim():
    rel_path = '../data/images_training_rev1/'
    files = os.listdir(rel_path)
    for i, f in enumerate(files):
        n_s = imread(rel_path + f).shape
        if i > 0:
            assert(n_s == s)
        s = n_s
Example #55
def read_stack(path):
    """Load and return stack as normalized unsigned 8bit ints"""
    assert psutil.phymem_usage()[1] > 1000*1024*1024, \
        'Less than 1GiB of memory available'
    stack = data.imread(path)
    stack -= stack.min()
    stack /= stack.max()/255
    return stack.astype(np.uint8)
Example #56
def getContourImage(imageFile):
    planeImage = data.imread(imageFile,True)
    imageArray = np.asarray(planeImage)
    averageColor = np.mean(imageArray)
    imageArray = getBlackAndWhiteImage(imageArray,averageColor*0.85)
    imageArray = filters.sobel(imageArray)
    imageArray = morphology.dilation(imageArray,morphology.disk(3))
    return imageArray
Example #57
def find_mask(base_name, base_img, img_name, img, model_robust, channel):
    # what type of interpolation
    # 0: nearest-neighbor
    # 1: bi-linear
    warp_order = 1
    output_shape, corner_min = find_output_shape(base_img, model_robust, channel)
    # This in-plane offset is the only necessary transformation for the base image
    offset = SimilarityTransform(translation= -corner_min)
    base_warped = warp(base_img[:,:,channel], offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    base_color = warp(base_img, offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    # warp image corners to new position in mosaic
    transform = (model_robust + offset).inverse

    img_warped = warp(img[:,:,channel], transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    img_color = warp(img, transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    base_mask = (base_warped != -1)
    base_warped[~base_mask] = 0

    img_mask = (img_warped != -1)
    img_warped[~img_mask] = 0
    plt.imsave("img_mask.jpg", img_mask)

    #convert to rgb
    img_alpha = np.dstack((img_color, img_mask))
    base_alpha = np.dstack((base_color, base_mask))

    td = config.tmp_dir
    tmp_base = os.path.join(td, 'tmp_' + '.'.join(base_name.split('.')[:-1]) + '.png')
    tmp_img = os.path.join(td, 'tmp_' + '.'.join(img_name.split('.')[:-1]) + '.png')
    tmp_out = os.path.join(td, 'tmp_out_' + '.'.join(base_name.split('.')[:-1]) + '.png')

    plt.imsave(tmp_base, base_alpha)
    plt.imsave(tmp_img, img_alpha)

    cmd = ['enblend', tmp_base, tmp_img, '-o', tmp_out]

    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate(b"input data that is passed to subprocess' stdin")
    rc = p.returncode
    #if you don't have enblend, you can use one of these
    #merged_img = simple_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_img = minimum_cost_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_edges = remove_empty_edges(merged_img)
    # remove alpha channel
    if os.path.exists(tmp_out):
        out = imread(tmp_out)
        oute = remove_empty_alpha(out)
        os.remove(tmp_base)
        os.remove(tmp_img)
        os.remove(tmp_out)
        return oute[:,:,:3]
    else:
        print("Could not find out", tmp_out, rc)
        raise Exception("failed cmd %s" %cmd)
def skimage_filter_technique(image_path):

    img2 = data.imread(image_path, True)
    tv_filter = filter.denoise_tv_chambolle(img2, weight=0.1)

    cv2.imshow('Gray Scale', tv_filter)
    cv2.waitKey(Delay)

    return tv_filter
Example #59
    def load_image(path, as_grey = False, to_float = True):
        # Load image
        image = imread(path, as_grey)

        if to_float:
            # Convert to floating point matrix
            image = image.astype(np.float32)

        return image