Example #1
File: edge.py Project: th13/libyoga
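# The listing omits the imports and module-level constant that edge() relies
# on; a minimal sketch (VALUE_SIGMA is an assumed example value):
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, feature

VALUE_SIGMA = 3  # assumed sigma for the second canny() pass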
def edge(ifile, ofile):
    img = io.imread(ifile, as_gray=True)  # skimage's imread has no flatten=; as_gray collapses to 2-D
    edges1 = feature.canny(img)
    edges2 = feature.canny(img, sigma=VALUE_SIGMA)
    out = np.uint8(edges2 * 255)

    io.imsave(ofile, out)

    # display results
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3), sharex=True, sharey=True)

    ax1.imshow(img, cmap=plt.cm.jet)
    ax1.axis('off')
    ax1.set_title('noisy image', fontsize=20)

    ax2.imshow(edges1, cmap=plt.cm.gray)
    ax2.axis('off')
    ax2.set_title(r'Canny filter, $\sigma=1$', fontsize=20)

    ax3.imshow(edges2, cmap=plt.cm.gray)
    ax3.axis('off')
    ax3.set_title(r'Canny filter, $\sigma=' + str(VALUE_SIGMA) + '$', fontsize=20)

    fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
                        bottom=0.02, left=0.02, right=0.98)

    plt.show()
Example #2
def recreate_images(result_dir, noisy_image_dir):
    # Read noisy images first
    test_images = {}
    for image_name in os.listdir(noisy_image_dir):
        if image_name.endswith('.png'):
            image_path = os.path.join(noisy_image_dir, image_name)
            image = util.img_as_float(io.imread(image_path))
            image_name_noext = os.path.splitext(image_name)[0]
            test_images[image_name_noext] = image
    # Enumerate results - image directories
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print(image_name)
            for result_file in sorted(os.listdir(image_dir)):
                if result_file.endswith('.net'):
                    # Instantiate trained ANN from .net file
                    net_path = os.path.join(image_dir, result_file)
                    ann = libfann.neural_net()
                    ann.create_from_file(net_path)
                    # Filter the same image which it was trained with
                    filtered_image = filter_fann(
                        test_images[image_name], ann)
                    param_set_name = os.path.splitext(result_file)[0]
                    io.imsave(
                        os.path.join(image_dir, param_set_name + '.png'),
                        filtered_image)
Example #3
File: gui.py Project: oduwa/Wheat-Count
 def didClickSubmitButton(self, event):
     print(self.imageFilePath)
     img = img_as_ubyte(io.imread(CLUSTER_IMAGE_FILENAME))
     roi_img = spectral_roi.extract_roi(img, gui_checkbox_handlers.getSelectedClusters())
     roi_img_filename = "{}.png".format(Helper.generate_random_id())
     io.imsave(roi_img_filename, roi_img)
     Display.show_image(roi_img, roi_img_filename)
Example #4
def cut_char(pk):
    page = get_object_or_404(Page, pk=pk)
    page_img_path = page.get_image_path()
    char_lst = Character.objects.filter(page_id=pk)
    image = io.imread(page_img_path, 0)
    binary = binarisation(image)
    binary_image = (binary * 255).astype('ubyte')
    char_dir = settings.CHARACTER_IMAGE_ROOT + pk + '/'
    if not os.path.exists(char_dir):
        os.makedirs(char_dir)
    for char in char_lst:
        char_image = binary_image[char.top:char.bottom,char.left:char.right]
        char_filename = char.id + '.png'
        char_path = char_dir + char_filename
        try:
            io.imsave(char_path, char_image)
            status = 0
            if is_low_contrast(char_image):
                status = -5
        except Exception:
            char_filename = ''
            status = -6
        char.is_correct = status
        char.image = char_filename
        char.save()
    append_char_stastics.delay(pk)
    return 'cutchar:' + pk
Example #5
def produce_smoothed_images(get_component, replace_component, bins, output_path, paths):

    start_img = io.imread(paths[0])
    start_cdf = get_cdf(get_component(start_img), bins)

    end_img = io.imread(paths[-1])
    end_cdf = get_cdf(get_component(end_img), bins)

    delta_cdf = end_cdf - start_cdf

    for i, path in enumerate(paths[1:-1]):
        percentage = i / len(paths[1:-1])
        target_cdf = start_cdf + (delta_cdf * percentage)

        img = io.imread(path)
        values = get_component(img)
        cdf = get_cdf(values, bins)

        # In order to match the length of "bins" for the interpolation below
        # we prepend a 0
        target_cdf = numpy.insert(target_cdf, 0, 0)
        cdf = numpy.insert(cdf, 0, 0)

        matched = match(values, cdf, target_cdf, bins)
        matched = matched.reshape(values.shape)

        img = replace_component(img, matched)

        result_path = os.path.join(output_path, os.path.basename(path))
        io.imsave(result_path, img)
        print('Done with', result_path)
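# get_cdf() and match() are not shown in this listing; a plausible sketch,
# assuming `bins` is an increasing array of bin edges (len(cdf) == len(bins) - 1,
# which is why a leading 0 is prepended above before interpolating):
import numpy

def get_cdf(values, bins):
    hist, _ = numpy.histogram(values, bins=bins)
    cdf = numpy.cumsum(hist).astype(float)
    return cdf / cdf[-1]  # normalised cumulative distribution

def match(values, cdf, target_cdf, bins):
    # classic histogram matching: value -> quantile under cdf -> value under target cdf
    quantiles = numpy.interp(values.ravel(), bins, cdf)
    return numpy.interp(quantiles, target_cdf, bins)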
Example #6
def convert_image(i, scene, img_depth, image, label):

    idx = int(i) + 1
    if idx in train_images:
        train_test = "training"
    else:
        assert idx in test_images, "index %d neither found in training set nor in test set" % idx
        train_test = "testing"

    folder = "%s/%s/%s" % (out_folder, train_test, scene)
    if not os.path.exists(folder):
        os.makedirs(folder)

    img_depth *= 1000.0

    png.from_array(img_depth, 'L;16').save("%s/%05d_depth.png" % (folder, i))

    depth_visualization = visualize_depth_image(img_depth)

    # workaround for a bug in the png module
    depth_visualization = depth_visualization.copy()  # makes it contiguous
    shape = depth_visualization.shape
    depth_visualization.shape = (shape[0], np.prod(shape[1:]))

    depth_image = png.from_array(depth_visualization, "RGBA;8")
    depth_image.save("%s/%05d_depth_visualization.png" % (folder, i))

    imsave("%s/%05d_colors.png" % (folder, i), image)

    ground_truth = process_ground_truth(label)
    imsave("%s/%05d_ground_truth.png" % (folder, i), ground_truth)
Example #7
    def save_segmented_image(self, filepath_image, modality='t1c', show=False):
        '''
        Creates an image of original brain with segmentation overlay and save it in ./predictions
        INPUT   (1) str 'filepath_image': filepath to test image for segmentation, including file extension
                (2) str 'modality': imaging modality to use as background. defaults to t1c. options: (flair, t1, t1c, t2)
                (3) bool 'show': If true, shows output image. defaults to False.
        OUTPUT  (1) if show is True, shows image of segmentation results
                (2) if show is false, returns segmented image.
        '''
        modes = {'flair': 0, 't1': 1, 't1c': 2, 't2': 3}

        segmentation = self.predict_image(filepath_image, show=False)
        print('segmentation = ' + str(segmentation))
        img_mask = np.pad(segmentation, (16, 16), mode='edge')
        ones = np.argwhere(img_mask == 1)
        twos = np.argwhere(img_mask == 2)
        threes = np.argwhere(img_mask == 3)
        fours = np.argwhere(img_mask == 4)

        test_im = io.imread(filepath_image)
        test_back = test_im.reshape(5, 216, 160)[modes[modality]]
        # overlay = mark_boundaries(test_back, img_mask)
        gray_img = img_as_float(test_back)

        # adjust gamma of image
        image = adjust_gamma(color.gray2rgb(gray_img), 0.65)
        sliced_image = image.copy()
        red_multiplier = [1, 0.2, 0.2]
        yellow_multiplier = [1, 1, 0.25]
        green_multiplier = [0.35, 0.75, 0.25]
        blue_multiplier = [0, 0.25, 0.9]

        print(len(ones))
        print(len(twos))
        print(len(threes))
        print(len(fours))

        # change colors of segmented classes
        for i in range(len(ones)):
            sliced_image[ones[i][0]][ones[i][1]] = red_multiplier
        for i in range(len(twos)):
            sliced_image[twos[i][0]][twos[i][1]] = green_multiplier
        for i in range(len(threes)):
            sliced_image[threes[i][0]][threes[i][1]] = blue_multiplier
        for i in range(len(fours)):
            sliced_image[fours[i][0]][fours[i][1]] = yellow_multiplier
        # if show is True, display the prediction
        if show:
            print('Showing...')
            io.imshow(sliced_image)
            plt.show()
        # save the prediction
        print('Saving...')
        try:
            mkdir_p('./predictions/')
            io.imsave('./predictions/' + os.path.basename(filepath_image) + '.png', sliced_image)
            print('prediction saved.')
        except OSError:
            # directory may already exist; save anyway
            io.imsave('./predictions/' + os.path.basename(filepath_image) + '.png', sliced_image)
            print('prediction saved.')
Example #8
 def roundtrip(self, dtype, x, suffix):
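     # create a named temp file and close the handle so imsave() can reopen
     # the path by name (keeps the roundtrip safe on Windows)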
     f = NamedTemporaryFile(suffix='.' + suffix)
     fname = f.name
     f.close()
     sio.imsave(fname, x)
     y = sio.imread(fname)
     assert_array_equal(y, x)
Example #9
def detect(path):

    input_image = io.imread(path)
    gnb = joblib.load('/home/qburst/Desktop/Emotion_detection/ED_dist/Classifier/gnb.pkl')
    face_count, features, faces = p.feature_extraction(input_image)
    if face_count:
        emotions = gnb.predict(features)
        print(emotions)
        for d, emotion in zip(faces, emotions):
            print(d, emotion)
            if emotion == 0:
                cv2.rectangle(input_image, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 0), 2)
            elif emotion == 1:
                cv2.rectangle(input_image, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 255), 3)
            elif emotion == 2:
                cv2.rectangle(input_image, (d.left(), d.top()), (d.right(), d.bottom()), (255, 255, 0), 3)
            elif emotion == 3:
                cv2.rectangle(input_image, (d.left(), d.top()), (d.right(), d.bottom()), (255, 0, 0), 3)
        io.imsave('Detected/emotion.jpg', input_image)
        return face_count, 'Detected/emotion.jpg'
    else:
        return face_count, 'err'
Example #10
    def saveImage(self, outputImages):
        imageName = 'segout1104.png'

        def map_channels(i_x):
            i, x = i_x
            x = (x * 255).astype(np.uint8)
            if x.max() > 0.35 * 255:
                threshold = np.fabs(x.max() - x.max() * .65)
            else:
                threshold = 255
            threshImage = stats.threshold(x, threshmin=threshold)
            threshImage[threshImage > 0] = i
            return threshImage

        def smash_channels(channels):
            base = channels[0]
            for i, x in enumerate(channels):
                base[x > 0] = i
            return base

        # np.ndarray.reshape(outputImages,[1,480,640,2])
        imgchannels = list(map(map_channels, enumerate(np.transpose(outputImages[0, :, :, :], [2, 0, 1]))))
        smashed = smash_channels(imgchannels)

        io.imsave(imageName, smashed)
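# scipy.stats.threshold() used above was removed in SciPy 1.0; an equivalent
# drop-in for the one-sided call here (values below threshmin become 0):
import numpy as np

def threshold(a, threshmin=None, newval=0):
    out = np.asarray(a).copy()
    if threshmin is not None:
        out[out < threshmin] = newval
    return out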
Example #11
def rawFrameToImageFile(image, filename):
    """Writes a single raw image frame to image file.
    The file type must be given, e.g. png or jpg.
    The image need not be scaled beforehand, it is done prior 
    to writing out the image. Could be one of
    BMP, JPG, JPEG, PNG, PPM, TIFF, XBM, XPM)
    but the file types available depends
    on the QT imsave plugin in use.

    Args:
        | image (np.ndarray): two-dimensional array representing an image
        | filename (string): name of file to be written to, with extension

    Returns:
        | Nothing

    Raises:
        | No exception is raised.
    """
    # normalise the input image data to the range [0, 1]
    from scipy import ndimage

    image = (image - ndimage.minimum(image)) / (ndimage.maximum(image) - ndimage.minimum(image))

    # http://scikit-image.org/docs/dev/api/skimage.io.html#imsave
    import skimage.io as io
    io.imsave(filename, image) 
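# Hypothetical usage: any numeric range works, since the function normalises.
import numpy as np
frame = np.random.rand(128, 128) * 4096.0  # assumed synthetic 12-bit-style frame
rawFrameToImageFile(frame, 'frame.png')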
Example #12
 def roundtrip_file(self, x):
     f = NamedTemporaryFile(suffix='.png')
     fname = f.name
     f.close()
     imsave(fname, x)
     y = imread(fname)
     return y
Example #13
 def roundtrip(self, dtype, x):
     f = NamedTemporaryFile(suffix='.tif')
     fname = f.name
     f.close()
     imsave(fname, x)
     y = imread(fname)
     assert_array_equal(x, y)
Example #14
    def _write_image(self, img_data, filename, img_format=None, dtype=None):
        """
        Output image data to a file, in a given image format.
        Assumes that the output directory exists (must be checked before).

        @param img_data :: image data in the usual numpy representation
        @param filename :: file name, including directory and extension
        @param img_format :: image file format
        @param dtype :: can be used to force a pixel type, otherwise the type
                        of the input data is used

        Returns:: name of the file saved
        """
        if not img_format:
            img_format = self.default_out_format
        filename = filename + '.' + img_format

        if dtype and img_data.dtype != dtype:
            img_data = np.array(img_data, dtype=dtype)

        if img_format == 'tiff' and _USING_PLUGIN_TIFFFILE:
            img_data = exposure.rescale_intensity(img_data, out_range='uint16')
            skio.imsave(filename, img_data, plugin='tifffile')
        else:
            img_data = exposure.rescale_intensity(img_data, out_range='uint16')
            skio.imsave(filename, img_data)

        return filename
Example #15
def run_quadrant_stitch(fns, re_string='(.*)_(s[1-4])_(w[1-3]).*',
                        re_quadrant_group=1):
    """Read images, stitched them, and write out to same directory.

    Parameters
    ----------
    fns : list of string
        The filenames to be processed.
    re_string : string, optional
        The regular expression to match the filename.
    re_quadrant_group : int, optional
        The group from the re.match object that will contain quadrant info.

    Returns
    -------
    fns_out : list of string
        The output filenames
    """
    qd = group_by_quadrant(fns, re_string, re_quadrant_group)
    fns_out = []
    for fn_pattern, fns in qd.items():
        new_filename = '_'.join(fn_pattern) + '_stitched.tif'
        ims = map(io.imread, sorted(fns))
        im = quadrant_stitch(*ims)
        io.imsave(new_filename, im)
        fns_out.append(new_filename)
    return fns_out
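# group_by_quadrant() is not shown in this listing; a plausible sketch, assuming
# the non-quadrant regex groups form the key used to build the output name:
import re
from collections import defaultdict

def group_by_quadrant(fns, re_string, re_quadrant_group):
    groups = defaultdict(list)
    for fn in fns:
        m = re.match(re_string, fn)
        if m is not None:
            key = tuple(g for i, g in enumerate(m.groups())
                        if i != re_quadrant_group)
            groups[key].append(fn)
    return groups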
Example #16
def run():
    fileList = os.listdir(cfg.resultsFolder)
    resultsFileList = filter(lambda element: '.result' in element, fileList)

    for resultsFile in resultsFileList:

        resultsFilePath = cfg.resultsFolder + '/' + resultsFile
        file = open(resultsFilePath, 'rb')  # pickle files must be opened in binary mode
        imageResults = pickle.load(file)

        boxes = imageResults['bboxes']
        scores = imageResults['scores']
        imagepath = imageResults['imagepath']

        filename = os.path.basename(imagepath)
        if boxes is None:
            print('No pedestrians found for image ' + imagepath)
            continue

        print('Saving results for image ' + filename)

        idx = np.where(scores > cfg.decision_threshold)
        boxes = boxes[idx]
        scores = scores[idx]

        boxes, scores = nms.non_max_suppression_fast(boxes, scores, overlapthresh=cfg.nmsOverlapThresh)

        img = Image.open(imagepath)
        #Show the results on a colored image
        img = drawing.drawResultsOnImage(img, boxes, scores)
        io.imsave('Results/'+filename, img)

        file.close()

    print('Finished!')
Example #17
def deflicker():
    #bins = numpy.arange(0, 1, 0.01)
    bins = 256

    for path, img in zip(paths, use_first(paths, bins)):
        output_path = os.path.join(output_directory, os.path.basename(path))
        io.imsave(output_path, img)
Example #18
def make_lungs():
    path = '/path/to/JSRT/All247images/'
    for i, filename in enumerate(os.listdir(path)):
        img = 1.0 - np.fromfile(path + filename, dtype='>u2').reshape((2048, 2048)) * 1. / 4096
        img = exposure.equalize_hist(img)
        io.imsave('/path/to/JSRT/new/' + filename[:-4] + '.png', img)
        print('Lung', i, filename)
Example #19
def main():

    t0 = time.time()

    print("Preparing Inputs...")
    pi.prepareInputs()

    ndsm = io.imread("C:\\bertud_temp\\ndsm.tif")
    classified = io.imread("C:\\bertud_temp\\classified.tif")
    classified = classified[0:len(ndsm), 0:len(ndsm[0])]
    slope = io.imread("C:\\bertud_temp\\slope.tif")
    numret = io.imread("C:\\bertud_temp\\numret.tif")

    print("Generating Initial Mask...")
    initialMask = ma.generateInitialMask(ndsm, classified, slope, numret)

    io.imsave("C:\\bertud_temp\\initialMask.tif", initialMask)

    pieces = br.performBoundaryRegularizationV2(initialMask, numProcesses=3)

    finalMask = ma.buildFinalMask(pieces, initialMask)

    io.imsave("C:\\bertud_temp\\finalMask.tif", finalMask)

    # pickle.dump(pieces, open("E:/BertudV2/pieces.pickle", "wb"))

    t1 = time.time()

    print("Finished everything in", round(t1 - t0, 2), "s.")
Example #20
def addArtificialData():
    print("here")
    baseName = os.path.basename(leftEyePath)
    print(baseName)
    data_dir = os.path.join(projectPath, baseName)
    print(data_dir)
    files = os.listdir(data_dir)
    files = [f for f in files if f.split('.')[-1] == 'txt']
    print(files)
    data = []
    for f in files:
        label = f.split('.')[0]
        filePath = os.path.join(data_dir, f)
        with open(filePath, 'r') as r:
            for image in r:
                data.append(image.strip())
    # print(data)
    for f in data:
        parentDir = os.path.dirname(f)
        image_name = f.split('/')[-1].split('.')[0]
        scale_image_name = os.path.join(parentDir, image_name + '_s.jpg')
        rotate_image_name = os.path.join(parentDir, image_name + '_r.jpg')
        print(image_name)
        img = io.imread(f, as_grey=True)
        scale_image = rescale(img, 0.9)
        rotated_image = rotate(img, 5, resize=False)
        print(img.shape)
        print(scale_image.shape)
        print(rotated_image.shape)
        io.imsave(scale_image_name, scale_image)
        io.imsave(rotate_image_name, rotated_image)
        input()  # pause between images (was raw_input in Python 2)
Example #21
def make_masks():
    path = '/path/to/JSRT/All247images/'
    for i, filename in enumerate(os.listdir(path)):
        left = io.imread('/path/to/JSRT/Masks/left lung/' + filename[:-4] + '.gif')
        right = io.imread('/path/to/JSRT/Masks/right lung/' + filename[:-4] + '.gif')
        # cast up before adding so uint8 addition cannot wrap around
        io.imsave('/path/to/JSRT/new/' + filename[:-4] + 'msk.png',
                  np.clip(left.astype(np.uint16) + right, 0, 255).astype(np.uint8))
        print('Mask', i, filename)
Example #22
def make_face_tubes(input, output):
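    # NOTE: `format` and `img` are assumed to be defined in the enclosing
    # module; they are not shown in this snippet.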

    if format == "jpg":
        io.imsave(output, img)
    else:
        output = output.rstrip("jpg") + ".npy"
        numpy.save(output, img)
Example #23
    def produce(solution_number, gsd, name=None):
        print("{}/{}".format(solution_number+1, number_of_solutions))
        cam_left = model.fexternal(solution_number)[0]
        cam_right = model.fexternal(solution_number)[1]

        corners_left  = project_corners(model.finternal(solution_number), cam_left , pixel_size(model.finternal(solution_number)), image_shape, elevation)
        corners_right = project_corners(model.finternal(solution_number), cam_right, pixel_size(model.finternal(solution_number)), image_shape, elevation)

        world_rect = WorldRect.from_points(np.vstack([corners_left, corners_right]))

        tile = FlatTile(world_rect, gsd)
        tile.draw_cam_trace(corners_left)
        tile.draw_cam_trace(corners_right)
        tile.project_camera(model.finternal(solution_number), cam_left, elevation, left)
        tile.project_camera(model.finternal(solution_number), cam_right, elevation, right)
        tile.draw_observations(model.finternal(solution_number), cam_left, elevation, data_set.rows, data_set.cols, model.features.edges[0].obs_a)
        tile.draw_observations(model.finternal(solution_number), cam_right, elevation, data_set.rows, data_set.cols, model.features.edges[0].obs_b)
        tile.draw_obs_pair(model.finternal(solution_number),
                           cam_left,
                           cam_right,
                           elevation,
                           data_set.rows,
                           data_set.cols,
                           model.features.edges[0].obs_a,
                           model.features.edges[0].obs_b)

        if name is None:
            name = "iteration{}.jpg".format(solution_number)
        io.imsave(os.path.join(tile_dir, name), tile.image)
Example #24
def gen_ablation(imgIds = [], mode = 'blackout', ct = None, out_path="tmp", **args):
    """Perform specified ablation on every image specified by the imgIds list.
    If no imgId is specified, will randomly sample an image with text.
    return (imgId, old_img, new_img) list"""
    imgs = ct.loadImgs(imgIds)
    results = []
    for idx, img in enumerate(imgs):
        print("Ablating image {}/{}".format(idx+1, len(imgIds)))
        ori_file_name = '%s/%s/%s'%(DATA_PATH,DATA_TYPE,img['file_name'])
        orig = io.imread(ori_file_name)
        annIds = ct.getAnnIds(imgIds=img['id'])
        anns = ct.loadAnns(annIds)

        if len(anns)==0:
            print("[WARNING] Weirdly sampled an image without text contents:{}".format(img['file_name']))

        running = orig
        for ann in anns:
            bbox = ann['bbox'] #format: [x,y,width,height]
            if mode=='blackout':
                running = blackout(running, bbox)
            elif mode=='gaussian':
                running = gaussian(running, bbox, ksize=args['ksize'], sigma = args['sigma'])
            elif mode=='median':
                running = median(running, bbox, width=args['width'])
        out_file_name = os.path.join(CD, "..", out_path, "%s_%s"%(mode, img['file_name']))
        io.imsave(out_file_name, running)
        results.append((img['id'], ori_file_name, out_file_name))
    return results
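# blackout(), gaussian(), and median() are not shown in this listing; a
# plausible sketch of blackout(), assuming bbox is [x, y, width, height]:
def blackout(img, bbox):
    x, y, w, h = (int(round(v)) for v in bbox)
    out = img.copy()
    out[y:y + h, x:x + w] = 0  # zero out the annotated text region
    return out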
Example #25
def ablate(imgIds=[], mode='destroy', out_path="tmp", coco=coco, ct=None, **args):
    """[ablation entry point 2.0]
    Created to accommodate background-destroying ablation. Will dispatch all
    old ablations (gaussian, blackout, & median) to gen_ablation."""

    if ct is None:
        ct = coco_text.COCO_Text(os.path.join(CD, 'COCO_Text.json'))
    if imgIds == []:
        imgIds = ct.getImgIds(imgIds=ct.train, catIds=[('legibility','legible')])
        imgIds = [imgIds[np.random.randint(0,len(imgIds))]]

    #dispatch to old ablation entry point
    if mode in ['gaussian', 'blackout', 'median']:
        return gen_ablation(imgIds, mode, ct, out_path=out_path, **args)

    #else do destroy_bg
    if coco is None:
        coco = COCO('%s/annotations/instances_%s.json'%(DATA_PATH,DATA_TYPE))
    imgs = coco.loadImgs(imgIds)
    results = []
    for idx, img in enumerate(imgs):
        print("Ablating image {}/{} with id {} ".format(idx+1, len(imgIds), img['id']))
        ori_file_name = os.path.join(CD, DATA_PATH, DATA_TYPE, img['file_name'])
        orig = io.imread(ori_file_name)

        if mode == 'destroy':
            ablt = destroy_bg(orig, img['id'], coco, **args)
        elif mode == 'median_bg':
            ablt = median_bg(orig, img['id'], coco, **args)

        out_file_name = os.path.join(CD, "..", out_path, "%s_%s"%(mode, img['file_name']))
        io.imsave(out_file_name, ablt)

        results.append((img['id'], ori_file_name, out_file_name))
    return results
Example #26
    def save_to_file(self):
        """Save current image to file.

        The current behavior is not ideal: It saves the image displayed on
        screen, so all images will be converted to RGB, and the image size is
        not preserved (resizing the viewer window will alter the size of the
        saved image).
        """
        filename = dialogs.save_file_dialog()
        if filename is None:
            return
        if len(self.ax.images) == 1:
            io.imsave(filename, self.image)
        else:
            underlay = mpl_image_to_rgba(self.ax.images[0])
            overlay = mpl_image_to_rgba(self.ax.images[1])
            alpha = overlay[:, :, 3]

            # alpha can be set by channel of array or by a scalar value.
            # Prefer the alpha channel, but fall back to scalar value.
            if np.all(alpha == 1):
                alpha = np.ones_like(alpha) * self.ax.images[1].get_alpha()

            alpha = alpha[:, :, np.newaxis]
            composite = (overlay[:, :, :3] * alpha +
                         underlay[:, :, :3] * (1 - alpha))
            io.imsave(filename, composite)
Example #27
def run_illum(args):
    """Run illumination correction.

    Parameters
    ----------
    args : argparse.Namespace
        The arguments parsed by the argparse library.
    """
    if args.file_list is not None:
        args.images.extend([fn.rstrip() for fn in args.file_list])
    il = pre.find_background_illumination(args.images, args.radius,
                                          args.quantile, args.stretchlim,
                                          args.use_mask, args.mask_offset,
                                          args.mask_close, args.mask_erode)
    if args.verbose:
        print('illumination field:', type(il), il.dtype, il.min(), il.max())
    if args.save_illumination is not None:
        io.imsave(args.save_illumination, il / il.max())
    base_fns = [pre.basefn(fn) for fn in args.images]
    ims_out = [fn + args.output_suffix for fn in base_fns]
    mask_fns = [fn + '.mask.tif' for fn in base_fns]
    ims = (io.imread(fn) for fn in args.images)
    for im, fout, mask_fn in zip(ims, ims_out, mask_fns):  # was it.izip (Python 2 only)
        if os.path.isfile(mask_fn):
            mask = io.imread(mask_fn).astype(bool)
        else:
            mask = np.ones(im.shape, bool)
        im = pre.correct_image_illumination(im, il,
                                            args.stretchlim_output, mask)
        io.imsave(fout, im)
Example #28
def main():
    # get a list of image filenames in a directory
    base_directory = "/path/to/image/directory"

    # create an output_directory
    output_directory = os.path.join(base_directory, "output")
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    # string formatting with leading zeros - we'll use this later
    output_filename_template = os.path.join(output_directory, "output {:03d}.png")

    # this will work if base_directory does not end with '/'
    filenames = glob(base_directory + "/*.png")

    # but it's better to safely join paths
    filenames = glob(os.path.join(base_directory, "*.png"))

    # we'll often want to process these in order
    filenames = sorted(glob(os.path.join(base_directory, "*.png")))

    # process each image
    for i, filename in enumerate(filenames):
        image = img_as_float(imread(filename))
        result = vignette(image)
        imsave(output_filename_template.format(i), result)
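# vignette() is not shown in this listing; a plausible sketch that darkens
# pixels radially away from the image centre:
import numpy as np

def vignette(image, strength=2.5):
    h, w = image.shape[:2]
    y, x = np.ogrid[:h, :w]
    cy, cx = (h - 1) / 2.0, (w - 1) / 2.0
    r2 = ((y - cy) / h) ** 2 + ((x - cx) / w) ** 2
    falloff = np.exp(-strength * r2)  # radial Gaussian falloff
    if image.ndim == 3:
        falloff = falloff[..., np.newaxis]
    return image * falloff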
Example #29
def run(imfile, N, sigma, mu):
    N = 2 if N is None else int(N)
    sigma = 1.0 if sigma is None else float(sigma)
    mu = 10.0 if mu is None else float(mu)

    # read image
    im0 = imread(imfile, as_grey=True)

    # rescale to a common size
    scale = 1e6 / float(im0.size)
    im = rescale(im0, (scale, scale))

    # estimate illumination profile
    proc0 = NonUniformIllumination(N=N, sigma=sigma, mu=mu)

    comp = proc0(im)
    illum = proc0.profile

    # # resize to original size
    # illum = rescale(illum, (1.0/scale, 1.0/scale))
    # illum = np.resize(illum, im0.shape)

    fname = os.path.splitext(imfile)

    illum = (illum - illum.min()) / (illum.max() - illum.min())
    imsave(fname[0] + '-illum' + fname[1], illum)

    comp = (comp - comp.min()) / (comp.max() - comp.min())
    imsave(fname[0] + '-comp' + fname[1], comp)

    return
Example #30
def main():
    try:
        if len(sys.argv) <= 1:
            print('Error: Filename Required')
        if len(sys.argv) == 2:
            print('Error: Background Filename Required')
        if len(sys.argv) >= 3:
            # Constants
            Window_Size = 5
            image_name = sys.argv[1]
            ref_name = sys.argv[2]

            image = rgb2gray(io.imread(sys.argv[1]))
            ref = rgb2gray(io.imread(sys.argv[2]))

            part_image, region, angle = pre.interest_region(image, plot_image=0)
            ref_rotate = rotate(ref, angle)
            part_ref = ref_rotate[region[0]:region[1], region[2]:region[3]]

            pre_image = pre.noise_reduction(part_image, part_ref, Window_Size, mode=0)
            io.imsave('pre_image.jpg', pre_image)

    except KeyboardInterrupt:
        print("Shutdown requested... exiting")
    except Exception:
        traceback.print_exc(file=sys.stdout)
    sys.exit(0)
Example #31
    ## the new_class list holds the new class, which is either the mean or the max value
    new_class.append(st.mean(L1_class))

    ## then clear L and L1 so the same operation can be applied to segment i+1
    del L1_class[:]
    del L_class[:]

##----------------------------------------------#
#### Building the matrix V ####
##----------------------------------------------#

V = np.asarray(new_class)

##----------------------------------------------#
#### Building the image with the new method
##----------------------------------------------#
### Build the new image whose pixel values come from the rows of matrix V
im_vide = np.zeros((KoLanta_segmentation_ligne, KoLanta_segmentation_colonne))

for i in range(KoLanta_segmentation_ligne):
    for j in range(KoLanta_segmentation_colonne):
        im_vide[i][j] = V[liste_segment_unique.index(KoLanta_segmentation[i,
                                                                          j])]

### Save the im_vide image
imsave("image_result.tif", im_vide)

### Display the image
##plt.figure(1)
##plt.imshow(im_vide)
Example #32
digits = load_digits()

data_dir = os.path.join(KIT_DIR, 'data')
if not os.path.exists(data_dir):
    os.mkdir(data_dir)
n_images = digits.data.shape[0]
n_train_images = int(n_images * 0.8)

img_dir = os.path.join(data_dir, 'imgs')
if not os.path.exists(img_dir):
    os.mkdir(img_dir)

filenames_image = []
for img_idx, img in zip(range(n_images), digits.data):
    filename = os.path.join(img_dir, str(img_idx) + '.png')
    imsave(filename, img.reshape((8, 8)).astype(np.uint8))  # pixel values are 0-16; use an unsigned dtype
    filenames_image.append(filename)

train_csv = pd.DataFrame({
    'id': np.array(filenames_image[:n_train_images]),
    'class': digits.target[:n_train_images]
})
train_csv = train_csv.set_index('id')
train_csv.to_csv(os.path.join(data_dir, 'train.csv'))

test_csv = pd.DataFrame({
    'id': np.array(filenames_image[n_train_images:]),
    'class': digits.target[n_train_images:]
})
test_csv = test_csv.set_index('id')
test_csv.to_csv(os.path.join(data_dir, 'test.csv'))
Example #33
from skimage import io
from skimage import feature
from skimage import color
from skimage import img_as_ubyte

img = io.imread("./image.jpg")
img = color.rgb2gray(img)
edge = feature.canny(img, 3)
io.imshow(edge)
io.imsave("canny_edge.jpg", img_as_ubyte(edge))  # canny returns a bool mask; convert before saving
io.show()
Example #34
# Viewing the data from the loaded mask
plt.subplot(1,2,1)
plt.imshow(left_mask[137])
plt.subplot(1,2,2)
plt.imshow(right_mask[137])

len(right_mask)

#Defining function to merge the manual masks
def merge_mask(left_mask, right_mask):
  img = []
  for i in range(138):
    merged_image = left_mask[i] + right_mask[i]
    img.append(merged_image)
  img = np.array(img)
  return img

#Call for function
merged_images = merge_mask(left_mask, right_mask)

plt.imshow(merged_images[137])

#Saving the merged mask to the MergedMask directory
save_path = '/home/suprim/dataset/MontgomerySet/ManualMask/MergedMasks'
for i in range(138):
  io.imsave(save_path + '/' + f"m_{str(i).zfill(4)}.png", merged_images[i]  ) #.zfill(4) to make files 0000,0001

#Loading the saved mask to check the merged manual masks
a = io.imread('/home/suprim/dataset/MontgomerySet/ManualMask/MergedMasks/m_0030.png')

plt.imshow(a)
Example #35
File: MNIST.py Project: SakuraAyase/GAN
def plot(image, i):
    image = np.reshape(image, [28, 28])
    fileName = 'C:\\Users\\myfamily\\Desktop\\新建文件夹2\\'
    io.imsave(fileName + 'lean' + str(i) + '.jpg', image)
Example #36
 def _save_picture(data, path):
     try:
         io.imsave(path, data)
         return True
     except (KeyError, TypeError):
         return False
Example #37
from tkinter.filedialog import askdirectory
from tkinter import Tk
from os import listdir
from skimage import io
from skimage import util
import numpy as np

Tk().withdraw()

dir1 = askdirectory(title='Choose source directory')
dir2 = askdirectory(title='Choose destination directory')
print(dir1)
print(dir2)

for f in listdir(dir1):
    if f.find('background') == 0 or f.find('Background') == 0:
        bgr = io.imread(dir1 + '/' + f)
        bgr = util.invert(bgr)
for f in listdir(dir1):
    if f.find('background') != 0 and f.find('Background') != 0:
        img = io.imread(dir1 + '/' + f)
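        # uint8 addition wraps modulo 256; pixels that wrapped are now smaller
        # than the background, so the next line forces them to white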
        img = np.add(img, bgr)
        img[img < bgr] = 255
        io.imsave(dir2 + '/' + f, img)

print('Done!')
Example #38
        yield ([X_batch, create_inception_embedding(grayscaled_rgb)], Y_batch)


#Train model
# model.add(Dropout(0.5))
model.compile(optimizer='rmsprop', loss='mse')
model.fit_generator(image_a_b_gen(batch_size), epochs=4000, steps_per_epoch=1)

# In[10]:
model.save('final_model.h5')

color_me = []
for filename in os.listdir('Test/'):
    color_me.append(img_to_array(load_img('Test/' + filename)))
color_me = np.array(color_me, dtype=float)
gray_me = gray2rgb(rgb2gray(1.0 / 255 * color_me))
color_me_embed = create_inception_embedding(gray_me)
color_me = rgb2lab(1.0 / 255 * color_me)[:, :, :, 0]
color_me = color_me.reshape(color_me.shape + (1, ))

# Test model
output = model.predict([color_me, color_me_embed])
output = output * 128

# Output colorizations
for i in range(len(output)):
    cur = np.zeros((256, 256, 3))
    cur[:, :, 0] = color_me[i][:, :, 0]
    cur[:, :, 1:] = output[i]
    imsave("result/img_" + str(i) + ".png", lab2rgb(cur))
Example #39
def cropimagesandlabels(
    config,
    numcrops=10,
    size=(400, 400),
    userfeedback=True,
    cropdata=True,
    excludealreadycropped=True,
    updatevideoentries=True,
):
    """
    Crop images into multiple random crops (defined by numcrops) of size dimensions. If cropdata=True then the
    annotation data is loaded and labels for cropped images are inherited.
    If false, then one can make crops for unlabeled folders.

    This can be helpful for large frames with multiple animals: a smaller set of equally sized images is created.

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.

    numcrops: number of random crops (around random bodypart)

    size: height x width in pixels

    userfeedback: bool, optional
        If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
        want to assure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.

    cropdata: bool, default True:
        If true creates corresponding annotation data (from ground truth)

    excludealreadycropped: bool, default true
        If true excludes folders that already contain _cropped in their name.

    updatevideoentries: bool, default true
        If true updates video_list entries to refer to cropped frames instead. This makes sense for subsequent processing.

    Example
    --------
    for labeling the frames
    >>> deeplabcut.cropimagesandlabels('/analysis/project/reaching-task/config.yaml')

    --------
    """
    from tqdm import trange

    indexlength = int(np.ceil(np.log10(numcrops)))
    project_path = os.path.dirname(config)
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg["video_sets"].keys()
    video_names = []
    for video in videos:
        parent, filename, ext = _robust_path_split(video)
        if excludealreadycropped and "_cropped" in filename:
            continue
        video_names.append([parent, filename, ext])

    if (
        "video_sets_original" not in cfg.keys() and updatevideoentries
    ):  # this dict is kept for storing links to original full-sized videos
        cfg["video_sets_original"] = {}

    for vidpath, vidname, videotype in video_names:
        folder = os.path.join(project_path, "labeled-data", vidname)
        if userfeedback:
            print("Do you want to crop frames for folder: ", folder, "?")
            askuser = input("(yes/no):")
        else:
            askuser = "yes"  # masked as "******" in the listing; any affirmative value works here
        if askuser == "y" or askuser == "yes" or askuser == "Y" or askuser == "Yes":
            new_vidname = vidname + "_cropped"
            new_folder = folder.replace(vidname, new_vidname)
            auxiliaryfunctions.attempttomakefolder(new_folder)

            AnnotationData = []
            pd_index = []

            fn = os.path.join(folder, f"CollectedData_{cfg['scorer']}.h5")
            df = pd.read_hdf(fn, "df_with_missing")
            data = df.values.reshape((df.shape[0], -1, 2))
            sep = "/" if "/" in df.index[0] else "\\"
            if sep != os.path.sep:
                df.index = df.index.str.replace(sep, os.path.sep)
            images = project_path + os.path.sep + df.index
            # Avoid cropping already cropped images
            cropped_images = auxiliaryfunctions.grab_files_in_folder(new_folder, "png")
            cropped_names = set(map(lambda x: x.split("c")[0], cropped_images))
            imnames = [
                im for im in images.to_list() if Path(im).stem not in cropped_names
            ]
            ic = io.imread_collection(imnames)
            for i in trange(len(ic)):
                frame = ic[i]
                h, w = np.shape(frame)[:2]
                if size[0] >= h or size[1] >= w:
                    shutil.rmtree(new_folder, ignore_errors=True)
                    raise ValueError("Crop dimensions are larger than image size")

                imagename = os.path.relpath(ic.files[i], project_path)
                ind = np.flatnonzero(df.index == imagename)[0]
                cropindex = 0
                attempts = -1
                while cropindex < numcrops:
                    dd = np.array(data[ind].copy(), dtype=float)
                    y0, x0 = (
                        np.random.randint(h - size[0]),
                        np.random.randint(w - size[1]),
                    )
                    y1 = y0 + size[0]
                    x1 = x0 + size[1]
                    with np.errstate(invalid="ignore"):
                        within = np.all((dd >= [x0, y0]) & (dd < [x1, y1]), axis=1)
                    if cropdata:
                        dd[within] -= [x0, y0]
                        dd[~within] = np.nan
                    attempts += 1
                    if within.any() or attempts > 10:
                        newimname = str(
                            Path(imagename).stem
                            + "c"
                            + str(cropindex).zfill(indexlength)
                            + ".png"
                        )
                        croppedimgname = os.path.join(new_folder, newimname)
                        io.imsave(croppedimgname, frame[y0:y1, x0:x1])
                        cropindex += 1
                        pd_index.append(
                            os.path.join("labeled-data", new_vidname, newimname)
                        )
                        AnnotationData.append(dd.flatten())

            if cropdata:
                df = pd.DataFrame(AnnotationData, index=pd_index, columns=df.columns)
                fn_new = fn.replace(folder, new_folder)
                df.to_hdf(fn_new, key="df_with_missing", mode="w")
                df.to_csv(fn_new.replace(".h5", ".csv"))

            if updatevideoentries and cropdata:
                # moving old entry to _original, dropping it from video_set and update crop parameters
                video_orig = sep.join((vidpath, vidname + videotype))
                cfg["video_sets_original"][video_orig] = cfg["video_sets"][video_orig]
                cfg["video_sets"].pop(video_orig)
                cfg["video_sets"][video_orig.replace(vidname, new_vidname)] = {
                    "crop": ", ".join(map(str, [0, size[1], 0, size[0]]))
                }

    cfg["croppedtraining"] = True
    auxiliaryfunctions.write_config(config, cfg)
Example #40
                        padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(2, (3, 3), activation='tanh',
                        padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
model = Model(inputs=encoder_input, outputs=decoder_output)

model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
model.fit(vggfeatures, Y, validation_split=0.2, epochs=1000)

testpath = 'C://Users/Asus/Desktop/dataset/data/'
files = os.listdir(testpath)
for idx, file in enumerate(files):
    test = img_to_array(load_img(testpath + file))
    test = resize(test, (224, 224), anti_aliasing=True)
    test *= 1.0 / 255
    lab = rgb2lab(test)
    l = lab[:, :, 0]
    L = gray2rgb(l)
    L = L.reshape((1, 224, 224, 3))
    print(L.shape)
    vggpred = newmodel.predict(L)
    vggpred = vggpred.reshape((1, 7, 7, 512))
    ab = model.predict(vggpred)
    print(ab.shape)
    ab = ab * 128
    cur = np.zeros((224, 224, 3))
    cur[:, :, 0] = l
    cur[:, :, 1:] = ab
    imsave('C://Users/Asus/Desktop/dataset/' + str(idx) + ".jpg", lab2rgb(cur))
Example #41
filename = sys.argv[1]

# reading the image
image = io.imread(filename)

# preprocessing
rows, cols = image.shape[0], image.shape[1]
image = image.reshape(rows * cols, 3)

# modelling
print('Compressing...')
print('Note: This can take a while for a large image file.')
kMeans = KMeans(n_clusters=16)
kMeans.fit(image)

# getting centers and labels
centers = np.asarray(kMeans.cluster_centers_, dtype=np.uint8)
labels = np.asarray(kMeans.labels_, dtype=np.uint8)
labels = np.reshape(labels, (rows, cols))
print('Almost done.')

# reconstructing the image
newImage = np.zeros((rows, cols, 3), dtype=np.uint8)
for i in range(rows):
    for j in range(cols):
        # assigning every pixel the rgb color of its label's center
        newImage[i, j, :] = centers[labels[i, j], :]
io.imsave(filename.split('.')[0] + '-compressed.png', newImage)

print('Image has been compressed successfully.')
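# Note: the per-pixel double loop above is equivalent to NumPy fancy indexing,
# which is much faster:
#     newImage = centers[labels]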
Example #42
import model
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from skimage import io, img_as_ubyte

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

unet = model.UNet2D().to(device)
unet.load_state_dict(torch.load('save.pt', map_location=device))
unet.eval()

t = transforms.ToTensor()
sample = t(io.imread('test.png'))
sample = torch.unsqueeze(sample, 0)
sample = sample.to(device)

with torch.no_grad():
    prediction = unet(sample)
    image = F.softmax(prediction, dim=1)[0, 1, :, :]
    image = image.cpu().numpy()
    io.imsave('out.png', img_as_ubyte(image))
Example #43
def save_image(image_dicts, input_image_name, network, output_dir):
    prefix = os.path.splitext(input_image_name)[0]
    for key, image in image_dicts.items():
        io.imsave(
            os.path.join(output_dir,
                         '{}-{}-{}.jpg'.format(prefix, network, key)), image)
Example #44
    return states


kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
starting_time = ctypes.wintypes.LARGE_INTEGER()
ending_time = ctypes.wintypes.LARGE_INTEGER()
elapsed_microseconds = ctypes.wintypes.LARGE_INTEGER()
frequency = ctypes.wintypes.LARGE_INTEGER()

kernel32.QueryPerformanceFrequency(ctypes.byref(frequency))
kernel32.QueryPerformanceCounter(ctypes.byref(starting_time))

base = io.imread('images/base.png') // 255
states = generateStateOfLive(base, 100)

#print(get_size(states))

# Save the generated states as images
if not os.path.exists('images/e'):
    os.makedirs('images/e')

for i in range(len(states)):
    io.imsave('images/e/{:04d}.png'.format(i + 1), states[i] * 255)

kernel32.QueryPerformanceCounter(ctypes.byref(ending_time))
elapsed_microseconds = ending_time.value - starting_time.value
elapsed_microseconds *= 1_000_000  # ticks -> microseconds, matching the variable name
elapsed_microseconds /= frequency.value

print(elapsed_microseconds)
Example #45
                                             windowSize=(224, 224)):
            # if the window does not meet our desired window size, ignore it
            if window.shape[0] != 224 or window.shape[1] != 224:
                continue
            if not label_image(window):
                continue
            else:
                label = label_image(window)

            if not os.path.exists(dir2Save + str(label) + "/"):
                os.makedirs(dir2Save + str(label) + "/")

            save_path = dir2Save + str(
                label) + "/" + name_im + "_" + institute + "_x_ini_" + str(
                    x) + "_y_ini_" + str(y) + ".png"
            io.imsave(save_path, im_RGB[y:y + 224, x:x + 224])
            df_im = pd.DataFrame({
                'im': [im],
                'patch': [
                    name_im + "_" + institute + "_x_ini_" + str(x) +
                    "_y_ini_" + str(y) + ".png"
                ],
                'name': [name_im],
                'institute': [institute],
                'label': [label],
                'x': [x],
                'y': [y]
            })
            evaluation_set_patches = [evaluation_set_patches, df_im]
            evaluation_set_patches = pd.concat(evaluation_set_patches)
Example #46
def saveResult(save_path, npyfile, flag_multi_class=False, num_class=2):
    for i, item in enumerate(npyfile):
        img = labelVisualize(num_class, COLOR_DICT, item) if flag_multi_class else item[:, :, 0]
        io.imsave(os.path.join(save_path, "%d_predict.png" % i), img)
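# labelVisualize() and COLOR_DICT are defined elsewhere in the project this was
# taken from; a plausible sketch mapping class indices to colours (palette assumed):
import numpy as np

COLOR_DICT = np.array([[0, 0, 0], [255, 255, 255]])  # assumed two-class palette

def labelVisualize(num_class, color_dict, img):
    img = img[:, :, 0] if img.ndim == 3 else img
    img_out = np.zeros(img.shape + (3,))
    for i in range(num_class):
        img_out[img == i] = color_dict[i]
    return img_out / 255.0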
Example #47
def printImage(image, filename):
    saveImage = image.copy()
    saveImage[image < 0] = 0
    saveImage[image > 1] = 1
    io.imsave(filename, img_as_ubyte(saveImage))
    path_norm = "Benign_m_norm" + "/"  #output path to keep the normalised output
    n = "t" + str(i) + ".tif"  # image name
    fullpath = path + n  # path to the input image
    fullpath_norm = path_norm + n  # output path to the normalised image
    print(fullpath)
    print(fullpath_norm)
    i1 = utils.read_image(fullpath)  # for reading images as arrays
    if (i == 0):
        #print(i1)

        # For the first image, fit the Macenko normaliser so that subsequent
        # images can be normalised on the same basis: norm.fit() estimates the
        # mean and SD used for normalisation.
        norm.fit(i1)

        io.imsave((fullpath_norm), i1)  #save the first image
    else:
        i2 = norm.transform(i1)  # apply the Macenko transformation to the image
        io.imsave((fullpath_norm), i2)  # save the image

print("IN SITU")
for i in range(0, 63):
    path = "In Situ" + "/"
    path_norm = "In Situ_m_norm" + "/"
    n = "t" + str(i) + ".tif"
    fullpath = path + n
    fullpath_norm = path_norm + n
    print(fullpath)
    print(fullpath_norm)
    i1 = utils.read_image(fullpath)
Example #49
#!/usr/bin/env python

from sys import argv
from skimage.filters import sobel
from skimage import io
#
path = argv[1]
imageName = argv[2]
processedImageName = argv[3]

# load image
image = io.imread(path + imageName)

if len(image.shape) == 3:
    image = image[:, :, 0]  #RGB -> grayscale
image = sobel(image)

# save image
io.imsave(path + processedImageName, image)



Example #50
# Evaluate models
"""1st Affine"""
theta_aff, theta_aff_inv = model(batch)

# Calculate theta_aff_2
batch_size = theta_aff.size(0)
theta_aff_inv = theta_aff_inv.view(-1, 2, 3)
theta_aff_inv = torch.cat((theta_aff_inv, (torch.Tensor([0, 0, 1]).to('cuda').unsqueeze(0).unsqueeze(1).expand(batch_size, 1, 3))), 1)
theta_aff_2 = theta_aff_inv.inverse().contiguous().view(-1, 9)[:, :6]

theta_aff_ensemble = (theta_aff + theta_aff_2) / 2  # Ensemble

### Process result
warped_image_aff = affTnf(Im2Tensor(source_image), theta_aff_ensemble.view(-1,2,3))
result_aff_np = warped_image_aff.squeeze(0).transpose(0,1).transpose(1,2).cpu().detach().numpy()
io.imsave('results/aff.jpg', result_aff_np)

"""2nd Affine"""
# Preprocess source_image_2
source_image_2 = normalize_image(resize(warped_image_aff.cpu()))
if use_cuda:
    source_image_2 = source_image_2.cuda()
theta_aff_aff, theta_aff_aff_inv = model({'source_image': source_image_2, 'target_image':batch['target_image']})

# Calculate theta_aff_2
batch_size = theta_aff_aff.size(0)
theta_aff_aff_inv = theta_aff_aff_inv.view(-1, 2, 3)
theta_aff_aff_inv = torch.cat((theta_aff_aff_inv, (torch.Tensor([0, 0, 1]).to('cuda').unsqueeze(0).unsqueeze(1).expand(batch_size, 1, 3))), 1)
theta_aff_aff_2 = theta_aff_aff_inv.inverse().contiguous().view(-1, 9)[:, :6]

theta_aff_aff_ensemble = (theta_aff_aff + theta_aff_aff_2) / 2  # Ensemble
Example #51
# imports needed for skimage
import skimage
import numpy
import sys
import scipy
from skimage import io, util, color
from scipy import ndimage

# Read in image and convert to appropriate format
image = util.img_as_float(color.rgb2gray(io.imread(sys.argv[1])))

# Read in the sharpening constant from command line
c = float(sys.argv[2])

# Create the Laplacian filter mask. This version of the filter generates positive
# values for brighter values. The negated alternative generates negative values for
# brighter values.
mask = numpy.asarray([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])

# Filter the image with the filter
laplacian = ndimage.convolve(image, mask, mode="nearest")

# Perform the sharpening as specified. If the negated filter is used, one must
# subtract instead of add
out = image + c * laplacian

# Save the clipped result
io.imsave(sys.argv[3], numpy.clip(out, 0, 1))
Example #52
                    minX = j
                elif j > maxX:
                    maxX = j
    return minX, maxX, minY, maxY


def printImage(image, filename):
    saveImage = image.copy()
    saveImage[image < 0] = 0
    saveImage[image > 1] = 1
    io.imsave(filename, img_as_ubyte(saveImage))


phaseSymmetry = calculatePhaseSymmetry(imageFFT, phaseSymmetry, symmetryTotal,
                                       amplitudeTotal)
io.imsave("/Users/purvigoel/224Final/images/phaseSymmetry.jpg",
          img_as_ubyte(phaseSymmetry))

image = io.imread("/Users/purvigoel/224Final/images/bunny_desired.png")
image = img_as_float(color.rgb2gray(image))

desired = phaseSymmetry

bestDiff = 10000000
bestImage = np.zeros((image.shape[0], image.shape[1]))

success = 0
attempt = 0

bestVector = []
printcounter = 0
xmin, xmax, ymin, ymax = maskMinMax(mask)
Example #53
def train_net(root,
              resize,
              data_augment,
              rotate,
              change_color,
              lr,
              weight_decay,
              model_choice,
              save_ckpt,
              image_size,
              batch_size,
              num_epochs,
              save_test_image,
              test_image_name,
              early_stop,
              early_stop_tol,
              lr_decay,
              decay_rate,
              decay_period,
              validate_root,
              loss_type='bce',
              smooth=1.0,
              lam=1.0,
              gamma=2.0):
    '''
    Network training, which will output:
        1. log for loss in every iteration, in text file.
        2. saved checkpoint which contains the trained parameters, in directory ./parameters
        3. segmentation result on the test image, saved in directory ./epoch_output.
    
    Parameters:
        @root: root directory for training dataset.
        @resize: boolean flag for image resizing.
        @data_augment: boolean flag for DA8 (randomly rotate 90 degrees, flip horizontally and vertically).
        @rotate: boolean flag for random rotation to the training images.
        @change_color: boolean flag for random perturbation on HSV channels of the training images.
        @lr: learning rate.
        @weight_decay: weight decay for L2 regularization on the network parameters.
        @model_choice: 1 for LinkNet, 2 for D-LinkNet, 3 for D-LinkNet+.
        @save_ckpt: the period (in epochs) to save the checkpoint of the network.
        @image_size: the image size for the images to trained.
        @batch_size: batch size for mini-batch stochastic gradient descent.
        @num_epochs: number of epochs for training.
        @save_test_image: the period (in epochs) to save the prediction of the test image.
        @test_image_name: the name of the test image.
        @early_stop: the boolean flag to have early stop.
        @early_stop_tol: the tolerance (in number of saving checkpoints) to trigger early stop.
        @lr_decay: boolean flag for learning rate decay in every decay period.
        @decay_rate: decay ratio for learning rate, e.g. lr = lr * lr_decay.
        @decay_period: the period in number of epochs to trigger the learning rate decay.
        @validate_root: root directory for validation dataset (mainly for evaluation of network during training).
        @loss_type: either 'bce' (BCE loss) or 'focal' (focal loss).
        @smooth: number to be added on denominator and numerator when compute dice loss.
        @lam: weight to balance the dice loss in the final combined loss.
        @gamma: for focal loss.
    '''

    if os.path.exists('./epoch_output'):
        shutil.rmtree('./epoch_output')
    os.makedirs('./epoch_output')

    if not os.path.exists('./parameters'):
        os.makedirs('./parameters')
    weights_name = './parameters/weights' + str(model_choice)

    net = utils.create_models(model_choice)
    net.train()  # in train mode

    # net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))

    # create AMSGrad optimizer
    optimizer = optim.Adam(net.parameters(),
                           lr=lr,
                           weight_decay=weight_decay,
                           amsgrad=True)
    Loss = utils.loss(smooth, lam, gamma, loss_type)

    dataloader = utils.get_data_loader(root, resize, data_augment, image_size,
                                       batch_size, rotate, change_color)

    num_batch = len(dataloader)
    total_train_iters = num_epochs * num_batch

    loss_history = []
    print('Started training at {}'.format(
        time.asctime(time.localtime(time.time()))))
    test_loss = 100.0
    count = 0
    for epoch in range(num_epochs):
        print('Start epoch ', epoch)
        epoch_loss = 0
        t = time.time()
        for iteration, batch in enumerate(dataloader, epoch * num_batch + 1):
            print('Iteration: ', iteration)
            print('Time for loading the data takes: ', time.time() - t, ' s')
            t = time.time()
            image = utils.np_to_var(batch['image'])
            mask = utils.np_to_var(batch['mask'])

            optimizer.zero_grad()

            pred = net.forward(image)

            loss = Loss.final_loss(pred, mask)

            loss.backward()
            optimizer.step()

            epoch_loss += loss.data.item()

            # print the log info
            print('Iteration [{:6d}/{:6d}] | loss: {:.4f}'.format(
                iteration, total_train_iters, loss.data.item()))
            print('Time spent on back propagation: ', time.time() - t, ' s')
            loss_history.append(loss.data.item())
            t = time.time()

        # save the test image for visualizing the training outcome
        if (epoch + 1) % save_test_image == 0:
            with torch.no_grad():
                _, test_image = test.test_single_image(net,
                                                       test_image_name,
                                                       resize=False)
            io.imsave('./epoch_output/test_epoch' + str(epoch) + '.png',
                      test_image)

        # early stop
        if early_stop and (epoch + 1) % save_ckpt == 0:
            with torch.no_grad():
                loss, f1 = test.test_batch_with_labels(net,
                                                       validate_root,
                                                       resize=False,
                                                       batch_size=10,
                                                       image_size=image_size,
                                                       smooth=smooth,
                                                       lam=lam)
                print('On the validation dataset, loss: ', loss, ', F1: ', f1)
                if loss <= test_loss:
                    test_loss = loss
                    count = 0
                    torch.save(net.state_dict(), weights_name)
                elif count < early_stop_tol:
                    count += 1
                    lr *= decay_rate
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr
                    print('The new loss is found to be larger than before')
                else:
                    print('Reached the early stop tolerance...')
                    print('Break the update at ', epoch, 'th epoch')
                    break

        if not early_stop and (epoch + 1) % save_ckpt == 0:
            with torch.no_grad():
                torch.save(net.state_dict(), weights_name)

        if lr_decay and (epoch + 1) % decay_period == 0:
            with torch.no_grad():
                lr *= decay_rate
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr

        epoch_loss /= num_batch
        print('In epoch ', epoch, ', the average batch loss is ', epoch_loss)

    if not early_stop:
        torch.save(net.state_dict(), weights_name)

    # save the loss history
    with open('loss.txt', 'wt') as file:
        file.write('\n'.join(['{}'.format(loss) for loss in loss_history]))
        file.write('\n')
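
A minimal follow-up sketch (not part of the original example), assuming matplotlib is available: plot the loss history that the loop above writes to loss.txt.

# Sketch: visualize the per-iteration loss saved to loss.txt above.
import matplotlib.pyplot as plt

with open('loss.txt') as f:
    losses = [float(line) for line in f if line.strip()]

plt.plot(losses)
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.show()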
示例#54
0
import os

from skimage import io
# assumed: pyhdf's SD reader for MODIS HDF4 granules
from pyhdf.SD import SD

a = os.listdir("./")
a.remove("process.py")
a.remove("nohup.out")

#img_, map_ = [], []

cnt = 0
for i in a:
    try:
        k = SD(i)
        k_img = k.select("MaxFRP").get()
        k_map = k.select("FireMask").get()

        for j in range(len(k_img)):
            io.imsave("MaxFRP/%d.jpg" % cnt, k_img[j])
            io.imsave("FireMask/%d.jpg" % cnt, k_map[j])
            cnt += 1
            if cnt % 1000 == 0:
                print(cnt)
        os.remove(i)
    except Exception:
        # skip files that cannot be read as HDF granules
        print(i)

#img_ = np.stack(img_)
#map_ = np.stack(map_)

#np.save("frp_img.npy", img_)
#np.save("fire_map.npy", map_)
示例#55
0
import os

import numpy as np
import tensorflow as tf
from skimage import io
from tqdm import tqdm

# note: `generator` must be provided by the project's model code (not shown here)

produce_num = 1000

result_dir = './result/produce/'

ckpt_path = './checkpoints/cell_fig_400.ckpt'

if not os.path.exists(result_dir):
    os.makedirs(result_dir)

x = tf.placeholder(tf.float32, [1, 100])
y = generator(x)

sess = tf.Session()
init = tf.global_variables_initializer()

saver = tf.train.Saver()
sess.run(init)
saver.restore(sess, ckpt_path)

for i in tqdm(range(produce_num)):
    noise = np.random.uniform(-1, 1, size=(1, 100))
    fig = sess.run(y, feed_dict={x: noise})
    fig = np.reshape(fig, (64, 64, 3))
    io.imsave(result_dir + '{0}.jpg'.format(i), fig)
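
If the generator's last activation is tanh (an assumption; the snippet does not show it), its output lies in [-1, 1] and saving raw floats can trigger range warnings; a hedged rescaling variant:

# Sketch: rescale a tanh output from [-1, 1] to uint8 before saving.
fig_u8 = ((fig + 1.0) * 127.5).clip(0, 255).astype(np.uint8)
io.imsave(result_dir + 'sample_u8.jpg', fig_u8)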
示例#56
0
    id__ = 'i' + getNameFromTime()
    mask_imgs, mask_imgs_flip = [], []  # (may already be initialized earlier in the full source)
    temp_imgs = next(os.walk(path + '/masks/'))[2]
    assert len(temp_imgs) > 0
    for mask in temp_imgs:
        mask_img = imread(path + '/masks/' + mask)
        mask_imgs.append(mask_img)
        mask_imgs_flip.append(np.fliplr(mask_img))
    image__flip = np.fliplr(image__)
    id__flip = 'i' + getNameFromTime()
    dirPath = ''
    os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__))
    os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__ + '/images/'))
    os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__ + '/masks/'))
    path___ = os.path.join(
        dirPath, 'stage1_train_copy/' + id__ + '/images/' + id__ + '.png')
    imsave(path___, image__)
    for mask_ in mask_imgs:
        mask_id = 'm' + getNameFromTime()
        path__m = os.path.join(
            dirPath,
            'stage1_train_copy/' + id__ + '/masks/' + mask_id + '.png')
        imsave(path__m, mask_)

    os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__flip))
    os.mkdir(
        os.path.join(dirPath, 'stage1_train_copy/' + id__flip + '/images/'))
    os.mkdir(os.path.join(dirPath,
                          'stage1_train_copy/' + id__flip + '/masks/'))
    path___ = os.path.join(
        dirPath,
        'stage1_train_copy/' + id__flip + '/images/' + id__flip + '.png')
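    # Sketch of the missing tail (the snippet cuts off here): save the flipped
    # image and its flipped masks, assuming the branch mirrors the one above.
    imsave(path___, image__flip)
    for mask_ in mask_imgs_flip:
        mask_id = 'm' + getNameFromTime()
        path__m = os.path.join(
            dirPath,
            'stage1_train_copy/' + id__flip + '/masks/' + mask_id + '.png')
        imsave(path__m, mask_)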
示例#57
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-io', '--filename_obj', type=str, default=os.path.join('BirdReOrined_UnwrapBlender.obj'))
    parser.add_argument('-ir', '--filename_ref', type=str, default=os.path.join('birdie2.png'))

    parser.add_argument('-or', '--filename_output', type=str, default=os.path.join(data_dir, 'example_result.gif'))
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    model = Model(args.filename_obj, args.filename_ref, useSilhouette=True, useDeltaAndScale=True)
    model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.1, betas=(0.5,0.999))
    loop = tqdm.tqdm(range(300))
    for _ in loop:
        loop.set_description('Optimizing')
        optimizer.zero_grad()
        loss = model()
        loss.backward()
        optimizer.step()

    # draw object
    # loop = tqdm.tqdm(range(0, 360, 4))
    # print(nr.get_points_from_angles(2.732,180,0))
    # Oddly, this is not output in spherical coordinates, yet the input looks
    # like spherical coordinates and so does the result. How curious.
    model.renderer.eye = nr.get_points_from_angles(2.732, 180, 0)
    # print(nr.get_points_from_angles(2.732,0,0))
    images, _, _ = model.renderer(model.vertices, model.faces, torch.tanh(model.textures))
    image = images.detach().cpu().numpy()[0].transpose((1, 2, 0))
    imsave('data/outputs.png', img_as_ubyte(image))
    # exit()
    #---------------------------test for read texture
    renderer_texture = nr.Renderer(image_size=256, camera_mode="look")
    renderer_texture.perspective = True
    renderer_texture.background_color = [1, 1, 1]
    renderer_texture.light_intensity_directional = 0.0
    renderer_texture.light_intensity_ambient = 1.0
    texture_vertices, texture_faces = load_texture_by_obj(args.filename_obj)
    texture_vertices = texture_vertices[None, :, :]
    texture_faces = texture_faces[None, :, :]
    texture_data = open("texture_data.txt", "w")
    print(model.textures.shape)
    # exit()
    for face_num in range(model.textures.shape[1]):
        for i in range(model.textures.shape[2]):
            for j in range(model.textures.shape[3]):
                for k in range(model.textures.shape[4]):
                    for color in range(3):
                        texture_data.write(str(model.textures[0][face_num][i][j][k][color].detach().cpu().numpy()))
                        if color < 2:
                            texture_data.write(" ")
                        else:
                            texture_data.write("\n")
    # print(model.textures[0])
    texture_data.close()
    # exit()
    renderer_texture.camera_direction = [0, 0, 1]
    renderer_texture.eye = [0.5, 0.5, -1 * (3 ** 0.5) / 2]
    images, _, _ = renderer_texture(texture_vertices, texture_faces, torch.tanh(model.textures))
    uv_image = images.detach().cpu().numpy()[0].transpose((1, 2, 0))

    imsave('data/texture.png', img_as_ubyte(uv_image))
    imshow(img_as_ubyte(uv_image))
    plt.show()
    #----------------------------------------------
    loop = tqdm.tqdm(range(0, 360, 4))  # re-create the iterator: the optimization loop above already exhausted it
    for num, azimuth in enumerate(loop):
        loop.set_description('Drawing')
        model.renderer.eye = nr.get_points_from_angles(2.732, 0, azimuth)
        images, _, _ = model.renderer(model.vertices, model.faces, torch.tanh(model.textures))
        image = images.detach().cpu().numpy()[0].transpose((1, 2, 0))
        imsave('/tmp/_tmp_%04d.png' % num, img_as_ubyte(image))
    make_gif(args.filename_output)
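
The quintuple loop above issues one Python-level write per channel value; a hedged numpy alternative that dumps the same tensor in one vectorized call:

# Sketch: write the whole texture tensor at once instead of via nested loops.
import numpy as np

tex = model.textures[0].detach().cpu().numpy()  # shape (faces, t, t, t, 3)
np.savetxt('texture_data.txt', tex.reshape(-1, 3), fmt='%.8f')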
示例#58
0
def test_net(model_choice,
             resize,
             image_size,
             TTA,
             ensemble,
             test_set_output,
             test_with_labels,
             only_test_single,
             test_image_name,
             test_root,
             validate_root,
             num_test=50):
    '''
    Model test, which includes three different tests:
        1. If test_set_output = 1, output the prediction masks of all test images to the directory ./output.
            A submission file is also produced, as required in the competition.
        2. If test_with_labels = 1, test all the images in the validation dataset and print the F1 score and average loss.
        3. If only_test_single = 1, test only a single image, i.e. pass it through the network.
            This also outputs the original image covered by the prediction mask, saved as test.png.

    @model_choice: 1 for LinkNet, 2 for D-LinkNet, 3 for D-LinkNet+.
    @resize: boolean flag for image resizing.
    @image_size: the image size at which images are tested.
    @TTA: boolean flag for test-time augmentation.
    @ensemble: boolean flag to enable ensembling when testing.
    @test_set_output: boolean flag for testing all the images in the test dataset.
    @test_with_labels: boolean flag for testing on a validation dataset, with labels provided.
    @only_test_single: boolean flag for testing a single image.
    @test_image_name: the name of the image to be tested.
    @test_root: root directory of the test dataset.
    @validate_root: root directory of the validation dataset.
    @num_test: number of test images in the test dataset.
    '''

    net = utils.create_models(model_choice)
    linkNet = None
    DlinkNet = None

    weights_name = './parameters/weights' + str(model_choice)
    #    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    if RUN_ON_GPU:
        net.load_state_dict(torch.load(weights_name))
    else:
        net.load_state_dict(
            torch.load(weights_name,
                       map_location=lambda storage, loc: storage))
    net.eval()

    if ensemble:
        linkNet = utils.create_models(0)
        DlinkNet = utils.create_models(1)
        if RUN_ON_GPU:
            linkNet.load_state_dict(torch.load('./parameters/weights0'))
            DlinkNet.load_state_dict(torch.load('./parameters/weights1'))
        else:
            linkNet.load_state_dict(
                torch.load('./parameters/weights0',
                           map_location=lambda storage, loc: storage))
            DlinkNet.load_state_dict(
                torch.load('./parameters/weights1',
                           map_location=lambda storage, loc: storage))
        linkNet.eval()
        DlinkNet.eval()

    if test_with_labels:
        loss, f1 = test.test_batch_with_labels(net,
                                               validate_root,
                                               resize=resize,
                                               batch_size=1,
                                               image_size=image_size,
                                               smooth=1.0,
                                               lam=1.0)
        print('F1 is evaluated as ', f1)
        print('Average batch loss is ', loss)

    if only_test_single:
        if ensemble:
            mask, image = test.test_single_with_ensemble(linkNet,
                                                         DlinkNet,
                                                         net,
                                                         test_image_name,
                                                         size=image_size,
                                                         resize=resize)
        elif TTA:
            mask, image = test.test_single_with_TTA(net,
                                                    test_image_name,
                                                    size=image_size,
                                                    resize=resize)
        else:
            mask, image = test.test_single_image(net,
                                                 test_image_name,
                                                 size=image_size,
                                                 resize=resize)
        io.imshow(image)
        io.imsave('test.png', image)

    if test_set_output:
        if not os.path.exists('./output'):
            os.makedirs('./output')

        for i in range(1, num_test + 1):
            t = 'test_' + str(i)
            name = test_root + t + '/' + t + '.png'
            if ensemble:
                mask, image = test.test_single_with_ensemble(linkNet,
                                                             DlinkNet,
                                                             net,
                                                             name,
                                                             size=image_size,
                                                             resize=resize)
            elif TTA:
                mask, image = test.test_single_with_TTA(net,
                                                        name,
                                                        size=image_size,
                                                        resize=resize)
            else:
                mask, image = test.test_single_image(net,
                                                     name,
                                                     size=image_size,
                                                     resize=resize)
            io.imsave('./output/' + 'test' + str(i) + '.png', mask)

        submission_filename = 'submission.csv'

        image_filenames = []
        for i in range(1, num_test + 1):
            image_filename = 'output/test' + str(i) + '.png'
            print(image_filename)
            image_filenames.append(image_filename)
        mask_to_submission.masks_to_submission(submission_filename,
                                               *image_filenames)
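
A hedged call sketch for the function above; the flag values and paths are illustrative, not taken from the original project:

# Sketch: write prediction masks plus a submission file for the test set.
test_net(model_choice=2, resize=False, image_size=384,
         TTA=True, ensemble=False,
         test_set_output=1, test_with_labels=0, only_test_single=0,
         test_image_name='', test_root='./data/test/',
         validate_root='./data/validate/', num_test=50)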
示例#59
0
# assumed context from earlier notebook cells: `data` is the loaded COCO
# panoptic annotation JSON and `id_remap` maps category ids to output ids.
anns = {}
for ann_data in tqdm(data['annotations']):
    anns[ann_data['image_id']] = ann_data['segments_info']

# %%
from os import listdir
filelist = listdir("F:/segmentation_train")
for img_data in tqdm(data['images']):
    if img_data['file_name'].replace('jpg', 'png') in filelist: continue
    # pan_img = io.imread("D:/panoptic_val2017/{:012d}.png".format(img_data['id']))
    pan_img = io.imread(
        "F:/panoptic_train2017/panoptic_train2017/{:012d}.png".format(
            img_data['id']))
    red, green, blue = pan_img[:, :, 0], pan_img[:, :, 1], pan_img[:, :, 2]
    for ann in anns[img_data['id']]:
        ann_id = ann['id']
        rgb = [
            ann_id % 256,
            (ann_id // 256) % 256,
            (ann_id // 256 ** 2) % 256
        ]
        mask = (red == rgb[0]) & (green == rgb[1]) & (blue == rgb[2])
        pix = id_remap[ann['category_id']]
        pan_img[:, :, :3][mask] = [pix, pix, pix]
    io.imsave('F:/segmentation_train/{:012d}.png'.format(img_data['id']),
              pan_img[:, :, 0])

winsound.Beep(frequency, duration)

# %%
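
As a check on the decoding above: the COCO panoptic format encodes each segment id as id = R + 256·G + 256²·B, so a hedged round-trip helper looks like this:

# Sketch: convert a COCO panoptic segment id to and from its RGB triple.
def id_to_rgb(ann_id):
    return [ann_id % 256, (ann_id // 256) % 256, (ann_id // 256 ** 2) % 256]

def rgb_to_id(r, g, b):
    return r + 256 * g + 256 ** 2 * b

assert rgb_to_id(*id_to_rgb(123456)) == 123456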
示例#60
0
    def __init__(self, filename_obj, filename_ref, useSilhouette=False, useDeltaAndScale=True):
        super(Model, self).__init__()
        vertices, faces = nr.load_obj(filename_obj)
        ##--------------------------
        # print(vertices)

        #############
        # delta v
        # For now, try removing delta v, because the vertex order Blender produces gets scrambled.

        if useDeltaAndScale:
            delta_v_read = open("bird2_delta_v.txt", "r")
            # print(len(vertices))
            # print(vertices.shape()[0])
            for i in range(len(vertices)):
                # continue
                # print(vertices[i])
                vector=delta_v_read.readline().split()
                vertices[i][0]=vertices[i][0]+float(vector[0])
                vertices[i][1]=vertices[i][1]+float(vector[1])
                vertices[i][2]=vertices[i][2]+float(vector[2])
            ############
            ###########

            # scale: the original value is 0.4
            # the scale was removed at one point, because the Blender-generated vertices differ from the original mesh .npy file
            vertices = vertices * 0.4
        # write out the new vertices
        f = open("deformBird.obj", "w")
        f.write("mtllib spot_triangulated.mtl\n")
        f.write("o BirdReOrined_UnwrapBlender\n")
        for i in range(len(vertices)):
            f.write("v "+str(vertices[i][0].detach().cpu().numpy())+" "+str(vertices[i][1].detach().cpu().numpy())+" "+str(vertices[i][2].detach().cpu().numpy())+"\n")
        f.close()
        ##########
        ##########
        # rotation
        cos = math.cos(4 * math.pi / 3)
        sin = math.sin(4 * math.pi / 3)
        cos_2 = math.cos(3 * math.pi / 2)
        sin_2 = math.sin(3 * math.pi / 2)
        for i in range(len(vertices)):
            # --------------------- rotation about the Y axis
            vertices_0=cos_2*vertices[i][0]+sin_2*vertices[i][2]
            vertices_1=vertices[i][1]
            vertices_2=vertices[i][0]*sin_2*(-1)+cos_2*vertices[i][2]
            vertices[i][0]=vertices_0
            vertices[i][1]=vertices_1
            vertices[i][2]=vertices_2

            # --------------------- rotation about the Z axis
            vertices_0=cos*vertices[i][0]-sin*vertices[i][1]
            vertices_1=sin*vertices[i][0]+cos*vertices[i][1]
            vertices_2=vertices[i][2]
            vertices[i][0]=vertices_0
            vertices[i][1]=vertices_1
            vertices[i][2]=vertices_2
            #-----------------------
            #------------------
            # --------------- translation
            vertices[i][1]-=0.015
            vertices[i][0]-=0.18

            #------------------
        #################################
        # for i in range(len(vertices)):
        #     vertices[i][1]+=0.
        # exit()
        self.register_buffer('vertices', vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create textures
        # with texture_size = 1, each triangle gets its own single color
        # with texture_size = 2 something curious happens: the triangles look as if their colors were interpolated, which I don't quite understand
        texture_size = 6
        textures = torch.zeros(1, self.faces.shape[1], texture_size, texture_size, texture_size, 3, dtype=torch.float32)
        self.textures = nn.Parameter(textures)
        #silhouette-----------------------
        mask = open("bird2_mask.txt", "r")
        filename_ref_data = imread(filename_ref)
        # --------------------- limitation: only 256x256 images can be read here to build the foreground matte
        # print(mask.shape())
        if useSilhouette:
            # filename_ref_data_silhouette=filename_ref_data
            for i in range(256):
                mask_element = mask.readline().split()
                for j in range(256):
                    # break
                    filename_ref_data[i][j]=filename_ref_data[i][j]*float(mask_element[j])
            imshow(filename_ref_data)
            imsave('data/birdie2_silhouette.png', img_as_ubyte(filename_ref_data))
        image_ref = torch.from_numpy(filename_ref_data.astype('float32') / 255.).permute(2,0,1)[None, ::]
        image_ref_flip=np.fliplr(filename_ref_data)
        # image_ref_flip=imread("birdie2_silhouette_switch.png")

        image_ref_flip=torch.from_numpy(image_ref_flip.astype('float32')/255.).permute(2,0,1)[None, ::]
        self.register_buffer('image_ref', image_ref)
        self.register_buffer("image_ref_2",image_ref_flip)

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.perspective = False
        renderer.light_intensity_directional = 0.0
        renderer.light_intensity_ambient = 1.0
        self.renderer = renderer
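
A hedged construction sketch for this Model, reusing the file names that example #57 above passes in:

# Sketch: instantiate the model as example #57 does and render one frame.
model = Model('BirdReOrined_UnwrapBlender.obj', 'birdie2.png',
              useSilhouette=True, useDeltaAndScale=True)
model.cuda()
images, _, _ = model.renderer(model.vertices, model.faces, torch.tanh(model.textures))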