Example #1
import os
import tempfile

import numpy as np
from scipy.misc import imsave  # deprecated in SciPy >= 1.2; imageio.imwrite is the modern replacement

# `exec_folder` must be set elsewhere to the directory containing the compiled tvl1flow binary.

def tvl1flow(image1, image2, NPROCS=0, TAU=0.25, LAMBDA=0.15, THETA=0.3, NSCALES=5, ZOOM=0.5, NWARPS=5, EPSILON=0.01, VERBOSE=1):
   """
   NPROCS is the number of processors to use (NPROCS=0, all processors available)
   TAU is the time step (e.g., 0.25)
   LAMBDA is the data attachment weight (e.g., 0.15)
   THETA is the tightness of the relaxed functional (e.g., 0.3)
   NSCALES is the requested number of scales (e.g., 5)
   ZOOM is the zoom factor between each scale (e.g., 0.5)
   NWARPS is the number of warps per iteration (e.g., 5)
   EPSILON is the stopping criterion threshold (e.g., 0.01)
   VERBOSE is for verbose mode (e.g., 1 for verbose)
   """

   # save the input images to temporary files and reserve a temporary output path
   temp_image1_file = tempfile.mkstemp('.PNG')[1]
   temp_image2_file = tempfile.mkstemp('.PNG')[1]
   output_file = tempfile.mkstemp('.flo')[1]
   imsave(temp_image1_file, image1)
   imsave(temp_image2_file, image2)

   command = exec_folder + '/tvl1flow %s %s %s %d %f %f %f %d %f %d %f %d' % (
       temp_image1_file, temp_image2_file, output_file,
       NPROCS, TAU, LAMBDA, THETA, NSCALES, ZOOM, NWARPS, EPSILON, VERBOSE)
      
   # call the executable
   os.system(command)

   # read the output from the temporary file; the first three float32 values
   # are the .flo header (magic tag, width, height), so skip them
   flow = np.fromfile(output_file, dtype=np.float32)[3:].reshape(image1.shape[0], image1.shape[1], 2)

   # clean up the temporary files
   os.remove(output_file)
   os.remove(temp_image1_file)
   os.remove(temp_image2_file)
   return flow
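
# A minimal usage sketch (hypothetical file names), assuming the imports above
# and that `exec_folder` points at the compiled tvl1flow binary:
#
#   from scipy.misc import imread
#   flow = tvl1flow(imread('frame0.png'), imread('frame1.png'), NSCALES=4)
#   u, v = flow[:, :, 0], flow[:, :, 1]  # per-pixel horizontal / vertical flow
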
def display_layer(X, filename="../images/layer.png"):
    """
    Produces an image, composed of the given N images, patches or neural network weights,
    stored in the array X. Saves it with the given filename.
    :param X: numpy array of size (NxD) — N images, patches or neural network weights
    :param filename: a string, the name of the produced file
    :return: None
    """
    if not isinstance(X, np.ndarray):
        raise TypeError("'X' must be a numpy array")
    N, D = X.shape
    d = get_reshaped_image_size(D)

    if N == 1:
        return X.reshape(d, d, 3)
    divisors = [n for n in range(1, N) if N % n == 0]
    mid = divisors[len(divisors) // 2]
    im_sizes = mid, N // mid  # (rows, columns) of the montage grid
    for i in range(im_sizes[0]):
        # first image of row i, framed by one-pixel black separators
        img_row = np.hstack((np.zeros((d, 1, 3)), X[i * im_sizes[1], :].reshape(d, d, 3)))
        img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        for j in range(1, im_sizes[1]):
            img_row = np.hstack((img_row, X[i * im_sizes[1] + j, :].reshape(d, d, 3)))
            img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        if i == 0:
            img = img_row
        else:
            img = np.vstack((img, img_row))
        img = np.vstack((img, np.zeros((1, img.shape[1], 3))))
    img = np.vstack((np.zeros((1, img.shape[1], 3)), img))
    imsave(filename, img)
    return img
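
# A minimal usage sketch (hypothetical data), assuming the helper
# `get_reshaped_image_size` maps D = d*d*3 flattened values to the side
# length d (e.g. int(np.sqrt(D / 3))):
#
#   X_demo = np.random.rand(16, 8 * 8 * 3)  # 16 flattened 8x8 RGB patches
#   display_layer(X_demo, filename="../images/layer.png")
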
def execute_solver(IMAGE_FILE):
    sample4x4_crop = import_image(IMAGE_FILE)
    cluster_image = get_clustering_image(sample4x4_crop)
    cluster_groupings_dict = cluster_grouper(cluster_image).execute()
    final = pre_process_image(IMAGE_FILE)
    prediction_dict = clean_prediction_dict(get_predictions(final))
    write_puzzle_file(cluster_groupings_dict,prediction_dict)
    try:
        solution = solve_puzzle('cv_puzzle.txt', False)
    except Exception:
        return 'error'

    #get image of result
    fig = plt.figure(figsize=(2, 2), dpi=100,frameon=False)
    plt.axis('off')
    plt.imshow(sample4x4_crop, cmap=mpl.cm.Greys_r)
    for k, v in solution.items():
        if v is None:
            return 'error'
        plt.annotate('{}'.format(v), xy=(k[0]*50 + 12, k[1]*50 + 40), fontsize=14)
    plt.tight_layout()
    plt.savefig('static/images/solution.jpg', bbox_inches='tight', dpi=100)

    # there's an issue with the saved layout: tight_layout doesn't appear
    # to work, so I need to apply my own cropping again
    resize_final = import_image('static/images/solution.jpg',80)
    imsave('static/images/solution.jpg',resize_final)
    return 'good'
 def _generate_images_for_AMT(self, pred_ann_ids, 
                              coco_image_dir=None, local_image_dir=None):
   """Private function to generated images to upload to AMT."""
   assert coco_image_dir and local_image_dir
   assert os.path.isdir(coco_image_dir)
   if not os.path.isdir(local_image_dir):
     print 'Input local image directory does not exist, create it'
     os.makedirs(local_image_dir)
   
   print 'Start to generate images for AMT in local hard disk'
   image_ids_saved = set()
   for (ind, pred_ann_id) in enumerate(pred_ann_ids):
     gt_data = self.refexp_dataset.loadAnns(ids = [pred_ann_id])[0]  # Need to check - change
     img = self._read_image(coco_image_dir, gt_data)
     mask = self._load_mask(gt_data)
     masked_img = cu.apply_mask_to_image(img, mask)
     masked_img_path = os.path.join(local_image_dir, ('coco_%d_ann_%d'
         '_masked.jpg' % (gt_data['image_id'], pred_ann_id)))
     misc.imsave(masked_img_path, masked_img)
     if not gt_data['image_id'] in image_ids_saved:
       image_ids_saved.add(gt_data['image_id'])
       img_path = os.path.join(local_image_dir, 'coco_%d.jpg' % gt_data['image_id'])
       misc.imsave(img_path, img)
   print ('Images generated in local hard disk, please make sure to make them '
          'publicly available online.')
Example #5
 def run(self, img=misc.lena(), increase=True):
      # note: the hard-coded path below overrides the `img` argument
      img = misc.imread('/Users/Daniel/Desktop/p0.jpg')
      img_blurred = self.__blur(img)
      img = self.__divide(img, img_blurred)
      if False:  # disabled sigmoid contrast adjustment
          img = exposure.adjust_sigmoid(img)
      misc.imsave('/Users/Daniel/Desktop/p1.jpg', img)
Example #6
def create_cir(rgb, ir, saveto=None):
    """
    Create a color infrared image.

    Parameters:
        rgb - PhenoCam RGB image with same timestamp as ir
        ir - PhenoCam IR image with same timestamp as rgb
        saveto - Path to save the image to (optional)

    Returns:
        A color infrared image (a numpy array), optionally saved to file.
    """
    # Extract the necessary bands
    red = rgb[:,:,0]
    green = rgb[:,:,1]
    ir = ir[:,:,0]

    # Create a new numpy matrix to contain the cir image.
    cir = np.zeros(rgb.shape)  # Should be same shape as rgb image.

    # Compose the cir image
    cir[:,:,0] = ir
    cir[:,:,1] = red
    cir[:,:,2] = green

    # Optionally, save the result to file.
    if saveto:
        misc.imsave(saveto, cir)

    # Return the result
    return cir
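
# A quick self-check of the band shuffle above (synthetic data; assumes the
# same module-level numpy import the function itself relies on): the CIR
# composite puts IR in the red channel, red in green and green in blue.
demo_rgb = np.random.rand(4, 4, 3)
demo_ir = np.random.rand(4, 4, 3)
demo_cir = create_cir(demo_rgb, demo_ir)
assert (demo_cir[:, :, 0] == demo_ir[:, :, 0]).all()
assert (demo_cir[:, :, 1] == demo_rgb[:, :, 0]).all()
assert (demo_cir[:, :, 2] == demo_rgb[:, :, 1]).all()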
Example #7
    def do(self, which_callback, *args):
        from gatedpixelblocks import n_channel, batch_size, img_dim, MODE, path, dataset

        model = self.main_loop.model
        net_output = VariableFilter(roles=[OUTPUT])(model.variables)[-2]
        #print '{} output used'.format(net_output)
        Sampler = SamplerMultinomial if MODE == '256ary' else SamplerBinomial
        pred = Sampler(theano_seed=random.randint(0,1000)).apply(net_output)
        forward = ComputationGraph(pred).get_theano_function()

        # Need to replace by a scan??
        output = np.zeros((batch_size, n_channel, img_dim, img_dim), dtype=np.float32)
        x, y, c = (0,0,0)  # location
        # if input_ is not None:
        #     output[:,:c+1,:x,:y] = input_[:,:c+1,:x,:y]
        for row in range(x, img_dim):
            col_ind = y * (row == x)  # Start at column y for the first row to predict
            for col in range(col_ind, img_dim):
                for chan in range(n_channel):
                    prediction = forward(output)[0]
                    output[:,chan,row,col] = prediction[:,chan,row,col]

        output = output.reshape((4, 4, n_channel, img_dim, img_dim)).transpose((1,3,0,4,2))
        if n_channel == 1:
            output = output.reshape((4*img_dim,4*img_dim))
        else:
            output = output.reshape((4*img_dim,4*img_dim,n_channel))
        imsave(
            path+'/'+'{}_samples_epoch{}.jpg'.format(dataset, str(self.main_loop.log.status['epochs_done'])),
            output
        )
 def dump(mat, refs, pid, cam, im_dir):
   """Save the images of a person under one camera."""
   for i, ref in enumerate(refs):
     im = deref(mat, ref)
     if im.size == 0 or im.ndim < 2: break
     fname = new_im_name_tmpl.format(pid, cam, i)
     imsave(osp.join(im_dir, fname), im)
Example #9
def create_ndvi(rgb, ir, saveto=None):
    """
    Create an NDVI image

    Parameters:
        rgb - PhenoCam RGB image with same timestamp as ir
        ir - PhenoCam IR image with same timestamp as rgb
        saveto - Path to save NDVI image to (optional)

    Returns:
        ndvi - A numpy matrix representing an NDVI image.
    """
    # Extract the necessary bands
    red = rgb[:,:,0].astype(np.int16)
    ir = ir[:,:,0].astype(np.int16)

    # Create a new numpy matrix to contain the ndvi image.
    ndvi = np.zeros(red.shape)  # Should be same shape as red band

    ndvi = np.true_divide(np.subtract(ir, red), np.add(ir, red))

    if saveto:
        misc.imsave(saveto, ndvi)

    return ndvi
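
# A quick check of the arithmetic above (hypothetical pixel values): for a
# vegetated pixel with ir = 200 and red = 100,
# ndvi = (200 - 100) / (200 + 100) ~ 0.333.  Note that np.true_divide yields
# nan/inf (with a runtime warning) wherever ir + red == 0.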
Example #10
def main():
    args = parseArgs()
#     threshold the image to binary
    filtimg = np.zeros(args.inputimage.shape)
    filtimg[args.inputimage > 250] = 1
    boundary = follow_boundary(filtimg)
#     Print the boundary, setting all values to white where the boundary is found
    for i in boundary:
        x,y = i.b
        filtimg[x,y] = 255
    misc.imsave('boundary_followed.tif',filtimg)
    gridmeasures = grid(filtimg,boundary,20)
    gridimg = np.zeros_like(filtimg)
#     Print the grid
    for gridmeasure in gridmeasures:
        x,y = gridmeasure
        gridimg[x,y] = 255
    misc.imsave('grid.tif',gridimg)
#     Calculate the chaincode
    chain = encode_chain(boundary)
    levelencoded = levelencode(chain)
    print "Chaincode : "
    for i in range(len(chain)):
        print chain[i],
    print
    print "Firstlevel difference: "
    for i in range(len(levelencoded)):
        print levelencoded[i],
def save_images(X, save_path):
    # [0, 1] -> [0,255]
    if isinstance(X.flatten()[0], np.floating):
        X = (255.99 * X).astype('uint8')

    n_samples = X.shape[0]
    rows = int(np.sqrt(n_samples))
    while n_samples % rows != 0:
        rows -= 1

    nh, nw = rows, n_samples // rows

    if X.ndim == 2:
        X = np.reshape(X, (X.shape[0], int(np.sqrt(X.shape[1])), int(np.sqrt(X.shape[1]))))

    img = None
    if X.ndim == 4:
        # BCHW -> BHWC
        X = X.transpose(0, 2, 3, 1)
        h, w = X[0].shape[:2]
        img = np.zeros((h * nh, w * nw, 3))
    elif X.ndim == 3:
        h, w = X[0].shape[:2]
        img = np.zeros((h * nh, w * nw))

    for n, x in enumerate(X):
        j = n // nw
        i = n % nw
        img[j * h:j * h + h, i * w:i * w + w] = x

    imsave(save_path, img)
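
# A minimal usage sketch (hypothetical data), assuming numpy and imsave are
# imported at module level: 16 flattened grayscale samples in [0, 1] are
# tiled into a 4 x 4 montage.
#
#   X_demo = np.random.rand(16, 28 * 28)
#   save_images(X_demo, 'montage.png')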
Example #12
def process(image):
  # write a temporary PGM file for the SIFT binary
  imname = "image_tmp.pgm"
  imsave(imname, image)
  
  output = os.popen("./sift/sift "+imname#+" --edge-thresh 10 --peak-thresh 1.5"
    +" --levels 6 --octaves 10 --first-octave 1"
    +" -o /dev/stdout")
  keys = output.read()
  output.close()

  lines = keys.split("\n")
  num_feat = len(lines)-1

  features = np.zeros((num_feat, 128 + 4))

  i = 0
  for l in lines:
    vector = l.split(" ")
    vector.pop()
    if len(vector) == 0: break

    vec2 = np.array(vector, dtype='|S4').astype(float)
    features[i,:] = vec2

    i += 1

  return features
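
# Each row of `features` holds one keypoint as printed by the SIFT binary:
# features[:, :4] are the frame parameters (presumably x, y, scale and
# orientation, in VLFeat's output order) and features[:, 4:] the
# 128-dimensional descriptor -- hence the 128 + 4 columns.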
Example #13
def main():

    input = hl.ImageParam(float_t, 3, "input")
    levels = 10

    interpolate = get_interpolate(input, levels)

    # preparing input and output memory buffers (numpy ndarrays)
    input_data = get_input_data()
    assert input_data.shape[2] == 4
    input_image = hl.Buffer(input_data)
    input.set(input_image)

    input_width, input_height = input_data.shape[:2]

    t0 = datetime.now()
    output_image = interpolate.realize(input_width, input_height, 3)
    t1 = datetime.now()
    print('Interpolated in %.5f secs' % (t1-t0).total_seconds())

    output_data = hl.buffer_to_ndarray(output_image)

    # save results
    input_path = "interpolate_input.png"
    output_path = "interpolate_result.png"
    imsave(input_path, input_data)
    imsave(output_path, output_data)
    print("\nblur realized on output image.",
          "Result saved at", output_path,
          "( input data copy at", input_path, ")")

    print("\nEnd of game. Have a nice day!")
Example #14
    def __init__(self, num_proj, folder, camera_port=0,
                 wait=0, save=True, hsv='v'):
        """
        Acquires specified number of projections for analysis and
        reconstruction. The specified number must ensure that a rotational
        range > 360 degrees is captured.

        # num_proj:  Number of projections to acquire (must cover > 360deg)
        # folder:    Path to folder for data storage. Projections and
                     reconstructed slices will be saved here.
        # hsv:       Extract either the hue (h), saturation (s) or value (v)
                     channel from the image matrix.
        """
        self.folder = folder
        self.p0 = 0
        self.cor_offset = 0
        self.crop = None, None, None, None
        self.num_images = None
        self.angles = None
        self.recon_data = None
        
        self.im_stack = image_acquisition(num_proj, camera_port, wait, hsv)
        self.height, self.width = self.im_stack.shape[:2]

        if save:
            save_folder = os.path.join(self.folder, 'projections')
            if not os.path.exists(save_folder):
                os.makedirs(save_folder)
            for idx in range(self.im_stack.shape[-1]):
                f_path = os.path.join(save_folder, '%04d.tif' % idx)
                imsave(f_path, self.im_stack[:, :, idx])
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default=0, type=int,
                        help='if -1, use cpu only')
    parser.add_argument('-c', '--chainermodel')
    parser.add_argument('-i', '--img-files', nargs='+', required=True)
    args = parser.parse_args()

    img_files = args.img_files
    gpu = args.gpu
    chainermodel = args.chainermodel

    save_dir = 'forward_out'
    if not osp.exists(save_dir):
        os.makedirs(save_dir)

    target_names = apc2015.APC2015.target_names
    forwarding = Forwarding(gpu, target_names, chainermodel)
    for img_file in img_files:
        img, label, _ = forwarding.forward_img_file(img_file)
        out_img = forwarding.visualize_label(img, label)

        out_file = osp.join(save_dir, osp.basename(img_file))
        imsave(out_file, out_img)
        print('- out_file: {0}'.format(out_file))
Example #16
def main():

    # define and compile the function
    input = ImageParam(UInt(8), 3, "input")
    erode = get_erode(input)
    erode.compile_jit()

    # preparing input and output memory buffers (numpy ndarrays)
    input_data = get_input_data()
    input_image = ndarray_to_image(input_data, "input_image")
    input.set(input_image)

    output_data = np.empty(input_data.shape, dtype=input_data.dtype, order="F")
    output_image = ndarray_to_image(output_data, "output_image")

    print("input_image", input_image)
    print("output_image", output_image)

    # do the actual computation
    erode.realize(output_image)

    # save results
    input_path = "erode_input.png"
    output_path = "erode_result.png"
    imsave(input_path, input_data)
    imsave(output_path, output_data)
    print("\nerode realized on output image.",
          "Result saved at", output_path,
          "( input data copy at", input_path, ")")

    print("\nEnd of game. Have a nice day!")
    return
Example #17
 def dump_(refs, pid, cam, fnames):
     for ref in refs:
         img = deref(ref)
         if img.size == 0 or img.ndim < 2: break
         fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(fnames))
         imsave(osp.join(images_dir, fname), img)
         fnames.append(fname)
def run_variations(
    filename,
    N=50,
    N_scales=7,
    shape=(512, 512),
    steps=[(10, 4.0), (20, 1.0)],
    display_inline=False,
    min_radius=1.5,
    max_radius=90.0,
):
    """Generates random MSTP parameter configurations and renders them.

    filename -- path and filename for pylab.imsave to write output to.
        must include '%s' or '%03d' or similar for numbering.
    N -- number of variations to generate.
    N_scales -- number of scales, length of ra, ri, dt and pal MSTP
        parameters.
    shape -- dimensions of MSTP and output images
    steps -- list of (N_steps, relative_speed) tuples, defaults to 10
        steps at 4x speed followed by 20 steps at 1x speed.
    min_radius, max_radius -- lower and upper limits of activator radius
    display_inline -- call IPython QTConsole function display to
        display intermediate results inline.

    TODO: more parameters to adjust other particulars of random
        configuration generation.

    """

    colors = [[1, 0, 0], [0, 1, 0], [0, 0, 0.9], [1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0.6, 0]]
    perm = np.random.permutation
    for n in range(N):
        # generate palette
        pal = perm(colors)[:4]
        pal = perm(r_[pal, pal])
        pal = (pal * 0.6 + roll(pal, 4, axis=0) * 0.4)[:4]
        pal = shade(N_scales, *pal)
        # generate parameters and MSTP object
        ra = exp(rseq(log(min_radius), log(max_radius), N=N_scales, randomness=0.9)).round(2)
        ri = ((1.333 + rand(N_scales)) * ra).round(2)
        dt = (0.01 * frange(1, N_scales) ** 0.8).round(3)
        wt = (1.33 + arctan(5 * (rand(N_scales) - 0.5))).round(2)
        m = MSTP(shape, ra=ra, ri=ri, dt=dt, wt=wt, pal=pal)

        print "\n-------- rendering image", filename % n
        print m
        # display(HTML(' '.join('<span style="background:rgb(%d,%d,%d)">(o_O)</span> ' % tuple(255*k) for k in array(pal))))
        for i, (N_steps, speed) in enumerate(steps):
            print "rendering %s steps at %1.1fx speed" % (N_steps, dt_multiplier)
            sys.stdout.flush()
            m.run(n=N_steps, speed=speed)
            if display_inline:
                display(IM(m.rgb_image()))
            if i < 0:
                first = False
                print "renoising after iter 1.",
                m.z += 0.25 * rand(*m.z.shape) * m.z.ptp()

        # display(IM(m.rgb_image()))
        imsave(filename % n, m.rgb_image())
Example #19
 def reconstructPhi(self, namephi = 'phi.tif'):
     '''
     reconstruct the final phase image using gx and gy
     '''
     w = 1 # Weighting parameter
     tx = np.fft.fftshift(np.fft.fft2(self.gx))
     ty = np.fft.fftshift(np.fft.fft2(self.gy))
     c = np.arange(self.totalnum, dtype=complex).reshape(self.row, self.column)
     for i in range(self.row):
         for j in range(self.column):
             kappax = 2 * np.pi * (j+1-(np.floor(self.column/2.0)+1)) / (self.column*self.dx)
             kappay = 2 * np.pi * (i+1-(np.floor(self.row/2.0)+1)) / (self.row*self.dy)
             if kappax == 0 and kappay == 0:
                 c[i, j] = 0
             else:
                 cTemp = -1j * (kappax*tx[i][j]+w*kappay*ty[i][j]) / (kappax**2 + w*kappay**2)
                 c[i, j] = cTemp
     c = np.fft.ifftshift(c)
     self.phi = np.fft.ifft2(c)
     self.phi = self.phi.real
     imsave(namephi, self.phi)
     
      if self.outfile is None:
          raise RuntimeError('No h5 file has been created yet! Run the merge function first!')
     
     self.outfile.create_dataset('/phi', self.phi.shape, self.phi.dtype, self.phi)
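
      # Note on the double loop above: it performs a weighted least-squares
      # integration of the gradient pair (gx, gy) in Fourier space.  With
      # Tx = F[gx], Ty = F[gy] and spatial frequencies (kappax, kappay), the
      # phase spectrum is c = -1j*(kappax*Tx + w*kappay*Ty) / (kappax**2 + w*kappay**2),
      # and phi is the real part of its inverse FFT (a Frankot-Chellappa-style
      # reconstruction).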
Example #20
def copy_incorrect(in_folder, out_folder, incorrect_files="snapshotVGG1-5-test.txt"):
    from scipy.misc import imread, imsave, imrotate
    print(incorrect_files)
    if os.path.exists(incorrect_files):
        f = open(incorrect_files, "r")
        print("File found")
    else:
        f = open(os.path.join(in_folder, "stats", incorrect_files), "r")
    page = f.read()

    sources = page.split('\n')
    print(sources)
    print(len(sources))
    count = 0
    for source in sources:
        if source.find("jpg") >= 0:
            fileinfo = source
            if source.find(",") >= 0:
                fileinfo = source.split(", ")[0]
                rotation = source.split(", ")[1]
                image = imread(fileinfo)
                image = imrotate(image, int(rotation))
            else:
                image = imread(fileinfo)
            if count == 0:
                print(fileinfo)
            count += 1
            destination = os.path.split(fileinfo.replace(in_folder, out_folder))[0]
            if not os.path.exists(destination):
                os.makedirs(destination)
            filename = os.path.split(fileinfo)[1]
            # print(os.path.join(destination, filename))
            imsave(os.path.join(destination, filename), image)
    print("Moved " + str(count) + " files")
    def run(self, n=1, speed=1.0, rnd=0, filename=None, start_frame=0, verbose=True, crop=None):
        """Advance the multiscale Turing pattern by n timesteps.

        Keyword arguments:
        n -- number of timesteps
        speed -- dt multiplier, default 1.0
        rnd -- noise to add at each step, range 0..1, default 0
        filename -- filename pattern to save intermediate frames
            e.g. 'MSTP%04d.png'
        start_frame -- number of the first frame with respect to the
            filename. Useful for continuing an interrupted run sequence.
        verbose -- if True (default), show countdown during rendering
        crop -- rect to crop the saved image, default None, meaning no cropping.

        """
        if verbose and filename:
            print "rendering %s frames as %s ... %s" % (n, (filename % start_frame), (filename % (start_frame + n - 1)))
        for k in xrange(n):
            self.z += rnd * rand(*self.z.shape)
            self.step(speed=speed)
            if filename:
                out = self.rgb_image()
                if crop:
                    out = out[crop[0] : crop[1], crop[2] : crop[3], ...]
                imsave(filename % (k + start_frame), out)
            if verbose:
                print n - k,
                sys.stdout.flush()
Example #22
    def edges(cls):
        from scipy import ndimage, misc
        import numpy as np
        from skimage import feature
        col = Image.open("f990.jpg")
        gray = col.convert('L')

        # Let numpy do the heavy lifting for converting pixels to pure black or white
        bw = np.asarray(gray).copy()

        # Pixel range is 0...255; treat near-white pixels (>= 245) as white
        bw[bw < 245]  = 0    # Black
        bw[bw >= 245] = 255  # White
        # invert: former dark pixels become foreground (254), white drops to 0
        bw[bw == 0] = 254
        bw[bw == 255] = 0
        im = bw
        im = ndimage.gaussian_filter(im, 1)
        edges2 = feature.canny(im, sigma=2)
        labels, numobjects = ndimage.label(im)
        slices = ndimage.find_objects(labels)
        print('\n'.join(map(str, slices)))
        misc.imsave('f990_sob.jpg', im)
        return

        # unreachable below: an earlier Sobel-based variant kept for reference
        #im = misc.imread('f990.jpg')
        #im = ndimage.gaussian_filter(im, 8)
        sx = ndimage.sobel(im, axis=0, mode='constant')
        sy = ndimage.sobel(im, axis=1, mode='constant')
        sob = np.hypot(sx, sy)
        misc.imsave('f990_sob.jpg', edges2)
Example #23
def filter_test_image(bilateral_grid, input):

    bilateral_grid.compile_jit()

    # preparing input and output memory buffers (numpy ndarrays)
    input_data = get_input_data()
    input_image = Buffer(input_data)
    input.set(input_image)

    output_data = np.empty(input_data.shape, dtype=input_data.dtype, order="F")
    output_image = Buffer(output_data)

    if False:  # debug output (disabled)
        print("input_image", input_image)
        print("output_image", output_image)

    # do the actual computation
    bilateral_grid.realize(output_image)

    # save results
    input_path = "bilateral_grid_input.png"
    output_path = "bilateral_grid.png"
    imsave(input_path, input_data)
    imsave(output_path, output_data)
    print("\nbilateral_grid realized on output_image.")
    print("Result saved at '", output_path,
          "' ( input data copy at '", input_path, "' ).", sep="")

    return
Example #24
def run_example( size = 64 ):
    """
    Run this file and result will be saved as 'rsult.jpg'
    Buttle neck: map 
    """
    modes = """
        normal
        add            substract      multiply       divide     
        dissolve       overlay        screen         pin_light
        linear_light   soft_light     vivid_light    hard_light    
        linear_dodge   color_dodge    linear_burn    color_burn
        light_only     dark_only      lighten        darken    
        lighter_color  darker_color   
        """
    top = misc.imresize( misc.imread('./imgs/top.png')[:,:,:-1], (size,size,3) )
    base = misc.imresize( misc.imread('./imgs/base.png')[:,:,:-1], (size,size,3) )
    modes = modes.split()
    num_of_mode = len( modes )
    result = np.zeros( [ size*2, size*(num_of_mode//2+2), 3 ])
    result[:size:,:size:,:] = top
    result[size:size*2:,:size:,:] = base
    for index in xrange( num_of_mode ):
        y = index // 2 + 1
        x = index % 2
        tmp= blends.blend( top, base, modes[index] )
        result[ x*size:(x+1)*size, y*size:(y+1)*size, : ] = tmp 
    # random blend
    result[-size::,-size::,:] = blends.random_blend( top, base )
    misc.imsave('result.jpg',result)
Example #25
def filter_test_image(local_laplacian, input):

    local_laplacian.compile_jit()

    # preparing input and output memory buffers (numpy ndarrays)
    input_data = get_input_data()
    input_image = Image(input_data, "input_image")
    input.set(input_image)

    output_data = np.empty(input_data.shape, dtype=input_data.dtype, order="F")
    output_image = Image(output_data, "output_image")

    if False:  # debug output (disabled)
        print("input_image", input_image)
        print("output_image", output_image)

    # do the actual computation
    local_laplacian.realize(output_image)

    # save results
    input_path = "local_laplacian_input.png"
    output_path = "local_laplacian.png"
    imsave(input_path, input_data)
    imsave(output_path, output_data)
    print("\nlocal_laplacian realized on output_image.")
    print("Result saved at '", output_path,
          "' ( input data copy at '", input_path, "' ).", sep="")
    return
def rigid_alignment(faces,path,plotflag=False):
  """ 画像を位置合わせし、新たな画像として保存する。
      pathは、位置合わせした画像の保存先
      plotflag=Trueなら、画像を表示する """

  # use the points in the first image as reference points
  refpoints = faces.values()[0]

  # warp each image with a similarity transformation
  for face in faces:
    points = faces[face]

    R,tx,ty = compute_rigid_transform(refpoints, points)
    T = array([[R[1][1], R[1][0]], [R[0][1], R[0][0]]])

    im = array(Image.open(os.path.join(path,face)))
    im2 = zeros(im.shape, 'uint8')

    # warp each color channel separately
    for i in range(len(im.shape)):
      im2[:,:,i] = ndimage.affine_transform(im[:,:,i],linalg.inv(T),
                                            offset=[-ty,-tx])
    if plotflag:
      imshow(im2)
      show()

    # crop at the borders and save the aligned image
    h,w = im2.shape[:2]
    border = (w+h)/20
    imsave(os.path.join(path, 'aligned/'+face),
          im2[border:h-border,border:w-border,:])
def resize_and_save(df, img_name, true_idx, size='80x50', fraction=0.125):
    '''
    INPUT:  (1) Pandas DF
            (2) string: image name
            (3) integer: the true index in the df of the image
            (4) string: to append to filename
            (5) float: fraction to scale images by
    OUTPUT: None

    Resize and save the images in a new directory.
    Try to read the image. If it fails, download it to the raw data directory.
    Finally, read in the full size image and resize it.
    '''
    try:
        img = imread(img_name)
    except Exception:
        cardinal_dir = img_name[-5:-4]
        cardinal_translation = {'N': 0, 'E': 90, 'S': 180, 'W': 270}
        coord = (df.ix[true_idx]['lat'], df.ix[true_idx]['lng'])
        print 'Saving new image...'
        print coord, cardinal_dir, cardinal_translation[cardinal_dir]
        save_image(coord, cardinal_translation[cardinal_dir], loc='newdata')
    finally:
        img_name_to_write = ('newdata_' + size + '/' +
                             img_name[8:-4] + size + '.png')
        if not os.path.isfile(img_name_to_write):
            img = imread(img_name)
            resized = imresize(img, fraction)
            print 'Writing file...'
            imsave(img_name_to_write, resized)
Example #28
def markImage(filename, minX, minY, maxX, maxY):
    img = misc.imread(PATH + filename, False)
    #print filename
    #print img.shape
    if (len(img.shape) < 3):
        img = img2rgb(img)
        
    for row in range(minY, maxY):
        img[row][minX][0] = 0
        img[row][minX][1] = 0
        img[row][minX][2] = 255
        
        img[row][maxX][0] = 0
        img[row][maxX][1] = 0
        img[row][maxX][2] = 255
        
    for col in range(minX, maxX):
        img[minY][col][0] = 0
        img[minY][col][1] = 0
        img[minY][col][2] = 255

        img[maxY][col][0] = 0
        img[maxY][col][1] = 0
        img[maxY][col][2] = 255
        
    misc.imsave(PATH+filename,img)
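
# A minimal usage sketch (hypothetical values), assuming PATH points at the
# image directory: draws a blue one-pixel rectangle from (minX, minY) to
# (maxX, maxY) and overwrites the file in place.
#
#   markImage('frame_000.png', 10, 20, 60, 80)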
Example #29
def save_imgs(indices, fmt, filename):
    feats = []
    true = []
    for i, idx in enumerate(indices):
        imsave(fmt.format(i), imagenes[idx])
        # labels[idx] - .5 + np.random.standard_normal()*.1,
        feats.append([1, cat_probs[idx], brightness[idx]])
        true.append(labels[idx])
    true = np.array(true)
    feats = np.array(feats)

    with open(filename,'w') as f:
        print('''<style>
            div { page-break-after: always; }
            table { border-collapse: collapse; margin: 5px; }
            td { border: 1px solid black; padding: 5px; }
        </style>''', file=f)
        for i, feat in enumerate(feats):
            print('<div>', file=f)
            print('<h1>{}</h1>'.format(i+1), file=f)
            print('<img src="{}">'.format(fmt.format(i)), file=f)
            print(
                '<table><tr>',
                ''.join('<td>{:.02f}</td>'.format(f) for f in feat),
                '</tr><tr>',
                ''.join(['<td>&nbsp;</td>'] * len(feat)),
                '</tr><tr>',
                ''.join(['<td>&nbsp;</td>'] * len(feat)),
                '</tr></table>', file=f)
            print('</div>', file=f)
Example #30
def blend_images(data_folder1, data_folder2, out_folder, alpha=.5):
    filename_queue = tf.placeholder(dtype=tf.string)
    label = tf.placeholder(dtype=tf.int32)
    tensor_image = tf.read_file(filename_queue)

    image = tf.image.decode_jpeg(tensor_image, channels=3)

    multiplier = tf.div(tf.constant(224, tf.float32),
                        tf.cast(tf.maximum(tf.shape(image)[0], tf.shape(image)[1]), tf.float32))
    x = tf.cast(tf.round(tf.mul(tf.cast(tf.shape(image)[0], tf.float32), multiplier)), tf.int32)
    y = tf.cast(tf.round(tf.mul(tf.cast(tf.shape(image)[1], tf.float32), multiplier)), tf.int32)
    image = tf.image.resize_images(image, [x, y])

    image = tf.image.rot90(image, k=label)

    image = tf.image.resize_image_with_crop_or_pad(image, 224, 224)
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    for root, folders, files in os.walk(data_folder1):
        for each in files:
            if each.find('.jpg') >= 0:
                img1 = Image.open(os.path.join(root, each))
                img2_path = os.path.join(root.replace(data_folder1, data_folder2), each.split("-")[-1])
                rotation = int(each.split("-")[1])
                img2 = sess.run(image, feed_dict={filename_queue: img2_path, label: rotation})
                imsave(os.path.join(os.getcwd(), "temp", "temp.jpg"), img2)
                img2 = Image.open(os.path.join(os.getcwd(), "temp", "temp.jpg"))
                out_image = Image.blend(img1, img2, alpha)
                outfile = os.path.join(root.replace(data_folder1, out_folder), each)
                if not os.path.exists(os.path.split(outfile)[0]):
                    os.makedirs(os.path.split(outfile)[0])
                out_image.save(outfile)
            else:
                print(each)
    sess.close()
            im_resized = im.resize(size, Image.ANTIALIAS)
            im_resized.save(filename, "JPEG")
            print("[LOG]: process completed")

        else:
            thecontent = configBaiduAip(path_dictionary['thumb'], thetoken)
            filename = ''
            if thecontent:
                # print thecontent
                filename = getAlpha(thecontent)  #get and save

        alphaImage = misc.imread(filename)
        alphaImage = alpha_conflate(alphaImage, 7)
        # alphaImage = 255-alphaImage
        alphaImage = (1 - alphaImage) * 255
        misc.imsave(path_dictionary['dealpha'], alphaImage)

        if transform:
            transformImg(path_dictionary['temp'])
            transformImg(path_dictionary['new'])
            transformImg(path_dictionary['dealpha'])

        # merge the new image (without the human) with the old one
        img1 = Image.open(path_dictionary['new'])
        img2 = Image.open(path_dictionary['dealpha'])  #filename for alpha
        img3 = Image.open(path_dictionary['old'])
        img4 = Image.open(path_dictionary['base'])

        mergepic = Image.composite(img1, img3, img2)
        misc.imsave(path_dictionary['temp'], img1)
        misc.imsave(path_dictionary['new'], mergepic)
Example #32
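# The snippet below begins mid-script: `img_1`, `saida` and the horizontal
# operator `sob_h` are defined earlier in the original file.  A plausible
# definition for `sob_h` (assumed here, the standard horizontal Sobel kernel):
#
#   sob_h = np.array([[-1., -2., -1.], [0., 0., 0.], [1., 2., 1.]], dtype=float)
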
# Vertical Sobel operator
sob_v = np.array([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]], dtype=float)

# Apply the Sobel gradient
img_saida_h = filters.correlate(img_1, sob_h)
img_saida_v = filters.correlate(img_1, sob_v)

# Gradient magnitude
img_saida = np.sqrt(img_saida_h**2 + img_saida_v**2)

# Thresholding
percent = 0.2  # fraction of the maximum intensity
img_saida = img_saida <= img_saida.max() * percent

#img_saida = img_m0_l8_t

# Save the output images after processing
misc.imsave(saida, img_saida.astype(np.uint8))
'''
# Arrange the image plots
plt.figure()
plt.subplot(221)
plt.imshow(img_1, cmap='gray', interpolation='nearest')
plt.title('img_1')
plt.subplot(222)
plt.imshow(img_saida, cmap='gray', interpolation='nearest')
plt.title('img_saida')

# Show the input and output images on screen
plt.show()
'''
Example #33
    def predict(self):
        """The testing process.
        """
        self.net.eval()
        trange = tqdm(self.test_dataloader,
                      total=len(self.test_dataloader),
                      desc='testing')

        if self.exported:
            videos_dir = self.saved_dir / 'videos'
            imgs_dir = self.saved_dir / 'imgs'
            csv_path = self.saved_dir / 'results.csv'

            sr_imgs = []
            tmp_sid = None
            header = ['name'] + \
                     [metric_fn.__class__.__name__ for metric_fn in self.metric_fns] + \
                     [loss_fns.__class__.__name__ for loss_fns in self.loss_fns]
            results = [header]

        log = self._init_log()
        count = 0
        for batch in trange:
            batch = self._allocate_data(batch)
            input, target, index = self._get_inputs_targets(batch)
            with torch.no_grad():
                lr_path, hr_path = self.test_dataloader.dataset.data[index]
                filename = lr_path.parts[-1].split('.')[0]
                patient, _, sid, fid = filename.split('_')

                outputs = self.net(input)
                losses = self._compute_losses(outputs, target)
                loss = (torch.stack(losses) * self.loss_weights).sum()
                metrics = self._compute_metrics(outputs, target, patient)

                if self.exported:
                    _losses = [loss.item() for loss in losses]
                    _metrics = [metric.item() for metric in metrics]
                    results.append([filename, *_metrics, *_losses])

                    # Save the video.
                    if sid != tmp_sid and index != 0:
                        output_dir = videos_dir / patient
                        if not output_dir.is_dir():
                            output_dir.mkdir(parents=True)
                        video_name = tmp_sid.replace('slice',
                                                     'sequence') + '.gif'
                        self._dump_video(output_dir / video_name, sr_imgs)
                        sr_imgs = []

                    output = self._denormalize(outputs[-1])
                    sr_img = output.squeeze().detach().cpu().numpy().astype(
                        np.uint8)
                    sr_imgs.append(sr_img)
                    tmp_sid = sid

                    # Save the image.
                    output_dir = imgs_dir / patient
                    if not output_dir.is_dir():
                        output_dir.mkdir(parents=True)
                    imsave(output_dir / f'{sid}_{fid}.png', sr_img)

            batch_size = self.test_dataloader.batch_size
            self._update_log(log, batch_size, loss, losses, metrics)
            count += batch_size
            trange.set_postfix(**dict(
                (key, f'{value / count: .3f}') for key, value in log.items()))

        # Save the results.
        if self.exported:
            with open(csv_path, 'w', newline='') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerows(results)

        for key in log:
            log[key] /= count
        logging.info(f'Test log: {log}.')
Example #34
    2 * np.pi * jj / width)
x = S / 2. + S / 2. / height * (height - 1 - ii) * np.cos(
    2 * np.pi * jj / width)

input_dir = '../Data/CVUSA/bingmap/19/'
output_dir = '../Data/CVUSA/polarmap/19/'

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

images = os.listdir(input_dir)

for img in images:
    signal = imread(input_dir + img)
    image = sample_bilinear(signal, x, y)
    imsave(output_dir + img.replace('jpg', 'png'), image)

############################ Apply Polar Transform to Aerial Images in CVACT Dataset #############################
S = 1200
height = 112
width = 616

i = np.arange(0, height)
j = np.arange(0, width)
jj, ii = np.meshgrid(j, i)

y = S / 2. - S / 2. / height * (height - 1 - ii) * np.sin(
    2 * np.pi * jj / width)
x = S / 2. + S / 2. / height * (height - 1 - ii) * np.cos(
    2 * np.pi * jj / width)
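
# Note on the fragment above (it begins mid-script, so S, height, width,
# imread/imsave and sample_bilinear come from earlier in the original file):
# each output pixel (i, j) samples the S x S aerial image along the ray at
# angle 2*pi*j/width from the image center, at a radius proportional to
# (height - 1 - i) -- a standard polar transform.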
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 11 02:22:43 2019

@author: tony
"""

from tensorflow.keras.datasets import fashion_mnist
from scipy.misc import imsave

(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

for i in range(5):
    imsave(name='uploads/{}.png'.format(i), arr=X_test[i])
Example #36
def predict():
    ''' Called when user presses the predict button.
        Processes the canvas and handles the image.
        Passes the loaded image into the neural network and it makes
        class prediction.
    '''

    # Local functions
    def crop(x):
        # Experimental
        _len = len(x) - 1
        for index, row in enumerate(x[::-1]):
            z_flag = False
            for item in row:
                if item != 0:
                    z_flag = True
                    break
            if not z_flag:
                x = np.delete(x, _len - index, 0)
        return x
    def parseImage(imgData):
        # parse canvas bytes and save as output.png
        imgstr = re.search(b'base64,(.*)', imgData).group(1)
        with open('output.png','wb') as output:
            output.write(base64.decodebytes(imgstr))

    # get data from drawing canvas and save as image
    parseImage(request.get_data())

    # read parsed image back in 8-bit, black and white mode (L)
    x = imread('output.png', mode='L')
    x = np.invert(x)

    ### Experimental
    # Crop on rows
    # x = crop(x)
    # x = x.T
    # Crop on columns
    # x = crop(x)
    # x = x.T

    # save the intermediate array for inspection, then resize for the network
    imsave('resized.png', x)
    x = imresize(x, (28, 28))

    # reshape image data for use in neural network
    x = x.reshape(1,28,28,1)

    # Convert type to float32
    x = x.astype('float32')

    # Normalize to prevent issues with model
    x /= 255

    # Predict from model
    model = load_model(args.bin)

    out = model.predict(x)

    # Generate response
    response = {'prediction': chr(mapping[(int(np.argmax(out, axis=1)[0]))]),
                'confidence': str(max(out[0]) * 100)[:6]}

    return jsonify(response)
    def collect_data(self):
        output_dir = os.path.expanduser(self.output_datadir)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        dataset = facenet.get_dataset(self.input_datadir)
        with tf.Graph().as_default():
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
            sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
            with sess.as_default():
                pnet, rnet, onet = detect_face.create_mtcnn(sess, '')

        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
        factor = 0.709  # scale factor
        margin = 44
        image_size = 182

        # Add a random key to the filename to allow alignment using multiple processes
        random_key = np.random.randint(0, high=99999)
        bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)

        with open(bounding_boxes_filename, "w") as text_file:
            nrof_images_total = 0
            nrof_successfully_aligned = 0
            for cls in dataset:
                output_class_dir = os.path.join(output_dir, cls.name)
                if not os.path.exists(output_class_dir):
                    os.makedirs(output_class_dir)
                for image_path in cls.image_paths:
                    nrof_images_total += 1
                    filename = os.path.splitext(os.path.split(image_path)[1])[0]
                    output_filename = os.path.join(output_class_dir, filename + '.png')
                    print("Image: %s" % image_path)
                    if not os.path.exists(output_filename):
                        try:
                            img = misc.imread(image_path)
                        except (IOError, ValueError, IndexError) as e:
                            errorMessage = '{}: {}'.format(image_path, e)
                            print(errorMessage)
                        else:
                            if img.ndim < 2:
                                print('Unable to align "%s"' % image_path)
                                text_file.write('%s\n' % (output_filename))
                                continue
                            if img.ndim == 2:
                                img = facenet.to_rgb(img)
                                print('to_rgb data dimension: ', img.ndim)
                            img = img[:, :, 0:3]

                            bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold,
                                                                        factor)
                            nrof_faces = bounding_boxes.shape[0]
                            print('No of Detected Face: %d' % nrof_faces)
                            if nrof_faces > 0:
                                det = bounding_boxes[:, 0:4]
                                img_size = np.asarray(img.shape)[0:2]
                                if nrof_faces > 1:
                                    bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
                                    img_center = img_size / 2
                                    offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                                                         (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
                                    offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                                    index = np.argmax(
                                        bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
                                    det = det[index, :]
                                det = np.squeeze(det)
                                bb_temp = np.zeros(4, dtype=np.int32)

                                bb_temp[0] = det[0]
                                bb_temp[1] = det[1]
                                bb_temp[2] = det[2]
                                bb_temp[3] = det[3]

                                cropped_temp = img[bb_temp[1]:bb_temp[3], bb_temp[0]:bb_temp[2], :]
                                scaled_temp = misc.imresize(cropped_temp, (image_size, image_size), interp='bilinear')

                                nrof_successfully_aligned += 1
                                misc.imsave(output_filename, scaled_temp)
                                text_file.write('%s %d %d %d %d\n' % (
                                output_filename, bb_temp[0], bb_temp[1], bb_temp[2], bb_temp[3]))
                            else:
                                print('Unable to align "%s"' % image_path)
                                text_file.write('%s\n' % (output_filename))

        return (nrof_images_total,nrof_successfully_aligned)
Example #38
    out = start_w * start + end_w * end
    return out


if __name__ == '__main__':
    import numpy as np
    from scipy.misc import imread, imsave, imresize

    cat = imresize(imread('cat.jpg'), (256, 256))
    dog = imresize(imread('dog.jpg'), (256, 256))
    feats = torch.stack([
        torch.from_numpy(cat.transpose(2, 0, 1).astype(np.float32)),
        torch.from_numpy(dog.transpose(2, 0, 1).astype(np.float32))
    ],
                        dim=0)

    boxes = torch.FloatTensor([
        [0, 0, 1, 1],
        [0.25, 0.25, 0.75, 0.75],
        [0, 0, 0.5, 0.5],
    ])

    box_to_feats = torch.LongTensor([1, 0, 1]).cuda()

    feats, boxes = feats.cuda(), boxes.cuda()
    crops = crop_bbox_batch_cudnn(feats, boxes, box_to_feats, 128)
    for i in range(crops.size(0)):
        crop_np = crops.data[i].cpu().numpy().transpose(1, 2,
                                                        0).astype(np.uint8)
        imsave('out%d.png' % i, crop_np)
Example #39
def deep_dream(input_img,
               downsize=False,
               model='inception',
               layer_i=-1,
               neuron_i=-1,
               n_iterations=100,
               save_gif=None,
               save_images='imgs',
               device='/cpu:0',
               **kwargs):
    """Deep Dream with the given parameters.

    Parameters
    ----------
    input_img : np.ndarray
        Image to apply deep dream to.  Should be 3-dimensional H x W x C
        RGB uint8 or float32.
    downsize : bool, optional
        Whether or not to downsize the image.  Only applies to
        model=='inception'.
    model : str, optional
        Which model to load.  Must be one of: ['inception'], 'i2v_tag', 'i2v',
        'vgg16', or 'vgg_face'.
    layer_i : int, optional
        Which layer to use for finding the gradient.  E.g. the softmax layer
        for inception is -1, for vgg networks it is -2.  Use the function
        "get_layer_names" to find the layer number that you need.
    neuron_i : int, optional
        Which neuron to use.  -1 for the entire layer.
    n_iterations : int, optional
        Number of iterations to dream.
    save_gif : bool, optional
        Save a GIF.
    save_images : str, optional
        Folder to save images to.
    device : str, optional
        Which device to use, e.g. ['/cpu:0'] or '/gpu:0'.
    **kwargs : dict
        See "_apply" for additional parameters.

    Returns
    -------
    imgs : list of np.array
        Images of every iteration
    """
    net, img, preprocess, deprocess = _setup(input_img, model, downsize)
    batch, height, width, *ch = img.shape

    g = tf.Graph()
    with tf.Session(graph=g) as sess, g.device(device):

        tf.import_graph_def(net['graph_def'], name='net')
        names = [op.name for op in g.get_operations()]
        input_name = names[0] + ':0'
        x = g.get_tensor_by_name(input_name)

        layer = g.get_tensor_by_name(names[layer_i] + ':0')
        layer_shape = sess.run(tf.shape(layer), feed_dict={x: img})
        layer_vec = np.ones(layer_shape) / layer_shape[-1]
        layer_vec[..., neuron_i] = 1.0 - (1.0 / layer_shape[-1])

        ascent = tf.gradients(layer, x)

        imgs = []
        for it_i in range(n_iterations):
            print(it_i, np.min(img), np.max(img))
            if neuron_i == -1:
                this_res = sess.run(
                    ascent, feed_dict={x: img})[0]
            else:
                this_res = sess.run(
                    ascent, feed_dict={x: img, layer: layer_vec})[0]

            _apply(img, this_res, it_i, **kwargs)
            imgs.append(deprocess(img[0]))

            if save_images is not None:
                imsave(os.path.join(save_images,
                                    'frame{}.png'.format(it_i)), imgs[-1])

        if save_gif is not None:
            gif.build_gif(imgs, saveto=save_gif)

    return imgs
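
# A minimal usage sketch (hypothetical file name), assuming the module-level
# imports the function relies on (os, tensorflow, numpy, imsave) and an
# existing 'imgs' output folder:
#
#   from scipy.misc import imread
#   frames = deep_dream(imread('input.png'), model='inception',
#                       n_iterations=50, save_gif='dream.gif')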
Example #40
            results = sess.run(sampled_tensors + write_tensors +
                               glimpse_tensors + params_tensors)

            imgs = []
            write_imgs = []
            glimpse_imgs = []
            img_params = []

            for i in range(len(results) // 4):
                imgs.append(results[i])
                write_imgs.append(results[i + len(results) // 4])
                glimpse_imgs.append(results[i + len(results) // 4 * 2])
                img_params.append(results[i + len(results) // 4 * 3])

            for k in range(FLAGS.batch_size):
                imgs_folder = os.path.join(FLAGS.working_directory, 'imgs')
                if not os.path.exists(imgs_folder):
                    os.makedirs(imgs_folder)
                for i in range(len(imgs)):
                    imsave(
                        os.path.join(imgs_folder, '%d_%d.png') % (k, i),
                        imgs[i][k].reshape(28, 28))

                    imsave(
                        os.path.join(imgs_folder, '%d_%d_w.png') % (k, i),
                        write_imgs[i][k].reshape(28, 28))

                    imsave(
                        os.path.join(imgs_folder, '%d_%d_g.png') % (k, i),
                        glimpse_imgs[i][k].reshape(FLAGS.N, FLAGS.N))
# We have to reshape and copy the view of the array for nonzero() to work like in MATLAB
color_copy_for_nonzero = isColored.reshape(image_size, order='F').copy()
colored_inds = np.nonzero(color_copy_for_nonzero)  # colored_inds as lblInds

for t in [1, 2]:
    curIm = YUV[:, :, t].reshape(image_size, order='F').copy()
    b[colored_inds] = curIm[colored_inds]
    new_vals = linalg.spsolve(A, b)
    # alternative: new_vals = linalg.lsqr(A, b)[0] -- a least-squares solution
    # (much slower, slightly different results); lsqr returns an
    # (ndarray, ...) tuple, so take its first element before the reshape below
    colorized[:, :, t] = new_vals.reshape(n, m, order='F')

# ---------------------------------------------------------------------------- #
# ------------------------------ Back to RGB --------------------------------- #
# ---------------------------------------------------------------------------- #

(R, G, B) = yiq_to_rgb(colorized[:, :, 0], colorized[:, :, 1], colorized[:, :, 2])
colorizedRGB = np.zeros(colorized.shape)
colorizedRGB[:, :, 0] = R  # colorizedRGB as colorizedIm
colorizedRGB[:, :, 1] = G
colorizedRGB[:, :, 2] = B

plt.imshow(colorizedRGB)
plt.show()

misc.imsave(os.path.join(dir_path, 'example3_colorized.bmp'),
            colorizedRGB,
            format='bmp')
Example #42
def guided_dream(input_img,
                 guide_img=None,
                 downsize=False,
                 layers=[162, 183, 184, 247],
                 label_i=962,
                 layer_i=-1,
                 feature_loss_weight=1.0,
                 tv_loss_weight=1.0,
                 l2_loss_weight=1.0,
                 softmax_loss_weight=1.0,
                 model='inception',
                 neuron_i=920,
                 n_iterations=100,
                 save_gif=None,
                 save_images='imgs',
                 device='/cpu:0',
                 **kwargs):
    """Deep Dream v2.  Use an optional guide image and other techniques.

    Parameters
    ----------
    input_img : np.ndarray
        Image to apply deep dream to.  Should be 3-dimensional H x W x C
        RGB uint8 or float32.
    guide_img : np.ndarray, optional
        Optional image to find features at different layers for.  Must pass in
        a list of layers that you want to find features for.  Then the guided
        dream will try to match this images features at those layers.
    downsize : bool, optional
        Whether or not to downsize the image.  Only applies to
        model=='inception'.
    layers : list, optional
        A list of layers to find features for in the "guide_img".
    label_i : int, optional
        Which label to use for the softmax layer.  Use the "get_labels" function
        to find the index corresponding the object of interest.  If None, not
        used.
    layer_i : int, optional
        Which layer to use for finding the gradient.  E.g. the softmax layer
        for inception is -1, for vgg networks it is -2.  Use the function
        "get_layer_names" to find the layer number that you need.
    feature_loss_weight : float, optional
        Weighting for the feature loss from the guide_img.
    tv_loss_weight : float, optional
        Total variational loss weighting.  Enforces smoothness.
    l2_loss_weight : float, optional
        L2 loss weighting.  Enforces smaller values and reduces saturation.
    softmax_loss_weight : float, optional
        Softmax loss weighting.  Must set label_i.
    model : str, optional
        Which model to load.  Must be one of: ['inception'], 'i2v_tag', 'i2v',
        'vgg16', or 'vgg_face'.
    neuron_i : int, optional
        Which neuron to use.  -1 for the entire layer.
    n_iterations : int, optional
        Number of iterations to dream.
    save_gif : bool, optional
        Save a GIF.
    save_images : str, optional
        Folder to save images to.
    device : str, optional
        Which device to use, e.g. ['/cpu:0'] or '/gpu:0'.
    **kwargs : dict
        See "_apply" for additional parameters.

    Returns
    -------
    imgs : list of np.ndarray
        Images of the dream.
    """
    net, img, preprocess, deprocess = _setup(input_img, model, downsize)
    print(img.shape, input_img.shape)
    print(img.min(), img.max())

    if guide_img is not None:
        guide_img = preprocess(guide_img.copy(), model)[np.newaxis]
        assert (guide_img.shape == img.shape)
    batch, height, width, *ch = img.shape

    g = tf.Graph()
    with tf.Session(graph=g) as sess, g.device(device):
        tf.import_graph_def(net['graph_def'], name='net')
        names = [op.name for op in g.get_operations()]
        input_name = names[0] + ':0'
        x = g.get_tensor_by_name(input_name)

        features = [names[layer_i] + ':0' for layer_i in layers]
        feature_loss = tf.Variable(0.0)
        for feature_i in features:
            layer = g.get_tensor_by_name(feature_i)
            if guide_img is None:
                feature_loss += tf.reduce_mean(layer)
            else:
                # Reshape it to 2D vector
                layer = tf.reshape(layer, [-1, 1])
                # Do the same for our guide image
                guide_layer = sess.run(layer, feed_dict={x: guide_img})
                guide_layer = guide_layer.reshape(-1, 1)
                # Now calculate their dot product
                correlation = tf.matmul(guide_layer.T, layer)
                feature_loss += feature_loss_weight * tf.reduce_mean(correlation)
        softmax_loss = tf.Variable(0.0)
        if label_i is not None:
            layer = g.get_tensor_by_name(names[layer_i] + ':0')
            layer_shape = sess.run(tf.shape(layer), feed_dict={x: img})
            layer_vec = np.ones(layer_shape) / layer_shape[-1]
            layer_vec[..., neuron_i] = 1.0 - 1.0 / layer_shape[-1]
            softmax_loss += softmax_loss_weight * tf.reduce_mean(tf.nn.l2_loss(layer - layer_vec))

        dx = tf.square(x[:, :height - 1, :width - 1, :] - x[:, :height - 1, 1:, :])
        dy = tf.square(x[:, :height - 1, :width - 1, :] - x[:, 1:, :width - 1, :])
        tv_loss = tv_loss_weight * tf.reduce_mean(tf.pow(dx + dy, 1.2))
        l2_loss = l2_loss_weight * tf.reduce_mean(tf.nn.l2_loss(x))

        ascent = tf.gradients(feature_loss + softmax_loss + tv_loss + l2_loss, x)[0]
        sess.run(tf.global_variables_initializer())
        imgs = []
        for it_i in range(n_iterations):
            this_res, this_feature_loss, this_softmax_loss, this_tv_loss, this_l2_loss = sess.run(
                [ascent, feature_loss, softmax_loss, tv_loss, l2_loss], feed_dict={x: img})
            print('feature:', this_feature_loss,
                  'softmax:', this_softmax_loss,
                  'tv:', this_tv_loss,
                  'l2:', this_l2_loss)

            _apply(img, -this_res, it_i, **kwargs)
            imgs.append(deprocess(img[0]))

            if save_images is not None:
                imsave(os.path.join(save_images,
                                    'frame{}.png'.format(it_i)), imgs[-1])

        if save_gif is not None:
            gif.build_gif(imgs, saveto=save_gif)

    return imgs
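
# A minimal usage sketch (hypothetical names: assumes this function is exposed
# as `dream` and that 'input.png' is an RGB image on disk):
#
#   input_img = plt.imread('input.png')
#   frames = dream(input_img, model='inception', n_iterations=20,
#                  save_images='frames', save_gif='dream.gif')
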
def all_data_augmentation(image, f):
    """Apply the augmentation pipeline to one saved image."""
    augment_images(image, f)


remove_noise()

print('\nProcessing train samples...')
time_start_train = time.time()
a = []
# Map each breed index to its output directory so the save-and-augment
# block is written once instead of repeated per breed.
breed_train_dirs = {
    1: SAVE_CAT_ABYSSIANIAN_TRAIN,
    2: SAVE_CAT_BENGAL_TRAIN,
    3: SAVE_CAT_BIRMAN_TRAIN,
    4: SAVE_CAT_BOMBAY_TRAIN,
}
for (class_id, file_path) in train_samples:
    for item in dirs:
        if item.split('.')[0] != file_path:
            continue
        for breed_idx, save_dir in breed_train_dirs.items():
            if class_id in cat_breeds[breed_idx]:
                f, e = os.path.splitext(save_dir + item)
                img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
                image = np.array(img)
                imsave(f + '.jpg', image)
                all_data_augmentation(image, f)
                break
        E_out = E(I)
        O = G.input
        G_out = G(O)
        print("Sampling...")
        for i in tqdm(range(128)):
            x = x.reshape((-1, 80, 160, 3))
            # code = E.predict(x, batch_size=args.batch*args.time)[0]
            code = sess.run([E_out[0]],
                            feed_dict={
                                I: x,
                                K.learning_phase(): 1
                            })[0]
            code = code.reshape((args.batch, args.time, z_dim))
            inp = code[:, :5]  # context is based on the first 5 frames only
            outs = T.predict(inp, batch_size=args.batch)
            imgs = sess.run([G_out],
                            feed_dict={
                                O: outs.reshape((-1, z_dim)),
                                K.learning_phase(): 1
                            })[0]
            # imgs = G.predict(outs[:, 0], batch_size=args.batch)
            x = x.reshape((args.batch, args.time, 80, 160, 3))
            x[0, :-1] = x[0, 1:]
            x[0, -1] = imgs[0]
            imsave("video_" + args.name + "/%03d.png" % i,
                   imresize(imgs[0], (160, 320)))

        cmd = "ffmpeg -y -i ./video_" + args.name + "/%03d.png ./video_" + args.name + "/output.gif -vf fps=1"
        print(cmd)
        os.system(cmd)
Beispiel #45
0
    s1 = im1.shape
    s2 = im2.shape

    im = np.zeros(shape=(s1[0] + s2[0] * 2, s1[1] + s2[1] * 2, 3),
                  dtype=np.uint8) + 255

    im[s2[0]:s2[0] + s1[0], s2[1]:s2[1] + s1[1], 0] = im1
    im[s2[0] + y:s2[0] * 2 + y, s2[1] + x:s2[1] * 2 + x, 1] = im2

    yxtr = min(s2[0], s2[0] + y), max(s2[0] + s1[0], s2[0] * 2 + y)
    xxtr = min(s2[1], s2[1] + x), max(s2[1] + s1[1], s2[1] * 2 + x)
    cropped = im[yxtr[0]:yxtr[1], xxtr[0]:xxtr[1]]

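    # The two placements live in separate colour channels on a white (255)
    # background; the channel-wise minimum keeps the darker pixel at each
    # location, merging both placements into one greyscale image.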
    greyscale = np.minimum(cropped[..., 0], cropped[..., 1])
    imsave('result_color_%s.png' % theta, cropped)
    imsave('result_bw_%s.png' % theta, greyscale)
    return greyscale
    #call(["open", "result%s.png"%theta])
    #plt.imshow(-im)
    #plt.imshow(-im[s2[0]:s1[0]+s2[0], s2[1]:s1[1]+s2[1]])


def plot_x_y_correlation(im, im2, phi, theta):
    im, im2 = rotate_images(im, im2, phi, theta)

    x, y = x_y_spectrum(im)
    x2, y2 = x_y_spectrum(im2)

    corr_x_domain, corr_x = x_y_correlation(x, x2)
    corr_y_domain, corr_y = x_y_correlation(y, y2)
Beispiel #46
0
    def procimage(self, picture, threadid):
            filelist={}
            max_tries = 1000  # retry budget when an image cannot be opened yet
            data=[]
            integparams={}
            
            '''Setting output directory paths'''
            if self.options.outdir!="":
                basename=self.options.outdir+os.sep+('_'.join(picture.replace('./', '').split(os.sep))[:-3]).replace('/', "_")
                basename=basename.replace(':', '').replace('.', '')
            else:
                reldir=os.path.join(os.path.dirname(picture),
                                      self.options.relpath)
                if not os.path.isdir(reldir):
                    try:
                        os.mkdir(reldir)
                    except OSError:
                        print("Problem creating WORK directory!!!")
                        return
                basename=os.path.join(reldir,
                                      os.path.basename(picture)[:-4])
             
            '''Check if image exists or we are in Gisaxs mode''' 
            skipfile=False    
            for calnum, cal in enumerate(self.cals):
                if self.options["OverwriteFiles"]==False:
                    if len(self.cals) == 1 or calnum == 0:
                        filename=basename
                    else:
                        filename=basename+"_c"+cal.kind[0]+str(calnum)
                    chifilename=filename+".chi"
                    if os.path.isfile(chifilename):
                        filelist[cal.kind+str(calnum)]=chifilename
                        skipfile=True
                        if self.options["livefilelist"] is not "xxx":
                            lock.acquire()
                            with open(self.options["livefilelist"], 'a') as f_handle:
                                file_path = os.path.normpath(chifilename)
                                file_path=str.split(str(file_path), str(os.path.split(self.options["watchdir"])[0]))[1]
                                output = file_path +", "+str(0)+ ", "+str(0)+", "+str(0)+"\n"
                                f_handle.write(output)
                            lock.release()
                
            '''Check if image can be opened'''
            imgChecker = False
            i = 0
            if not skipfile:
                # print("[", threadid, "] open: ", picture) 
                while not imgChecker:
                    try:
                        # print("[", threadid, "]try opening picture: ", picture)
                        # image=imageio.imread(picture)
                        image=misc.imread(picture)
                        # if image can be opened, set boolean to True
                        if image.shape == tuple(self.cals[0].config["Geometry"]["Imagesize"]):
                            #print("[", threadid,i, "]: ","Image Format is Good")  
                            imgChecker = True
                        else:
                            #print("[", threadid,i, "]: ","Image Shape: ", image.shape)
                            #print("[", threadid,i, "]: ","Required Shape: ", tuple(self..cals[0].config["Geometry"]["Imagesize"]))
                            #print("[", threadid,i, "]: ","image ", picture, " has wrong format.")  
                            imgChecker = False
                    except KeyboardInterrupt:
                        return
                    except Exception as e:
                        pass
                    #   print("[", threadid,i, "]: ","e: ", e)

                    # If both tests are passed, we can break the loop
                    if imgChecker:
                        break
                    else:
                        if i<max:
                            #print("[", threadid,i, "]: ", "Issues with ", picture, ", lets wait.", max-i, " s")
                            time.sleep(0.001)
                            i=i+1
                            continue
                        else:
                            print("[", threadid, "]: ", "Gave it ", max, " tries - skipping picture: ", picture)
                            print("[", threadid, "]: ", "Adding it back into the picture queue.")
                            try:
                                self.picturequeue.put(picture)
                            except Exception as e:
                                print("[", threadid, "]: ","Error was: ", e)
                            try:
                                image=misc.imread(picture)
                                print("[", threadid, "]: ","Image Shape: ", image.shape)
                                print("[", threadid, "]: ","Required Shape: ", tuple(self.cals[0].config["Geometry"]["Imagesize"]))
                            except Exception as e:
                                print("[", threadid, "]: ","Error was: ", e)
                            return
                            
            print("[", threadid, "]: ", picture, "took ", (i), "ms." ) 
                
                
            if not skipfile:
                imgMetaData=datamerge.readtiff(picture)
                if "date" in imgMetaData:
                    imgTime=imgMetaData["date"]
                else:
                    imgTime=""
            else:
                imgTime = ""
            
            if not skipfile:
                for calnum,cal in enumerate(self.cals):
                    if self.options.GISAXSmode and calnum == 0:  # pass on GISAXSmode information to calibration.integratechi
                        continue
                    if len(self.cals) == 1 or calnum == 0:
                        filename=basename
                    else:
                        filename=basename+"_c"+cal.kind[0]+str(calnum)                
                    chifilename=filename+".chi"
                    filelist[cal.kind+str(calnum)]=chifilename
                    if not self.options.resume or not os.path.isfile(chifilename):
                        result=cal.integratechi(image, chifilename, picture)
                        # print("[", threadid, "]: ",chifilename, " has been integrated!")
                        result["Image"]=picture
                        if "Integparam" in result:
                            integparams[cal.kind[0]+str(calnum)]=result["Integparam"]                  
                        data.append(result)
                        if self.options["livefilelist"] is not "xxx":
                            lock.acquire()
                            with open(self.options["livefilelist"], 'a') as f_handle:
                                file_path = os.path.normpath(chifilename)
                                file_path=str.split(str(file_path), str(os.path.split(self.options["watchdir"])[0]))[1]
                                if "Integparam" in result:
                                    output = file_path +", "+str(result["Integparam"]["I0"])+ \
                                        ", "+str(result["Integparam"]["I1"])+", "+str(result["Integparam"]["I2"])+"\n"
                                else:
                                    output = file_path +", "+str(0)+ \
                                        ", "+str(0)+", "+str(0)+"\n"
                                f_handle.write(output)
                            lock.release()
                        if threadid==0 and self.options.plotwindow:
                            # this is a hack; it really should be a proper GUI
                           
                            cal.plot(image, fig=self.fig)
                            plt.draw()
                           
                                 
                    if self.options.writesvg:
                        if not self.options.resume or not os.path.isfile(filename+'.svg'):
                            cal.plot(image, filename+".svg", fig=self.fig)
                    if self.options.writepng:
                        if not self.options.resume or not os.path.isfile(filename+'.png'):
                            misc.imsave(filename+".png", image)
                    #if self.options.silent:
                    #    if np.mod(self.allp.value, 100)==0:
                    #        print("[", threadid, "] ", self.allp.value)
                    #else:
                    #    print("[", threadid, "] write: ", filename+".chi") 
            
            with self.allp.get_lock():
                self.allp.value+=1
                
            filelist["JSON"]=basename+".json"
            
            try:
                self.histqueue.put({"Time":float(time.time()),
                                "ImgTime":imgTime, 
                                "FileList":filelist,
                                "BaseName":basename,
                                "IntegralParameters":integparams}, block=False)
            except Full:
                print("History queue is full, skipping entry")
            return basename, data
Beispiel #47
0
        generated = decoder.predict(dtmp)
        file_name = './MeEnhanced/img' + str(i) + '_' +str(np.reshape(np.transpose(y_test[i,:]),[1,y_test.shape[1]]).argmax())+ '.jpg'
        file_name_input = './MeEnhanced/img' + str(i) + '_' +str(np.reshape(np.transpose(y_test[i,:]),[1,y_test.shape[1]]).argmax())+ '_input.jpg'
        imsave(file_name, generated.reshape((28, 28)))
        imsave(file_name_input, np.reshape(np.transpose(X_test[i,:]),[1,X_test.shape[1]]).reshape((28, 28)))
        time.sleep(0.1)
# this loop prints the one-hot decodings
'''
with open("variational.txt", "a") as myfile:
    for i in range(999):  #X_test.shape[0]):
        ztmp = encoder.predict(
            np.reshape(np.transpose(X_test[i, :]), [1, X_test.shape[1]]))
        print(ztmp)
        myfile.write(
            str(ztmp[0][:]) + str(
                np.reshape(np.transpose(y_test[i, :]),
                           [1, y_test.shape[1]]).argmax()) + '\n')
        dtmp = ztmp
        generated = decoder.predict(dtmp)
        file_name = './MeEnhanced/img' + str(i) + '_' + str(
            np.reshape(np.transpose(y_test[i, :]),
                       [1, y_test.shape[1]]).argmax()) + '.jpg'
        file_name_input = './MeEnhanced/img' + str(i) + '_' + str(
            np.reshape(np.transpose(y_test[i, :]),
                       [1, y_test.shape[1]]).argmax()) + '_input.jpg'
        imsave(file_name, generated.reshape((28, 28)))
        imsave(
            file_name_input,
            np.reshape(np.transpose(X_test[i, :]),
                       [1, X_test.shape[1]]).reshape((28, 28)))
        time.sleep(0.1)
Beispiel #48
0

def lowFilter(im,r):
    highim = im.copy()
    highim = highFilter(highim,r)
    return im-highim



def highFrecImage(im,r):
    im = fourier(im)
    im = highFilter(im,r)
    im  = ifourier(im)
    return im
def lowFrecImage(im,r):
    imf = fourier(im)
    lowim = lowFilter(imf,r)
    ifim  = ifourier(lowim)
    return ifim



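# A hybrid image combines the low spatial frequencies of im1 with the high
# frequencies of im2: viewed up close the high-frequency content dominates,
# while from a distance the low-frequency image takes over.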
def makeHybrid(im1,im2,r):
    low = lowFrecImage(im1,r)
    high = highFrecImage(im2,r)
    return low+high

finalHybrid = makeHybrid(duque, uribe, 8)
imsave('./imgs/hybrid.png',finalHybrid)
os.system('display ./imgs/hybrid.png')
                pspnet = PSPNet101(nb_classes=19,
                                   input_shape=(713, 713),
                                   weights=args.model)
            if "voc2012" in args.model:
                pspnet = PSPNet101(nb_classes=21,
                                   input_shape=(473, 473),
                                   weights=args.model)

        else:
            print("Network architecture not implemented.")

        if args.multi_scale:
            EVALUATION_SCALES = [0.5, 0.75, 1.0, 1.25, 1.5,
                                 1.75]  # must be all floats!
            #EVALUATION_SCALES = [0.15, 0.25, 0.5]  # must be all floats!

        class_scores = predict_multi_scale(img, pspnet, EVALUATION_SCALES,
                                           args.sliding, args.flip)

        print("Writing results...")

        class_image = np.argmax(class_scores, axis=2)
        pm = np.max(class_scores, axis=2)
        colored_class_image = utils.color_class_image(class_image, args.model)
        # colored_class_image is [0.0-1.0] img is [0-255]
        alpha_blended = 0.5 * colored_class_image + 0.5 * img
        filename, ext = splitext(args.output_path)
        misc.imsave(filename + "_seg" + ext, colored_class_image)
        misc.imsave(filename + "_probs" + ext, pm)
        misc.imsave(filename + "_seg_blended" + ext, alpha_blended)
	def load_image(self, filename):
		letter, name = filename.split('/')[1:3]
		image = misc.imread(filename)
		image = resize(image, (self.height, self.width))
		misc.imsave(os.path.join('standard_data', letter, name), image)
Beispiel #51
0
inference.initialize(optimizer=optimizer)

n_epoch = 100
n_iter_per_epoch = 1000
for epoch in range(n_epoch):
    avg_loss = 0.0

    widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
    pbar = ProgressBar(n_iter_per_epoch, widgets=widgets)
    pbar.start()
    for t in range(n_iter_per_epoch):
        pbar.update(t)
        x_train, _ = mnist.train.next_batch(M)
        _, loss = sess.run([inference.train, inference.loss],
                           feed_dict={x_ph: x_train})
        avg_loss += loss

    # Take average over all ELBOs during the epoch, and over minibatch
    # of data points (images).
    avg_loss = avg_loss / n_iter_per_epoch
    avg_loss = avg_loss / M

    # Print a lower bound to the average marginal likelihood for an
    # image.
    print("log p(x) >= {:0.3f}".format(avg_loss))

    # Prior predictive check.
    imgs = sess.run(x.value())
    for m in range(M):
        imsave("img/%d.png" % m, imgs[m].reshape(28, 28))
Beispiel #52
0
def _get_room_connections_corner_graph(room_info, density_img, room_idx,
                                       global_idx):
    corners_info = room_info['corners_info']
    mask = room_info['mask']
    contour = room_info['contour']
    source_idx = room_info['max_corner_idx']
    end_idx = room_info['adj_corner_idx']
    graph_weights = room_info['graph_weights']
    # build the graph, define the distance between different corners

    # for debugging use
    import cv2
    from scipy.misc import imsave
    debug_img = np.zeros([256, 256, 3])
    debug_img += np.stack([mask] * 3, axis=-1).astype(np.float32) * 255
    result_img = np.copy(debug_img)
    for corner_idx, corner_info in enumerate(corners_info):
        cv2.circle(debug_img, corner_info['corner'], 2, (255, 0, 0), 2)
        cv2.circle(result_img, corner_info['corner'], 2, (255, 0, 0), 2)
        cv2.putText(debug_img, '{}'.format(corner_idx), corner_info['corner'],
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)

        for bin_idx, edge_conf in enumerate(corner_info['binning'].tolist()):
            if edge_conf > 0.1:
                unit_vec = (np.cos(bin_idx * 10 / 180 * np.pi),
                            np.sin(bin_idx * 10 / 180 * np.pi))
                end_point = (int(corner_info['corner'][0] + unit_vec[0] * 10),
                             int(corner_info['corner'][1] - unit_vec[1] * 10))
                cv2.line(debug_img, corner_info['corner'], end_point,
                         (255, 255, 255), 1)

    imsave('./debug/{}_{}_corners.png'.format(global_idx, room_idx), debug_img)
    # imsave('./debug/{}_{}_density.png'.format(global_idx, room_idx), density_img)

    if end_idx is None:
        room_connections = defaultdict(list)
        path = []  # no corners to connect, so the drawn path is empty
    else:
        # #build corner detection based graphs
        heuristic_room_graph = _build_room_graph(corners_info, mask, contour,
                                                 source_idx, end_idx)
        final_graph = _refine_predicted_graph_weights(graph_weights,
                                                      heuristic_room_graph,
                                                      corners_info, source_idx,
                                                      end_idx)
        room_corners = [info['corner'] for info in corners_info]

        # solve the shortest path given source and end using Dijkstra's algorithm
        # shortest_path, dists = _dijkstra(final_graph, source_idx, end_idx)
        # path = list(reversed(shortest_path))

        # if global_idx == 10 and room_idx == 2:

        # if len(shortest_path) < 0.8 * len(final_graph):  # the path is too short
        trial_num = 0
        reselected_path = None
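        # Retry the exhaustive path search, passing an increasing trial number
        # (presumably relaxing the selection criteria each time); drop into
        # the debugger after three failed trials.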
        while reselected_path is None:
            all_paths, all_lens = dfs_all_paths(final_graph, room_corners,
                                                source_idx, end_idx, trial_num)
            reselected_path = reselect_path(all_paths, all_lens,
                                            len(final_graph), trial_num)
            print('search trial No.{}'.format(trial_num))
            trial_num += 1
            if trial_num >= 3:
                pdb.set_trace()
        path = reselected_path
        room_connections = defaultdict(list)
        # construct room connections according to shortest path
        for idx, corner_idx in enumerate(path):
            next_idx = idx + 1 if idx < len(path) - 1 else 0
            corner = corners_info[corner_idx]['corner']
            to_corner = corners_info[path[next_idx]]['corner']
            room_connections[corner].append(to_corner)
            room_connections[to_corner].append(corner)

    for corner, to_corners in room_connections.items():
        for to_corner in to_corners:
            cv2.line(result_img, corner, to_corner, (0, 255, 255), 2)

    path_str = '-'.join([str(node) for node in path])
    cv2.putText(result_img, path_str, (20, 20), 1, 1, (255, 255, 255))
    imsave('./debug/{}_{}_results.png'.format(global_idx, room_idx),
           result_img)

    return room_connections
Beispiel #53
0
        test_dict = {
            limage: linput,
            rimage: rinput,
            snet['is_training']: False
        }
        limage_map, rimage_map = session.run(
            [snet['lbranch'], snet['rbranch']], feed_dict=test_dict)

        map_width = limage_map.shape[2]
        unary_vol = np.zeros(
            (limage_map.shape[1], limage_map.shape[2], FLAGS.disp_range))
        total_time = 0

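        # Build the matching cost volume: for each candidate disparity `loc`,
        # run the (externally defined) map_prod op on the left feature map and
        # the right feature map shifted by that disparity.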
        for loc in range(FLAGS.disp_range):
            x_off = -loc
            l = limage_map[:, :, max(0, -x_off):map_width, :]
            r = rimage_map[:, :, 0:min(map_width, map_width + x_off), :]
            t1 = int(round(time.time() * 1000))
            res = session.run(map_prod, feed_dict={lmap: l, rmap: r})
            t2 = int(round(time.time() * 1000))
            total_time += t2 - t1

            unary_vol[:, max(0, -x_off):map_width, loc] = res[0, :, :]

        print('Image %s processed.' % (i + 1))
        print('Total_time=', total_time / (1000.0 * (FLAGS.disp_range)))
        pred = np.argmax(unary_vol, axis=2) * scale_factor

        misc.imsave('%s/disp_map_%06d_10.png' % (FLAGS.out_dir, file_id), pred)
Beispiel #54
0
inference.initialize(optimizer=optimizer)

hidden_rep = tf.sigmoid(logits)

init = tf.global_variables_initializer()
init.run()

n_epoch = 100
n_iter_per_epoch = 1000
for epoch in range(n_epoch):
    avg_loss = 0.0

    pbar = Progbar(n_iter_per_epoch)
    for t in range(1, n_iter_per_epoch + 1):
        pbar.update(t)
        x_train, _ = mnist.train.next_batch(M)
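        # Binarize pixels by sampling Bernoulli(intensity); presumably the
        # model places a Bernoulli likelihood on x.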
        x_train = np.random.binomial(1, x_train)
        info_dict = inference.update(feed_dict={x_ph: x_train})
        avg_loss += info_dict['loss']

    # Print a lower bound to the average marginal likelihood for an
    # image.
    avg_loss = avg_loss / n_iter_per_epoch
    avg_loss = avg_loss / M
    print("log p(x) >= {:0.3f}".format(avg_loss))

    # Visualize hidden representations.
    imgs = hidden_rep.eval()
    for m in range(M):
        imsave(os.path.join(IMG_DIR, '%d.png') % m, imgs[m].reshape(28, 28))
Beispiel #55
0
def generate_animation(anim_name):
    frames = []
    rex = re.compile("screen_([0-9]+).png")
    for f in os.listdir(anim_name):
        m = re.search(rex, f)
        if m:
            frames.append((int(m.group(1)), anim_name + "/" + f))
    frames.sort()

    images = [nd.imread(f) for t, f in frames]

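    # Diff each frame against the previous one (the first against a blank
    # frame): a pixel counts as changed if any colour channel differs.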
    zero = images[0] - images[0]
    pairs = zip([zero] + images[:-1], images)
    diffs = [sign((b - a).max(2)) for a, b in pairs]

    # Find different objects for each frame
    img_areas = [
        nd.measurements.find_objects(nd.measurements.label(d)[0])
        for d in diffs
    ]

    # Simplify areas
    img_areas = [simplify(x, SIMPLIFICATION_TOLERANCE) for x in img_areas]

    ih, iw, _ = shape(images[0])

    # Generate a packed image
    allocator = Allocator2D(MAX_PACKED_HEIGHT, iw)
    packed = zeros((MAX_PACKED_HEIGHT, iw, 3), dtype=uint8)

    # Sort the rects to be packed by largest size first, to improve the packing
    rects_by_size = []
    for i in xrange(len(images)):
        src_rects = img_areas[i]

        for j in xrange(len(src_rects)):
            rects_by_size.append((slice_tuple_size(src_rects[j]), i, j))

    rects_by_size.sort(reverse=True)

    allocs = [[None] * len(src_rects) for src_rects in img_areas]

    print anim_name, "packing, num rects:", len(
        rects_by_size), "num frames:", len(images)

    t0 = time()

    for size, i, j in rects_by_size:
        src = images[i]
        src_rects = img_areas[i]

        a, b = src_rects[j]
        sx, sy = b.start, a.start
        w, h = b.stop - b.start, a.stop - a.start

        # See if the image data already exists in the packed image. This takes
        # a long time, but results in worthwhile space savings (20% in one
        # test)
        existing = find_matching_rect(allocator.bitmap,
                                      allocator.num_used_rows, packed, src, sx,
                                      sy, w, h)
        if existing:
            dy, dx = existing
            allocs[i][j] = (dy, dx)
        else:
            dy, dx = allocator.allocate(w, h)
            allocs[i][j] = (dy, dx)

            packed[dy:dy + h, dx:dx + w] = src[sy:sy + h, sx:sx + w]

    print anim_name, "packing finished, took:", time() - t0

    packed = packed[0:allocator.num_used_rows]

    misc.imsave(anim_name + "_packed_tmp.png", packed)
    # Don't completely fail if we don't have pngcrush
    if os.system("pngcrush -q " + anim_name + "_packed_tmp.png " + anim_name +
                 "_packed.png") == 0:
        os.system("rm " + anim_name + "_packed_tmp.png")
    else:
        print "pngcrush not found, output will not be larger"
        os.system("mv " + anim_name + "_packed_tmp.png " + anim_name +
                  "_packed.png")

    # Generate JSON to represent the data
    times = [t for t, f in frames]
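    # Each frame is shown until the next frame's timestamp; the final frame
    # is held for END_FRAME_PAUSE.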
    delays = (array(times[1:] + [times[-1] + END_FRAME_PAUSE]) -
              array(times)).tolist()

    timeline = []
    for i in xrange(len(images)):
        src_rects = img_areas[i]
        dst_rects = allocs[i]

        blitlist = []

        for j in xrange(len(src_rects)):
            a, b = src_rects[j]
            sx, sy = b.start, a.start
            w, h = b.stop - b.start, a.stop - a.start
            dy, dx = dst_rects[j]

            blitlist.append([dx, dy, w, h, sx, sy])

        timeline.append({'delay': delays[i], 'blit': blitlist})

    f = open(anim_name + '_anim.js', 'wb')
    f.write(anim_name + "_timeline = ")
    json.dump(timeline, f)
    f.close()
Beispiel #56
0
sys.path.append(
    os.path.join(PROJECT_DIR, "src", "unsupervised_image_translation"))
from experiment import prepare_argument_parser, set_up_experiment
from patches import plot_patch_vectors

if __name__ == "__main__":
    argparser = prepare_argument_parser()
    args, _ = argparser.parse_known_args()

    patches, em = set_up_experiment(args)

    # Check that splitting to patches works.
    observed_patches = plot_patch_vectors(patches.observed_vectors,
                                          patches.observed_grid_size,
                                          overlap=-2)
    imsave(os.path.join(args.output, "patches_input.png"), observed_patches)
    source_patches = plot_patch_vectors(patches.dictionary_vectors,
                                        patches.source_grid_size,
                                        overlap=-2)
    imsave(os.path.join(args.output, "patches_source.png"), source_patches)

    # Check that PCA works.
    input_pca_reconstruction = patches.pca.inverse_transform(
        patches.compact_observed_vectors)
    input_pca_image = plot_patch_vectors(input_pca_reconstruction,
                                         patches.observed_grid_size,
                                         patches.patch_overlap)
    imsave(os.path.join(args.output, "pca_input.png"), input_pca_image)
    source_pca_reconstruction = patches.pca.inverse_transform(
        patches.compact_dictionary_vectors)
    source_pca_image = plot_patch_vectors(source_pca_reconstruction,
Beispiel #57
0
 def write_attack(self, images, image_names):
     for image, image_name in zip(images, image_names):
         imsave(os.path.join(self.__args.output_dir, image_name), image)
# so as to minimize the loss
x = preprocess_image(base_image_path)
for i in range(5):
    print('Start of iteration', i)
    start_time = time.time()

    # add a random jitter to the initial image. This will be reverted at decoding time
    random_jitter = (settings['jitter'] * 2) * (np.random.random(
        (3, img_width, img_height)) - 0.5)
    x += random_jitter

    # run L-BFGS for 7 steps
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                     x.flatten(),
                                     fprime=evaluator.grads,
                                     maxfun=7)

    print('Current loss value:', min_val)

    # decode the dream and save it
    x = x.reshape((3, img_width, img_height))
    x -= random_jitter
    img = deprocess_image(x)
    fname = result_prefix + '_at_iteration_%d.png' % i
    imsave(fname, img)

    end_time = time.time()
    print('Image saved as', fname)

    print('Iteration %d completed in %ds' % (i, end_time - start_time))
def testEvolution():
    ''' Second thread used to break out of training loop'''
    #thr = threading.Thread(target=running_func)
    #thr.start()
    global running
    
    #os.chdir("C:\\Users\\Ryan\\Documents\\Python\\EvolveRBM")
    
    ''' Get training data'''
    #training_data, classes =  getData(["C:\\Users\\Ryan\\Documents\\\SWAG_TRAINING\\male\\Zoom\\F", "C:\\Users\\Ryan\\Documents\\\SWAG_TRAINING\\female\\Zoom\\F"])
    mn = MNIST()    
    training_data, classes = mn.load_training()
    training_data = np.asarray(training_data)#[0:50])
    training_data = np.reshape(training_data, [len(training_data),28,28])
    training_data = (training_data * 1.0)/255 #training_data.max()
    #imsave("Images\\reconL0_"+str(20)+".jpg", training_data[0])
    saveType = "simple_genome" #20
    #np.random.shuffle(training_data)
    
    classes = np.asarray(classes)
    #training_data = training_data[0:50000]
    np.random.seed(101)
    np.random.shuffle(training_data)
    np.random.seed(101)
    np.random.shuffle(classes)
    
    training_data = training_data[0:10000]#*1.0)/255
    classes = classes[0:10000]
    # TRAIN SEED 101, TEST SEED (from remainder) 103
    
#    training_data = training_data[classes==7]
#    print training_data.shape    
#    print classes.shape
    print 'Test ConvRBM'
    rLayers = 7
    '''Conv(num_filters, filter_shape, pool_shape, binary=True, scale=0.001):'''
#    r = ConvolutionalEvolution(rLayers, [15,15], [2,2], False) #ConvolutionalEvolution(2, [3, 3], [2, 2])
    
    #initPop = 100, children_mult = 4, num_filters=20, tourney=5, filter_size=12, num_gaussians=5,mutation_prob = .7):
    t = EvolveTrainer(100, 10, rLayers, 3, filter_size = 10, num_gaussians=10, mutation_prob = .4)
    print "Working dir: ", os.getcwd()
#    t = pickle.load(open("trainer"+"_"+saveType+".p", "rb"))#
#    for i in range(len(t.population)):
#        r = pickle.load(open("conv"+str(i)+"_"+saveType+".p", "rb"))
##        r.filter_changed = np.ones(r.num_filters, dtype=bool)
#        t.population[i] = r
#    t.children_multiplier = 5
#    t.mutationProbability =.4
#    #t.tournament_size = 3
#    t.pop_size = 100
#    t.population = t.population[0:20]
    #t.children_pop
    #rLayers = r.num_filters
#    t.diversity_cutoff -= t.diversity_cutoff*.3
#    t.deviation -= .4*t.deviation
#    t.percent_kept = .6
    print "Visualizing..."
#    t.visualizePop(todo='save', saveType=saveType)
    #t.visualizeRecons(training_data[0:10])
    #return 0
#    for i in range(len(t.population)):
####        t.population[i].genelocations = np.zeros(t.population[i].weights_size, dtype=bool)
####        #t.population[i].entropy = 26
##        t.best_fitness = 0
##        t.population[i].bernoul = .3
#        if (t.population[i].weights.mean(axis=-1).mean(axis=-1)==0.0).any():
#            t.population[i].fitness -= 10
##        #t.population[i].vis_bias = 1
##        t.population[i].hid_bias+= .02 #[t.population[i].hidden==0] += .05
##    t.diversity_cutoff =.331 #-= 1.0001 *(t.diversity_cutoff)
##    t.deviation = 0#-10000.0 #-= 1.00001 * (t.deviation)
#        t.population[i].fitness -= 5 #= -9999
#    t.best_fitness -= 20
##        t.population[i].entropy = 25
    r = t.population[0]
    batchsize = 15
    #t.stats = []
    #t.best_fitness = 100
    #t.best_hid_bias = np.zeros(r.num_filters)
    #t.best_vis_bias = np.zeros(1)
    '''Trainer(rbm, momentum=0., l2=0., target_sparsity=None):'''
    #t = ConvolutionalTrainer(r,.5, 0, .005) #changed from .005 to .05
    print 'Training...'
    for i in range(rLayers):
        imsave(os.path.join("Images", "weights_init_" +saveType+"_"+str(i)+".jpg"), r.weights[i])
    ''' Training for first layer'''
    #startOff = 0
    #if len(t.stats) > 0 :
    #    startOff = t.stats[len(t.stats)-1][1]
    bfit, prev_bfit = 0, 0
    j = random.randrange(0, batchsize)  
    #t.mutationProbability = .8
    start = time.clock()
    print "t.epoch: ", str(t.epoch)
    for i in range(t.epoch, 5000):
        t.epoch = i
        #batchsize = 2 #5 + int(i//6)
        ''' Get NEW training data'''
        global avgRE, minRE1
        #np.random.shuffle(training_data)
        #for j in range(t.data_loc, training_data.shape[0], batchsize):
        t.data_loc = j
        print "Epoch: ", str(i), " Data: ", str(j), "/", str(training_data.shape[0]),\
                " batchsize: ", str(batchsize)
        #print "bat size: ", training_data[j:j+batchsize].shape
        prev_bfit = bfit
        
        
        bfit = t.learn(training_data[j:j+batchsize]) #, 20)
        #print "stats size: ", sys.getsizeof(t.stats)
        #avgRE = r.get_avg_error(training_data[j])
        print "num children: ", len(t.children_pop)
        #print "pop size: ", t.pop_size
        r = t.population[0]
        
        
        elapsed = (time.clock() - start)
        t.stats.append((i, j, t.avg_fitness, t.best_fitness, t.worst_fitness, elapsed, r.error, r.sparsity.mean(), r.overhidden))
        print 'Bernoull cut: ', r.bernoul
        print 'Avg Fitness: ', str(t.avg_fitness), ', Best Fit: ', \
                str(t.best_fitness), ', Worst Fit: ', str(t.worst_fitness)
        print "Error: ", r.error, ", entropy: ", r.entropy
        print "Amplitude: ", r.amplitude
        print "id: ", str(r.id), " averages: (w, h, v) ", str(r.weights.mean()), ", ", str(r.hid_bias), ", ", str(r.vis_bias)
        print "Target sparsity: ", r.target_sparsity
        print "Sparsity: ", r.sparsity
        print "Hidden overlap: ", r.overhidden
        print "Avg Hidden: ", r.hidden_expectation(training_data[j]).mean(axis=-1).mean(axis=-1)
        print "Max: ", np.max(r.weights), ", min: ", np.min(r.weights)
        #print "filtercorrelation: ", str(r.filtercorrelation)
        if j % 1 == 0 :
            ''' Reconstruct image for one layer'''   
            k = random.randrange(0, batchsize)                 
            minRecon = r.reconstruct(training_data[k], 2)
            rerror = r.get_avg_error(training_data[k])
            if bfit >= prev_bfit and (i+j) % 1 == 0:
                #rerror < t.min_reconerror and i+j % 1 == 0:

                print "Writing..."
                if len(t.stats ) > 0:
                    with open('convevolv_stats.csv', 'ab') as csvfile:
                        spamwriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
                        spamwriter.writerows(t.stats)
                        t.stats = []
                print "Visualizing..."
                t.min_reconerror = rerror
                #if j % 100:
                t.visualizePop(todo='save', saveType=saveType)
                t.visualizeRecons(training_data[j:j+min(10,batchsize)], todo='save', saveType=saveType) #data
                
                
                hid = r.hidden_sample(training_data[k])
                exp = r.hidden_expectation(training_data[k])
                for y in range(len(r.weights)):
                    imsave("expect"+str(y)+".jpg", exp[y])
                    imsave("hidden"+str(y)+".jpg", hid[y])
                imsave("image"+str(k)+".jpg", training_data[k])
                imsave("recon"+str(k)+".jpg", minRecon)
            #elif j % 1 == 0 and batchsize < 100:
                #batchsize += 1
            #minRecon = minRecon / minRecon.max() * 255
#            with lock:
#                imsave(os.path.join("Images", "reconL"+saveType+"_"+str(i*100+j)+"_0.jpg"), training_data[k]*255)
#                imsave(os.path.join("Images", "reconL"+saveType+"_"+str(i*100+j)+"_1.jpg"), minRecon*255)
                #t.visualizePop('save')
        # Save parameters occasionally
        if j % 1*batchsize == 0 :
            for k in range(len(t.population)):
                r = t.population[k]
                pickle.dump(r, open("conv"+str(k)+"_"+saveType+".p", "wb"))
            print 'Saving layer 1 weights'
            pickle.dump(t, open("trainer"+"_"+saveType+".p", "wb"))
        if j % 5*batchsize == 0 :
            for k in range(rLayers):
                imsave(os.path.join("Images", "weights_iter"+str(i)+saveType+str(k)+".jpg"), r.weights[k])
        if not running:
            with lock:
                print 'First break'
                print 'Breaking on running (in)'
            break
        # END SECOND INDENT
            #t.data_loc = 0
        #if abs(oldRE - avgRE) < .0001:
        #    break
            #if not running:
            #    print 'Second break'
            #    print 'Breaking on running (out)'
            #    break
                #print 'shape: ', r.hidden_sample(training_data[j]).shape
    #with lock:
    #    print 'Joining threads...'
    #thr_train.join()
    t.visualizePop(todo='save', saveType=saveType)
    t.visualizeRecons(training_data[j:j+10], todo='save', saveType=saveType) #data
    
    print "Working dir: ", os.getcwd()
    for i in range(len(t.population)):
        r = t.population[i]
        pickle.dump(r, open("conv"+str(i)+"_"+saveType+".p", "wb"))
    print 'Saving layer 1 weights'
    pickle.dump(t, open("trainer"+"_"+saveType+".p", "wb"))
    #t.visualizePop()
    #t.visualizeRecons(training_data[0:batchsize])
    # Print weights to images
    #for i in range(rLayers):
    #    imsave(os.path.join("Images", "weightsL20_"+str(i)+".jpg"), r.weights[i])
    #thr.join()
    #print 'joined.'
    print 'Done.'
          "%")
    prev_min_val = min_val
    # save current generated image
    img = deprocess_image(x.copy())

    if preserve_color and content is not None:
        img = original_color_transform(content, img, mask=color_mask)

    if not rescale_image:
        img_ht = int(img_width * aspect_ratio)
        print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
        img = imresize(img, (img_width, img_ht), interp=args.rescale_method)

    if rescale_image:
        print("Rescaling Image to (%d, %d)" % (img_WIDTH, img_HEIGHT))
        img = imresize(img, (img_WIDTH, img_HEIGHT),
                       interp=args.rescale_method)

    fname = result_prefix + '_at_iteration_%d.png' % (i + 1)
    imsave(os.path.join(output_path, fname), img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))

    if improvement_threshold != 0.0:
        if improvement < improvement_threshold and improvement != 0.0:
            print(
                "Improvement (%f) is less than improvement threshold (%f). Early stopping script."
                % (improvement, improvement_threshold))
            exit()