Example #1
1
 def save_img(self, fig):
     plt.imsave(os.path.join(self.dir_path, 'rank_list.jpg'), fig)
Example #2
0
    def format_image(note_image):
        """Format the Lilypond images uniformly.

        :param str note_image: name of note png image to format
        """
        im = mpimg.imread(note_image)

        # find image boundaries
        im_filter = np.where(im < 1)
        x_min = np.min(im_filter[1])
        x_max = np.max(im_filter[1])
        y_clef_idx = np.median(np.where(im_filter[1] == x_min))
        y_clef = im_filter[0][int(y_clef_idx)]

        # crop image
        x_pad = 15
        vertical_total = 400
        upper_pad = int(vertical_total / 2) - y_clef
        row, col, rgb = im.shape
        im = np.insert(im, 0, np.broadcast_to(im[0], (upper_pad, col, rgb)),
                       axis=0)
        img_resize = im[:vertical_total, (x_min - x_pad):(x_max + x_pad)]

        # set transparent background
        row, col, rgb = img_resize.shape
        img_transparent = np.append(img_resize, np.zeros((row, col, 1)),
                                    axis=2)
        mask = np.where(img_resize < 1)
        img_transparent[mask[0], mask[1], -1] = 1

        # save image
        plt.imsave(note_image, img_transparent)
Example #3
0
def test_imsave():
    # The goal here is that the user can specify an output logical DPI
    # for the image, but this will not actually add any extra pixels
    # to the image, it will merely be used for metadata purposes.

    # So we do the traditional case (dpi == 1), and the new case (dpi
    # == 100) and read the resulting PNG files back in and make sure
    # the data is 100% identical.
    from numpy import random
    random.seed(1)
    data = random.rand(256, 128)

    buff_dpi1 = io.BytesIO()
    plt.imsave(buff_dpi1, data, dpi=1)

    buff_dpi100 = io.BytesIO()
    plt.imsave(buff_dpi100, data, dpi=100)

    buff_dpi1.seek(0)
    arr_dpi1 = plt.imread(buff_dpi1)

    buff_dpi100.seek(0)
    arr_dpi100 = plt.imread(buff_dpi100)

    assert arr_dpi1.shape == (256, 128, 4)
    assert arr_dpi100.shape == (256, 128, 4)

    assert_array_equal(arr_dpi1, arr_dpi100)
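A quick way to confirm that dpi ends up only in the file's metadata is to read the PNG back with Pillow. This is a minimal sketch, assuming Pillow is installed (recent matplotlib versions write PNGs through it):

import io
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

data = np.random.rand(16, 16)
buff = io.BytesIO()
plt.imsave(buff, data, dpi=100)
buff.seek(0)
with Image.open(buff) as im:
    # the pixel data matches the dpi=1 case; only this metadata differs
    print(im.info.get('dpi'))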
Example #4
0
def kmeans(iData, clustNumber, oPrefix, norm=False):
    '''Perform k-means cluster analysis and return MAP of zones'''
    print('Run K-Means')
    
    height, width = iData.shape[1:3]
    #reshape 3D cube of data into 2D matrix and get indices of valid pixels
    iData, notNanDataI = cube2flat(iData)
    if norm:
        #center and norm
        iDataMean = iData[:, notNanDataI].mean(axis=1)
        iDataStd  = iData[:, notNanDataI].std(axis=1)
        iData = np.subtract(iData.T, iDataMean).T
        iData = np.divide(iData.T, iDataStd).T

    #perform kmeans on valid data and return codebook
    codeBook = vq.kmeans(iData[:, notNanDataI].astype('f8').T, clustNumber)[0]
    #perform vector quantization of input data using the codebook
    #return vector of labels (for each valid pixel)
    labelVec = vq.vq(iData[:, notNanDataI].astype('f8').T, codeBook)[0]+1
    #create and fill MAP of zones
    zoneMap = np.zeros(width*height) + np.nan
    zoneMap[notNanDataI] = labelVec
    zoneMap = zoneMap.reshape(height, width)
    
    #visualize map of zones
    plt.imsave(oPrefix + 'zones.png', zoneMap)
    
    return zoneMap
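A hypothetical call for the function above, assuming cube2flat and the scipy.cluster.vq import come from the same module, with a synthetic (bands x height x width) cube:

import numpy as np
iData = np.random.rand(3, 100, 100).astype('float32')
zoneMap = kmeans(iData, clustNumber=5, oPrefix='out_', norm=True)
# writes out_zones.png and returns the (height x width) label map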
Example #5
0
 def create_concentration_image(self, image_data):
     cmap = cm.get_cmap()
     cmap._init()
     is_comparison_run = isinstance(self.scenario_run, models.ComparisonScenarioRun)
     if not is_comparison_run or self.scenario_run.comparison_mode == "Absolute":
         alphas = np.abs([min(n, 1.0) for n in np.linspace(0, 2, cmap.N)])
         vmax = np.max(image_data)
         vmin = np.min(image_data)
     else:
         results_max = np.max(image_data)
         results_min = np.min(image_data)
         if np.abs(results_max) > np.abs(results_min):
             vmax = results_max
             vmin = -results_max
         else:
             vmax = -results_min
             vmin = results_min
         results_range = vmax - vmin
         value_array = np.linspace(vmin, vmax, cmap.N)
         alphas = np.array([min(np.abs(v) / results_range * 2, 1.0) for v in value_array])
     cmap._lut[:-3, -1] = alphas
     if is_comparison_run:
         output_directory = self.scenario_run.output_directory_1
     else:
         output_directory = self.scenario_run.output_directory
     plt.imsave(fname=os.path.join(output_directory, "concentrations.png"),
                arr=image_data, format='png', vmax=vmax, vmin=vmin)
Example #6
0
def plot_brights(ax, path, star, regionList, goal=False):
    '''
    Components of this routine:
        Projected brightness map

    Please note that this has been modified for use in diagnostic plots;
    there should really be a way to specify a windowNumber for real data.
    '''
    currentWindow = 0

    ###########################
    # Make the brightness map #
    ###########################
    img = make_bright_image(star, regionList, currentWindow, goal=goal)
    
    plt.imsave(path + "temp.jpg", img, cmap='hot', vmin=0.85, vmax=1.15)
    plt.imshow(img, cmap='hot')
    #Create the plot
    bmap = Basemap(projection='moll', lon_0 = 0, ax=ax)
    bmap.warpimage(path + "temp.jpg", ax=ax)
    
    if goal:
        ax.set_title("Desired Map")
    else:
        ax.set_title("Average Map")
Example #7
0
def arrayToImg(mat, filename, format="png"):
    """
    Write a 2D numpy array to a grayscale image.

    If mat is 3D, will separately write each image along the 3rd dimension.

    Parameters
    ----------
    mat : array (2D or 3D), dtype must be uint8
        Pixel values for image or set of images to write

    filename : str
        Base filename for writing

    format : str, optional, default = "png"
        Image format to write (see matplotlib's imsave for options)
    """
    from matplotlib.pyplot import imsave
    from matplotlib import cm
    from numpy import shape

    dims = shape(mat)
    if len(dims) > 2:
        for z in range(0, dims[2]):
            cdata = mat[:, :, z]
            imsave(filename+"-"+str(z)+"."+format, cdata, cmap=cm.gray)
    elif len(dims) == 2:
        imsave(filename+"."+format, mat, cmap=cm.gray)
    else:
        raise NotImplementedError('array must be 2 or 3 dimensions for image writing')
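A minimal usage sketch for arrayToImg, assuming it is importable and given uint8 data as the docstring requires:

import numpy as np
stack = (np.random.rand(64, 64, 3) * 255).astype('uint8')
arrayToImg(stack, 'slices')        # writes slices-0.png, slices-1.png, slices-2.png
arrayToImg(stack[:, :, 0], 'one')  # writes one.png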
Example #8
def input_image_setup(img_name, img2_name):
    '''Takes an image as input and builds a "rule map". When creating the
    image, keep in mind: red = grid, green = branching, blue = radial, and a
    black pixel defines a center. '''
    #TODO: Document
    import matplotlib.image as mpimg
    import matplotlib.pyplot as plt
    import procedural_city_generation
    import os

    img = mpimg.imread(img_name)
    img2 = mpimg.imread(img2_name)

    path = os.path.dirname(procedural_city_generation.__file__)
    print(path)
    plt.imsave(path + "/temp/diffused.png", img2, cmap='gray')
    with open(path + "/temp/isdiffused.txt", 'w') as f:
        f.write("False")

    # mpimg.imread returns floats in [0, 1] for PNGs; scale to 0-255
    img *= 255
    img2 *= 255
    return img, img2
Example #9
0
def save_imgs(path, name, data):
    if not os.path.exists(path):
        os.makedirs(path)
    for _i, _f in enumerate(data):
        fname = name + '_' + str(_i) + '.png'
        fpath = os.path.join(path, fname)
        plt.imsave(fpath, _f)
Example #10
0
 def toFilenameAndTiff(outputDirPath, kv):
     key, image = kv
     fname = outputDirPath + '/w_' + str(key) + '.tif'
     if len(image.shape) == 3:
         image = image.T
         image = np.swapaxes(image, 1, 2)
     imsave(fname, image)
Example #11
0
def plot_linear_stretch(M, path, R, G, B, suffix=None):
    """
    Plot a linear stretched RGB image.

    Parameters:
        M: `numpy array`
          A HSI cube (m x n x p).

        path: `string`
          The path where to put the plot.

        R: `int`
          A band number that will render the red color.

        G: `int`
          A band number that will render the green color.

        B: `int`
          A band number that will render the blue color.

        suffix: `string [default None]`
          Add a suffix to the file name.
    """
    img = _linear_stretch(M, R, G, B)
    plt.ioff()
    if suffix is None:
        fout = osp.join(path, 'linear_stretch.png')
    else:
        fout = osp.join(path, 'linear_stretch_{0}.png'.format(suffix))
    plt.imsave(fout, img)
Example #12
def main():
    """
    Args: save_dir output_dir
    """
    args = sys.argv
    save_dir = args[1]
    output_dir = args[2]

    layer_list = [
        'conv1/Conv/Conv2D',
        'conv2/Conv/Conv2D',
        'conv3/Conv/Conv2D',
        'conv4/Conv/Conv2D',
        'conv5/Conv/Conv2D',
        'conv5/Conv_1/Conv2D',
        'conv6/Conv/Conv2D'
    ]
    channels = [16, 32, 64, 64, 128, 256, 2]

    sess = tf.Session()

    with sess.as_default():
        maximize_output_multi = layers.prepare_graph(movie.build_net, save_dir)

        for i, layer in enumerate(layer_list):
            folder_name = layer.replace('/', '_')
            directory = os.path.join(output_dir, folder_name)
            create_dir(directory)
            for channel in range(channels[i]):
                result = maximize_output_multi(layer, channel, octave_n=4, iter_n=100, step=5.0, seed=123)
                plt.imsave(os.path.join(directory, str(channel) + '.png'), result)
Example #13
0
def draw_homotopy(metadata, config, target_path, idx1, idx2):
    if not os.path.isfile(target_path + 'mu.npy'):
        encode(metadata, config, target_path)

    mu = np.load(target_path + 'mu.npy')
    x1, y1 = config.x_train[idx1, :], config.y_train[idx1]
    x2, y2 = config.x_train[idx2, :], config.y_train[idx2]
    mu1, mu2 = mu[idx1, :], mu[idx2, :]

    decoder = config.build_decoder()
    decoder_layers = nn.layers.get_all_layers(decoder.l_out)
    nparams = len(nn.layers.get_all_params(decoder.l_out))
    nn.layers.set_all_param_values(decoder.l_out, metadata['param_values'][-nparams:])
    print('  Decoder')
    for layer in decoder_layers:
        name = layer.__class__.__name__
        print "    %s %s" % (string.ljust(name, 32), nn.layers.get_output_shape(layer))

    decode = theano.function([decoder.l_z.input_var], nn.layers.get_output(decoder.l_out))

    p_range = np.arange(1, 0, -0.05)
    tile = np.reshape(x1, (28, 28))

    for p in p_range:
        zp = p * mu1 + (1 - p) * mu2
        zp = zp[np.newaxis, :]
        xp_hat = decode(zp)

        xp_hat = np.reshape(xp_hat, (28, 28))
        tile = np.hstack((tile, xp_hat))

    tile = np.hstack((tile, np.reshape(x2, (28, 28))))

    plt.imsave(target_path + 'homotopy_%s-%s.png' % (str(y1), str(y2)), tile, cmap=matplotlib.cm.Greys)
Example #14
0
def segment(sourceImage, DstImage):
    import sys
    sys.path.insert(0,'/home/joe/github/caffe-with_crop/python')
    import numpy as np
    from PIL import Image
    import matplotlib.pyplot as plt
    import caffe
    # caffe.set_mode_gpu()
    # caffe.set_device(0)
    # load image, switch to BGR, subtract mean, and make dims C x H x W for Caffe
    im = Image.open(sourceImage)
    in_ = np.array(im, dtype=np.float32)
    in_ = in_[:,:,::-1]
    in_ -= np.array((104.00698793,116.66876762,122.67891434))
    in_ = in_.transpose((2,0,1))

    # load net
    net = caffe.Net('/home/joe/github/caffe-with_crop/examples/fcn-32s-pascal-context/deploy.prototxt', '/home/joe/github/caffe-with_crop/examples/fcn-32s-pascal-context/fcn-32s-pascalcontext.caffemodel', caffe.TEST)
    # shape for input (data blob is N x C x H x W), set data
    net.blobs['data'].reshape(1, *in_.shape)
    net.blobs['data'].data[...] = in_
    # run net and take argmax for prediction
    net.forward()
    out = net.blobs['score'].data[0].argmax(axis=0)
#    plt.imshow(out)
    plt.imsave(DstImage, out)
Example #15
0
    def post_image(self, im, folder, filename='sources'):
        """
        Post an image to S3 for this pull request

        Parameters
        ----------
        im : array
            The image as a 2D array (grayscale) or 3D array (RGB)

        folder : str
            The folder name to put the file in

        filename : str, optional, default = 'sources'
            The file name for the saved image
        """
        from matplotlib.pyplot import imsave, cm

        im = asarray(im)
        imfile = io.BytesIO()
        if im.ndim == 3:
            imsave(imfile, im, format="png")
        else:
            imsave(imfile, im, format="png", cmap=cm.gray)

        k = Key(self.bucket)
        k.key = 'neurofinder/images/' + str(
            self.id) + '/' + folder + '/' + filename + '.png'
        k.set_contents_from_string(imfile.getvalue())
Example #16
0
def draw_tile(metadata, config, target_path):
    decoder = config.build_decoder()
    decoder_layers = nn.layers.get_all_layers(decoder.l_out)
    print "  decoder layer output shapes:"
    nparams = len(nn.layers.get_all_params(decoder.l_out))
    nn.layers.set_all_param_values(decoder.l_out, metadata['param_values'][-nparams:])

    for layer in decoder_layers:
        name = layer.__class__.__name__
        print "    %s %s" % (string.ljust(name, 32), nn.layers.get_output_shape(layer))

    mesh = np.linspace(0.001, 0.999, 20)
    z = np.zeros((400, 2), dtype='float32')
    for i in range(20):
        for j in range(20):
            z[20 * i + j, :] = np.array([norm.ppf(mesh[i]), norm.ppf(mesh[j])])

    sample = theano.function([decoder.l_z.input_var], nn.layers.get_output(decoder_layers[-1]))

    digits = sample(z)

    tile = np.zeros((20 * 28, 20 * 28), dtype='float32')

    for i in range(20):
        for j in range(20):
            d = np.reshape(digits[20 * i + j, :], (28, 28))
            tile[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = d

    plt.imsave(target_path + 'tile.png', tile, cmap=matplotlib.cm.Greys)
Example #17
    def xest_create_segmentation_from_seeds(self):
        "test whether we can create a segmentation from given seeds."

        first_image = plt.imread(path.join(dirname(__file__),
                                           'data/ilastik_data/CropStack20001_Simple Segmentation.tif'))

        seed_image = mesh.create_seeds_from_image( first_image )
        
        actual_image = plt.imread(path.join(dirname(__file__),
                                           'data/image_data/CropStack20001.tif'))
        
        segmented_image = mesh.create_segmentation_from_seeds( actual_image, seed_image )
        
        self.assertEqual( segmented_image.dtype, np.dtype('uint16') )
        
        plt.imsave( path.join(dirname(__file__), 
                    'output/testsegmentation.tif'), segmented_image )
        
        cv2.imwrite( path.join(dirname(__file__), 
                     'output/testsegmentationwithcv2.tif'), segmented_image )
        
        reloaded_image = cv2.imread( path.join(dirname(__file__), 
                         'output/testsegmentationwithcv2.tif'), flags = -1 )
        
        np.testing.assert_equal( segmented_image, reloaded_image )
Example #18
def prep_image(url,idx,dataset,datadir,width=224,filetype='jpg',verbose=False):
    '''
    Check whether the image file has been downloaded at the current size. If it
    has not, download and resize the image. Saves the file to
    datadir/images/[dataset]_[idx]_w[width].[filetype],
    e.g. datadir/images/train_10001_w256.bmp

    args:
        url: url of image source
        idx: image row index
        dataset: string 'train' or 'test' or other identifier
        datadir: data directory
        width: desired width of image. Will be resized to width x width
    returns:
        rawim: scaled and cropped image
    '''
    outpath = datadir + 'images/' + dataset + '_' +  str(idx) + '_w' + str(width) + '.' + filetype

    if not os.path.isfile(outpath):
        if verbose:
            print("downloading image #%s..." % str(idx))
        try:
            rawim = download_and_resize(url,width)
            plt.imsave(outpath,rawim)
            return rawim
        except Exception:
            print("unable to download image #%i from url %s..." % (idx, url))
            return None
    else:
        if verbose:
            print("Image %i already downloaded. Loading from file..." % idx)
        rawim = plt.imread(outpath)
        return rawim
Example #19
def generate_cross_dissolve(im1,im2,file_path):
    step_size = 0.1
    step_array = np.arange(0,1 + step_size,step_size)
    for t in step_array:
        cd_im = np.add(t*im1,  (1.-t) * im2)
        file_path_t = file_path+'_'+str(t)+'_r.jpg'
        plt.imsave(file_path_t, cd_im)
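Hypothetical usage, assuming two equally sized float RGB images with values in [0, 1] (writing JPEG output also requires Pillow):

im1 = plt.imread('a.png')[:, :, :3]
im2 = plt.imread('b.png')[:, :, :3]
generate_cross_dissolve(im1, im2, 'dissolve')
# one blended frame per 0.1 step, named file_path_<t>_r.jpg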
Example #20
0
 def forward(self, bottom, top):
     """Forward data in the architecture to the following layer."""
     input_heatMaps = bottom[0].data[...]
     heatMaps = np.zeros((self.batch_size, self.num_channels, self.input_size, self.input_size))
     metadata = bottom[1].data[...]
     
     # consider each image in the batch individually
     for b in range(self.batch_size):
         (_, camera, _, _) = self.extractMetadata(metadata[b])
         # get new points
         (points, cov_matrices) = self.manifoldDataConversion(input_heatMaps[b], camera)
         # heatMaps[b] = self.sumHeatMaps(input_heatMaps[b], self.generateHeatMaps(points, cov_matrices))
         heatMaps[b] = self.generateHeatMaps(points, cov_matrices)
         
         if (self.debug_mode):
             for j in range(self.num_channels):
                 name = '%s/tmp/batch_%d_beforeafter_%d.png' % (os.environ['HOME'], b, j)
                 if (np.max(input_heatMaps[b,j,:,:]) > 0):
                     rescaled_input = np.divide(input_heatMaps[b,j],np.max(input_heatMaps[b,j]))
                 else:
                     rescaled_input = input_heatMaps[b,j]
                 vis = np.concatenate((rescaled_input, heatMaps[b,j]), axis=1)
                 plt.imsave(name,vis)
                 if (np.max(input_heatMaps[b,j]) == 0):
                     name = '%s/tmp/exception_zero_%d_%d.png' % (os.environ['HOME'], b, j)
                     plt.imsave(name,vis)
     
     top[0].data[...] = heatMaps
Example #21
0
def test_imsave(fmt):
    if fmt in ["jpg", "jpeg", "tiff"]:
        pytest.importorskip("PIL")
    has_alpha = fmt not in ["jpg", "jpeg"]

    # The goal here is that the user can specify an output logical DPI
    # for the image, but this will not actually add any extra pixels
    # to the image, it will merely be used for metadata purposes.

    # So we do the traditional case (dpi == 1), and the new case (dpi
    # == 100) and read the resulting PNG files back in and make sure
    # the data is 100% identical.
    np.random.seed(1)
    # The height of 1856 pixels was selected because going through creating an
    # actual dpi=100 figure to save the image to a Pillow-provided format would
    # cause a rounding error resulting in a final image of shape 1855.
    data = np.random.rand(1856, 2)

    buff_dpi1 = io.BytesIO()
    plt.imsave(buff_dpi1, data, format=fmt, dpi=1)

    buff_dpi100 = io.BytesIO()
    plt.imsave(buff_dpi100, data, format=fmt, dpi=100)

    buff_dpi1.seek(0)
    arr_dpi1 = plt.imread(buff_dpi1, format=fmt)

    buff_dpi100.seek(0)
    arr_dpi100 = plt.imread(buff_dpi100, format=fmt)

    assert arr_dpi1.shape == (1856, 2, 3 + has_alpha)
    assert arr_dpi100.shape == (1856, 2, 3 + has_alpha)

    assert_array_equal(arr_dpi1, arr_dpi100)
Example #22
    def __init__(self, global_counter, path_to_mha=None, how_many_from_one=1, saving_path='./test_data/'):
        if path_to_mha is None:
            raise NameError(' missing .mha path ')
        self.images = []
        for i in range(0, len(path_to_mha)):
            self.images.append(np.array(sitk.GetArrayFromImage(sitk.ReadImage(path_to_mha[i]))))

        mkdir_p(saving_path)
        plt.set_cmap('gray')
        while how_many_from_one > 0:
            image_to_save = np.zeros((5,
                                      216,
                                      160))
            rand_value = rnd.randint(30, len(self.images[0]) - 30)
            for i in range(0, len(path_to_mha)):
                try:
                    image_to_save[i] = self.images[i][rand_value]
                except Exception:
                    print('ahi')
                    print(self.images[i][rand_value].shape)
                    print(type(self.images))
                    print(type(self.images))
                    print('*')
                    continue
            print(image_to_save.shape)
            image_to_save = image_to_save.reshape((216 * 5, 160))
            print(image_to_save.shape)
            # image_to_save = resize(image_to_save, (5*216, 160), mode='constant')
            # image_to_save = image_to_save.resize(5*216, 160)
            plt.imsave(saving_path + str(global_counter) + '.png',
                       image_to_save)
            global_counter += 1
            how_many_from_one -= 1
Example #23
0
 def _post_step(self, t):
     file_name = os.path.join(self._dir_name, self._file_fmt.format(t))
     spins = np.array([[self._ising.s(i, j)
                        for j in range(self._ising.N())]
                       for i in range(self._ising.N())])
     plt.imsave(file_name, spins)
     return True
Example #24
0
def detect():
    try:
        image = request.files.get('file')
        recog_face = bool(int(request.files.get('face').file.read()))
        files = {'file': StringIO(image.file.read())}
        objs = requests.post('http://{}/object'.format(OBJ_SERVER),
                             files=files)
        objs = json.loads(objs.text)

        print(recog_face)
        if recog_face:
            img = None
            for bb in objs:
                if bb['label'] == 'person':
                    if img is None:
                        image.file.seek(0)
                        img = io.imread(StringIO(image.file.read()))
                    x1, y1, x2, y2 = bb['bbox']
                    person_img = img[y1:y2, x1:x2]

                    # detect face
                    s = StringIO()
                    plt.imsave(s, person_img)
                    s.seek(0)
                    faces = requests.post('http://{}/face'.format(FACE_SERVER),
                                          files={'file': s})
                    bb['face'] = json.loads(faces.text)

        return json.dumps(objs)

    except Exception as e:
        print(str(type(e)), e)
Example #25
0
 def showEigFace(self, idx=0):
     eigface = np.float32(self.eigenfaces[idx])
     print('Eigface', eigface)
     print('SHAPE:', eigface.shape)
     im = np.reshape(eigface, self.imsize)
     picName = 'eigFaceImage' + str(idx) + '.png'
     plt.imsave(picName, im, cmap=pylab.gray())
Example #26
def save_results(true_labels, predicted_labels, clf_name, classification_dir):
  cm_int = confusion_matrix(true_labels, predicted_labels)
  # row-normalize the confusion matrix (each row divided by its own sum)
  cm_float = cm_int / cm_int.sum(axis=1, keepdims=True).astype('float')
  report = classification_report(true_labels, predicted_labels,
                                 np.arange(0, len(class_names)), class_names)
  # Save results
  plt.imsave(classification_dir + '/' + clf_name + "_cm.png", cm_float, cmap=cmt.gray)
  float_cm_file = classification_dir + '/' + clf_name + "_float_cm.txt"
  with open(float_cm_file, 'w') as fos:
    np.savetxt(fos, cm_float)
  int_cm_file = classification_dir + '/' + clf_name + "_int_cm.txt"
  with open(int_cm_file, 'w') as fos:
    np.savetxt(fos, cm_int)
  report_file = classification_dir + '/' + clf_name + "_report.txt"
  with open(report_file, 'w') as fos:
    fos.write(report)
  labels_file = classification_dir + '/' + clf_name + "_labels.txt"
  with open(labels_file, 'w') as fos:
    np.savetxt(fos, np.column_stack((true_labels, predicted_labels)))

  p, r, f1, s = precision_recall_fscore_support(true_labels, predicted_labels,
                                                labels=np.arange(0, len(class_names)))
  prf1s_file = classification_dir + '/' + clf_name + "_prf1s.txt"
  with open(prf1s_file, 'w') as fos:
    np.savetxt(fos, np.column_stack((p, r, f1, s)))
Example #27
    def save(self, face_img, face_id, pose, landmark, name):
        # save the face image
        pose_bin_id = self.get_pose_bin_id(pose.yaw, pose.pitch)
        #print("Yaw=%d Pitch=%d Bin Name=%d" % (int(pose.yaw), int(pose.pitch), pose_bin_id))
        save_path = self.bin2path(pose_bin_id) + '/' + name + '.png'
        #print("saving image to path:", save_path)
        plt.imsave(save_path, face_img)
        # save the data file
        # this data file could be optimized by sorting the face_id
        print("saving image data file to path:", self.data_file_path())

        # here we decide what to store
        # basically, we need the following data for ranking:
        # 1. yaw, pitch and roll: yaw and pitch should match first,
        # and then roll.
        # 2. distance between eyes: to estimate the resolution
        # 3. and also the name, but this could be provided by
        # using the image file name
        with open(self.data_file_path(), 'a') as data_file_handler:
            print("%d" % pose_bin_id, end=' ', file=data_file_handler)
            print("%s" % name, end=' ', file=data_file_handler)
            print("%d %d %d" % (pose.yaw, pose.pitch, pose.roll), end=' ', file=data_file_handler)
            for point in landmark.all_points:
                print(point, end=' ', file=data_file_handler)
            print("", file=data_file_handler)
        return save_path
Example #28
0
def main():
    parser = argparse.ArgumentParser(
        description="Photometric Stereo",
    )
    parser.add_argument(
        "--lightning",
        nargs="?",
        help="Filename of JSON file containing lightning information",
    )
    parser.add_argument(
        "--mask",
        nargs="?",
        help="Filename of an image containing a mask of the object",
    )
    parser.add_argument(
        "image",
        nargs="*",
        help="Images filenames",
    )
    parser.add_argument(
        "--generate-map",
        action='store_true',
        help="Generate a map.png file which represends the colors of the "
             "normal mapping.",
    )
    args = parser.parse_args()

    if args.generate_map:
        normals = generateNormalMap()
        plt.imsave('map.png', normals)
        return

    if not len(args.image) >= 3:
        print("Please specify 3+ image files.")
        return

    if args.lightning:
        normals = photometricStereo(args.lightning, args.image)
        if False:
            try:
                with open('data.pkl', 'rb') as fhdl:
                    normals = pickle.load(fhdl)
            except Exception:
                with open('data.pkl', 'wb') as fhdl:
                    pickle.dump(normals, fhdl)
    else:
        normals = photometricStereoWithoutLightning(args.image)

    if args.mask:
        mask = getImage(args.mask)
        mask = mask.T
        print(normals.shape, mask.shape)
        normals[mask<(mask.max() - mask.min())/2.] = np.nan

    color = colorizeNormals(normals)
    plt.imsave('out.png', color)
    mesh.write3dNormals(normals, 'out-3dn.stl')
    surface = mesh.surfaceFromNormals(normals)
    mesh.writeMesh(surface, normals, 'out-mesh.stl')
Example #29
0
def plot2(fn,p,wa,vmin,vmax,ups):
    
    # Check matrix dimensions are the same
    (m,n)=p.shape
    if (m,n)!=wa.shape:
        print "Matrix dimension mismatch"

    # Set up output array and scaling constant
    o=np.zeros((m*ups,n*ups,3))
    vsca=1.0/(vmax-vmin)

    # Assemble the output array
    for i in range(m):
        iu=i*ups
        for j in range(n):
            ju=j*ups
            if wa[i,j]==1:
                o[iu:iu+ups,ju:ju+ups,0]=1
                o[iu:iu+ups,ju:ju+ups,1]=1
                o[iu:iu+ups,ju:ju+ups,2]=1
            else:
                (re,gr,bl)=palette2(fscale(p[i,j],vmin,vsca))
                o[iu:iu+ups,ju:ju+ups,0]=re
                o[iu:iu+ups,ju:ju+ups,1]=gr
                o[iu:iu+ups,ju:ju+ups,2]=bl

    # Save the image
    plt.imsave(fn,o)
Example #30
0
def get_top_nearest_neigbors(num_generated, nearneig, real_features_hdf5, gen_features_hdf5, maximum=False, random_select=False, save_path=None):

    real_img_hdf5 = real_features_hdf5.replace('_features_', '_images_')
    gen_img_hdf5 = gen_features_hdf5.replace('_features_', '_images_')

    real_features_file = h5py.File(real_features_hdf5, 'r')
    gen_features_file = h5py.File(gen_features_hdf5, 'r')
    real_img_file = h5py.File(real_img_hdf5, 'r')
    gen_img_file = h5py.File(gen_img_hdf5, 'r')

    real_features = real_features_file['features']
    gen_features = gen_features_file['features']
    real_img = real_img_file['images']
    gen_img = gen_img_file['images']

    with tf.Session() as sess:
        real_features = tf.constant(np.array(real_features), dtype=tf.float32)
        gen_features = tf.constant(np.array(gen_features), dtype=tf.float32)

        # Get Nearest Neighbors for all generated images.
        gen_real_distances = tf.sqrt(tf.abs(euclidean_distance(gen_features, real_features)))
        neg = tf.negative(gen_real_distances)
        neg_s_distances, s_indices = tf.math.top_k(input=neg, k=nearneig, sorted=True)
        s_distances = tf.negative(neg_s_distances)


        # Getting the top smallest distances between Generated and Real images.
        neg_s_distances1, s_indices1 = tf.math.top_k(input=neg, k=1, sorted=True)
        neg_s_distances1 = tf.transpose(neg_s_distances1)
        if not random_select:
            if maximum:
                neg_s_distances1 = tf.negative(neg_s_distances1)
            neg_s_distances1, s_indices1 = tf.math.top_k(input=neg_s_distances1, k=num_generated, sorted=True)
            s_indices1 = tf.transpose(s_indices1)
            s_indices1 = s_indices1.eval()
        else:
            lin = list(range(int(gen_real_distances.shape[0])))
            random.shuffle(lin)
            # wide integer dtype so indices above 127 don't overflow
            s_indices1 = np.zeros((num_generated, 1), dtype=np.int64)
            s_indices1[:, 0] = lin[:num_generated]
            
        s_indices = s_indices.eval()
        s_distances = s_distances.eval()
        # For the images with top smallest distances, show nearest neighbors.
        height, width, channels = real_img.shape[1:]
        neighbors = dict()
        grid = np.zeros((num_generated*height, (nearneig+1)*width, channels))
        for i, ind in enumerate(s_indices1):
            ind = ind[0]
            total = gen_img[ind]
            neighbors[ind] = list() 
            for j in range(nearneig):
                neighbors[ind].append((s_indices[ind,j], s_distances[ind,j]))
                real = real_img[s_indices[ind,j]]/255.
                total = np.concatenate([total, real], axis=1)
            grid[i*height:(i+1)*height, :, :] = total
        plt.imshow(grid)
        if save_path is not None:
            plt.imsave(save_path, grid)
        return neighbors
Example #31
0
File: pd1.py Project: JeGa/PD1
def main():
    logging.basicConfig(level=logging.INFO)

    imagename = "1_27_s.bmp"
    unaryfilename = "1_27_s.c_unary.txt"

    logging.info("Read image.")
    img = utility.readimg(imagename)

    logging.info("Load unaries.")
    unaries = utility.loadunaryfile(os.path.join("data", unaryfilename))

    # Calculate energy
    unaries = -np.log(unaries)
    numlabels = unaries.shape[2]

    w = 100000
    l = 0.5
    pd1 = PD1(img, unaries, numlabels, w, l)
    pd1.segment()
    img = pd1.get_labeled_image()

    logging.info("Save image.")
    plt.imsave("img_out", img)

    plt.imshow(img)
    plt.show()
Example #32
    json_gt = [json.loads(line) for line in open(str(jsonFiles[fileCounter]))]

    # Looping over the single file from upper loop line by line
    for cnt in range(len(json_gt)):  #range(1,10):
        gt = json_gt[cnt]
        gt_lanes = gt['lanes']
        y_samples = gt['h_samples']
        raw_file = gt['raw_file']

        #print(raw_file)

        #img_test = plt.imread("10.jpg")
        ######## Saving Original Image File ##########
        path_image_updated = path_image + str(file_name_counter) + ext
        img = plt.imread(raw_file)
        plt.imsave(path_image_updated, img)
        print(img.shape)
        #plt.imshow(img)
        #plt.show()
        '''
            'raw_file': str. Clip file path.
            'lanes': list. A list of lanes. For each list of one lane, the elements are width values on image.
            'h_samples': list. A list of height values corresponding to the 'lanes', which means len(h_samples) == len(lanes[i])
        '''
        # Here For each lanes loop fetched its x from lanes[i] and
        # y from h_samples which is same for all lane for that particular JSON ROW
        #gt_lanes_vis = [[(x, y) for (x, y) in zip(lane, y_samples) if x >= 0] for lane in gt_lanes]

        # List of lanes is generated from the JSON file
        # so that it can be passed to polylines for plotting
        lane_list = []
Example #33
def partTransport(direction, particles, eruption, origin, loss, diff_loss):
    ''' calculates transport of particles through wind'''

    q = 0
    rows = int(np.shape(particles)[0])
    cols = int(np.shape(particles)[1])

    print("Modeling process initiated, going through {} iterations".format(max(iterations)+1))

    #for-loop to go through specified amount of timesteps
    for n in iterations:

        #create temporary array to save calculated time step
        temp_arr = np.zeros((rows, cols))

        #dynamic particle generation at volcano
        particles[origin[0], origin[1]] = eruption[q]
        particles[origin2[0],origin2[1]] = eruption[q]
        particles[origin3[0], origin3[1]] = eruption[q]
        particles[origin4[0], origin4[1]] = eruption[q]

        print("..." * 10)
        print("..." * 10)
        print("timestep {}, erupting {}ppm".format(q+1, eruption[q]))

        #go through every pixel and evaluate its next time step, then save to temp_arr
        i = 0
        while i < rows:
            j = 0

            while j < cols:
                # to check if it works correctly:
                # print("going through pixel at {},{}".format(i, j))

                try:
                    # this calculates the following per timestep:
                    # new location of particles [depending on wind] = sum((old location[s] of particles) * loss)
                    # wind-direction codes 0..7 map to (row, col) offsets:
                    # 0 top-left, 1 top-middle, 2 top-right, 3 middle-right,
                    # 4 bottom-right, 5 bottom-middle, 6 bottom-left, 7 middle-left
                    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
                               (1, 1), (1, 0), (1, -1), (0, -1)]
                    dz, du = offsets[int(direction[i, j])]

                    # save indices to make further steps look more simple on paper
                    z = i + dz
                    u = j + du

                    # bulk transport to the end-of-timestep target pixel
                    temp_arr[z, u] += particles[i, j] * loss * (1 - diff_loss)

                    # assign each surrounding pixel of the end-of-timestep target pixel the diffusion value
                    for dz2 in (-1, 0, 1):
                        for du2 in (-1, 0, 1):
                            if dz2 == 0 and du2 == 0:
                                continue
                            temp_arr[z + dz2, u + du2] += particles[i, j] * loss * diff_loss

                except IndexError:
                    pass

                j += 1
            i += 1
        # enabling iteration for the for loop
        q += 1

        # saving temp_arr as the new particles for the next time-step
        particles = temp_arr
        plt.imsave("Ash_Plumes\Ash_Plume{}".format(q), particles)

    print("{}{} RESULTS {}{}".format("\n","---"*10,"---"*10, "\n"))
    print("Model ran {} timesteps with volcanic output of \n{}\n".format(iterate,eruption))
    print("Wind direction Raster: \n", w_direction, "\n")
    print("Model output: \n", np.rint(particles))

    #plt.imshow(w_direction, cmap='gray')
    #plt.imshow(particles, cmap='gray')
    plt.imsave("Wind_Direction\Wind_Direction", w_direction)
    return np.rint(particles)
Example #34
0
import cv2
import numpy as np
import matplotlib.pyplot as plt
# load the image
image = cv2.imread('./capture.jpg', cv2.IMREAD_COLOR)
# get the height and width
height, width = image.shape[:2]

# split the image into four quadrants (integer division for valid indices)
kirinuki1 = image[0:height // 2, 0:width // 2]
kirinuki2 = image[0:height // 2, width // 2:width]
kirinuki3 = image[height // 2:height, 0:width // 2]
kirinuki4 = image[height // 2:height, width // 2:width]
# flip the images (= 0: vertical flip, > 0: horizontal flip, < 0: both)
kirinuki1 = cv2.flip(kirinuki1, -1)
kirinuki2 = cv2.flip(kirinuki2, -1)
kirinuki3 = cv2.flip(kirinuki3, -1)
kirinuki4 = cv2.flip(kirinuki4, -1)
# put the flipped quadrants back
image[0:height // 2, 0:width // 2] = kirinuki1
image[0:height // 2, width // 2:width] = kirinuki2
image[height // 2:height, 0:width // 2] = kirinuki3
image[height // 2:height, width // 2:width] = kirinuki4
# color correction (BGR -> RGB)
image = image[:, :, [2, 1, 0]]
# show the image
plt.imshow(image)
plt.show()
# save the image
plt.imsave('reverse.png', image)
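For contrast, a one-line sketch: flipping the whole image in a single call rotates everything 180 degrees, which is not the same as the per-quadrant version above, where each quadrant is rotated 180 degrees in place:

whole_flip = cv2.flip(image, -1)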
Example #35
def main(args):
    log_dir = args.log_path if (
        args.log_path is not None
    ) else "/tmp/stable_baselines_" + time.strftime('%Y-%m-%d-%H-%M-%S')
    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        configure_logger(log_dir)
    else:
        rank = MPI.COMM_WORLD.Get_rank()
        configure_logger(log_dir, format_strs=[])

    set_global_seeds(args.seed)

    model_class = SAC_parallel

    n_workers = args.num_workers if not args.play else 1
    env_kwargs = get_env_kwargs(args.env,
                                random_ratio=args.random_ratio,
                                sequential=args.sequential,
                                reward_type=args.reward_type,
                                n_object=args.n_object)

    def make_thunk(rank):
        return lambda: make_env(
            env_id=args.env, rank=rank, log_dir=log_dir, kwargs=env_kwargs)

    env = ParallelSubprocVecEnv([make_thunk(i) for i in range(n_workers)],
                                reset_when_done=True)

    if os.path.exists(os.path.join(logger.get_dir(), 'eval.csv')):
        os.remove(os.path.join(logger.get_dir(), 'eval.csv'))
        print('Remove existing eval.csv')
    eval_env_kwargs = env_kwargs.copy()
    eval_env_kwargs['random_ratio'] = 0.0
    eval_env = make_env(env_id=args.env, rank=0, kwargs=eval_env_kwargs)
    eval_env = FlattenDictWrapper(
        eval_env, ['observation', 'achieved_goal', 'desired_goal'])

    if not args.play:
        os.makedirs(log_dir, exist_ok=True)

    # Available strategies (cf paper): future, final, episode, random
    goal_selection_strategy = 'future'  # equivalent to GoalSelectionStrategy.FUTURE

    if not args.play:
        from stable_baselines.ddpg.noise import NormalActionNoise
        noise_type = args.action_noise.split('_')[0]
        if noise_type == 'none':
            parsed_action_noise = None
        elif noise_type == 'normal':
            sigma = float(args.action_noise.split('_')[1])
            parsed_action_noise = NormalActionNoise(
                mean=np.zeros(env.action_space.shape),
                sigma=sigma * np.ones(env.action_space.shape))
        else:
            raise NotImplementedError
        train_kwargs = get_train_kwargs("sac", args, parsed_action_noise,
                                        eval_env)

        def callback(_locals, _globals):
            if _locals['step'] % int(1e3) == 0:
                if 'FetchStack' in args.env:
                    mean_eval_reward = stack_eval_model(
                        eval_env,
                        _locals["self"],
                        init_on_table=(args.env == 'FetchStack-v2'))
                elif 'MasspointPushDoubleObstacle-v2' in args.env:
                    mean_eval_reward = egonav_eval_model(
                        eval_env,
                        _locals["self"],
                        env_kwargs["random_ratio"],
                        fixed_goal=np.array([4., 4., 0.15, 0., 0., 0., 1.]))
                    mean_eval_reward2 = egonav_eval_model(
                        eval_env,
                        _locals["self"],
                        env_kwargs["random_ratio"],
                        goal_idx=0,
                        fixed_goal=np.array([4., 4., 0.15, 1., 0., 0., 0.]))
                    log_eval(_locals['self'].num_timesteps,
                             mean_eval_reward2,
                             file_name="eval_box.csv")
                else:
                    mean_eval_reward = eval_model(eval_env, _locals["self"])
                log_eval(_locals['self'].num_timesteps, mean_eval_reward)
            if _locals['step'] % int(2e4) == 0:
                model_path = os.path.join(
                    log_dir, 'model_' + str(_locals['step'] // int(2e4)))
                model.save(model_path)
                print('model saved to', model_path)
            return True

        class CustomSACPolicy(SACPolicy):
            def __init__(self, *model_args, **model_kwargs):
                super(CustomSACPolicy, self).__init__(
                    *model_args,
                    **model_kwargs,
                    layers=[256, 256] if 'MasspointPushDoubleObstacle'
                    in args.env else [256, 256, 256, 256],
                    feature_extraction="mlp")

        register_policy('CustomSACPolicy', CustomSACPolicy)
        from utils.sac_attention_policy import AttentionPolicy
        register_policy('AttentionPolicy', AttentionPolicy)
        policy_kwargs = get_policy_kwargs("sac", args)

        if rank == 0:
            print('train_kwargs', train_kwargs)
            print('policy_kwargs', policy_kwargs)
        # Wrap the model
        model = HER2(args.policy,
                     env,
                     model_class,
                     n_sampled_goal=4,
                     goal_selection_strategy=goal_selection_strategy,
                     num_workers=args.num_workers,
                     policy_kwargs=policy_kwargs,
                     verbose=1,
                     **train_kwargs)
        print(model.get_parameter_list())

        # Train the model
        model.learn(
            int(args.num_timesteps),
            seed=args.seed,
            callback=callback,
            log_interval=100 if not ('MasspointMaze-v3' in args.env) else 10)

        if rank == 0:
            model.save(os.path.join(log_dir, 'final'))

    # WARNING: you must pass an env
    # or wrap your environment with HERGoalEnvWrapper to use the predict method
    if args.play and rank == 0:
        assert args.load_path is not None
        model = HER2.load(args.load_path, env=env)

        fig, ax = plt.subplots(1, 1, figsize=(8, 8))
        obs = env.reset()
        if 'FetchStack' in args.env:
            env.env_method('set_task_array',
                           [[(env.get_attr('n_object')[0], 0)]])
            obs = env.reset()
            while env.get_attr('current_nobject')[0] != env.get_attr(
                    'n_object')[0] or env.get_attr('task_mode')[0] != 1:
                obs = env.reset()
        elif 'FetchPushWallObstacle' in args.env:
            while not (obs['observation'][0][4] > 0.7
                       and obs['observation'][0][4] < 0.8):
                obs = env.reset()
            env.env_method('set_goal', [np.array([1.18, 0.8, 0.425, 1, 0])])
            obs = env.env_method('get_obs')
            obs = {
                'observation': obs[0]['observation'][None],
                'achieved_goal': obs[0]['achieved_goal'][None],
                'desired_goal': obs[0]['desired_goal'][None]
            }
            # obs[0] = np.concatenate([obs[0][key] for key in ['observation', 'achieved_goal', 'desired_goal']])
        elif 'MasspointPushDoubleObstacle' in args.env or 'FetchPushWallObstacle' in args.env:
            while np.argmax(obs['desired_goal'][0][3:]) != 0:
                obs = env.reset()
        elif 'MasspointMaze-v2' in args.env:
            while obs['observation'][0][0] < 3 or obs['observation'][0][1] < 3:
                obs = env.reset()
            env.env_method('set_goal', [np.array([1., 1., 0.15])])
            obs = env.env_method('get_obs')
            obs = {
                'observation': obs[0]['observation'][None],
                'achieved_goal': obs[0]['achieved_goal'][None],
                'desired_goal': obs[0]['desired_goal'][None]
            }

        print('goal', obs['desired_goal'][0], 'obs', obs['observation'][0])
        episode_reward = 0.0
        images = []
        frame_idx = 0
        num_episode = 0
        for i in range(env_kwargs['max_episode_steps'] * 10):
            img = env.render(mode='rgb_array')
            ax.cla()
            ax.imshow(img)
            tasks = ['pick and place', 'stack']
            ax.set_title('episode ' + str(num_episode) + ', frame ' +
                         str(frame_idx) + ', task: ' +
                         tasks[np.argmax(obs['observation'][0][-2:])])
            images.append(img)
            action, _ = model.predict(obs, deterministic=True)
            obs, reward, done, _ = env.step(action)
            episode_reward += reward
            frame_idx += 1
            if args.export_gif:
                plt.imsave(
                    os.path.join(os.path.dirname(args.load_path),
                                 'tempimg%d.png' % i), img)
            else:
                plt.pause(0.02)
            if done:
                print('episode_reward', episode_reward)
                obs = env.reset()
                if 'FetchStack' in args.env:
                    while env.get_attr('current_nobject')[0] != env.get_attr('n_object')[0] or \
                                    env.get_attr('task_mode')[0] != 1:
                        obs = env.reset()
                elif 'MasspointPushDoubleObstacle' in args.env or 'FetchPushWallObstacle' in args.env:
                    while np.argmax(obs['desired_goal'][0][3:]) != 0:
                        obs = env.reset()
                print('goal', obs['desired_goal'][0])
                episode_reward = 0.0
                frame_idx = 0
                num_episode += 1
                if num_episode >= 1:
                    break
        exit()
        if args.export_gif:
            os.system('ffmpeg -r 5 -start_number 0 -i ' +
                      os.path.dirname(args.load_path) +
                      '/tempimg%d.png -c:v libx264 -pix_fmt yuv420p ' +
                      os.path.join(os.path.dirname(args.load_path), args.env +
                                   '.mp4'))
            for i in range(env_kwargs['max_episode_steps'] * 10):
                # images.append(plt.imread('tempimg' + str(i) + '.png'))
                try:
                    os.remove(
                        os.path.join(os.path.dirname(args.load_path),
                                     'tempimg' + str(i) + '.png'))
                except OSError:
                    pass
Example #36
0
        discriminator.trainable = True

        # Train the discriminator on this batch
        discriminator.train_on_batch(X_fake_vs_real, y1)

        #####################################
        ## TRAINING THE GENERATOR     ######
        ###################################

        # Create some noise
        noise = tf.random.normal(shape=[batch_size, codings_size])

        # We want the discriminator to believe that fake images are real
        y2 = tf.constant([[1.]] * batch_size)

        # Avoid a warning
        discriminator.trainable = False

        GAN.train_on_batch(noise, y2)

print("TRAINING COMPLETE")

# Most likely your generator will only learn to create one type of noisy zero,
# regardless of what noise is passed in.

noise = tf.random.normal(shape=[10, codings_size])

image = generator(noise)

plt.imsave(FLAGS.path, image)
Example #37
0
from sklearn.metrics import confusion_matrix

conf_mx = confusion_matrix(y_test, final_predictions)
print(accuracy_score(y_test, final_predictions))
print(conf_mx)
np.savetxt("accuracy_score.txt", [accuracy_score(y_test, final_predictions)],
           delimiter=",",
           fmt='%10.5f')
np.savetxt("conf_mat.csv", conf_mx, delimiter=",", fmt='%10.0f')

data = conf_mx

image_product = 70
new_data = np.zeros(np.array(data.shape) * image_product)
for j in range(data.shape[0]):
    for k in range(data.shape[1]):
        new_data[j * image_product:(j + 1) * image_product,
                 k * image_product:(k + 1) * image_product] = data[j, k]
# plt.imshow(new_data, cmap=plt.cm.gray)
# plt.show()
plt.imsave("confusion_mat.jpg", new_data, dpi=1000, cmap=plt.cm.gray)


# from sklearn.model_selection import cross_val_predict,cross_val_score
# clf = KNeighborsClassifier(n_jobs=-1)
# y_train_pred = cross_val_score(clf, X_train, y_train, cv=10)
# print(y_train_pred)
# y_train_pred = cross_val_predict(clf, X_train, y_train, cv=10)
# print(y_train_pred)
Example #38
# Storing the styles
styles = []
for style_path in style_files:
    style = style_tf(Image.open(str(style_path)))
    if args.preserve_color:
        style = coral(style, content)
    style = style.to(device).unsqueeze(0)
    styles.append(style)

# Generating and saving stylized images

for img_path in txt_files:
    print(img_path)
    cls_id = str(img_path.split('_')[0])
    content_path = (args.content_dir / img_path).with_suffix(".jpg")
    img_name = str(content_path).split('/')[-1][-15:]
    path = str(content_path).split('JPEGImages/')[0] + 'JPEGImages/' + img_name

    content = content_tf(Image.open(str(path))).squeeze()
    content = content.to(device).unsqueeze(0)
    for i, style in enumerate(styles):
        with torch.no_grad():
            output = style_transfer(vgg, decoder, content, style, args.alpha)
        output = output.squeeze().permute(1, 2, 0).cpu().numpy()

        # Clip before casting so out-of-range floats do not wrap around
        output = (np.clip(output, 0, 1) * 255).astype(np.uint8)

        out_path = output_dir / (str(i) + "_" + str(content_path.name))
        plt.imsave(str(out_path), output)
Example n. 39
def imgshow(file_name, img):
    npimg = np.transpose(img.numpy(), (1, 2, 0))
    f = "./%s.png" % file_name
    Wmin = img.min()
    Wmax = img.max()
    plt.imsave(f, npimg, vmin=Wmin, vmax=Wmax)
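
A typical invocation (a sketch; assumes the loader yields image batches with values in [0, 1]):

import torchvision

images, _ = next(iter(test_loader))
imgshow('batch_preview', torchvision.utils.make_grid(images))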
n = 0  # image index (initialization assumed; missing from the original snippet)
print('test start!')
for x_, _ in test_loader:
    if opt.inverse_order:
        y_ = x_[:, :, :, :x_.size()[2]]
        x_ = x_[:, :, :, x_.size()[2]:]
    else:
        y_ = x_[:, :, :, x_.size()[2]:]
        x_ = x_[:, :, :, :x_.size()[2]]

    if x_.size()[2] != opt.input_size:
        x_ = util.imgs_resize(x_, opt.input_size)
        y_ = util.imgs_resize(y_, opt.input_size)

    # volatile is deprecated in modern PyTorch; torch.no_grad() is the equivalent
    x_ = Variable(x_.cuda(), volatile=True)
    test_image = G(x_)
    s = test_loader.dataset.imgs[n][0][::-1]
    s_ind = len(s) - s.find('/')
    e_ind = len(s) - s.find('.')
    ind = test_loader.dataset.imgs[n][0][s_ind:e_ind - 1]
    path = opt.dataset + '_results/test_results/' + ind + '_input.png'
    plt.imsave(path, (x_[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)
    path = opt.dataset + '_results/test_results/' + ind + '_output.png'
    plt.imsave(path,
               (test_image[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)
    path = opt.dataset + '_results/test_results/' + ind + '_target.png'
    plt.imsave(path, (y_[0].numpy().transpose(1, 2, 0) + 1) / 2)

    n += 1

print('%d images generation complete!' % n)
Example n. 41
image_dir = ""
dst_dir = ""

for fff in os.listdir(image_dir):
    if fff.endswith(".jpg"):
        image_path = os.path.join(image_dir, fff)
        dst_path = os.path.join(dst_dir, fff)
        image_np = load_image_into_numpy_array(image_path)

        input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0),
                                            dtype=tf.float32)
        detections, predictions_dict, shapes = detect_fn(input_tensor)

        label_id_offset = 1
        image_np_with_detections = image_np.copy()

        viz_utils.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            detections['detection_boxes'][0].numpy(),
            (detections['detection_classes'][0].numpy() +
             label_id_offset).astype(int),
            detections['detection_scores'][0].numpy(),
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=10,
            min_score_thresh=.30,
            agnostic_mode=False)

        # plt.imsave writes the array directly; no matplotlib figure is needed
        plt.imsave(dst_path, image_np_with_detections)
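
For reference, a common definition of the load_image_into_numpy_array helper used above (a sketch; the original helper is not shown in this snippet):

import numpy as np
from PIL import Image

def load_image_into_numpy_array(path):
    """Read an image file into an (H, W, 3) uint8 RGB array."""
    return np.array(Image.open(path).convert('RGB'))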
Example n. 42
def test_rbm():
    ''' Demonstrate how to train and afterwards sample from a GRBM.
    :param learning_rate:
    :param training_epochs:
    :param dataset:
    :param batch_size:
    :param n_chains:
    :param n_samples:
    '''

    #import dataset
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    # parameters
    learning_rate = 0.01
    training_epochs = 1
    batch_size = 50
    display_step = 1

    #define input
    x = tf.placeholder(tf.float32, [None, 784])
    #network parameters
    n_visible = 784
    n_hidden = 500
    grbm = GRBM(x, n_visible=n_visible, n_hidden=n_hidden)

    cost = grbm.get_reconstruct_cost()

    #create the persistent variable
    #persistent_chain = tf.Variable(tf.zeros([batch_size, n_hidden]), dtype = tf.float32)
    persistent_chain = None
    train = grbm.train_ops(lr=learning_rate, persistent=persistent_chain, k=1)
    #initializing the variables
    init = tf.global_variables_initializer()

    #################
    # training RBM
    #################

    with tf.Session() as sess:
        start_time = timeit.default_timer()
        sess.run(init)
        total_batch = int(mnist.train.num_examples / batch_size)
        for epoch in range(training_epochs):
            c = 0.0
            #print(sess.run(grbm.W))
            # loop over all batches
            for i in range(total_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)

                # run optimization op (backprop) and cost op
                _ = sess.run(train, feed_dict={x: batch_xs})
                c += sess.run(cost, feed_dict={x: batch_xs}) / total_batch
            #display logs per epoch step
            if epoch % display_step == 0:
                print("epoch", '%04d' % (epoch + 1), "cost",
                      "{:.4f}".format(c))
                #print(sess.run(grbm.W))

            #construct image from the weight matrix

            plt.imsave("new_filters_at_{0}.png".format(epoch),
                       tile_raster_images(X=sess.run(tf.transpose(grbm.W)),
                                          img_shape=(28, 28),
                                          tile_shape=(10, 10),
                                          tile_spacing=(1, 1)),
                       cmap='gray')
            plt.show()
        end_time = timeit.default_timer()
        training_time = end_time - start_time
        print("TIME:{0} minutes".format(training_time / 60, ))
        """

		#################################
		#     Sampling from the RBM     #
		#################################
		# Reconstruct the image by sampling
		print("...Sampling from the RBM")
		n_chains = 20
		n_batch = 10
		n_samples = n_batch *2
		number_test_examples = mnist.test.num_examples
		#randomly select the n_chains examples

		test_indexs = np.random.randint(number_test_examples - n_chains * n_batch)
		test_samples = mnist.test.images[test_indexs:test_indexs + n_chains * n_batch]
		#create the persistent variable saving the visiable state
		#persistent_v_chain = tf.Variable(tf.to_float(test_samples), dtype = tf.float32)
		# the step of gibbs
		#step_every = 1000
		'''
		# implement the gibbs sampling
		cond = lambda j, h_mean, h_sample, v_mean, v_sample: j < step_every
		body = lambda j, h_mean, h_sample, v_mean, v_sample: (j+1, ) + grbm.gibbs_vhv(v_sample)
		j, h_mean, h_sample, v_mean, v_sample = tf.while_loop(cond, body, loop_vars=[tf.constant(0), tf.zeros([n_chains, n_hidden]), 
                                                            tf.zeros([n_chains, n_hidden]), tf.zeros(tf.shape(persistent_v_chain)), persistent_v_chain])
		'''
		# Update the persistent_v_chain
		#new_persistent_v_chain = tf.assign(persistent_v_chain, v_sample)
		# Store the image by sampling
		image_data = np.zeros((29*(n_samples+1)+1, 29*(n_chains)-1),
                          dtype="uint8")

		# Initialize the variable
		#sess.run(tf.variables_initializer(var_list=[persistent_v_chain]))
		# Do successive sampling
		for idx in range(n_batch):
			#sample = sess.run(v_mean)
			#sess.run(new_persistent_v_chain)
			# Add the original images
			'''
			image_data[2*idx*29 : 2*idx *29 + 28,:] = tile_raster_images(X=test_samples[idx*n_batch, (idx+1)*n_chains],
                                            img_shape=(28, 28),
                                            tile_shape=(1, n_chains),
                                            tile_spacing=(1, 1))			
			'''
			sample = sess.run(grbm.reconstruction, feed_dict = {x : test_samples[idx*n_batch, (idx+1)*n_chains]})
			print("...plotting sample", idx)
			image_data[(2*idx +1)*29:(2 * idx +1)*29+28,:] = tile_raster_images(X=sample,
                                            img_shape=(28, 28),
                                            tile_shape=(1, n_chains),
                                            tile_spacing=(1, 1))
			#image = plt.imshow(image_data)
		plt.imsave("new_original_and_{0}samples.png".format(n_samples), image_data, cmap = 'gray')
		plt.show()
		"""
        # Randomly select the 'n_chains' examples

        n_chains = 20
        n_batch = 10
        n_samples = n_batch * 2
        number_test_examples = mnist.test.num_examples
        test_indexs = np.random.randint(number_test_examples -
                                        n_chains * n_batch)
        test_samples = mnist.test.images[test_indexs:test_indexs +
                                         n_chains * n_batch]
        image_data = np.zeros((29 * (n_samples + 1) + 1, 29 * (n_chains) - 1),
                              dtype="uint8")
        # Add the original images

        for i in range(n_batch):
            # One row of the mosaic holds n_chains originals; the next row
            # holds their reconstructions
            image_data[2 * i * 29:2 * i * 29 + 28, :] = tile_raster_images(
                X=test_samples[i * n_chains:(i + 1) * n_chains],
                img_shape=(28, 28),
                tile_shape=(1, n_chains),
                tile_spacing=(1, 1))

            samples = sess.run(
                grbm.reconstruction(x),
                feed_dict={x: test_samples[i * n_chains:(i + 1) * n_chains]})
            image_data[(2 * i + 1) * 29:(2 * i + 1) * 29 +
                       28, :] = tile_raster_images(X=samples,
                                                   img_shape=(28, 28),
                                                   tile_shape=(1, n_chains),
                                                   tile_spacing=(1, 1))

        plt.imsave("original_and_reconstruct.png",
                   image_data,
                   cmap='gray')
        plt.show()
Example n. 43
    def train_epoch(self, netG, netD, optimizerG, optimizerD, real, style,
                    m_noise, m_image, epoch, z_opt, z_prev, opt):
        """
        Trains network for one epoch.

        Arguments:
            epoch (int) : Current epoch.
            z_prev () : Can be None on the first epoch.
            opt (argparse.ArgumentParser) : Command line arguments.

        Returns:
            errG (torch.cuda.FloatTensor) : Error of generator
            errD (torch.cuda.FloatTensor) : Error of discriminator
            D_x (torch.cuda.FloatTensor) : Error of discriminator on original image
            D_G_z (torch.cuda.FloatTensor) : Error of discriminator on fake image
            rec_loss (torch.cuda.FloatTensor) : Reconstruction loss
        """
        # Scale 0
        if (self.Gs == []):
            # Generate optimal noise that will be kept fixed during training
            z_opt = generate_noise([1, opt.nzx, opt.nzy], device=opt.device)
            z_opt = m_noise(z_opt.expand(1, 3, opt.nzx, opt.nzy))
            noise_ = generate_noise([1, opt.nzx, opt.nzy], device=opt.device)
            noise_ = m_noise(noise_.expand(1, 3, opt.nzx, opt.nzy))
        else:
            noise_ = generate_noise([opt.nc_z, opt.nzx, opt.nzy],
                                    device=opt.device)
            noise_ = m_noise(noise_)

        m_real = m_image(real)
        m_style = m_image(style)

        ############################
        # (1) Update D network: maximize D(x) + D(G(z))
        ###########################
        # Multiple steps for D
        for j in range(opt.Dsteps):
            # TRAIN WITH REAL IMAGE
            netD.zero_grad()

            output = netD(real).to(opt.device)
            D_real_map = output.detach()
            errD_real = -output.mean()  #-a
            errD_real.backward(retain_graph=True)
            D_x = -errD_real.item()

            # TRAIN WITH FAKE IMAGE
            # Only in the very first step of the very first epoch for a layer
            if (j == 0) and (epoch == 0):
                # Scale 0
                if (self.Gs == []):
                    # Define image and noise from previous scales (Nothing for Scale 0)
                    prev = torch.full([1, opt.nc_z, opt.nzx, opt.nzy],
                                      0,
                                      device=opt.device)
                    self.in_s = prev
                    prev = m_image(prev)
                    z_prev = torch.full([1, opt.nc_z, opt.nzx, opt.nzy],
                                        0,
                                        device=opt.device)
                    z_prev = m_noise(z_prev)
                    opt.noise_amp = 1
                # Remaining scales other than 0
                else:
                    # Calculate image and noise from previous scales with draw_concat function
                    prev = draw_concat(
                        self.Gs, self.Zs, self.reals, self.styles,
                        self.NoiseAmp, self.in_s, 'rand', m_noise, m_image,
                        opt)  # Randomly generate image using previous scales
                    prev = m_image(prev)
                    z_prev = draw_concat(
                        self.Gs, self.Zs, self.reals, self.styles,
                        self.NoiseAmp, self.in_s, 'rec', m_noise, m_image, opt
                    )  # Generate image with optimal noise using previous scales
                    criterion = nn.MSELoss()
                    RMSE = torch.sqrt(
                        criterion(real, z_prev)
                    )  # noise amplitude for a certain layer is decided according to the performance of previous layers
                    opt.noise_amp = opt.noise_amp_init * RMSE
                    z_prev = m_image(z_prev)
            # If not very first epoch, just generate previous image
            else:
                prev = draw_concat(self.Gs, self.Zs, self.reals, self.styles,
                                   self.NoiseAmp, self.in_s, 'rand', m_noise,
                                   m_image, opt)
                prev = m_image(prev)

            # Scale 0
            if (self.Gs == []):
                noise = noise_
            # Other scales
            else:
                noise = opt.noise_amp * noise_ + prev

            # Generate image with G and calculate loss from fake image
            fake = netG(noise.detach(), prev, m_real)
            output = netD(fake.detach())
            errD_fake = output.mean()
            errD_fake.backward(retain_graph=True)
            D_G_z = output.mean().item()

            # WGAN Loss with gradient penalty
            gradient_penalty = calc_gradient_penalty(netD, real, fake,
                                                     opt.lambda_grad,
                                                     opt.device)
            gradient_penalty.backward()

            errD = errD_real + errD_fake + gradient_penalty
            optimizerD.step()

        ############################
        # (2) Update G network: maximize D(G(z))
        ###########################

        # Multiple steps for G
        for j in range(opt.Gsteps):
            netG.zero_grad()

            ### Calculate Discriminator Loss
            # Only in the very first step of the very first epoch for a layer
            if (j == 0) and (epoch == 0):
                # Scale 0
                if (self.Gs == []):
                    # Define image and noise from previous scales (Nothing for Scale 0)
                    prev = torch.full([1, opt.nc_z, opt.nzx, opt.nzy],
                                      0,
                                      device=opt.device)
                    self.in_s = prev
                    prev = m_image(prev)
                    z_prev = torch.full([1, opt.nc_z, opt.nzx, opt.nzy],
                                        0,
                                        device=opt.device)
                    z_prev = m_noise(z_prev)
                    opt.noise_amp = 1
                # Remaining scales other than 0
                else:
                    # Calculate image and noise from previous scales with draw_concat function
                    prev = draw_concat(
                        self.Gs, self.Zs, self.reals, self.styles,
                        self.NoiseAmp, self.in_s, 'rand', m_noise, m_image,
                        opt)  # Randomly generate image using previous scales
                    prev = m_image(prev)
                    z_prev = draw_concat(
                        self.Gs, self.Zs, self.reals, self.styles,
                        self.NoiseAmp, self.in_s, 'rec', m_noise, m_image, opt
                    )  # Generate image with optimal noise using previous scales
                    criterion = nn.MSELoss()
                    RMSE = torch.sqrt(
                        criterion(real, z_prev)
                    )  # noise amplitude for a certain layer is decided according to the performance of previous layers
                    opt.noise_amp = opt.noise_amp_init * RMSE
                    z_prev = m_image(z_prev)
            # If not very first epoch, just generate previous image
            else:
                prev = draw_concat(self.Gs, self.Zs, self.reals, self.styles,
                                   self.NoiseAmp, self.in_s, 'rand', m_noise,
                                   m_image, opt)
                prev = m_image(prev)

            # Scale 0
            if (self.Gs == []):
                noise = noise_
            # Other scales
            else:
                noise = opt.noise_amp * noise_ + prev

            # Generate image with G and calculate loss from fake image
            fake = netG(noise.detach(), prev, m_real)
            output = netD(fake)
            D_fake_map = output.detach()
            errG = -output.mean()
            errG.backward(retain_graph=True)

            scale_weight = [1.0, 0.95, 0.9, 0.8, 0.5, 0.1, 0.01, 0.0005]

            ### Reconstruction Loss
            if opt.rec_weight != 0:
                loss = nn.L1Loss()
                Z_opt = opt.noise_amp * z_opt + z_prev

                rec_loss = opt.rec_weight * scale_weight[len(self.Gs)] * loss(
                    netG(noise.detach(), prev, m_real), real)
                rec_loss.backward(retain_graph=True)
                rec_loss = rec_loss.detach()
            else:
                Z_opt = z_opt
                rec_loss = 0

            # Generate image with G and calculate loss from fake image
            fake_style = netG(noise.detach(), prev, m_style.detach())

            ### Style loss, layers from AdaIN
            style_loss = opt.alpha * (
                1 - scale_weight[len(self.Gs)]) * adain_style_loss(
                    style, fake_style, self.vgg, opt.style_layers)
            style_loss.backward()

            optimizerG.step()

        if epoch % opt.niter_update == 0 or epoch == (opt.niter - 1):
            plt.imsave('%s/fake_style_sample.png' % (opt.outf),
                       convert_image_np(fake_style.detach()),
                       vmin=0,
                       vmax=1)
            plt.imsave('%s/fake_real_sample.png' % (opt.outf),
                       convert_image_np(fake.detach()),
                       vmin=0,
                       vmax=1)

            plt.imsave('%s/G(z_opt).png' % (opt.outf),
                       convert_image_np(
                           netG(Z_opt.detach(), z_prev, m_real).detach()),
                       vmin=0,
                       vmax=1)

            plt.imsave('%s/D_fake.png' % (opt.outf),
                       convert_image_np(D_fake_map))
            plt.imsave('%s/D_real.png' % (opt.outf),
                       convert_image_np(D_real_map))

            if (opt.vis != False):
                opt.vis.image(
                    convert_image_np(netG(Z_opt.detach(),
                                          z_prev).detach()).transpose(2, 0, 1),
                    win=opt.viswindows[-1],
                    opts=dict(title='G(z_opt) on scale %d' % len(self.Gs),
                              width=self.max_width,
                              height=self.max_height))

        # Temporarily plot errG in total loss plot
        return rec_loss.item(), style_loss.item(), errG.detach(), z_opt, z_prev
Example n. 44
    for i in range(1, 399):
        for j in range(1, 399):
            # Eight-neighbourhood of the cell, with the centre zeroed out
            around = np.array(
                [[s[i - 1, j - 1], s[i - 1, j], s[i - 1, j + 1]],
                 [s[i, j - 1], 0, s[i, j + 1]],
                 [s[i + 1, j - 1], s[i + 1, j], s[i + 1, j + 1]]])
            # Conway's rules: birth or survival on 3 neighbours, survival on 2
            if around.sum() == 3: s2[i, j] = 1
            elif s[i, j] == 0 and around.sum() == 2: s2[i, j] = 0
            elif s[i, j] == 1 and around.sum() == 2: s2[i, j] = 1
            else: s2[i, j] = 0
    return s2
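
The same update rule can be vectorized with a convolution; a minimal sketch, assuming scipy is available (the 3x3 kernel with a zeroed centre counts the eight neighbours exactly like the around array above):

import numpy as np
from scipy.signal import convolve2d

def gen_next_vectorized(s):
    # Count the eight neighbours of every cell at once
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]])
    neighbours = convolve2d(s, kernel, mode='same', boundary='fill')
    # Birth or survival on 3 neighbours, survival on 2
    return ((neighbours == 3) | ((s == 1) & (neighbours == 2))).astype(s.dtype)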


while True:
    fclock.tick(fps)
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE: exit()
        else: pass

    s = gen_next(s)
    body = pygame.surfarray.make_surface(s * 255)
    screem.fill((0, 0, 0))
    screem.blit(body, (0, 0))
    pygame.display.update()
    generator_times += 1
    if generator_times % 10 == 0: print('Generation %s...' % generator_times)
    screem_array = pygame.surfarray.array2d(screem)[200 - 40:200 + 40,
                                                    200 - 40:200 + 40]
    plt.imsave('%s.jpg' % generator_times, screem_array)
Example n. 45
if not os.path.exists('captures'):
    os.makedirs('captures')

camera = PiCamera()
camera.resolution = tuple(camera_resolution)
camera.framerate = camera_framerate
rawCapture = PiRGBArray(camera, size=tuple(camera_resolution))
time.sleep(0.5)

frame_counter = 0
start_time = time.time()

for frame in camera.capture_continuous(rawCapture, format="rgb", use_video_port=True):
    image = frame.array

    # Comment this out if your camera is mounted on the top
    image = cv2.flip(image, -1)

    image = fisheye_correction.undistort(image)

    plt.imsave("captures/img" + str(frame_counter) + ".jpg", image)

    frame_counter += 1
    rawCapture.truncate(0)

    if time.time() - start_time > record_time:
        break
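
plt.imsave re-encodes every frame through matplotlib; for higher frame rates a direct OpenCV write is a common alternative (a sketch; note that cv2 expects BGR channel order):

cv2.imwrite("captures/img" + str(frame_counter) + ".jpg",
            cv2.cvtColor(image, cv2.COLOR_RGB2BGR))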


Example n. 46
def style_transfer(cnn,
                   content_image,
                   style_image,
                   image_size,
                   style_size,
                   content_layer,
                   content_weight,
                   style_layers,
                   style_weights,
                   tv_weight,
                   init_random=False):
    """
    Run style transfer!
    
    Inputs:
    - cnn: CNN model to extract features
    - content_image: filename of content image
    - style_image: filename of style image
    - image_size: size of smallest image dimension (used for content loss and generated image)
    - style_size: size of smallest style image dimension
    - content_layer: layer to use for content loss
    - content_weight: weighting on content loss
    - style_layers: list of layers to use for style loss
    - style_weights: list of weights to use for each layer in style_layers
    - tv_weight: weight of total variation regularization term
    - init_random: initialize the starting image to uniform random noise
    """
    # Extract features for the content image
    pil_img = PIL.Image.open(content_image)
    # Some png files have alpha channel, esp. screenshots. Convert them to rgb
    if pil_img.mode == 'RGBA':
        # rgba2rgb returns floats in [0, 1]; rescale before converting back
        pil_img = color.rgba2rgb(pil_img)
        pil_img = PIL.Image.fromarray((pil_img * 255).astype('uint8'))
    content_img = preprocess(pil_img, size=image_size)
    feats = extract_features(content_img, cnn)
    content_target = feats[content_layer].clone()
    # Extract features for the style image
    pil_img = PIL.Image.open(style_image)
    if pil_img.mode == 'RGBA':
        pil_img = color.rgba2rgb(pil_img)
        pil_img = PIL.Image.fromarray((pil_img * 255).astype('uint8'))
    style_img = preprocess(pil_img, size=style_size)

    feats = extract_features(style_img, cnn)
    style_targets = []
    for idx in style_layers:
        style_targets.append(gram_matrix(feats[idx].clone()))

    dtype = torch.cuda.FloatTensor if torch.cuda.is_available(
    ) else torch.FloatTensor
    # Initialize output image to content image or noise
    if init_random:
        img = torch.Tensor(content_img.size()).uniform_(0, 1).type(dtype)
    else:
        img = content_img.clone().type(dtype)

    # We do want the gradient computed on our image!
    img.requires_grad_()
    if torch.cuda.is_available():
        img = img.cuda()

    # Set up optimization hyperparameters
    initial_lr = 3.0
    decayed_lr = 0.1
    decay_lr_at = 180

    # Note that we are optimizing the pixel values of the image by passing
    # in the img Torch tensor, whose requires_grad flag is set to True
    optimizer = torch.optim.Adam([img], lr=initial_lr)

    # f, axarr = plt.subplots(1,2)
    # axarr[0].axis('off')
    # axarr[1].axis('off')
    # axarr[0].set_title('Content Source Img.')
    # axarr[1].set_title('Style Source Img.')
    # axarr[0].imshow(deprocess(content_img.cpu()))
    # axarr[1].imshow(deprocess(style_img.cpu()))
    # plt.show()
    # plt.figure()

    for t in range(250):
        if t < 190:
            img.data.clamp_(-1.5, 1.5)
        optimizer.zero_grad()

        feats = extract_features(img, cnn)

        # Compute loss
        c_loss = content_loss(content_weight, feats[content_layer],
                              content_target)
        s_loss = style_loss(feats, style_layers, style_targets, style_weights)
        t_loss = tv_loss(img, tv_weight)
        loss = c_loss + s_loss + t_loss

        loss.backward()

        # Perform gradient descents on our image values
        if t == decay_lr_at:
            optimizer = torch.optim.Adam([img], lr=decayed_lr)
        optimizer.step()

        # if t % 100 == 0:
        #     print('Iteration {}'.format(t))
        #     plt.axis('off')
        #     plt.imshow(deprocess(img.data.cpu()))
        #     plt.show()
    #print('Iteration {}'.format(t))
    #plt.axis('off')
    #plt.imshow(deprocess(img.data.cpu()))
    unique_name = str(uuid.uuid4()) + ".png"
    result_path = "static/images/" + unique_name
    plt.imsave(result_path, im_convert(img))
    return unique_name
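
An example call with illustrative hyperparameters (a sketch; the sizes, layers and weights below are assumptions, not values from this snippet):

result_name = style_transfer(cnn,
                             content_image='content.jpg',
                             style_image='style.jpg',
                             image_size=192,
                             style_size=512,
                             content_layer=3,
                             content_weight=6e-2,
                             style_layers=[1, 4, 6, 7],
                             style_weights=[300000, 1000, 15, 3],
                             tv_weight=2e-2)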
Example n. 47
def save_instance(image,
                  boxes,
                  masks,
                  class_ids,
                  class_names,
                  scores=None,
                  title="",
                  figsize=(16, 16),
                  ax=None,
                  show_mask=True,
                  show_bbox=True,
                  colors=None,
                  captions=None,
                  save_path=None):
    # Number of instances
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]

    if not ax:
        _, ax = plt.subplots(1, figsize=figsize)

    # Generate random colors
    colors = colors or random_colors(N)

    # Show area outside image boundaries.
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(title)

    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]

        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        if show_bbox:
            p = patches.Rectangle((x1, y1),
                                  x2 - x1,
                                  y2 - y1,
                                  linewidth=2,
                                  alpha=0.7,
                                  linestyle="dashed",
                                  edgecolor=color,
                                  facecolor='none')
            ax.add_patch(p)

        # Mask
        mask = masks[:, :, i]
        if show_mask:
            masked_image = apply_mask(masked_image, mask, color)

        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2),
                               dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        contours = find_contours(padded_mask, 0.5)
        for verts in contours:
            # Subtract the padding and flip (y, x) to (x, y)
            verts = np.fliplr(verts) - 1
            p = Polygon(verts, facecolor="none", edgecolor=color)
            ax.add_patch(p)
    # imsave writes only the blended array; the box and contour patches on
    # `ax` are not part of it (see the sketch below)
    plt.imsave(save_path, masked_image.astype(np.uint8))
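
If the drawn boxes and contours should appear in the saved file, saving the figure itself is one option; a minimal sketch using the ax built above:

ax.imshow(masked_image.astype(np.uint8))
plt.savefig(save_path, bbox_inches='tight', pad_inches=0)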
Example n. 48
    def train(self):
        """
        Trains GAN for niter epochs over stop_scale number of scales. Main training loop that calls train_scale.
        Controls transition between layers. After training is done for a certain layer, freezes weights of the trained scale, and arranges computational graph by changing requires_grad parameters.
        """
        scale_num = 0
        nfc_prev = 0

        ### Visualization
        # For visualization, let's just for now do maximal image dimensions
        self.opt.viswindows = []  # visdom windows updated with G(z_opt) during training
        # convert_image_np returns an (H, W, C) array
        self.max_height = convert_image_np(self.real).shape[0]
        self.max_width = convert_image_np(self.real).shape[1]

        ### Load the VGG network
        vgg = VGG()
        vgg.load_state_dict(
            torch.load(self.opt.pretrained_VGG, map_location=self.opt.device))
        self.vgg = vgg.to(self.opt.device)

        # Make sure this network is frozen
        for parameter in self.vgg.parameters():
            parameter.requires_grad_(False)

        # Training loop for each scale
        while scale_num < self.opt.stop_scale + 1:
            # Number of filters in D and G changes every 4th scale
            self.opt.nfc = min(
                self.opt.nfc_init * pow(2, math.floor(scale_num / 4)), 128)
            self.opt.min_nfc = min(
                self.opt.min_nfc_init * pow(2, math.floor(scale_num / 4)), 128)

            # Create output directory and save the downsampled image
            self.opt.out_ = generate_dir2save(self.opt)
            self.opt.outf = '%s/%d' % (self.opt.out_, scale_num)
            try:
                os.makedirs(self.opt.outf)
            except OSError:
                pass

            #plt.imsave('%s/in.png' %  (self.opt.out_), convert_image_np(self.real), vmin=0, vmax=1)
            #plt.imsave('%s/original.png' %  (self.opt.out_), convert_image_np(real_), vmin=0, vmax=1)
            plt.imsave('%s/real_scale.png' % (self.opt.outf),
                       convert_image_np(self.reals[scale_num]),
                       vmin=0,
                       vmax=1)

            # Initialize D and G of the current scale. D and G will be initialized with the previous scale's weights if the dimensions match.
            D_curr, G_curr = self.init_models()
            if (nfc_prev == self.opt.nfc):
                G_curr.load_state_dict(
                    torch.load('%s/%d/netG.pth' %
                               (self.opt.out_, scale_num - 1)))
                D_curr.load_state_dict(
                    torch.load('%s/%d/netD.pth' %
                               (self.opt.out_, scale_num - 1)))

            # Training of single scale
            z_curr, G_curr = self.train_scale(G_curr, D_curr, self.opt)

            # Stop gradient calculation for G and D of current scale
            G_curr = reset_grads(G_curr, False)
            G_curr.eval()
            D_curr = reset_grads(D_curr, False)
            D_curr.eval()

            # Store the necessary variables of this scale
            self.Gs.append(G_curr)
            self.Zs.append(z_curr)
            self.NoiseAmp.append(self.opt.noise_amp)

            # Save the networks and important parameters
            torch.save(self.Zs, '%s/Zs.pth' % (self.opt.out_))
            torch.save(self.Gs, '%s/Gs.pth' % (self.opt.out_))
            torch.save(self.reals, '%s/reals.pth' % (self.opt.out_))
            torch.save(self.styles, '%s/styles.pth' % (self.opt.out_))
            torch.save(self.NoiseAmp, '%s/NoiseAmp.pth' % (self.opt.out_))

            scale_num += 1
            nfc_prev = self.opt.nfc  # Update the number of filters
            del D_curr, G_curr

        # Generate with training variables
        SinGAN_generate(self.Gs, self.Zs, self.reals, self.styles,
                        self.NoiseAmp, self.opt)
Example n. 49
    output = np.transpose(output, (1, 2, 0))
    image = image[:, :, (2, 1, 0)]

    error_image = np.zeros((ind.shape[0], ind.shape[1], 3))
    diff_image = label - ind
    #TruePositive
    #error_image[] = [ 0, 48,255]
    #error_image[label ==1] = [255, 0 , 176]
    error_image[diff_image > 0] = [178, 255, 102]
    error_image[diff_image < 0] = [255, 51, 51]
    # print image.shape,rgb_gt.shape,rgb.shape
    #scipy.misc.toimage(rgb, cmin=0.0, cmax=255).save(IMAGE_FILE+'_segnet.png')

    plt.figure()
    plt.imsave("/home/ubuntu/images/" + str(i) + "o.png",
               image,
               vmin=0,
               vmax=1)
    #plt.figure()
    #plt.imshow(rgb_gt,vmin=0, vmax=1)
    plt.figure()
    plt.imsave("/home/ubuntu/images/" + str(i) + "h.png", error_image)
    #plt.imshow(rgb,vmin=0, vmax=1)
    plt.show()

    y_true = label.flatten()
    y_pred = ind.flatten()

    score = jaccard_similarity_score(y_true, y_pred)
    scores.append(score)
    print "image ", i, " score: ", score
Example n. 50
def extract_channel(image_name):
    # cv2.imread returns BGR, so channel 0 is the blue channel
    img = cv2.imread(image_name)
    plt.imsave(image_name, img[:, :, 0], cmap='gray')
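
If the intent is a luminance image rather than the blue channel, an explicit grayscale conversion avoids the BGR ambiguity; a minimal sketch:

import cv2
import matplotlib.pyplot as plt

def extract_gray(image_name):
    img = cv2.imread(image_name)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    plt.imsave(image_name, gray, cmap='gray')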
Example n. 51
def user_input(local_vars):
    save_name = local_vars['exp_config'].save_name
    _banner = """
    ========================
    === ELEKTRONN2 SHELL ===
    ========================
    >> %s <<
    Shortcuts:
    'help' (display this help text),
    'q' (leave menu),         'kill' (saving last params),
    'sethist <int>',          'setlr <float>',
    'setmom <float>',         'setwd <float>' (weight decay),
    'paramstats',             'gradstats',
    'actstats' (print statistics),
    'sf <nodename>' (show filters),
    'load <filename>' (param files only, no model files),
    'preview' (produce preview predictions),
    'ip' (start embedded IPython shell)

    For everything else enter a command in the command line\n""" % (
        save_name, )

    _ipython_banner = """    You are now in the embedded IPython shell.
    You still have full access to the local scope of the ELEKTRONN2 shell
    (e.g. 'model', 'batch'), but shortcuts like 'q' no longer work.

    To leave the IPython shell and switch back to the ELEKTRONN2 shell, run
    'exit()' or hit 'Ctrl-D'."""

    print(_banner)
    data = local_vars['data']
    batch = local_vars['batch']
    trainer = local_vars['self']
    model = trainer.model
    exp_config = local_vars['exp_config']
    local_vars.update(locals())  # put the above into scope of console
    console = code.InteractiveConsole(locals=local_vars)

    while True:
        try:
            try:
                inp = prompt_toolkit.prompt(
                    u"%s@neuromancer: " % user_name,
                    # needs to be an explicit ustring for py2-compat
                    history=ptk_hist,
                    completer=NumaCompleter(lambda: local_vars,
                                            lambda: {},
                                            words=shortcut_completions,
                                            words_metastring='(shortcut)'),
                    auto_suggest=AutoSuggestFromHistory())
            # Catch all exceptions in order to prevent catastrophes in case ptk suddenly breaks
            except Exception:
                inp = console.raw_input("%s@neuromancer: " % user_name)
            logger.debug('(Shell received command "{}")'.format(inp))
            if inp == 'q':
                break
            elif inp == 'kill':
                break
            elif inp == 'help':
                print(_banner)
            elif inp == 'ip':
                try:
                    import IPython
                    IPython.embed(header=_ipython_banner)
                except ImportError:
                    print('IPython is not available. You will need to install '
                          'it to use this function.')
            elif inp.startswith('sethist'):
                i = int(inp.split()[1])
                exp_config.history_freq = i
            elif inp.startswith('setlr'):
                i = float(inp.split()[1])
                model.lr = i
            elif inp.startswith('setmom'):
                i = float(inp.split()[1])
                model.mom = i
            elif inp.startswith('setwd'):
                i = float(inp.split()[1])
                model.wd = i
            elif inp.startswith('sf'):
                try:
                    name = inp.split()[1]
                    w = model[name].w.get_value()
                    m = plotting.embedfilters(w)
                    with FileLock('plotting'):
                        plt.imsave('filters_%s.png' % name, m, cmap='gray')
                except:  # try to print filter of first Layer with w
                    for name, node in model.nodes.items():
                        if hasattr(node, 'w'):
                            m = plotting.embedfilters(node.w.get_value())
                            with FileLock('plotting'):
                                plt.imsave('filters.png', m, cmap='gray')
                            break

            elif inp == 'preview':
                try:
                    trainer.preview_slice(**exp_config.preview_kwargs)
                except Exception:
                    traceback.print_exc()
                    print(
                        '\n\nPlease check if/how you have configured previews '
                        'in your config file.\n(Look for "preview_data_path" '
                        'and "preview_kwargs" variables.)')
            elif inp == 'paramstats':
                model.paramstats()
            elif inp == 'gradstats':
                model.gradstats(*batch)
            elif inp == 'actstats':
                model.actstats(*batch)
            elif inp == 'debugbatch':
                trainer.debug_getcnnbatch()
            elif inp.startswith('load'):
                file_path = inp.split()[1]
                params = utils.pickleload(file_path)
                model.set_param_values(params)
            else:
                console.push(inp)

            plt.pause(0.00001)
        except KeyboardInterrupt:
            print(
                'Enter "q" to leave the shell and continue training.\n'
                'Enter "kill" to kill the training, saving current parameters.'
            )
        except IndexError as err:
            if any([
                    inp.startswith(shortcut)
                    for shortcut in shortcut_completions
            ]):  # ignore trailing spaces
                print(
                    'IndexError. Probably you forgot to type a value after the shortcut "{}".'
                    .format(inp))
            else:
                raise err  # All other IndexErrors are already correctly handled by the console.
        except ValueError as err:
            if any([
                    inp.startswith(shortcut)
                    for shortcut in shortcut_completions
            ]):  # ignore trailing spaces
                print(
                    'ValueError. The "{}" shortcut received an unexpected argument.'
                    .format(inp))
            else:
                raise err  # All other ValueErrors are already correctly handled by the console.
        except Exception:
            traceback.print_exc()
            print(
                '\n\nUnhandled exception occurred. See above traceback for debug info.\n'
                'If you think this is a bug, please consider reporting it at '
                'https://github.com/ELEKTRONN/ELEKTRONN2/issues.')

    return inp
Example n. 52
            def model(data):
                return sparse_ssn_iter(data, i, n_iter)

        # throw every image into the net
        for data in dataloader:
            image, label, name = data
            height, width = image.shape[-2:]
            label_pred = inference(image, args.nspix, args.niter, model,
                                   args.fdim, args.color_scale, args.pos_scale)
            label = label.argmax(1).reshape(height, width).numpy()
            np.savetxt(os.path.join(args.dest, str(i), name[0] + '.csv'),
                       label_pred,
                       fmt='%d',
                       delimiter=',')
            asa = achievable_segmentation_accuracy(label_pred, label)
            usa = undersegmentation_error(label_pred, label)
            cptness = compactness(label_pred)
            BR = boundary_recall(label_pred, label)
            image = np.squeeze(image.numpy(), axis=0).transpose(1, 2, 0)
            image = lab2rgb(image)
            print(
                name[0],
                '\tprocessed,asa_{:.4f}_usa{:.4f}_co{:.4f}_BR_{:.4f}'.format(
                    asa, usa, cptness, BR))
            plt.imsave(
                os.path.join(
                    args.dest, str(i),
                    "asa_{:.4f}_usa_{:.4f}_co_{:.4f}_BR_{:.4f}_{}.jpg".format(
                        asa, usa, cptness, BR, name[0])),
                mark_boundaries(image, label_pred))
Example n. 53
# For now, superposing the three channels would give white.
# Set the first 100 rows of each matrix to a fixed colour (223, 109, 20)

R[:100, :] = 223
V[:100, :] = 109
B[:100, :] = 20

R[80:100, 80:100] = 0
V[80:100, 80:100] = 127
B[80:100, 80:100] = 0

#
# Stack the three matrices into an RGB image
carres = np.stack((R, V, B), axis=2)

# visualization with imshow
#plt.imshow(carres)

carres2 = carres[::-1, :]
carres3 = np.concatenate((carres, carres2))
carres4 = carres3[:, ::-1]
carres5 = np.concatenate((carres3, carres4), axis=1)
carres6 = np.concatenate((carres5, carres5))
carres7 = np.concatenate((carres6, carres6, carres6), axis=1)
plt.imshow(carres7)
plt.show()  # unnecessary in interactive mode

#
# Save with imsave
plt.imsave('carres.png', carres7)
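
The last three concatenations simply repeat the carres5 motif in a 2 x 3 grid; np.tile expresses this directly (the trailing 1 leaves the RGB axis untouched):

carres7 = np.tile(carres5, (2, 3, 1))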
Example n. 54
    def registration(self, grid_points_x=3, grid_points_y=3):
        """
        By default, generate 9 galvo voltage coordinates from (-5,-5) to (5,5),
        take the camera images of these points, return a function matrix that 
        transforms camera_coordinates into galvo_coordinates using polynomial transform. 

        Parameters
        ----------
        grid_points_x : int, optional
            Number of grid points along x. The default is 3.
        grid_points_y : int, optional
            Number of grid points along y. The default is 3.

        Returns
        -------
        transformation : ndarray
            Polynomial transformation mapping camera coordinates to galvo
            coordinates.

        """
        galvothread = DAQmission()
        readinchan = []

        x_coords = np.linspace(-10, 10, grid_points_x + 2)[1:-1]
        y_coords = np.linspace(-10, 10, grid_points_y + 2)[1:-1]

        xy_mesh = np.reshape(np.meshgrid(x_coords, y_coords), (2, -1),
                             order='F').transpose()

        galvo_coordinates = xy_mesh
        camera_coordinates = np.zeros((galvo_coordinates.shape))

        for i in range(galvo_coordinates.shape[0]):

            galvothread.sendSingleAnalog('galvosx', galvo_coordinates[i, 0])
            galvothread.sendSingleAnalog('galvosy', galvo_coordinates[i, 1])
            time.sleep(1)

            image = self.cam.SnapImage(0.06)
            plt.imsave(
                os.getcwd() +
                '/CoordinatesManager/Registration_Images/2P/image_' + str(i) +
                '.png', image)

            camera_coordinates[i, :] = readRegistrationImages.gaussian_fitting(
                image)

        print('Galvo Coordinate')
        print(galvo_coordinates)
        print('Camera coordinates')
        print(camera_coordinates)
        del galvothread
        self.cam.Exit()

        transformation_cam2galvo = CoordinateTransformations.polynomial2DFit(
            camera_coordinates, galvo_coordinates, order=1)

        transformation_galvo2cam = CoordinateTransformations.polynomial2DFit(
            galvo_coordinates, camera_coordinates, order=1)

        print('Transformation found for x:')
        print(transformation_cam2galvo[:, :, 0])
        print('Transformation found for y:')
        print(transformation_cam2galvo[:, :, 1])

        print('galvo2cam found for x:')
        print(transformation_galvo2cam[:, :, 0])
        print('galvo2cam found for y:')
        print(transformation_galvo2cam[:, :, 1])

        return transformation_cam2galvo
Example n. 55
def train():
    with tf.Session() as sess:
        with tf.name_scope('inputs'):
            x_data = tf.placeholder(tf.float32, [None, img_size], name="x_data")
            z_prior = tf.placeholder(tf.float32, [None, z_size], name="z_prior")
            noise = tf.placeholder(tf.float32, shape=[None, noise_size])
            keep_prob = tf.placeholder(tf.float32, name="keep_prob")

        x_generated = build_generator(z_prior, noise)
        y_data = build_discriminator(x_data, noise, keep_prob)
        y_generated = build_discriminator(x_generated, noise, keep_prob)

        with tf.name_scope('loss'):
            g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=y_generated, labels=tf.ones_like(y_generated)))
            d_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=y_generated, labels=tf.zeros_like(y_generated)))
            d_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=y_data, labels=tf.ones_like(y_data)))
            d_loss = tf.add(d_fake_loss, d_real_loss)
            tf.summary.scalar('g_loss', g_loss)
            tf.summary.scalar('d_fake_loss', d_fake_loss)
            tf.summary.scalar('d_real_loss', d_real_loss)
            tf.summary.scalar('d_loss', d_loss)
        with tf.name_scope('optimizer'):
            optimizer = tf.train.AdamOptimizer(0.001)
            d_trainer = optimizer.minimize(d_loss, var_list=d_params)
            g_trainer = optimizer.minimize(g_loss, var_list=g_params)

        saver = tf.train.Saver()
        # merge summary
        merged = tf.summary.merge_all()
        # choose dir
        writer = tf.summary.FileWriter('F:/tf_board/basic_gan_mnist', sess.graph)
        sess.run(tf.global_variables_initializer())
        for e in range(max_epoch):
            for batch_i in range(mnist.train.num_examples//batch_size):
                batch_data, y_data = mnist.train.next_batch(batch_size)
                # noise
                y_noise = np.random.uniform(-1.0, 1.0, size=(batch_size, noise_size))
                # generator noise
                batch_noise = np.random.uniform(-1.0, 1.0, size=(batch_size, z_size))

                # Run optimizers
                sess.run(d_trainer, feed_dict={x_data: batch_data, z_prior: batch_noise, noise: y_noise, keep_prob: 0.7})
                sess.run(g_trainer, feed_dict={z_prior: batch_noise, noise: y_noise, keep_prob: 0.7})

                if ((mnist.train.num_examples//batch_size) * e + batch_i) % (mnist.train.num_examples//batch_size) == 0:
                    train_loss_d = sess.run(d_loss, feed_dict={x_data: batch_data, z_prior: batch_noise, noise: y_noise,
                                                               keep_prob: 1.0})
                    fake_loss_d = sess.run(d_fake_loss, feed_dict={z_prior: batch_noise, noise: y_noise, keep_prob: 1.0})
                    real_loss_d = sess.run(d_real_loss, feed_dict={x_data: batch_data, noise: y_noise, keep_prob: 1.0})
                    # generator loss
                    train_loss_g = sess.run(g_loss, feed_dict={z_prior: batch_noise, noise: y_noise, keep_prob: 1.0})

                    merge_result = sess.run(merged, feed_dict={x_data: batch_data, z_prior: batch_noise, noise: y_noise,
                                                               keep_prob: 1.0})
                    writer.add_summary(merge_result, (mnist.train.num_examples//batch_size) * e + batch_i)

                    print("Epoch {}/{}...".format(e+1, max_epoch),
                          "Discriminator Loss: {:.4f}(Real: {:.4f} + Fake: {:.4f})...".format(
                              train_loss_d, real_loss_d, fake_loss_d), "Generator Loss: {:.4f}".format(train_loss_g))

            if e % 10 == 0:
                n_sample = 16
                sample_noise = np.random.uniform(-1.0, 1.0, size=(n_sample, z_size))
                y_sample = np.random.uniform(-1.0, 1.0, size=(n_sample, noise_size))
                check_imgs = sess.run(x_generated, feed_dict={z_prior: sample_noise, noise: y_sample}
                                      ).reshape((n_sample, 28, 28))[:2]

                plt.imsave('F:/tf_board/basic_gan_mnist/' + str(e) + '-' + str(0) + '.png', check_imgs[0],
                           cmap='Greys_r')
                plt.imsave('F:/tf_board/basic_gan_mnist/' + str(e) + '-' + str(1) + '.png', check_imgs[1],
                           cmap='Greys_r')

        print('train done')
        n_sample = 16
        sample_noise = np.random.uniform(-1.0, 1.0, size=(n_sample, z_size))
        y_sample = np.random.uniform(-1.0, 1.0, size=(n_sample, noise_size))
        check_imgs = sess.run(x_generated, feed_dict={z_prior: sample_noise, noise: y_sample}
                              ).reshape((n_sample, 28, 28))[:5]
        for i in range(5):
            plt.imsave('F:/tf_board/basic_gan_mnist/' + 'final-' + str(i) + '.png', check_imgs[i], cmap='Greys_r')

        # save sess
        saver.save(sess, '/root/basic_gan_mnist.ckpt')
Example n. 56
def main():

    training_timestamp = str(datetime.now()).replace(":", "-").replace(
        " ", "_").replace(".", "-")[:19]
    os.mkdir(os.path.join(OUTPUT_DIR, training_timestamp))
    file1 = open(os.path.join(OUTPUT_DIR, training_timestamp, "log.txt"), "w+")
    print(1)

    _, input_right, input_left = loadAllPaths(datasetp=os.path.join(
        TEMPDIR, "./FlyingThings3d/input_webp/frames_cleanpass_webp/TRAIN"),
                                              subfolder=['A', 'B', 'C'],
                                              lr=True)
    _, output_right, output_left = loadAllPaths(datasetp=os.path.join(
        TEMPDIR, "./FlyingThings3d/disparity/disparity/TRAIN"),
                                                subfolder=['A', 'B', 'C'],
                                                lr=True)
    TOTAL_IMAGES = input_right.shape[0]

    printing_left = input_left[TOTAL_IMAGES - 1]
    printing_right = input_right[TOTAL_IMAGES - 1]
    printing_output = output_left[TOTAL_IMAGES - 1]
    TOTAL_IMAGES = TOTAL_IMAGES - 1

    input_one_image = Image.open(printing_left).convert('RGB')
    in_arr = np.array(input_one_image)
    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right, :]
    printing_left = _norm(
        np.reshape(in_arr, (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 3)))
    printing_lefts = printing_left
    for i in range(1, 10):
        printing_lefts = np.concatenate((printing_lefts, printing_left),
                                        axis=0)

    input_one_image = Image.open(printing_right).convert('RGB')
    in_arr = np.array(input_one_image)
    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right, :]
    printing_right = _norm(
        np.reshape(in_arr, (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 3)))
    printing_rights = printing_right
    for i in range(1, 10):
        printing_rights = np.concatenate((printing_rights, printing_right),
                                         axis=0)

    input_one_image = load_pfm(printing_output)[0]
    in_arr = np.array(input_one_image)
    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right]
    input_one_image = np.reshape(in_arr, (IMAGE_SIZE_Y, IMAGE_SIZE_X, 1))
    printing_output = np.reshape(input_one_image,
                                 (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 1))
    printing_outputs = printing_output
    for i in range(1, 10):
        printing_outputs = np.concatenate((printing_outputs, printing_output),
                                          axis=0)

    validation_left = input_left[TOTAL_IMAGES - VALIDATION_SIZE:]
    validation_right = input_right[TOTAL_IMAGES - VALIDATION_SIZE:]
    validation_output = output_left[TOTAL_IMAGES - VALIDATION_SIZE:]
    TOTAL_IMAGES = TOTAL_IMAGES - VALIDATION_SIZE

    BATCH_NUM = TOTAL_IMAGES // BATCH_SIZE
    BATCH_NUM = 20
    VALIDATION_BATCHES = 2

    print("TOTAL IMAGES : {}, BATCH_NUM: {} ".format(TOTAL_IMAGES, BATCH_NUM))
    file1.write("TOTAL IMAGES : {}, BATCH_NUM: {} \n".format(
        TOTAL_IMAGES, BATCH_NUM))

    # with open(GT_DIR) as f:
    #    buf = cPickle.load(f, encoding="utf-8")
    #buf = load_pfm_files()

    image_left = tf.placeholder(tf.float32,
                                [None, IMAGE_SIZE_Y, IMAGE_SIZE_X, 3],
                                name='image_left')
    image_right = tf.placeholder(tf.float32,
                                 [None, IMAGE_SIZE_Y, IMAGE_SIZE_X, 3],
                                 name='image_right')
    ground_truth = tf.placeholder(tf.float32,
                                  [None, IMAGE_SIZE_Y, IMAGE_SIZE_X, 1],
                                  name='ground_truth')
    #image_left = tf.placeholder(tf.float32, [None, crop_down-crop_up, crop_right-crop_left, 3], name='image_left')
    #image_right = tf.placeholder(tf.float32, [None, crop_down-crop_up, crop_right-crop_left, 3], name='image_right')
    #ground_truth = tf.placeholder(tf.float32, [None,  crop_down-crop_up, crop_right-crop_left, 1], name='ground_truth')

    is_training = tf.placeholder(tf.bool, name="is_training")
    combine_image = tf.concat([image_left, image_right], 3)
    final_output, total_loss, loss1, loss2, loss3, loss4, loss5, loss6, pr6, pr5, pr4, pr3, pr2, pr1, loss6_inf = model(
        combine_image=combine_image, ground_truth=ground_truth)
    tf.summary.scalar('loss', total_loss)

    with tf.name_scope('train'):
        optimizer = tf.train.AdamOptimizer(
            learning_rate=LEARNING_RATE).minimize(total_loss)

    merged = tf.summary.merge_all()

    # important step
    sess = tf.Session()

    if int((tf.__version__).split('.')[1]) < 12 and int(
        (tf.__version__).split('.')[0]) < 1:  # tensorflow version < 0.12
        writer = tf.train.SummaryWriter(LOGS_DIR, sess.graph)
    else:  # tensorflow version >= 0.12
        writer = tf.summary.FileWriter(LOGS_DIR, sess.graph)

    #left_images = sorted(os.listdir(DATA_DIR+ '//left//'))
    #right_images = sorted(os.listdir(DATA_DIR + '//right//'))
    left_images = input_left
    right_images = input_right
    # output_images = sorted(os.listdir(DATA_DIR + '/output/'))

    # tf.initialize_all_variables() no long valid from
    # 2017-03-02 if using tensorflow >= 0.12

    if int((tf.__version__).split('.')[1]) < 12:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    # saver = tf.train.Saver(write_version = saver_pb2.SaverDef.V2)
    saver = tf.train.Saver()

    sess.run(init)
    # saver.restore(sess, MODEL_PATH)
    with open(
            RUNNING_LOGS_DIR + "/log" + date.isoformat(date.today()) +
            str(time.time()) + ".txt", "w+") as file:
        #   file.write('BATCH_SIZE ' + str(BATCH_SIZE) + '\n'
        #+ ' EPOCH ' + str(EPOCH) + '\n'
        # + ' image_num ' + str(image_num) + '\n'
        #+ ' LEARNING_RATE ' + str(LEARNING_RATE) + '\n')

        for round in range(EPOCH):
            #for i in range(0 , image_num - BATCH_SIZE, ROUND_STEP):
            for i in range(BATCH_NUM):

                trackTime(time.time())
                for j in range(BATCH_SIZE):
                    if (i == BATCH_NUM - 1
                            and i * BATCH_SIZE + j >= TOTAL_IMAGES):
                        break

                    # input data
                    #full_pic_name = DATA_DIR+ '/left/' + left_images[TRAIN_SERIES[i + j]]
                    full_pic_name = left_images[i * BATCH_SIZE + j]
                    input_one_image = Image.open(full_pic_name).convert('RGB')
                    in_arr = np.array(input_one_image)
                    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right, :]
                    input_one_image = _norm(
                        np.reshape(in_arr, (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 3)))
                    if (j == 0):
                        input_left_images = input_one_image
                    else:
                        input_left_images = np.concatenate(
                            (input_left_images, input_one_image), axis=0)

                    #full_pic_name = DATA_DIR + '/right/' + right_images[TRAIN_SERIES[i + j]]
                    full_pic_name = right_images[i * BATCH_SIZE + j]
                    input_one_image = Image.open(full_pic_name).convert('RGB')
                    in_arr = np.array(input_one_image)
                    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right, :]
                    input_one_image = _norm(
                        np.reshape(in_arr, (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 3)))
                    if (j == 0):
                        input_right_images = input_one_image
                    else:
                        input_right_images = np.concatenate(
                            (input_right_images, input_one_image), axis=0)

                    input_one_image = load_pfm(output_left[i * BATCH_SIZE +
                                                           j])[0]
                    in_arr = np.array(input_one_image)
                    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right]
                    input_one_image = np.reshape(
                        in_arr, (IMAGE_SIZE_Y, IMAGE_SIZE_X, 1))
                    input_one_image = np.reshape(
                        input_one_image, (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 1))

                    if (j == 0):
                        input_gts = input_one_image
                    else:
                        input_gts = np.concatenate(
                            (input_gts, input_one_image), axis=0)

                result, optimizer_res, total_loss_res, loss1_res, loss2_res, loss3_res, loss4_res, loss5_res, loss6_res, pr6_res, pr5_res, pr4_res, pr3_res, pr2_res, pr1_res, loss6_inf_res = sess.run(
                    [
                        merged, optimizer, total_loss, loss1, loss2, loss3,
                        loss4, loss5, loss6, pr6, pr5, pr4, pr3, pr2, pr1,
                        loss6_inf
                    ],
                    feed_dict={
                        image_left: input_left_images,
                        image_right: input_right_images,
                        ground_truth: input_gts
                    })

                print("training: epoch: {}, batch num: {}, total loss: {}".
                      format(round, i, total_loss_res))
                print("training: losses: total={}, {},{},{},{},{},{}".format(
                    total_loss_res, loss1_res, loss2_res, loss3_res, loss4_res,
                    loss5_res, loss6_res))
                file1.write(
                    "training: epoch: {}, batch num: {}, total loss: {} \n".
                    format(round, i, total_loss_res))
                file1.write(
                    "training: losses: total={}, {},{},{},{},{},{} \n".format(
                        total_loss_res, loss1_res, loss2_res, loss3_res,
                        loss4_res, loss5_res, loss6_res))
                #print("losses:" + str(total_loss_res) + " " + str(loss1_res) + " " + str(loss2_res) + " " + str(loss3_res) + " " + str(loss4_res) + " " + str(loss5_res) + " " + str(loss6_res))
                #print("round: "str(round) + " total loss: " + str(total_loss_res))
                #print("losses:" + str(total_loss_res) + " " + str(loss1_res) + " " + str(loss2_res) + " " + str(loss3_res) + " " + str(loss4_res) + " " + str(loss5_res) + " " + str(loss6_res))

                if round % SAVE_PER_EPOCH == SAVE_PER_EPOCH - 1 and i % SAVE_PER_BATCH == SAVE_PER_BATCH - 1:
                    #print("ajde bre sacuvaj")
                    #result, total_loss_res, loss1_res, loss2_res, loss3_res, loss4_res, loss5_res, loss6_res, pr6_res, pr5_res, pr4_res, pr3_res, pr2_res, pr1_res, loss6_inf_res =sess.run([merged, total_loss, loss1, loss2, loss3, loss4, loss5, loss6, pr6, pr5, pr4, pr3, pr2, pr1, loss6_inf],feed_dict={image_left:printing_left, image_right:printing_right, ground_truth:printing_output})
                    result, total_loss_res, loss1_res, loss2_res, loss3_res, loss4_res, loss5_res, loss6_res, pr6_res, pr5_res, pr4_res, pr3_res, pr2_res, pr1_res, loss6_inf_res = sess.run(
                        [
                            merged, total_loss, loss1, loss2, loss3, loss4,
                            loss5, loss6, pr6, pr5, pr4, pr3, pr2, pr1,
                            loss6_inf
                        ],
                        feed_dict={
                            image_left: printing_lefts,
                            image_right: printing_rights,
                            ground_truth: printing_outputs
                        })

                    plt.imsave(os.path.join(
                        OUTPUT_DIR, training_timestamp,
                        "epoch{}_batch{}.png".format(round, i)),
                               pr1_res[0, :, :, 0],
                               cmap="gray")


            saver.save(
                sess,
                os.path.join(OUTPUT_DIR, training_timestamp,
                             "model_epoch{}.ckpt".format(round)))

            for i in range(VALIDATION_BATCHES):
                trackTime(time.time())
                for j in range(BATCH_SIZE):
                    if (i == VALIDATION_BATCHES - 1
                            and i * BATCH_SIZE + j >= VALIDATION_SIZE):
                        break

                    # input data
                    full_pic_name = validation_left[i * BATCH_SIZE + j]
                    input_one_image = Image.open(full_pic_name).convert('RGB')
                    in_arr = np.array(input_one_image)
                    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right, :]
                    input_one_image = _norm(
                        np.reshape(in_arr, (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 3)))
                    if (j == 0):
                        input_left_images = input_one_image
                    else:
                        input_left_images = np.concatenate(
                            (input_left_images, input_one_image), axis=0)

                    full_pic_name = validation_right[i * BATCH_SIZE + j]
                    input_one_image = Image.open(full_pic_name).convert('RGB')
                    in_arr = np.array(input_one_image)
                    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right, :]
                    input_one_image = _norm(
                        np.reshape(in_arr, (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 3)))
                    if (j == 0):
                        input_right_images = input_one_image
                    else:
                        input_right_images = np.concatenate(
                            (input_right_images, input_one_image), axis=0)

                    input_one_image = load_pfm(
                        validation_output[i * BATCH_SIZE + j])[0]
                    in_arr = np.array(input_one_image)
                    in_arr = in_arr[crop_up:crop_down, crop_left:crop_right]
                    input_one_image = np.reshape(
                        in_arr, (IMAGE_SIZE_Y, IMAGE_SIZE_X, 1))
                    input_one_image = np.reshape(
                        input_one_image, (1, IMAGE_SIZE_Y, IMAGE_SIZE_X, 1))

                    if (j == 0):
                        input_gts = input_one_image
                    else:
                        input_gts = np.concatenate(
                            (input_gts, input_one_image), axis=0)

                result, total_loss_res, loss1_res, loss2_res, loss3_res, loss4_res, loss5_res, loss6_res, pr6_res, pr5_res, pr4_res, pr3_res, pr2_res, pr1_res, loss6_inf_res = sess.run(
                    [
                        merged, total_loss, loss1, loss2, loss3, loss4, loss5,
                        loss6, pr6, pr5, pr4, pr3, pr2, pr1, loss6_inf
                    ],
                    feed_dict={
                        image_left: input_left_images,
                        image_right: input_right_images,
                        ground_truth: input_gts
                    })

                print("validation: epoch: {}, batch num: {}, total loss: {}".
                      format(round, i, total_loss_res))
                print("validation: losses: total={}, {},{},{},{},{},{}".format(
                    total_loss_res, loss1_res, loss2_res, loss3_res, loss4_res,
                    loss5_res, loss6_res))
                file1.write(
                    "validation: epoch: {}, batch num: {}, total loss: {} \n".
                    format(round, i, total_loss_res))
                file1.write(
                    "validation: losses: total={}, {},{},{},{},{},{} \n".
                    format(total_loss_res, loss1_res, loss2_res, loss3_res,
                           loss4_res, loss5_res, loss6_res))
Example n. 57
0
    return aia_sub


if __name__ == '__main__':

    # get the flare data
    data = pd.read_csv('hessi_goes_flare_data.csv')
    data['Peak_time'] = pd.to_datetime(data['Peak_time'],
                                       format='%Y-%m-%d %H:%M:%S')
    data = data[data['Peak_time'] >= dt.datetime.strptime(
        '2010-06-06 02:52:58',
        '%Y-%m-%d %H:%M:%S')]  # SDO started observing after HESSI and GOES
    print(data.head())
    print('number of valid flares = {}'.format(len(data.index)))

    # save the images to their respective directories
    for idx, row in data.head().iterrows():  # head() limits this to the first 5 flares
        print('getting image {}/{}'.format(idx, len(data.index)))
        img = get_image(row['Peak_time'], row['X_pos'], row['Y_pos'])
        img.plot()
        img_name = str(row['Peak_time']).replace(' ', '_').replace(
            ':', '_').replace('-', '_') + '.png'
        if row['Class'] == 'B':
            img_name = os.path.join('B_class', img_name)
        elif row['Class'] == 'C':
            img_name = os.path.join('C_class', img_name)
        plt.imsave(fname=os.path.join('data', img_name),
                   arr=img.data,
                   cmap=plt.cm.gray)
        plt.close()  # close the figure opened by img.plot()
        print('image saved\n')
    print('Done')
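
By default plt.imsave stretches the array between its own minimum and maximum before applying the colormap, so a handful of very bright flare pixels can compress the rest of the grayscale range. One option is an explicit percentile clip via the vmin/vmax parameters; the 1st/99th percentile cut below is an illustrative assumption, not part of the original:

import numpy as np
import matplotlib.pyplot as plt

def save_clipped(fname, arr):
    # map the 1st-99th percentile of the data onto the full gray range
    lo, hi = np.percentile(arr, [1, 99])
    plt.imsave(fname, arr, cmap=plt.cm.gray, vmin=lo, vmax=hi)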
Example n. 58
0
            count = 0
            img = imgs[ix]
            tile_output = np.zeros((img.shape[0], img.shape[1], 4))
            zeros = np.zeros((img.shape[0], img.shape[1], 4))

            for i in xrange(0, img.shape[0], inner_size):
                for j in xrange(0, img.shape[1], inner_size):
                    tile_output[i:i + inner_size, j:j + inner_size] = y[count]
                    count += 1

            # one-hot encode: set the argmax channel of every pixel to 1
            for i in range(img.shape[0]):
                for j in range(img.shape[1]):
                    output = tile_output[i, j]
                    zeros[i, j, np.argmax(output)] = 1

            zeros[:, :, 3] = 1  # force a fully opaque alpha channel

            plt.imsave('plots/results/%s.png' % (file_names[ix]), zeros)

print "total processing done in: " + str(
    (datetime.now() - tick).total_seconds())
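
The per-pixel double loop in this example can also be written as a vectorized one-hot encoding; a sketch, assuming tile_output has shape (H, W, 4) as above:

import numpy as np

def one_hot_mask(tile_output):
    # one-hot encode the argmax channel of every pixel
    labels = tile_output.argmax(axis=-1)          # (H, W) class indices
    mask = np.eye(tile_output.shape[-1])[labels]  # (H, W, 4) one-hot
    mask[:, :, 3] = 1  # keep the alpha channel fully opaque, as above
    return mask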
Example n. 59
0
 def btn_save_Pressed(self):
     fileName, _ = QtWidgets.QFileDialog.getSaveFileName(None, 'Guardar Espectro', "", "Bitmaps (*.bmp)")
     if fileName:  # an empty string means the user cancelled the dialog
         plt.imsave(fileName, self.out, cmap="gray")
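
A detail worth guarding: the name returned by getSaveFileName may lack the filter's extension on some platforms. A small helper, offered as a sketch (the name ensure_extension is hypothetical):

def ensure_extension(path, ext='.bmp'):
    # append the expected extension if the dialog did not supply one
    return path if path.lower().endswith(ext) else path + ext

Usage inside the slot above: plt.imsave(ensure_extension(fileName), self.out, cmap="gray").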
Example n. 60
0
 def save_img(self, grid, filename):
     if not os.path.exists('./gif'):
         os.mkdir('./gif')

     # imsave writes the array directly, so no figure is needed;
     # grid is a CHW tensor, converted to HWC for saving
     plt.imsave(os.path.join('./gif', filename),
                np.transpose(grid, (1, 2, 0)).numpy())
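
The frames this method accumulates under ./gif can then be stitched into an animation. A sketch using imageio, which is an assumption here (the original does not show how the gif is assembled):

import os
import imageio

frames = [imageio.imread(os.path.join('./gif', name))
          for name in sorted(os.listdir('./gif'))]
imageio.mimsave('animation.gif', frames, duration=0.1)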