Example #1
def minverse(M):
	#{{{
	# convert M into a one-d array for easy referencing
	mm = np.reshape(M,9)
	
	x1 = mm[1]
	y1 = mm[2]
	x2 = mm[4]
	y2 = mm[5]
	x3 = mm[7]
	y3 = mm[8]
	
	twoA = (x2*y3 - x3*y2) - (x1*y3-x3*y1) + (x1*y2 - x2*y1)
	if abs(twoA) < 1.0E-6:
		print('zero or near-zero area triangle used for interpolation')
	
	minv = np.zeros(9)
	
	minv[0] = (1.0/twoA) * (y2-y3)
	minv[1] = (1.0/twoA) * (y3-y1)
	minv[2]= (1.0/twoA) * (y1-y2)
	
	minv[3] = (1.0/twoA) * (x3-x2)
	minv[4] = (1.0/twoA) * (x1-x3)
	minv[5] = (1.0/twoA) * (x2-x1)
	
	minv[6] = (1.0/twoA) * (x2*y3-x3*y2)
	minv[7] = (1.0/twoA) * (x3*y1-x1*y3)
	minv[8] = (1.0/twoA) * (x1*y2-x2*y1)

	# convert minv to a two-d array
	minv_matrix = np.reshape(minv,(3,3))
	#}}}
	return minv_matrix
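A quick sanity check (hypothetical, not from the original project): for the usual linear-triangle interpolation matrix [[1, x, y], ...], the rows of minverse(M) come back cyclically shifted relative to np.linalg.inv(M), with the constant-term row last:

import numpy as np

pts = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 3.0]])   # triangle vertices
M = np.column_stack([np.ones(3), pts])                  # rows of the form [1, x, y]
# reorder the rows (constant, x-coefficient, y-coefficient) before comparing
assert np.allclose(minverse(M)[[2, 0, 1]], np.linalg.inv(M))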
Example #2
def combine_stains(stains, conv_matrix):
    """Stain to RGB color space conversion.

    Parameters
    ----------
    stains : array_like
        The image in stain color space, in a 3-D array of shape
        ``(.., .., 3)``.
    conv_matrix : ndarray
        The stain separation matrix as described by G. Landini [1]_.

    Returns
    -------
    out : ndarray
        The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.

    Raises
    ------
    ValueError
        If `stains` is not a 3-D array of shape ``(.., .., 3)``.

    Notes
    -----
    Stain combination matrices available in the ``color`` module and their
    respective colorspace:

    * ``rgb_from_hed``: Hematoxylin + Eosin + DAB
    * ``rgb_from_hdx``: Hematoxylin + DAB
    * ``rgb_from_fgx``: Feulgen + Light Green
    * ``rgb_from_bex``: Giemsa stain : Methyl Blue + Eosin
    * ``rgb_from_rbd``: FastRed + FastBlue +  DAB
    * ``rgb_from_gdx``: Methyl Green + DAB
    * ``rgb_from_hax``: Hematoxylin + AEC
    * ``rgb_from_bro``: Blue matrix Aniline Blue + Red matrix Azocarmine\
                        + Orange matrix Orange-G
    * ``rgb_from_bpx``: Methyl Blue + Ponceau Fuchsin
    * ``rgb_from_ahx``: Alcian Blue + Hematoxylin
    * ``rgb_from_hpx``: Hematoxylin + PAS

    References
    ----------
    .. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html


    Examples
    --------
    >>> from skimage import data
    >>> from skimage.color import (separate_stains, combine_stains,
    ...                            hdx_from_rgb, rgb_from_hdx)
    >>> ihc = data.immunohistochemistry()
    >>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
    >>> ihc_rgb = combine_stains(ihc_hdx, rgb_from_hdx)
    """
    from ..exposure import rescale_intensity

    stains = dtype.img_as_float(stains)
    logrgb2 = np.dot(-np.reshape(stains, (-1, 3)), conv_matrix)
    rgb2 = np.exp(logrgb2)
    return rescale_intensity(np.reshape(rgb2 - 2, stains.shape),
                             in_range=(-1, 1))
Example #3
def compute_results(predict, X, Y):
    for x, y in zip(X, Y):
        depth, n_channels, height, width = x.shape
        start = time.time()
        model_output = predict(x[np.newaxis])
        end = time.time()
        n_classes = y.shape[-1]
        model_output = model_output.as_numpy_array() if isinstance(model_output, gnumpy.garray) else model_output
        seg = np.reshape(
            model_output,
            (height, width, depth, n_classes)
        )
        gt = np.reshape(y, (height, width, depth, n_classes))
        seg = discrete(seg, n_classes)
        dice_list = dice(seg, gt)
        seg = seg.argmax(axis=3)
        gt = gt.argmax(axis=3)
        dice_all = dice_alt(seg, gt)
        dice_list = [dice_all] + dice_list
        print('\tdice: ', dice_list)
        print('\ttime taken: ', (end - start))
        print('-' * 20)

        image = x[:, 0, :, :]
        image = np.transpose(image, (1, 2, 0))
        yield (image, seg, gt, dice_list)
Example #4
def loadSamplePlanktons(numSamples=100, rotate=False, dim=28):
    if dim == 28:
        if not rotate:
            from pylearn2_plankton.planktonDataPylearn2 import PlanktonData
            ds = PlanktonData(which_set='train')
            designMatrix = ds.get_data()[0] # index 1 is the label
            print "Shape of Design Matrix", np.shape(designMatrix)
            designMatrix = np.reshape(designMatrix, 
                                      (ds.get_num_examples(), 1, MAX_PIXEL, MAX_PIXEL) )
            if numSamples != 'All':
                return np.array(designMatrix[:numSamples,...], dtype=np.float32)
            else:
                return np.array(designMatrix, dtype=np.float32)
        else:
            print "Loading Rotated Data"
            designMatrix = np.load(open(os.path.join(os.environ['PYLEARN2_DATA_PATH'] ,'planktonTrainRotatedX.p'), 'r'))
            return np.reshape(np.array(designMatrix[:numSamples,...], dtype=np.float32),
                              (numSamples,1,MAX_PIXEL,MAX_PIXEL))
    elif dim == 40:
        from pylearn2_plankton.planktonData40pixels import PlanktonData
        ds = PlanktonData(which_set='train')
        designMatrix = ds.get_data()[0] # index 1 is the label
        print "Shape of Design Matrix", np.shape(designMatrix)
        designMatrix = np.reshape(designMatrix, 
                                  (ds.get_num_examples(), 1, 40, 40) )
        if numSamples != 'All':
            return np.array(designMatrix[:numSamples,...], dtype=np.float32)
        else:
            return np.array(designMatrix, dtype=np.float32)
Example #5
 def Plot2d(self, fignumStart):
     # Plot xTrue
     plt.figure(fignumStart)
     plt.imshow(self.Theta, interpolation='none')
     plt.colorbar()    
     plt.title('xTrue')
     
     # Plot the reconstructed result
     plt.figure()
     plt.imshow(np.reshape(self.ThetaEstimated, self.Theta.shape), interpolation='none')
     plt.colorbar()
     plt.title('Reconstructed x')
     
     # Plot yErr and its histogram
     yErr = self.NoisyObs - np.reshape(self._reconstructor.hx, self.NoisyObs.shape)
     plt.figure()
     plt.imshow(yErr, interpolation='none')
     plt.colorbar()
     plt.title('yErr')
             
     plt.figure()
     plt.hist(yErr.flat, 20)
     plt.title('Histogram of yErr')       
 
     plt.show()
Example #6
    def test_n_dimensional_log_encoding_CanonLog(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.canon_log.\
log_encoding_CanonLog` definition n-dimensional arrays support.
        """

        L = 0.18
        V = 0.312012855550395
        np.testing.assert_almost_equal(
            log_encoding_CanonLog(L),
            V,
            decimal=7)

        L = np.tile(L, 6)
        V = np.tile(V, 6)
        np.testing.assert_almost_equal(
            log_encoding_CanonLog(L),
            V,
            decimal=7)

        L = np.reshape(L, (2, 3))
        V = np.reshape(V, (2, 3))
        np.testing.assert_almost_equal(
            log_encoding_CanonLog(L),
            V,
            decimal=7)

        L = np.reshape(L, (2, 3, 1))
        V = np.reshape(V, (2, 3, 1))
        np.testing.assert_almost_equal(
            log_encoding_CanonLog(L),
            V,
            decimal=7)
Example #7
def discrete(seg, n_classes):
    original_shape = seg.shape
    discrete_seg = seg.argmax(axis=3)
    discrete_seg = np.reshape(discrete_seg, (-1,))
    discrete_seg = np.reshape(one_hot(discrete_seg, n_classes), original_shape)

    return discrete_seg
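one_hot is not defined in this excerpt; a minimal sketch of what it presumably does (map a 1-D array of class indices to one-hot rows) is:

import numpy as np

def one_hot(indices, n_classes):
    # presumed helper: (N,) integer indices -> (N, n_classes) one-hot rows
    out = np.zeros((len(indices), n_classes))
    out[np.arange(len(indices)), indices] = 1
    return out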
Example #8
 def step(self, u, i):
     print(u, i)
     integrate = self.wfs.gd.integrate
     w_cG = self.w_ucG[u]
     y_cG = self.y_ucG[u]
     wold_cG = self.wold_ucG[u]
     z_cG = self.z_cG
     
     self.solver(w_cG, self.z_cG, u)
     I_c = np.reshape(integrate(np.conjugate(z_cG) * w_cG)**-0.5,
                       (self.dim, 1, 1, 1))
     z_cG *= I_c
     w_cG *= I_c
     
     if i != 0:
          b_c = 1.0 / I_c
     else:
         b_c = np.reshape(np.zeros(self.dim), (self.dim, 1, 1, 1))
 
     self.hamiltonian.apply(z_cG, y_cG, self.wfs, self.wfs.kpt_u[u])
     a_c = np.reshape(integrate(np.conjugate(z_cG) * y_cG), (self.dim, 1, 1, 1))
     wnew_cG = (y_cG - a_c * w_cG - b_c * wold_cG)
     wold_cG[:] = w_cG
     w_cG[:] = wnew_cG
     self.a_uci[u, :, i] = a_c[:, 0, 0, 0]
     self.b_uci[u, :, i] = b_c[:, 0, 0, 0]
Example #9
 def setParams(self, params):
     #Set W1 and W2 using a single parameter vector.
     W1_start = 0
     W1_end = self.hiddenLayerSize * self.inputLayerSize
     self.W1 = np.reshape(params[W1_start:W1_end], (self.inputLayerSize , self.hiddenLayerSize))
     W2_end = W1_end + self.hiddenLayerSize*self.outputLayerSize
     self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenLayerSize, self.outputLayerSize))
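A round-trip usage sketch (assuming a hypothetical net instance exposing inputLayerSize, hiddenLayerSize and outputLayerSize) shows the layout of the flat vector: all of W1 first, then W2, both in row-major order:

import numpy as np

# hypothetical usage: flatten both weight matrices, then restore them
params = np.concatenate((net.W1.ravel(), net.W2.ravel()))
net.setParams(params)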
Example #10
def list2vec(li):
    try:
        li = np.array(li)
        return np.reshape(li, (len(li), 1))
    except TypeError:
        li = np.array([li])
        return np.reshape(li, (len(li), 1))
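In use, list2vec normalizes both sequences and bare scalars to column vectors; the TypeError branch fires because len() fails on the 0-d array produced from a scalar:

import numpy as np

print(list2vec([1, 2, 3]).shape)   # (3, 1)
print(list2vec(4.2).shape)         # (1, 1), via the TypeError branch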
Example #11
def FFT_Correlation(x,y):
    """
    FFT-based correlation, much faster than numpy autocorr.
    x and y are row-based vectors of arbitrary lengths.
    This is a vectorized implementation requiring O(N*log(N)) flops.
    """

    lengthx = x.shape[0]
    lengthy = y.shape[0]

    x = np.reshape(x,(1,lengthx))
    y = np.reshape(y,(1,lengthy))

    length = min(lengthx, lengthy)

    # truncate the columns (the samples), not the rows
    x = x[:, :length]
    y = y[:, :length]

    fftx = np.fft.fft(x, 2 * length - 1, axis=1)  # pad with zeros
    ffty = np.fft.fft(y, 2 * length - 1, axis=1)

    corr_xy = np.fft.ifft(fftx * np.conjugate(ffty), axis=1)
    corr_xy = np.real(np.fft.fftshift(corr_xy, axes=1))  # should be no imaginary part

    corr_yx = np.fft.ifft(ffty * np.conjugate(fftx), axis=1)
    corr_yx = np.real(np.fft.fftshift(corr_yx, axes=1))

    # average the two one-sided correlations, normalizing by the overlap count
    corr = 0.5 * (corr_xy[:, length:] + corr_yx[:, length:]) / np.arange(length - 1, 0, -1)
    return np.reshape(corr, corr.shape[1])
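A minimal usage sketch (assuming the corrected version above): autocorrelating a signal of length N returns one averaged value per nonzero lag, i.e. N - 1 samples:

import numpy as np

rng = np.random.default_rng(0)
sig = rng.standard_normal(256)
print(FFT_Correlation(sig, sig).shape)   # (255,)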
Example #12
def timeit_plot3D(data, xlabel='xlabel', ylabel='ylabel', **kwargs): 
    """3D plot of timeit data, one chart per function. 
    """
    dataT = {}
    figs = []
    series = kwargs.get('series', (0,1))
    cmap = kwargs.get('cmap', cm.coolwarm)
    for k, v in data.items():
        dataT[k] = list(zip(*v))  # list() so it can be indexed under Python 3
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
        X, Y, Z = dataT[k][series[0]], dataT[k][series[1]], dataT[k][-1]
        wide, tall = (max(X)-min(X)+1), (max(Y)-min(Y)+1)
        intervalX = max(X) - min(heapq.nlargest(2,set(X)))
        intervalY = max(Y) - min(heapq.nlargest(2,set(Y)))
        wide, tall = 1 + wide // intervalX, 1 + tall // intervalY  # integer dims for reshape
        X = np.reshape(X, [wide, tall])
        Y = np.reshape(Y, [wide, tall])
        # TODO: BUG: fix so that Z transposes with x & y reversed
        Z = np.reshape(Z, [wide, tall])
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cmap, linewidth=0, antialiased=False)
        ax.zaxis.set_major_locator(LinearLocator(10))
        ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.set_title(substitute_titles(k,series))
        fig.colorbar(surf, shrink=0.5, aspect=5)
        figs.append(fig)
    return figs
Example #13
    def update_state(self, time, dtime, temp, dtemp, energy, rho, F0, F,
        stran, d, elec_field, stress, statev, **kwargs):
        """Compute updated stress given strain increment"""
        log = logging.getLogger('matmodlab.mmd.simulator')

        # defaults
        cmname = '{0:8s}'.format('umat')
        dfgrd0 = reshape(F0, (3, 3), order='F')
        dfgrd1 = reshape(F, (3, 3), order='F')
        dstran = d * dtime
        ddsdde = zeros((6, 6), order='F')
        ddsddt = zeros(6, order='F')
        drplde = zeros(6, order='F')
        predef = zeros(1, order='F')
        dpred = zeros(1, order='F')
        coords = zeros(3, order='F')
        drot = eye(3)
        ndi = nshr = 3
        spd = scd = rpl = drpldt = pnewdt = 0.
        noel = npt = layer = kspt = kinc = 1
        sse = mmlabpack.ddot(stress, stran) / rho
        celent = 1.
        kstep = 1
        time = array([time, time])

        self.lib.umat(stress, statev, ddsdde,
            sse, spd, scd, rpl, ddsddt, drplde, drpldt, stran, dstran,
            time, dtime, temp, dtemp, predef, dpred, cmname, ndi, nshr,
            self.num_sdv, self.params, coords, drot, pnewdt, celent, dfgrd0,
            dfgrd1, noel, npt, layer, kspt, kstep, kinc, log.info, log.warn,
            StopFortran)

        return stress, statev, ddsdde
Example #14
    def build(self):
        import re

        with open(self.filepath, 'r') as f:
            # Currently these are unused, but they are in the format
            # Could possibly store as metadata?
            # Assume first result for regexes
            re_rows = re.compile(u'([0-9]+) rows')
            n_rows = int(re_rows.findall(f.readline())[0])
            re_cols = re.compile(u'([0-9]+) columns')
            n_cols = int(re_cols.findall(f.readline())[0])

        # This also loads the mask
        #   >>> image_data[:, 0]
        image_data = np.loadtxt(self.filepath, skiprows=3, unpack=True)

        # Replace the lowest value with nan so that we can render properly
        data_view = image_data[:, 1:]
        corrupt_value = np.min(data_view)
        data_view[np.any(np.isclose(data_view, corrupt_value), axis=1)] = np.nan

        return MaskedImage(
            np.rollaxis(np.reshape(data_view, [n_rows, n_cols, 3]), -1),
            np.reshape(image_data[:, 0], [n_rows, n_cols]).astype(bool),
            copy=False)
Example #15
    def dot(self, coords_a, coords_b, frac_coords=False):
        """
        Compute the scalar product of vector(s).

        Args:
            coords_a, coords_b: Array-like objects with the coordinates.
            frac_coords (bool): Boolean stating whether the vector
                corresponds to fractional or cartesian coordinates.

        Returns:
            one-dimensional `numpy` array.
        """
        coords_a, coords_b = np.reshape(coords_a, (-1,3)), \
                             np.reshape(coords_b, (-1,3))

        if len(coords_a) != len(coords_b):
            raise ValueError("coords_a and coords_b must contain the same number of vectors")

        if np.iscomplexobj(coords_a) or np.iscomplexobj(coords_b):
            raise TypeError("Complex array!")

        if not frac_coords:
            cart_a, cart_b = coords_a, coords_b
        else:
            cart_a = np.reshape([self.get_cartesian_coords(vec)
                                 for vec in coords_a], (-1,3))
            cart_b = np.reshape([self.get_cartesian_coords(vec)
                                 for vec in coords_b], (-1,3))

        return np.array([np.dot(a,b) for a,b in zip(cart_a, cart_b)])
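For large batches the trailing Python-level loop can be replaced by a single vectorized contraction; a sketch of the equivalent row-wise dot product:

import numpy as np

# equivalent to [np.dot(a, b) for a, b in zip(cart_a, cart_b)]
out = np.einsum('ij,ij->i', cart_a, cart_b)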
Example #16
    def plot_checkpoint(self, b):
        orig_filename = "/data/batch_check_"+str(b)+"_original.png"

        image_A = self.X_test_A[5]
        image_A = np.reshape(image_A, [self.W_A_test,self.H_A_test,self.C_A_test])
        print("Image_A shape: " +str(np.shape(image_A)))
        fake_B = self.generator_A_to_B.Generator.predict(image_A.reshape(1, self.W_A, self.H_A, self.C_A ))
        fake_B = np.reshape(fake_B, [self.W_A_test,self.H_A_test,self.C_A_test])
        print("fake_B shape: " +str(np.shape(fake_B)))
        reconstructed_A = self.generator_B_to_A.Generator.predict(fake_B.reshape(1, self.W_A, self.H_A, self.C_A ))
        reconstructed_A = np.reshape(reconstructed_A, [self.W_A_test,self.H_A_test,self.C_A_test])
        print("reconstructed_A shape: " +str(np.shape(reconstructed_A)))
        # from IPython import embed; embed()

        checkpoint_images = np.array([image_A, fake_B, reconstructed_A])

        # Rescale images 0 - 1
        checkpoint_images = 0.5 * checkpoint_images + 0.5

        titles = ['Original', 'Translated', 'Reconstructed']
        fig, axes = plt.subplots(1, 3)
        for i in range(3):
            image = checkpoint_images[i]
            image = np.reshape(image, [self.H_A_test,self.W_A_test,self.C_A_test])
            axes[i].imshow(image)
            axes[i].set_title(titles[i])
            axes[i].axis('off')
        fig.savefig("/data/batch_check_"+str(b)+".png")
        plt.close('all')
        return
Example #17
    def test_cmac(self):
        input_train = np.reshape(np.linspace(0, 2 * np.pi, 100), (100, 1))
        input_train_before = input_train.copy()
        input_test = np.reshape(np.linspace(np.pi, 2 * np.pi, 50), (50, 1))
        input_test_before = input_test.copy()

        target_train = np.sin(input_train)
        target_train_before = target_train.copy()
        target_test = np.sin(input_test)

        cmac = algorithms.CMAC(
            quantization=100,
            associative_unit_size=32,
            step=0.2,
            verbose=False,
        )
        cmac.train(input_train, target_train, epochs=100)

        predicted_test = cmac.predict(input_test)
        predicted_test = predicted_test.reshape((len(predicted_test), 1))
        error = metrics.mean_absolute_error(target_test, predicted_test)

        self.assertAlmostEqual(error, 0.0024, places=4)

        # Test that algorithm didn't modify data samples
        np.testing.assert_array_equal(input_train, input_train_before)
        np.testing.assert_array_equal(input_test, input_test_before)
        np.testing.assert_array_equal(target_train, target_train_before)
Example #18
 def SamplerSetup(self, convMatrixObj, initializationDict):
     """ This method must be called before SamplerRun """                
     if not('init_theta' in initializationDict) or not('init_var' in initializationDict):
         raise KeyError('Initialization dictionary missing keys init_theta and/or init_var')
     
     self.xSeq = [] # Contains x^{(t)}, t=0, ..., T
     x0 = np.array(initializationDict['init_theta']) # Expect x0 to be either a 2-d or 3-d array. 
                                                     # Notice nomenclature x here instead of theta.
                                                     # Nonetheless, use theta in the key.
     M = x0.size       
     self.xSeq.append(np.reshape(x0, (M, 1))) # Reshape x0 into a column array
     
     self.varianceSeq = [] # Contains \sigma^2^{(t)}, t=0, ..., T
     assert initializationDict['init_var'] >= 0, __file__ + ': SamplerSetup: init_var must be non-negative'
     self.varianceSeq.append(initializationDict['init_var'])
     
     self.hyperparameterSeq = [] # Contains hyperparameter estimates, t=1, ..., T                    
     
     # This will return a 2-d or 3-d matrix, so it'll have to be reshaped into a vector
     forwardMap = lambda x: convMatrixObj.Multiply(np.reshape(x, x0.shape))        
     self._h = np.zeros((M, M))
     self._hNormSquared = np.zeros((M,))
     for ind in range(M):
         eInd = np.zeros((M,1))         
         eInd[ind] = 1   
         tmp = forwardMap(eInd)            
         self._h[:, ind] = np.reshape(tmp, (M,)) 
         self._hNormSquared[ind] = np.dot(self._h[:,ind], self._h[:,ind])
         assert self._hNormSquared[ind] > 0, __file__ + ': SamplerSetup: norm is not strictly positive'
         
     self.hx = np.reshape(forwardMap(x0), (M, 1))
     
     self.bSamplerRun = False        
Example #19
def plotKerasExperimentcifar10():

    index = 5
    for experiment_number in range(1,index+1):
        outputPath_part_final = os.path.realpath( "/home/jie/docker_folder/random_keras/output_cifar10_mlp/errorFile/hyperopt_experiment_withoutparam_accuracy" + str(experiment_number) + ".txt")
        output_plot = os.path.realpath(
                "/home/jie/docker_folder/random_keras/output_cifar10_mlp/errorFile/plotErrorCurve" + str(experiment_number) + ".pdf")

        df = pd.read_csv(outputPath_part_final,delimiter='\t',header=None)
        df.drop(df.columns[[600]], axis=1, inplace=True)

        epochnum = list(range(1, 251))
        for i in range(10):
            # .ix was removed from pandas; use .iloc for positional indexing
            df_1 = df[df.columns[0:250]].iloc[i]
            df_1 = np.reshape(df_1, (250,))  # assign the result; np.reshape is not in place
            plt.plot(epochnum, df_1)
        # plt.show()
        plt.savefig(output_plot)
        plt.close()
Example #20
 def test(self):
     # Expected
     in_channels = 3
     in_dim = 11
     out_channels = 5
     out_dim = in_dim // 2 + 1  # integer division so the shapes below are ints
     img = np.arange(0,in_dim*in_dim*in_channels*1, dtype=np.float32)
     img = np.reshape(img,[in_dim,in_dim,in_channels,1])
     filter = np.arange(0,3*3*in_channels*out_channels, dtype=np.float32)
     filter = np.reshape(filter,[3,3,in_channels,out_channels])
     bias = np.zeros([5])
     expected = np.zeros([out_dim,out_dim,out_channels])
     for och in range(out_channels):
         tmp = np.zeros([out_dim,out_dim,1])
         for ich in range(in_channels):
             imgslice = np.reshape(img[:,:,ich,0],[in_dim,in_dim])
             filterslice = np.reshape(filter[:,:,ich,och],[3,3])
             tmp += np.reshape(convolve(imgslice,filterslice,mode='constant',cval = 0.0)[::2,::2] , [out_dim, out_dim, 1])
         expected[:,:,och] = np.squeeze(tmp) + bias[och]
         
     # test
     owlimg = owl.from_numpy(np.transpose(img))
     owlfilter = owl.from_numpy(np.transpose(filter))
     owlbias = owl.from_numpy(bias)
     convolver = owl.conv.Convolver(1,1,2,2)   
     test = convolver.ff(owlimg, owlfilter, owlbias)
     
     print('Expected\n', expected)
     print("Actual\n", test.to_numpy())
     self.assertTrue(np.allclose(expected, test.to_numpy()))
Example #21
def calc_pca(feature):
    # Filter out super high numbers due to some instability in the network
    feature[feature>5] = 5
    feature[feature<-5] = -5
    #### Missing an image guided filter with the image as input
    ##
    ##########
    # change to double precision
    feature = np.float64(feature)
    # retrieve size of feature array
    shape = feature.shape
    [h, w, d] = feature.shape
    # resize to a two-dimensional array
    feature = np.reshape(feature, (h*w,d))
    # calculate average of each column
    featmean = np.average(feature, 0)
    # broadcasting subtracts the column means directly; no ones-matrix needed
    feature = feature - featmean
    feature_transpose = np.transpose(feature)
    cover = np.dot(feature_transpose, feature)
    # get largest eigenvectors of the array
    val,vecs = eigs(cover, k=3, which='LI')
    pcafeature = np.dot(feature, vecs)
    pcafeature = np.reshape(pcafeature,(h,w,3))
    pcafeature = np.float64(pcafeature)
    return pcafeature
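Because the covariance built here is symmetric, a dense numpy-only equivalent (a sketch, reusing the centered (h*w, d) feature array from inside the function) can use np.linalg.eigh, whose eigenvalues come back in ascending order:

import numpy as np

cov = feature.T @ feature            # same as np.dot(feature_transpose, feature)
vals, vecs = np.linalg.eigh(cov)     # ascending eigenvalues
pca3 = (feature @ vecs[:, -3:]).reshape(h, w, 3)   # project onto the top 3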
Example #22
def predict(theta, X, ninput, nhidden, noutput):
    theta1 = np.reshape(theta[0:nhidden*(ninput+1)], [nhidden, ninput + 1])
    theta2 = np.reshape(theta[nhidden*(ninput+1):],  [noutput, nhidden + 1])

    h1 = sigmoid(np.dot(np_extend(X, 1), theta1.T))
    h2 = sigmoid(np.dot(np_extend(h1, 1), theta2.T))    
    return np.argmax(h2, axis = 1)
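np_extend is not shown in this excerpt; judging from the calls here and in the bp() example below, it presumably prepends a constant bias column (defaulting to zero). A minimal sketch under that assumption:

import numpy as np

def np_extend(a, value=0):
    # presumed helper: (m, n) -> (m, n + 1) with a constant first column
    col = np.full((a.shape[0], 1), value, dtype=float)
    return np.hstack((col, a))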
Example #23
def calc_alm_chisq_fromfits(almfile, clfile):
    alm = pyfits.open(almfile)[0].data
    cls = pyfits.open(clfile)[0].data
    numiter = alm.shape[0]
    numchain = alm.shape[1]
    alm = np.reshape(alm, (numiter*numchain,alm.shape[2], alm.shape[3], alm.shape[4], alm.shape[5]))
    alm = alm[:, :, :, 2:, :]
    cls = cls[1:]
    cls = np.reshape(cls, (numiter * numchain, cls.shape[2], cls.shape[3]))
    if alm.shape[1] == 3:
        cls = np.concatenate((cls[:, 0:1, :], cls[:, 3:4, :], cls[:, 5:6, :]), 1)
    elif alm.shape[1] == 1:
        cls = cls[:, 0:1, :]
    cls = cls[:, :, 2:]
    cls = np.transpose(cls).copy()
    alm = np.transpose(alm).copy()
    chisq = np.zeros(cls.shape)
    for i in range(cls.shape[0]):
        l = i + 2
        for m in range(l):
            if m == 0:
                chisq[i, :, :] += alm[0, i, m, :, :] ** 2
            else:
                chisq[i, :, :] += np.sum(2 * alm[:, i, m, :, :] ** 2, 0)
        chisq[i, :, :] = chisq[i, :, :] / cls[i, :, :] / (2 * l + 1) * (l * (l + 1)) / (2 * np.pi)
    return chisq
Example #24
def kron(a,b):
    """Kronecker product of a and b.

    The result is the block matrix::

        a[0,0]*b    a[0,1]*b  ... a[0,-1]*b
        a[1,0]*b    a[1,1]*b  ... a[1,-1]*b
        ...
        a[-1,0]*b   a[-1,1]*b ... a[-1,-1]*b

    Parameters
    ----------
    a : array, shape (M, N)
    b : array, shape (P, Q)

    Returns
    -------
    A : array, shape (M*P, N*Q)
        Kronecker product of a and b

    Examples
    --------
    >>> from scipy import kron, array
    >>> kron(array([[1,2],[3,4]]), array([[1,1,1]]))
    array([[1, 1, 1, 2, 2, 2],
           [3, 3, 3, 4, 4, 4]])

    """
    if not a.flags['CONTIGUOUS']:
        a = np.reshape(a, a.shape)
    if not b.flags['CONTIGUOUS']:
        b = np.reshape(b, b.shape)
    o = np.outer(a,b)
    o = o.reshape(a.shape + b.shape)
    return np.concatenate(np.concatenate(o, axis=1), axis=1)
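A quick hypothetical check against numpy's own implementation:

import numpy as np

a = np.arange(6.0).reshape(2, 3)
b = np.arange(8.0).reshape(4, 2)
assert np.allclose(kron(a, b), np.kron(a, b))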
Example #25
def ReadBPLASMA(file_name,BNORM,Ns):
    #Read the BPLASMA output file from MARS-F
    #Return BM1, BM2, BM3
    BPLASMA = num.loadtxt(open(file_name))
 
    Nm1 = int(BPLASMA[0,0])  # cast to int: used below in slices and reshape shapes
    n = num.round(BPLASMA[0,2])
    Mm = num.round(BPLASMA[1:Nm1+1,0])
    Mm.resize([len(Mm),1])


    BM1 = BPLASMA[Nm1+1:,0] + BPLASMA[Nm1+1:,1]*1j
    BM2 = BPLASMA[Nm1+1:,2] + BPLASMA[Nm1+1:,3]*1j
    BM3 = BPLASMA[Nm1+1:,4] + BPLASMA[Nm1+1:,5]*1j

    BM1 = num.reshape(BM1,[Ns,Nm1],order='F')
    BM2 = num.reshape(BM2,[Ns,Nm1],order='F')
    BM3 = num.reshape(BM3,[Ns,Nm1],order='F')

    BM1 = BM1[0:Ns,:]*BNORM
    BM2 = BM2[0:Ns,:]*BNORM
    BM3 = BM3[0:Ns,:]*BNORM

    #NEED TO KNOW WHY THIS SECTION IS INCLUDED - to do with half grid???!!
    #BM2[1:,:] = BM2[0:-1,:] Needed to comment out to compare with RZPlot3
    #BM3[1:,:] = BM3[0:-1,:]

    return BM1, BM2, BM3,Mm
Example #26
def randmeanfor(R):

    mean_p = 0
    count_p = 0
    mean_n = 0
    count_n = 0
    shape = np.shape(R)
    R = np.reshape(R, np.size(R))

    for k in np.arange(np.size(R)):
        if R[k] > 0:
            mean_p = mean_p + R[k]
            count_p = count_p + 1
        elif R[k] < 0:
            mean_n = mean_n + R[k]
            count_n = count_n + 1

    mean_p = mean_p / count_p
    mean_n = mean_n / count_n

    for k in np.arange(np.size(R)):
        if R[k] > 0:
            R[k] = mean_p
        elif R[k] < 0:
            R[k] = mean_n
    R = np.reshape(R, shape)

    return R
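The same operation in idiomatic numpy is a pair of boolean-mask assignments; a sketch:

import numpy as np

def randmeanfor_vectorized(R):
    # replace positive entries with their mean, negative entries with theirs
    R = np.array(R, dtype=float)
    pos, neg = R > 0, R < 0
    if pos.any():
        R[pos] = R[pos].mean()
    if neg.any():
        R[neg] = R[neg].mean()
    return R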
Example #27
def load_data(dirname="cifar-10-batches-py", one_hot=False):
    tarpath = maybe_download("cifar-10-python.tar.gz",
                             "http://www.cs.toronto.edu/~kriz/",
                             dirname)
    X_train = []
    Y_train = []

    for i in range(1, 6):
        fpath = os.path.join(dirname, 'data_batch_' + str(i))
        data, labels = load_batch(fpath)
        if i == 1:
            X_train = data
            Y_train = labels
        else:
            X_train = np.concatenate([X_train, data], axis=0)
            Y_train = np.concatenate([Y_train, labels], axis=0)

    fpath = os.path.join(dirname, 'test_batch')
    X_test, Y_test = load_batch(fpath)

    X_train = np.dstack((X_train[:, :1024], X_train[:, 1024:2048],
                         X_train[:, 2048:])) / 255.
    X_train = np.reshape(X_train, [-1, 32, 32, 3])
    X_test = np.dstack((X_test[:, :1024], X_test[:, 1024:2048],
                        X_test[:, 2048:])) / 255.
    X_test = np.reshape(X_test, [-1, 32, 32, 3])

    if one_hot:
        Y_train = to_categorical(Y_train, 10)
        Y_test = to_categorical(Y_test, 10)

    return (X_train, Y_train), (X_test, Y_test)
Example #28
def bp(theta, ninput, nhidden, noutput, Lambda, X, y):
    '''Backpropagation: compute the gradient of theta. Much of this
    computation duplicates fp(); the calling convention of the optimizer
    fmin_cg forces the duplication, which noticeably hurts efficiency.
    '''
    theta1 = np.reshape(theta[0:nhidden*(ninput+1)], [nhidden, ninput + 1])
    theta2 = np.reshape(theta[nhidden*(ninput+1):],  [noutput, nhidden + 1])

    m = X.shape[0]

    a1 = np_extend(X, 1)
    z2 = np.dot(a1, theta1.T)
    a2 = np_extend(sigmoid(z2), 1)
    z3 = np.dot(a2, theta2.T)
    a3 = sigmoid(z3)

    yTmp = np.eye(noutput)
    yy = yTmp[y][:]

    delta3 = a3 - yy
    delta2 = np.dot(delta3, theta2[:, 1:]) * a2[:, 1:] * (1-a2[:, 1:])

    theta1_g = np_extend(Lambda / m * theta1[:, 1:])
    theta2_g = np_extend(Lambda / m * theta2[:, 1:])
    theta1_g += 1.0 / m * np.dot(delta2.T, a1)
    theta2_g += 1.0 / m * np.dot(delta3.T, a2)

    grad = np.empty(theta.shape)
    grad[0:nhidden*(ninput+1)] = np.reshape(theta1_g, nhidden * (ninput + 1))
    grad[nhidden*(ninput+1):] = np.reshape(theta2_g, noutput * (nhidden + 1))

    return grad
Example #29
def load_data_wrapper():
    """Return a tuple containing ``(training_data, validation_data,
    test_data)``. Based on ``load_data``, but the format is more
    convenient for use in our implementation of neural networks.

    In particular, ``training_data`` is a list containing 50,000
    2-tuples ``(x, y)``.  ``x`` is a 784-dimensional numpy.ndarray
    containing the input image.  ``y`` is a 10-dimensional
    numpy.ndarray representing the unit vector corresponding to the
    correct digit for ``x``.

    ``validation_data`` and ``test_data`` are lists containing 10,000
    2-tuples ``(x, y)``.  In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
    corresponding classification, i.e., the digit values (integers)
    corresponding to ``x``.

    Obviously, this means we're using slightly different formats for
    the training data and the validation / test data.  These formats
    turn out to be the most convenient for use in our neural network
    code."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = list(zip(training_inputs, training_results))
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = list(zip(validation_inputs, va_d[1]))
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = list(zip(test_inputs, te_d[1]))
    return (training_data, validation_data, test_data)
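vectorized_result is not part of this excerpt; its presumed behavior (the 10-dimensional unit vector described in the docstring) can be sketched as:

import numpy as np

def vectorized_result(j):
    # presumed helper: unit column vector with a 1.0 in position j
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e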
Example #30
def ClusterObjects(farn, struct_elem):
    magn_img = farn.magnitude_image
    dir_img = farn.direction_image    
    
    bin_img = np.zeros(shape=(magn_img.shape[0], magn_img.shape[1]), dtype=np.uint8)
    bin_img[magn_img < 25] = 0
    bin_img[magn_img >= 25] = 1
    
    bin_img = ndimage.binary_dilation(bin_img, structure=struct_elem, iterations=3).astype(bin_img.dtype)

    labels, nb_labels = Morphology.ConnenctedComponents(bin_img)    
    filt_labels, areas, nb_new_labels = Morphology.FilterArea(bin_img, labels, nb_labels, 480)
    
    
    temp_magn = ndimage.mean(magn_img, filt_labels, range(nb_new_labels + 1))
    temp_dir = ndimage.mean(dir_img, filt_labels, range(nb_new_labels + 1))
    
    data = np.concatenate((np.reshape(temp_magn, (-1,1)), np.reshape(temp_dir, (-1,1))), axis=1)
    
    clusters = -1
    if nb_new_labels >= 1:
        Y = pdist(data, 'euclidean')
        agglo = AgglomerativeClustering.Agglomerative(Y, 50.)
        agglo.AggloClustering(criterion = 'distance', method = 'single', metric = 'euclidean', normalized = False)

        clusters = agglo.clusters
             
    bin_img[filt_labels == 0] = 0
    bin_img[filt_labels >= 1] = 1
    
    
    
    return bin_img, nb_new_labels, temp_magn, temp_dir, data, clusters
Example #31
def get_positions(in_file):
    with open(in_file) as f:
        positions = [list(map(float, line.strip().split())) for line in f]
    return np.reshape(np.array(positions), (-1, len(positions[0]) // 3, 3))
Example #32
    def odv_ascii(self, cruise="", variables=[], variable_units=[],
                  station=[], latitude=[], longitude=[], depth=[], time=[],
                  data=[]):
        with contextlib.closing(StringIO()) as buf:
            buf.write("//<CreateTime>%s</CreateTime>\n" % (
                datetime.datetime.now().isoformat()
            ))
            buf.write("//<Software>Ocean Navigator</Software>\n")
            buf.write("\t".join([
                "Cruise",
                "Station",
                "Type",
                "yyyy-mm-ddThh:mm:ss.sss",
                "Longitude [degrees_east]",
                "Latitude [degrees_north]",
                "Depth [m]",
            ] + ["%s [%s]" % x for x in zip(variables, variable_units)]))
            buf.write("\n")

            if len(depth.shape) == 1:
                depth = np.reshape(depth, (depth.shape[0], 1))

            for idx in range(0, len(station)):
                for idx2 in range(0, depth.shape[1]):
                    if idx > 0 or idx2 > 0:
                        cruise = ""

                    if isinstance(data[idx], np.ma.MaskedArray):
                        if len(data.shape) == 3 and \
                           data[idx, :, idx2].mask.all():
                            continue
                        if len(data.shape) == 2 and \
                           np.ma.is_masked(data[idx, idx2]):
                            continue

                    line = [
                        cruise,
                        station[idx],
                        "C",
                        time[idx].isoformat(),
                        "%0.4f" % longitude[idx],
                        "%0.4f" % latitude[idx],
                        "%0.1f" % depth[idx, idx2],
                    ]
                    if len(data.shape) == 1:
                        line.append(str(data[idx]))
                    elif len(data.shape) == 2:
                        line.append(str(data[idx, idx2]))
                    else:
                        line.extend(list(map(str, data[idx, :, idx2])))

                    if idx > 0 and station[idx] == station[idx - 1] or \
                       idx2 > 0:
                        line[1] = ""
                        line[2] = ""
                        line[3] = ""
                        line[4] = ""
                        line[5] = ""

                    buf.write("\t".join(line))
                    buf.write("\n")

            return (buf.getvalue(), self.mime, self.filename)
Example #33
    for i in range(c):
        arg = com_L(points_p, points[:, i], field, us)
        ls.append(arg)
        #print(arg.shape)
        #if np.amax(arg) > 0:
        #lines.append([points_p[:, np.argwhere(arg == np.amax(arg))[0][0]], points[:, i]])
        #points_p = np.delete(points_p, arg, 1)
        #break
    ls = np.array(ls)
    print(ls)
    while np.amax(ls) > 0:
        line_psi = np.argwhere(ls == np.amax(ls))
        if line_psi.shape[1] != 1:
            lines.append(
                [points_p[:, line_psi[0, 1]], points[:, line_psi[0, 0]]])
            ls = np.delete(ls, line_psi)
        else:
            break
        if ls.shape[0] == 0:
            break
    #while 0 in points.shape or 0 in points_p.shape:
    return lines


if __name__ == "__main__":

    point_p = np.ones([8, 2])
    points = range(4)
    points = np.reshape(points, [2, 2]) + 1e-3
    field = np.ones([100, 50, 2])
    print(coms(point_p, points, field, 5))
Example #34
File: app7.py Project: ronesim/CN
def main_function(type, number_of_values, start, end, x_value):
    eps = 10**(-10)

    if type == 1:
        # define numeric function - coefficients
        my_function = [12, 0, 30, -12, 1]
        # my_function = [1, 0, 1]

        # generate random points and get the values for my function in that points
        points = generate_random_points(number_of_values - 1, start, end, eps)
        points = np.insert(points, 0, start)
        points = np.append(points, end)
        # points = [0 , 2, 4]
        values = get_numeric_function_values(points, my_function)

        aprox_value_interpolation = numerical_interpolation(
            points, values, x_value)
        real_value = get_numeric_function_values([x_value], my_function)
        print("Lagrange interpolation: ", aprox_value_interpolation)
        print("Real value: ", real_value[0])
        print("Difference: ", abs(aprox_value_interpolation - real_value[0]))

        points_to_plot = np.arange(0.0, 10.0, 1.0)
        values_to_plot = get_numeric_function_values(points_to_plot,
                                                     my_function)
        interpolation_points = [
            numerical_interpolation(points_to_plot, values_to_plot, x)
            for x in points_to_plot
        ]

        plt.plot(points_to_plot, values_to_plot, 'r', points_to_plot,
                 interpolation_points, 'b--')
        plt.axis([0, 10, -200, 100])
        plt.show()

    else:
        # trigonometric function
        PI = math.pi
        points = generate_random_points(number_of_values, 0, end, eps)
        points = np.insert(points, 0, start)
        points = np.append(points, end)

        values = get_trigonometric_function_values(type, points)
        T = compute_matrix_T(points)
        values = np.reshape(values, (len(points), 1))

        # solve system TX = Y
        x = np.linalg.solve(T, values)

        aprox_value_interpolation = trigonometric_interpolation(x, x_value)
        real_value = trig_function(type, x_value)
        print("Trigonometric Interpolation: ", aprox_value_interpolation)
        print("Real value: ", real_value)
        print("Difference: ", abs(aprox_value_interpolation - real_value))

        points_to_plot = np.arange(start, end, 0.1)
        values_to_plot = get_trigonometric_function_values(
            type, points_to_plot)
        interpolation_result = []
        for point in points_to_plot:
            interpolation_result.append(trig_function(type, point))
        plt.plot(points_to_plot, values_to_plot, 'r', points_to_plot,
                 interpolation_result, 'b--')
        plt.axis([-10, 10, -2, 2])
        plt.show()
Example #35
# =============================================================================
# First file: full-lift-down
# =============================================================================

### Start with uncertainty bars on landline and spaceline
## Load file archive and get data
filename = './../results/sweeps/Neptune_27_3sigLow_0.25_180_0522005334.npz'
data = np.load(filename, allow_pickle=True)
# params = data['params'][0] # array of 1
outsList = data['outsList']
efpaList = data['efpaList']
BCList = data['BCList']

## Create mesh grid for contour plots, reshape result arrays
BCgrid, EFPAgrid = np.meshgrid(BCList, efpaList)
fpafgrid = np.reshape([out.fpaf for out in outsList], BCgrid.shape)
engfgrid = np.reshape([out.engf for out in outsList], BCgrid.shape)

# find line between landing and aerocapture
landline_BC_low = BCList
landline_EFPA_low = []
for rind in range(fpafgrid.shape[1]):
    ind = next(ind for ind, val in enumerate(fpafgrid[:,rind])\
               if val < 0)
    landline_EFPA_low.append(efpaList[ind])
    
# find line between aerocapture and escape
spaceline_BC_low = []
spaceline_EFPA_low = []
for rind in range(engfgrid.shape[1]):
    ind = [ind for ind, val in enumerate(engfgrid[:,rind]) if val < 0]
Example #36
 def data2img(self, data):
     return np.reshape(data, [data.shape[0]] + self.shape)
Example #37
def BGR2HSV(bgr):
    bgr = np.reshape(bgr, (bgr.shape[0], 1, 3))
    hsv = cv2.cvtColor(np.uint8(bgr), cv2.COLOR_BGR2HSV)
    hsv = np.reshape(hsv, (hsv.shape[0], 3))

    return hsv
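Usage sketch: the two reshapes let a flat (N, 3) table of BGR colors pass through cv2.cvtColor, which expects an image-shaped array:

import numpy as np

palette = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])   # BGR rows
print(BGR2HSV(palette).shape)   # (3, 3): one HSV triple per input color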
Example #38
 def _unstack(array):
   if array.shape[0] == 1:
     arrays = [array]
   else:
     arrays = np.split(array, array.shape[0])
   return [np.reshape(a, a.shape[1:]) for a in arrays]
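Behavior sketch: the split removes the leading axis, so an (N, ...) array becomes a list of N arrays of shape (...):

import numpy as np

parts = _unstack(np.zeros((3, 4, 5)))
print(len(parts), parts[0].shape)   # 3 (4, 5)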
Example #39
ap = argparse.ArgumentParser()

ap.add_argument('-i', '--image')
args = vars(ap.parse_args())


image = cv2.imread(args["image"])
# image_small = cv2.resize(image, (0,0), fx = 0.25, fy= 0.25)
image_standard = cv2.resize(image, (640, 480))
# print image.shape

# cv2.imshow("originalimage", image_small)
# cv2.imshow("originalimage", image_standard)
cv2.waitKey(0)

image_vec = np.reshape(image_standard[:,:,1], image_standard.shape[0] * image_standard.shape[1])
image_vec1 = np.ravel(image_standard[:,:,0])
print(np.min(image_vec1))
print(np.max(image_vec1))
# print np.reshape(image_standard[:,:,1], image_standard.shape[0] * image_standard.shape[1])[:,np.newaxis]
# exit()
# set color boundaries for red, green, blue, yellow, orange, light green, fluorescent green and violet.
boundaries = [
	([0, 0, 100], [60, 70, 255]),
	([0, 40, 0], [70, 220, 100]),
	([40, 0, 0], [220, 90, 110]),
	([0, 20, 70], [40, 220, 250]),
	([0, 60, 70], [30, 200, 210]),

]
Example #40
    test_sentences.append(sen)
    test_classes.append(lang_class)

#Pad the sequences to have a fixed length of 100
train_data = pad_sequences(train_sentences, maxlen=100)
train_data = np.array(train_data)

test_data = pad_sequences(test_sentences, maxlen=100)
test_data = np.array(test_data)

#Convert the integer language classes to one-hot encoding
train_y = np_utils.to_categorical(train_classes, 10)
test_y = np_utils.to_categorical(test_classes, 10)

#Reshape the sequences to have shape (num_of_sequence, sequence_length, 1)
train_x = np.reshape(train_data, (train_data.shape[0], train_data.shape[1], 1))
test_x = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], 1))

print(train_x.shape)
print(test_x.shape)

print(train_y.shape)
print(test_y.shape)

#DEFINE THE MODEL
model = Sequential()
model.add(GRU(256, activation="relu", input_shape=train_x.shape[1:]))
model.add(Dense(10, activation="softmax"))

#Compile the model
model.compile(optimizer=Adam(0.01),
              loss="categorical_crossentropy")  # assumed completion: matches the one-hot labels above
Example #41
def do_cmc_validation(engine,network,data):
  m = data.num_test_id
  n = m * m
  idx_placeholder = data.idx_placeholder
  batch_size = network.batch_size
  debug = network.tags
  path = data.test_case
  end_net = data.use_end_network
  rank_out = ""
  errors = ""
  measures = {}
  merge_type = engine.config.unicode("merge_type", "")

  out_layer_name = engine.config.unicode("output_embedding_layer","fc1")
  out_layer = network.tower_layers[0][out_layer_name]
  assert len(out_layer.outputs) == 1
  out_feature = out_layer.outputs[0]
  out_feature_size = out_layer.n_features

  test_cases = engine.config.unicode_list("test_cases", [])

  for test_case in test_cases:
    errs = 0
    y_vals = numpy.empty([0,1])
    probe = numpy.empty([0, out_feature_size])
    gallery = numpy.empty([0, out_feature_size])

    idx = 0
    while idx < m:
      start = time.time()
      idx_value = [idx, min(idx + batch_size, m),1,0]

      feature_val, msg = engine.session.run([out_feature, debug],
                                            feed_dict={idx_placeholder: idx_value, path: test_case, end_net: False})
      probe = numpy.concatenate((probe, feature_val), axis=0)

      end = time.time()
      elapsed = end - start
      print (min(idx + batch_size, m), '/', m, "elapsed", elapsed)
      idx += batch_size

    idx = 0
    while idx < m:
      start = time.time()
      idx_value = [idx, min(idx + batch_size, m), 1, 1]

      feature_val, msg = engine.session.run([out_feature, debug],
                                            feed_dict={idx_placeholder: idx_value, path: test_case, end_net: False})
      gallery = numpy.concatenate((gallery, feature_val), axis=0)

      end = time.time()
      elapsed = end - start
      print (min(idx + batch_size, m), '/', m, "elapsed", elapsed)
      idx += batch_size

    start = time.time()
    for pdx in range(m):
      idx = 0
      while idx < m:
        idx_value = [idx, min(idx + batch_size, m), pdx, 1]
        r = numpy.arange(idx_value[0], idx_value[1])
        q = (pdx,) * (min(idx + batch_size, m) - idx)

        if data.validation_mode == "similarity":

          y = network.y_softmax
          e = network.measures_accumulated
          in_layer_name = engine.config.unicode("input_embedding_layer", "siam_concat")
          in_layer = network.tower_layers[0][in_layer_name]
          assert len(in_layer.outputs) == 1
          in_feature = in_layer.outputs[0]

          if merge_type == "add":
            feature_val = probe[q, :] + gallery[r, :]
          elif merge_type == "subtract":
            feature_val = probe[q, :] - gallery[r, :]
          elif merge_type == "abs_subtract":
            feature_val = numpy.abs(probe[q, :] - gallery[r, :])
          else: # merge_type == "concat":
            feature_val = numpy.concatenate((probe[q, :], gallery[r, :]), axis=1)

          y_val, err = engine.session.run([y, e], feed_dict={idx_placeholder: idx_value, in_feature: feature_val, end_net: True,path: test_case})
          y_val = y_val[:,0:1]
          errs += err["errors"]

        else: # data.validation_mode == "embedding":
          y_val = numpy.linalg.norm(probe[q,:] - gallery[r,:],axis=1)
          y_val = numpy.reshape(y_val,[y_val.size,1])

        y_vals = numpy.concatenate((y_vals, y_val), axis=0)
        idx += batch_size

    y_vals1 = y_vals
    Apsum = 0
    ranks = numpy.zeros(m)
    for i in range(m):
      r = numpy.arange(m * i, m * (i + 1))
      I = numpy.identity(m)
      corr = I[:, i]
      tab = numpy.column_stack((y_vals1[r], corr))
      id = numpy.argsort(y_vals1[r], axis=0)
      tab = tab[id, :]
      pos = numpy.where(tab[:,0, 1])[0]
      ranks[i] = pos[0] + 1
      Ap = numpy.zeros(1)
      f = numpy.zeros(1)
      for j in range(pos.size):
        f += 1
        Ap += f / (pos[j] + 1)

      Apsum += Ap

    mAp = Apsum / m
    cmc = numpy.zeros(m)
    for i in range(m):
      cmc[i] = 100 / m * ranks[ranks <= i + 1].size

    rank1 = cmc[0]
    rank5 = cmc[4]
    error = errs / n

    errors += "%.3f " % mAp
    rank_out += "%.1f " % rank1 + "%.1f " % rank5

    measures = {}
    measures["ranks"] = rank_out

    end = time.time()
    elapsed = end - start
    print (test_case, "elapsed", elapsed)

  return errors, measures
Example #42
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        try:
            saver.restore(sess, model_add+product_name+".ckpt")
    #############################
    #test
    #############################
            actual_low = np.zeros(len(testing_set))
            actual_high = np.zeros(len(testing_set))
            predicted_low = np.zeros(len(testing_set))

            count = 0
            loss = np.zeros(len(testing_set))
            for record in testing_set:
                input_vector = np.reshape(record[:length_of_training_records-1], (length_of_training_records-1,-1,1))
                label_vector = record[length_of_training_records-1:]

                OPEN = np.reshape(label_vector[0][0], (1,1))
                label_vector= np.reshape(label_vector[0][1:], (1,3))

                l, prediction = sess.run([MSE,h_fc_4], feed_dict = {
                    ph_input_vector:input_vector,
                    ph_latest_OPEN:OPEN,
                    ph_label_vector:label_vector,
                    ph_type1:type_1, ph_type2:type_2, ph_type3:type_3,ph_type4:type_4,
                    ph_type5:type_5, ph_type6:type_6,ph_type7:type_7                  
                    })
                loss[count] = l

                actual_low[count] = (label_vector*norms[1:])[0][1]
Example #43
    #print(image_paths)

        
    img_path = PATH_TO_IMAGES+'1'+num+'00.jpg' 
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    #print(num+'00')
    features = model.predict(x)/196/512
    height=features.shape[1]
    width=features.shape[2]
    filters=features.shape[3]
    G= np.zeros((filters,filters))
        
    style= np.transpose(np.reshape(features,(height*width,filters)))
    G= np.dot(style,np.transpose(style))
    mat=np.transpose(G[np.triu_indices(512)])
    GM=np.reshape(mat,(1,256*513))
    G2= transformer.transform(GM)
    
    labelled_data[i,0]='1'+num+'00'
    labelled_data[i,1:] = G2  # a numpy ndarray of dim (1,4096) is saved to the dictionary for each image, keyed by its name.
    i=i+1
    print(i)
np.save('holidays_labelled_style',labelled_data)
#print(labelled_data)

Example #44
autoencoder_model.compile(optimizer='adadelta', loss='binary_crossentropy')


# The second NN model is half of the first: it takes the input image and gives the encoded vector as output
encoder_model = Model(inputs=autoencoder_model.input,
                                 outputs=autoencoder_model.get_layer('feature_vector').output) # <---- take the output from the feature vector
# Compile the second model
encoder_model.compile(optimizer='adadelta', loss='binary_crossentropy')

# We need to scale the image from [0-255] to [0-1] for better performance of activation functions
x_train = x_train / 255.
x_test = x_test / 255.


# We train the NN in batches (groups of images), so we reshape the dataset
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))

print("Train dataset size is {0}".format(x_train.shape))
print("Test dataset size is {0}".format(x_test.shape))

# Step 2 - Train a neural network
#################################
# It takes several minutes to train this neural network, depending on the configuration of your cluster.
learning_history=autoencoder_model.fit(x=x_train, y=x_train, epochs=10, batch_size=128, 
                                 shuffle=True, validation_data=(x_test, x_test), verbose=1)

# Step 3 - Test the model
##########################
encoded_decoded_image=autoencoder_model.predict(x_test)
		print "creating internal cost"
		#Internal cost: cost of ppl going to work within own TAZ
#		replace drvcost = 21.46*[(2/3)*(area_dest/_pi)^0.5]/15
#					+3.752*[(2/3)*(area_dest/_pi)^0.5]/20 if oID_TAZ12A== dID_TAZ12A
#					replace with 1 if lower than 1		
		dist = (2./3) * (area/pi)**0.5
		intci = 11.5375 * dist / 15 + 0.469 * dist
		intcp = 11.5375 * dist / 3
		
		#make diagonal (Origin = destination)
		I = np.identity(nTAZ)
		intci = intci*I
		intcp = intcp*I
		
		print "reshaping..."
		ttcosti = np.reshape(ttd['cost'],(nTAZ, nTAZ)) + intci
		ttcostp = np.reshape(ttp['cost'],(nTAZ, nTAZ)) + intcp
		
		
		print "calculate Sij"
		#New transit share
		deltaC = ttcostp/ttcosti
		exx = np.exp(c1+c2*deltaC)
		sij = exx/(1+exx)
#		outSij = sij.reshape(nTAZ**2,1)
#		outCSVsij = inSpace+'CSV/Sij'+str(currentIter)+'.csv'
#		print "Writing transit share to", outCSVsij
#		
#		with open(outCSVsij, 'wb') as f:
#			np.savetxt(f, outSij, delimiter=',', fmt='%7.10f')
		sij1 = ne.evaluate("(exp(c1+c2*(ttcostp/ttcosti)))/(1+exp(c1+c2*(ttcostp/ttcosti)))")
Example #46
fs = {}
for dis_var in ["dis", "dis_p", "dis_con"]:
    fs[dis_var] = {}
    for method in ["tra", "sem"]:
        fs[dis_var][method] = {}
        for scenario in SCENARIOS:
            fs[dis_var][method][scenario] = {}
            for var in ["her", "sha", "non"]:
                fs[dis_var][method][scenario][var] = []
                for i in range(data[scenario][dis_var][method][var].shape[0]):
                    # Flatten and sort data.
                    newshape = data[scenario][dis_var][method][var].shape[1] \
                        * data[scenario][dis_var][method][var].shape[2]
                    fs[dis_var][method][scenario][var].append( \
                        numpy.sort(numpy.reshape( \
                        data[scenario][dis_var][method][var][i,:,:], \
                        newshape)))
                    fs[dis_var][method][scenario][var][-1] = \
                        fs[dis_var][method][scenario][var][-1][numpy.isnan( \
                        fs[dis_var][method][scenario][var][-1])==False]

# Open a file to store stats output.
with open(os.path.join(OUTDIR, "stats.tsv"), "w") as f:
    # Write header.
    f.write("\t".join(["var", "n", "m", "sd", "sem", "z", "z_p", "t", "t_p", \
        "d", "str"]))

    # Compute z scores and effect sizes.
    z = {}
    d = {}
    for dis_var in ["dis", "dis_con"]:
Example #47
new_files = data['filename']
new_files = list(new_files)
for i in range(len(new_files)) :
    new_files[i] = new_files[i].split("chunk")[0]


data = data.drop(['filename'],axis=1)
data = np.asarray(data)

X = data[:,6:]
y = data[:,0]
y[y=="prog"] = 0
y[y=="non_prog"] = 1
y[y=="djent"] = 0

X = np.reshape(X, (X.shape[0],1, X.shape[1]))

print("X_train shape ",X.shape)
y_test = y
y_test = to_categorical(y_test)
 

model = load_model('Model.h5')

print("\nTesting ...")

# Predict using saved model

predictions = model.predict(X)
print(predictions)
row,col = predictions.shape
Example #48
def update(inSpace, currentIter, inTTp):
	try:
		from math import exp, sqrt, pi
		from operator import itemgetter
		import numpy as np
		import sys
		
		#print "Code starts on ", time.strftime("%d/%m/%Y - %H:%M:%S")
		#Parameters
		c1 = 5.45
		c2 = -5.05
		G = exp(-3.289)
		beta1 = 0.535
		beta2 = 0.589
		tau = -2.077
		
	
		#import various matrices
		fdtype = [('oid','i8'),('did','i8'),('flow','f8')]
		tttype = [('oid','i8'),('did','i8'),('name','S20'),('cost','f8')]
		petype = [('oid','i8'),('emp','f8'),('pop','f8')]
		areatype = [('oid','i8'),('area','f8')]
		
		#import from inFlow
		#This is used as OUTPUT TEMPLATE
		outFTT = inSpace + "inFlow.csv"
		outFTT = readcsv(outFTT, fdtype, incol = 3, sort = [0,1], header = None)
		
		#TT Cost (pub)
		#Post/PRE as SEPARATE CSV FILE
		print "importing TTcost for Transit"
		ttp = readcsv(inTTp, tttype, incol = 4, sort = [0,1], header = True)
		
		print "importing TTcost for driving (current)"
		#TT Cost (driving, CURRENT)
		inTTd = inSpace+"CSV/TT.csv"
		#inTTd = inSpace+"TTdrv.csv"
		ttd = readcsv(inTTd, tttype, incol = 4, sort = [0,1], header = None)
		
		#print "check sorting"
		#make sure both TT costs are sorted correctly:
		#if any(ttp['oid'] != ttd['oid']) or any(ttp['did'] != ttd['did']):
		#if any(ttp['oid'] != ttd['oid']) or any(ttp['did'] != ttd['did']):
		#	raise Exception('Driving and Transit TT not match!')
		#print "sorting is fine"
		
		print "importing census"
		#Population & employment
		inPE = inSpace+"census.csv"		
		pe = readcsv(inPE, petype, incol = 3, sort = [0], header = True)
		
		print "importing TAZ area"
		inArea = inSpace+"TAZarea.csv"		
		area = readcsv(inArea, areatype, incol=2, sort=[0], header = True)
		area = area['area']
		
		print "testing squareness"
		#Test square of TTcost and TTcostpub, test same size of pop/emp
		ttsize = sqrt(np.size(ttd))
		if ttsize != int(ttsize):
			raise Exception('Driving TT cost not square!!!')
		ttsize = sqrt(np.size(ttp))
		if ttsize != int(ttsize):
			raise Exception('Transit TT cost not square!!!')
		pesize = np.size(pe)
		if pesize != int(ttsize):
			raise Exception('Population/Employment vector not same size as TAZ!!!')
		nTAZ = ttsize
		print "square is fine, import completed"
		
		
		print "creating internal cost"
		#Internal cost: cost of ppl going to work within own TAZ
#		replace drvcost = 21.46*[(2/3)*(area_dest/_pi)^0.5]/15
#					+3.752*[(2/3)*(area_dest/_pi)^0.5]/20 if oID_TAZ12A== dID_TAZ12A
#					replace with 1 if lower than 1		
		dist = (2./3) * (area/pi)**0.5
		intci = 11.5375 * dist / 15 + 0.469 * dist
		intcp = 11.5375 * dist / 3
		
		#make diagonal (Origin = destination)
		I = np.identity(nTAZ)
		intci = intci*I
		intcp = intcp*I
		
		print "reshaping..."
		ttcosti = np.reshape(ttd['cost'],(nTAZ, nTAZ)) + intci
		ttcostp = np.reshape(ttp['cost'],(nTAZ, nTAZ)) + intcp
		
		
		print "calculate Sij"
		#New transit share
		deltaC = ttcostp/ttcosti
		exx = np.exp(c1+c2*deltaC)
		sij = exx/(1+exx)
		outSij = sij.reshape(nTAZ**2,1)
		outCSVsij = inSpace+'CSV/Sij'+str(currentIter)+'.csv'
		print "Writing transit share to", outCSVsij
		
		with open(outCSVsij, 'wb') as f:
			np.savetxt(f, outSij, delimiter=',', fmt='%7.10f')


		
		print "population and employment"
		#Gravity prediction
		pop = pe['pop'] ** beta1
		pop = np.matrix(pop)
		emp = pe['emp'] ** beta2
		emp = np.matrix(emp)
		
		print "final matrix calculation"
		FTT = G * np.array(pop.T * emp) * (sij*ttcostp + (1-sij)*ttcosti)**tau
		pFTT= FTT * (1-sij)
		pFTT= pFTT.reshape(1,nTAZ**2)
		outFTT['flow'] = pFTT
		
		outCSV = inSpace+'CSV/TTflow'+str(currentIter)+'.csv'
		print "Writing output to", outCSV
		
		with open(outCSV, 'wb') as f:
			np.savetxt(f, outFTT, delimiter=',', fmt='%7.0f, %7.0f, %7.10f')


	except Exception as e:
		tb = sys.exc_info()[2]
		print "Error occurred in ModUpdateFlow %i" % tb.tb_lineno 
		print e
Example #49
def save_memmap_chunks(filename,
                       base_name='Yr',
                       resize_fact=(1, 1, 1),
                       remove_init=0,
                       idx_xy=None,
                       order='F',
                       xy_shifts=None,
                       is_3D=False,
                       add_to_movie=0,
                       border_to_0=0,
                       n_chunks=1):
    """ Saves efficiently a list of tif files into a memory mappable file
    Parameters
    ----------
        filename: str
            name of the tif file to map
        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"    
        resize_fact: tuple
            x,y, and z downampling factors (0.5 means downsampled by a factor 2) 
        remove_init: int
            number of frames to remove at the begining of each tif file (used for resonant scanning images if laser in rutned on trial by trial)
        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance idx_xy=(slice(150,350,None),slice(150,350,None))
        order: string
            whether to save the file in 'C' or 'F' order     
        xy_shifts: list 
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping    

        is_3D: boolean
            whether it is 3D data
    Return
    -------
        fname_new: the name of the mapped file, the format is such that the name will contain the frame dimensions and the number of f

    """

    #TODO: can be done online
    print(filename)

    Yr = cm.load(filename, fr=1)

    T, dims = Yr.shape[0], Yr.shape[1:]
    step = int(old_div(T, n_chunks))  # np.int was removed from recent numpy
    bins = []

    for i in range(0, T, step):
        bins.append(i)
    bins.append(T)
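    # bins now holds the chunk boundaries: [0, step, 2*step, ..., T]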

    for j in range(0, len(bins) - 1):
        tmp = np.array(Yr[bins[j]:bins[j + 1], :, :])
        if xy_shifts is not None:
            # apply_shifts is a method of cm.movie, not of np.ndarray, so wrap first
            tmp = cm.movie(tmp, fr=1).apply_shifts(xy_shifts,
                                                   interpolation='cubic',
                                                   remove_blanks=False)

        if idx_xy is None:
            if remove_init > 0:
                tmp = np.array(tmp)[remove_init:]
        elif len(idx_xy) == 2:
            tmp = np.array(tmp)[remove_init:, idx_xy[0], idx_xy[1]]
        else:
            raise Exception('You need to set is_3D=True for 3D data')

        if border_to_0 > 0:
            min_mov = np.nanmin(tmp)
            tmp[:, :border_to_0, :] = min_mov
            tmp[:, :, :border_to_0] = min_mov
            tmp[:, :, -border_to_0:] = min_mov
            tmp[:, -border_to_0:, :] = min_mov

        fx, fy, fz = resize_fact
        if fx != 1 or fy != 1 or fz != 1:

            tmp = cm.movie(tmp, fr=1)
            tmp = tmp.resize(fx=fx, fy=fy, fz=fz)  # resize the chunk, not the whole movie

        Tc, dimsc = tmp.shape[0], tmp.shape[1:]
        tmp = np.transpose(tmp, list(range(1, len(dimsc) + 1)) + [0])
        tmp = np.reshape(tmp, (np.prod(dimsc), Tc), order='F')

        if j == 0:
            fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(
                dims[1]) + '_d3_' + str(
                    1 if len(dims) == 2 else dims[2]) + '_order_' + str(order)
            fname_tot = os.path.join(os.path.split(filename)[0], fname_tot)
            big_mov = np.memmap(fname_tot,
                                mode='w+',
                                dtype=np.float32,
                                shape=(np.prod(dims), T),
                                order=order)
        else:
            big_mov = np.memmap(fname_tot,
                                dtype=np.float32,
                                mode='r+',
                                shape=(np.prod(dims), T),
                                order=order)
        #    np.save(fname[:-3]+'npy',np.asarray(Yr))

        big_mov[:, bins[j]:bins[j + 1]] = np.asarray(
            tmp, dtype=np.float32) + 1e-10 + add_to_movie
        big_mov.flush()
        del big_mov

#    if ref+step+1<d:
#        print 'running on remaining pixels:' + str(ref+step-d)
#        pars.append([fname_tot,d,tot_frames,mmap_fnames,ref+step,d])

    fname_new = fname_tot + '_frames_' + str(T) + '_.mmap'
    os.rename(fname_tot, fname_new)

    return fname_new
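# A minimal usage sketch (the file name is hypothetical; assumes the caiman
# package is importable as cm, as in the body above):
#
#   fname_new = save_memmap_chunks('example_movie.tif', base_name='Yr',
#                                  order='F', n_chunks=4)
#   # fname_new encodes dimensions and frame count, e.g.
#   # Yr_d1_512_d2_512_d3_1_order_F_frames_1000_.mmap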
Example #50
0
# the opening of this example was lost; the lines below reconstruct a plausible
# head, assuming the standard STL-10 binary layout (uint8 images stored as
# 3x96x96) -- the label reader and the path constants did not survive
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold


def read_all_images(path_to_data):
    with open(path_to_data, 'rb') as f:
        everything = np.fromfile(f, dtype=np.uint8)
        images = np.reshape(everything, (-1, 3, 96, 96))
        # transpose from (N, channel, column, row) so the images have
        # their channels separated.
        images = np.transpose(images, (0, 3, 2, 1))
        return images.reshape((-1, 96 * 96 * 3))


if __name__ == "__main__":
    # test to check if the whole dataset is read correctly
    train_images = read_all_images(TRAIN_DATA_PATH)
    test_images = read_all_images(TEST_DATA_PATH)
    images = np.concatenate((train_images, test_images))

    train_labels = read_labels(TRAIN_LABEL_PATH)
    test_labels = read_labels(TEST_LABEL_PATH)
    labels = np.concatenate((train_labels, test_labels))

    plt.imshow(np.reshape(images[2, :], (96, 96, 3)))
    plt.show()

    concat = np.c_[labels, images]

    skf = StratifiedKFold(n_splits=5, shuffle=True)

    file_name = ['A.txt', 'B.txt', 'C.txt', 'D.txt', 'E.txt']
    file_index = 0

    for _, test_index in skf.split(images, labels):
        np.savetxt(file_name[file_index],
                   concat[test_index, :],
                   fmt='%d',
                   delimiter=',')
        file_index += 1
Example #51
0
    analyzer = innvestigate.create_analyzer(method[0], model_wo_softmax, **method[1])
    # Some analyzers require training:
    #   analyzer.fit(test_img, batch_size=30, verbose=1)
    #   analyzers.append(analyzer)

print("subject_ID Sum_activation_of_right_hippocampal_volume Sum_activation_of_left_hippocampal_volume Sum_activation_of_both_hippocampal_volume ")
#subj_idx = 9 # good visualizations for subjects idx 4 (AD), 5 (AD), 6 (LMCI), 8 (AD), 10 (LMCI), 27 (CN)
for indx in range(len(grps)):
    test_img = images[indx]
    #test_orig = images_orig[indx]
    #print('test image for subject of binary group: %d' % test_Y[subj_idx, 1]) # first col will indicate CN, second col indicates MCI/AD
    #print('test image for subject of ADNI diagnosis: %d [1-CN, 3-LMCI, 4-AD]' % testgrps.Group.to_numpy(dtype=np.int)[subj_idx])
    
    ####print('test subject ID %s' % grps.RID.to_numpy(dtype=np.int)[indx])

    test_img = np.reshape(test_img, (1,)+ test_img.shape) # add first subj index again to mimic original array structure
    #test_orig = np.reshape(test_orig, (1,)+ test_orig.shape) # add first subj index again to mimic original array structure

    #for method,analyzer in zip(methods, analyzers):
    a = np.reshape(analyzer.analyze(test_img,neuron_selection=1), test_img.shape[1:4])
    #"""
    np.clip(a,a_min=0,a_max=None, out=a)
    a = scipy.ndimage.filters.gaussian_filter(a, sigma=0.8) # smooth activity image
    scale = np.quantile(np.absolute(a), 0.99)
    if scale == 0:
        scale = np.amax(a)  # after clipping at 0, max(-np.amin(a), np.amax(a)) reduces to np.amax(a)
    a = (a/scale)
    #"""
    #a = (a - np.min(a)) / (np.max(a) - np.min(a)) 
    overlay_act_both = hippo_both * a
Example #52
0
    def work(self):
        global GLOBAL_EP, GLOBAL_COUNTER
        t = 0
        while not COORD.should_stop():
            s = self.env.reset()
            ep_r = 0
            buffer_s, buffer_a, buffer_r, buffer_v, buffer_done = [], [], [], [], []
            done = False
            
            while not done:
                if not COLLECT_EVENT.is_set():                  
                    COLLECT_EVENT.wait()                        
                    buffer_s, buffer_a, buffer_r, buffer_v, buffer_done = [], [], [], [], []
                a,v = self.ppo.choose_action(s)
                s_, r, done, _ = self.env.step(a)
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)
                buffer_v.append(v)
                buffer_done.append(done)
                s = s_
                ep_r += r
                t+=1
                GLOBAL_COUNTER += 1
                # update ppo
                if (done or GLOBAL_COUNTER >= BATCH):
                    
                    t = 0
                    rewards = np.array(buffer_r)
                    v_final = [v * (1 - done)] 
                    terminals = np.array(buffer_done + [done])
                    values = np.array(buffer_v + v_final)
                    delta = rewards + GAMMA * values[1:] * (1 - terminals[1:]) - values[:-1]
                    advantage = discount(delta, GAMMA * LAMBDA, terminals)
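                    # Generalized Advantage Estimation: discounted sum of TD
                    # residuals delta_t with decay GAMMA*LAMBDA, reset at terminals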
                    returns = advantage + np.array(buffer_v)
                    advantage = (advantage - advantage.mean()) / np.maximum(advantage.std(), 1e-6)


                    bs, ba, br,badv = np.reshape(buffer_s, (-1,) + self.ppo.s_dim), np.vstack(buffer_a), \
                                    np.vstack(returns), np.vstack(advantage)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    buffer_v, buffer_done = [], []
                    COLLECT_EVENT.wait()
                    self.lock.acquire()
                    for i in range(len(bs)):
                        GLOBAL_DATA["state"].append(bs[i])
                        GLOBAL_DATA["reward"].append(br[i])
                        GLOBAL_DATA["action"].append(ba[i])
                        GLOBAL_DATA["advantage"].append(badv[i])
                    self.lock.release()
                    if GLOBAL_COUNTER >= BATCH and len(GLOBAL_DATA["state"])>= BATCH:
                        COLLECT_EVENT.clear()
                        UPDATE_EVENT.set() 
                    # self.ppo.update(bs, ba, br,badv)

                if GLOBAL_EP >= EP_MAX:
                    self.env.close()
                    COORD.request_stop()
                    break
            print("episode = {}, ep_r = {}, wid = {}".format(GLOBAL_EP,ep_r,self.wid))
            GLOBAL_EP += 1
            if GLOBAL_EP != 0 and GLOBAL_EP % 500 == 0:
                self.ppo.save_model(steps=GLOBAL_EP)
Example #53
0
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)

# the construction of the 3x3 transform T was lost in extraction; a minimal
# stand-in consistent with the surviving line below is an identity matrix
T = np.eye(3)
T[-1,-1] = .5


fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Make data
n = 21
u = np.linspace(0, 2*np.pi,n)
v = np.linspace(0, np.pi,n)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))

# put coords in a matrix to be transformed
M = np.concatenate( (np.reshape(x,(n*n,1)),np.reshape(y,(n*n,1)),np.reshape(z,(n*n,1))) ,axis=1)

# apply the transformation matrix to the coordinates
TM = T @ M.T

# get out the new coordinates
xp = np.reshape(TM[0,:],(n,n))
yp = np.reshape(TM[1,:],(n,n))
zp = np.reshape(TM[2,:],(n,n))

# Plot the surface
ax.plot_surface(xp,yp,zp, color='b')
#ax.axis('square')
ax.set_xlim3d(-1,1)
ax.set_ylim3d(-1,1)
ax.set_zlim3d(-1,1)
Example #54
0
def save_memmap(filenames,
                base_name='Yr',
                resize_fact=(1, 1, 1),
                remove_init=0,
                idx_xy=None,
                order='F',
                xy_shifts=None,
                is_3D=False,
                add_to_movie=0,
                border_to_0=0,
                save_dir=None):
    """ Saves efficiently a list of tif files into a memory mappable file
    Parameters
    ----------
        filenames: list
            list of tif files
        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"    
        resize_fact: tuple
            x,y, and z downampling factors (0.5 means downsampled by a factor 2) 
        remove_init: int
            number of frames to remove at the begining of each tif file (used for resonant scanning images if laser in rutned on trial by trial)
        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance idx_xy=(slice(150,350,None),slice(150,350,None))
        order: string
            whether to save the file in 'C' or 'F' order     
        xy_shifts: list 
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping    

        is_3D: boolean
            whether it is 3D data
    Return
    -------
        fname_new: the name of the mapped file, the format is such that the name will contain the frame dimensions and the number of f

    """

    #TODO: can be done online
    Ttot = 0
    for idx, f in enumerate(filenames):
        print(f)

        if is_3D:
            import tifffile
            #            print("Using tifffile library instead of skimage because of  3D")

            if idx_xy is None:
                Yr = tifffile.imread(f)[remove_init:]
            elif len(idx_xy) == 2:
                Yr = tifffile.imread(f)[remove_init:, idx_xy[0], idx_xy[1]]
            else:
                Yr = tifffile.imread(f)[remove_init:, idx_xy[0], idx_xy[1],
                                        idx_xy[2]]


#        elif :
#
#            if xy_shifts is not None:
#                raise Exception('Calblitz not installed, you cannot motion correct')
#
#            if idx_xy is None:
#                Yr = imread(f)[remove_init:]
#            elif len(idx_xy) == 2:
#                Yr = imread(f)[remove_init:, idx_xy[0], idx_xy[1]]
#            else:
#                raise Exception('You need to set is_3D=True for 3D data)')

        else:

            Yr = cm.load(f, fr=1, in_memory=True)
            if xy_shifts is not None:
                Yr = Yr.apply_shifts(xy_shifts,
                                     interpolation='cubic',
                                     remove_blanks=False)

            if idx_xy is None:
                if remove_init > 0:
                    Yr = np.array(Yr)[remove_init:]
            elif len(idx_xy) == 2:
                Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1]]
            else:
                raise Exception('You need to set is_3D=True for 3D data')

        if border_to_0 > 0:

            min_mov = Yr.calc_min()
            Yr[:, :border_to_0, :] = min_mov
            Yr[:, :, :border_to_0] = min_mov
            Yr[:, :, -border_to_0:] = min_mov
            Yr[:, -border_to_0:, :] = min_mov

        fx, fy, fz = resize_fact
        if fx != 1 or fy != 1 or fz != 1:

            if 'movie' not in str(type(Yr)):
                Yr = cm.movie(Yr, fr=1)

            Yr = Yr.resize(fx=fx, fy=fy, fz=fz)

        T, dims = Yr.shape[0], Yr.shape[1:]
        Yr = np.transpose(Yr, list(range(1, len(dims) + 1)) + [0])
        Yr = np.reshape(Yr, (np.prod(dims), T), order='F')

        if idx == 0:
            fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(
                dims[1]) + '_d3_' + str(
                    1 if len(dims) == 2 else dims[2]) + '_order_' + str(order)

            if save_dir is None:
                fname_tot = os.path.join(os.path.split(f)[0], fname_tot)
            else:
                fname_tot = os.path.join(save_dir, fname_tot)

            big_mov = np.memmap(fname_tot,
                                mode='w+',
                                dtype=np.float32,
                                shape=(np.prod(dims), T),
                                order=order)
        else:
            big_mov = np.memmap(fname_tot,
                                dtype=np.float32,
                                mode='r+',
                                shape=(np.prod(dims), Ttot + T),
                                order=order)
        #    np.save(fname[:-3]+'npy',np.asarray(Yr))

        big_mov[:, Ttot:Ttot +
                T] = np.asarray(Yr, dtype=np.float32) + 1e-10 + add_to_movie
        big_mov.flush()
        del big_mov
        Ttot = Ttot + T

    fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
    os.rename(fname_tot, fname_new)

    return fname_new
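# A minimal usage sketch (file names are hypothetical; assumes the caiman
# package is importable as cm, as in the body above):
#
#   fname_new = save_memmap(['movie1.tif', 'movie2.tif'], base_name='Yr',
#                           order='F', border_to_0=2)
#   Yr = np.memmap(fname_new, dtype=np.float32, order='F', mode='r')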
Example #55
0
def main(argv):

    parser = argparse.ArgumentParser()

    parser.add_argument("input_folder", help="folder to load")

    args = parser.parse_args()

    # Load all labels in a folder
    os.chdir(args.input_folder)
    print(len(glob.glob('*.txt')))

    area = np.empty((0, 1))  # start empty so no seed value pollutes the histogram

    min_width = 10000000000
    min_height = 10000000000

    for file_ptr in glob.glob("*.txt"):

        data = np.genfromtxt(os.path.join(args.input_folder, file_ptr),
                             delimiter=' ',
                             dtype=str)

        if (np.shape(data)) == (0, ):
            pass  # empty

        elif len(np.shape(data)) == 1:
            width = float(data[6]) - float(data[4])
            height = float(data[7]) - float(data[5])
            area = np.concatenate((np.reshape(
                area, (-1, 1)), np.reshape(np.array(width * height), (-1, 1))),
                                  axis=0)

            if 0 < float(data[6]) - float(data[4]) < min_width:
                min_width = float(data[6]) - float(data[4])
            if 0 < float(data[7]) - float(data[5]) < min_height:
                min_height = float(data[7]) - float(data[5])

        else:
            for row_ptr in range(0, np.shape(data)[0]):
                width = float(data[row_ptr, 6]) - float(data[row_ptr, 4])
                height = float(data[row_ptr, 7]) - float(data[row_ptr, 5])

                area = np.concatenate((np.reshape(
                    area,
                    (-1, 1)), np.reshape(np.array(width * height), (-1, 1))),
                                      axis=0)

                if 0 < float(data[row_ptr, 6]) - float(data[row_ptr,
                                                            4]) < min_width:
                    min_width = float(data[row_ptr, 6]) - float(data[row_ptr,
                                                                     4])
                if 0 < float(data[row_ptr, 7]) - float(data[row_ptr,
                                                            5]) < min_height:
                    min_height = float(data[row_ptr, 7]) - float(data[row_ptr,
                                                                      5])

    print('min height and width', min_height, min_width)

    max_width = min_width
    max_height = min_height

    for file_ptr in glob.glob("*.txt"):

        data = np.genfromtxt(os.path.join(args.input_folder, file_ptr),
                             delimiter=' ',
                             dtype=str)

        if (np.shape(data)) == (0, ):
            pass  # empty

        elif len(np.shape(data)) == 1:
            if float(data[6]) - float(data[4]) > max_width:
                max_width = float(data[6]) - float(data[4])
            if float(data[7]) - float(data[5]) > max_height:
                max_height = float(data[7]) - float(data[5])

        else:
            for row_ptr in range(0, np.shape(data)[0]):
                if float(data[row_ptr, 6]) - float(data[row_ptr, 4]) > max_width:
                    max_width = float(data[row_ptr, 6]) - float(data[row_ptr, 4])
                if float(data[row_ptr, 7]) - float(data[row_ptr, 5]) > max_height:
                    max_height = float(data[row_ptr, 7]) - float(data[row_ptr, 5])

    print('max height and width', max_height, max_width)

    hfont = {'fontname': 'FreeSerif'}

    fig, ax = plt.subplots(figsize=(4, 3))

    plt.hist(area, density=True)  # `normed` was removed in matplotlib 3.x
    plt.xlabel('area of BB', fontsize=14, **hfont)
    plt.ylabel('relative frequency', fontsize=14, **hfont)
    plt.tight_layout()
    start, end = ax.get_xlim()
    ax.xaxis.set_ticks(np.arange(start, end, end / 4.0))
    start, end = ax.get_ylim()
    ax.yaxis.set_ticks(np.arange(start, end, end / 4.0))
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.4f'))

    fig.savefig('./test.eps', format='eps', dpi=400, bbox_inches='tight')
    plt.show()
Example #56
0
import streamlit as st
import os
import json
import requests
import numpy as np
import matplotlib.pyplot as plt

URL = 'http://127.0.0.1:5000'

st.title('Neural Network Visualizer')
st.sidebar.markdown('# Input Image')

if st.button('Get random predictions'):
    response = requests.post(URL, data={})
    # print(response.text)
    response = json.loads(response.text)
    preds = response.get('prediction')
    image = response.get('image')
    image = np.reshape(image, (28, 28))

    st.sidebar.image(image, width=150)

    for layer, p in enumerate(preds):
        numbers = np.squeeze(np.array(p))

        plt.figure(figsize=(32, 4))

        if layer == 2:
            row = 1
            col = 10
        else:
            row = 2
            col = 16
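        # the example is truncated here; a plausible continuation (a sketch,
        # not the original code) would plot each layer's activations:
        #
        #   for i, number in enumerate(numbers):
        #       plt.subplot(row, col, i + 1)
        #       plt.imshow(number * np.ones((8, 8, 3)).astype('float32'))
        #       plt.xticks([])
        #       plt.yticks([])
        #   st.pyplot(plt)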
Example #57
0
def train(sess, env, args, actor, critic, actor_noise):

    # Set up summary Ops
    summary_ops, summary_vars = build_summaries()

    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)

    # Initialize target network weights
    actor.update_target_network()
    critic.update_target_network()

    # Initialize replay memory
    replay_buffer = ReplayBuffer(int(args['buffer_size']), int(args['random_seed']))

    # Needed to enable BatchNorm. 
    # This hurts the performance on Pendulum but could be useful
    # in other environments.
    # tflearn.is_training(True)

    for i in range(int(args['max_episodes'])):

        s = env.reset()

        ep_reward = 0
        ep_ave_max_q = 0

        for j in range(int(args['max_episode_len'])):

            if args['render_env']:
                env.render()

            # Added exploration noise
            #a = actor.predict(np.reshape(s, (1, 3))) + (1. / (1. + i))
            a = actor.predict(np.reshape(s, (1, actor.s_dim))) + actor_noise()

            s2, r, terminal, info = env.step(a[0])

            replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
                              terminal, np.reshape(s2, (actor.s_dim,)))

            # Keep adding experience to the memory until
            # there are at least minibatch size samples
            if replay_buffer.size() > int(args['minibatch_size']):
                s_batch, a_batch, r_batch, t_batch, s2_batch = \
                    replay_buffer.sample_batch(int(args['minibatch_size']))

                # Calculate targets
                target_q = critic.predict_target(
                    s2_batch, actor.predict_target(s2_batch))
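                # Bellman targets: y_k = r_k for terminal transitions,
                # else y_k = r_k + gamma * Q'(s2_k, mu'(s2_k))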

                y_i = []
                for k in range(int(args['minibatch_size'])):
                    if t_batch[k]:
                        y_i.append(r_batch[k])
                    else:
                        y_i.append(r_batch[k] + critic.gamma * target_q[k])

                # Update the critic given the targets
                predicted_q_value, _ = critic.train(
                    s_batch, a_batch, np.reshape(y_i, (int(args['minibatch_size']), 1)))

                ep_ave_max_q += np.amax(predicted_q_value)

                # Update the actor policy using the sampled gradient
                a_outs = actor.predict(s_batch)
                grads = critic.action_gradients(s_batch, a_outs)
                actor.train(s_batch, grads[0])

                # Update target networks
                actor.update_target_network()
                critic.update_target_network()

            s = s2
            ep_reward += r

            if terminal:

                summary_str = sess.run(summary_ops, feed_dict={
                    summary_vars[0]: ep_reward,
                    summary_vars[1]: ep_ave_max_q / float(j)
                })

                writer.add_summary(summary_str, i)
                writer.flush()

                print('| Reward: {:d} | Episode: {:d} | Qmax: {:.4f}'.format(int(ep_reward), \
                        i, (ep_ave_max_q / float(j))))
                break
Example #58
0
# the opening of this example was lost; a minimal reconstruction of the usual
# Keras character-LSTM setup, so the names below are defined (the file name is
# an assumption):
import numpy
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.utils import np_utils

raw_text = open("wonderland.txt").read().lower()
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
n_chars = len(raw_text)
n_vocab = len(chars)
print "Total Characters: ", n_chars
print "Total Vocab: ", n_vocab
# prepare the dataset of input to output pairs encoded as integers
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
    seq_in = raw_text[i:i + seq_length]
    seq_out = raw_text[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print "Total Patterns: ", n_patterns
# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# define the LSTM model
model = Sequential()
model.add(
    LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# define the checkpoint
filepath = "weights-improvement-wonderland-multilayer-{epoch:02d}-{loss:.4f}-bigger.hdf5"
Example #59
0
def InverseCompositionAffine(It, It1):
    # Input:
    #   It: template image
    #   It1: Current image
    #
    # Output:
    #   M: the Affine warp matrix [2x3 numpy array]

    # put your implementation here
    M = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    
    del_p_norm = 10
    temp = M + 0  # working copy of the warp matrix
    e = 0.1

    # crop the borders to reduce the influence of pixels that leave the frame
    It1 = It1[:-30,40:-40]
    It = It[:-30,40:-40]

    template_flatten = np.reshape(It , (It.shape[0]*It.shape[1],1))
    
    dx,dy = np.gradient(It)
    
    dx = dx.flatten()
    
    dy = dy.flatten()
    
    h=It.shape[0]
        
    w=It.shape[1]
        
    pad_M = np.array([0,0,1])
#        
    M_1 =np.vstack((temp,pad_M))

        
    gradients = np.array([dx,dy]).T
        
    print("grad", gradients.shape)
    
    Jacobian = np.zeros((w*h,2,6))
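    # Jacobian of the affine warp at pixel (x, y):
    #   dW/dp = [[x, y, 1, 0, 0, 0],
    #            [0, 0, 0, x, y, 1]]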
        
    x1 = np.arange(h)
       
    y1 = np.arange(w)
                
    xx1,yy1 = np.meshgrid(x1,y1)
        
    x_cod_it = xx1.flatten('F')
    
    y_cod_it= yy1.flatten('F')


    Jacobian[:,0,0] = x_cod_it
        
    Jacobian[:,0,1] = y_cod_it
        
    Jacobian[:,0,2] = 1
        
    Jacobian[:,1,3] = x_cod_it
        
    Jacobian[:,1,4] = y_cod_it
        
    Jacobian[:,1,5] = 1
     
    A = np.zeros((Jacobian.shape[0], 6))

    # steepest-descent images: row j is grad(It)_j multiplied by dW/dp at pixel j
    for j in range(Jacobian.shape[0]):
        A[j] = np.matmul(gradients[j,:], Jacobian[j,:,:])

    # pseudo-inverse of A; used by the commented-out compositional update below
    A_1 = np.matmul(np.linalg.inv(np.matmul(A.T,A)), A.T)

    i = 0
    while(del_p_norm > e):
        


        mask = np.ones((It1.shape[0], It1.shape[1]))
        
        pad_M = np.array([0,0,1])
        
        M_1 =np.vstack((temp,pad_M))

        template_flatten = affine_transform(It, np.linalg.inv(M_1))
        
        template_flatten = template_flatten.flatten()

        mask_warped=affine_transform(mask ,np.linalg.inv(M_1))

        warped_source = np.multiply(mask_warped, It1).reshape(w*h,1)

        template_flatten = np.reshape(template_flatten , (template_flatten.shape[0],1))

        
        b =  warped_source - template_flatten
        
        del_p,res,rank,s=np.linalg.lstsq(A,b ,rcond=None)
        
        temp[0][0]+= del_p[0]
        
        temp[0][1]+=del_p[1]
        
        temp[0][2]+=del_p[2]
        
        temp[1][0]+= del_p[3]
        
        temp[1][1]+=del_p[4]
        
        temp[1][2]+=del_p[5]
        
#
#        del_p = np.matmul(A_1, b)
#
#
#        
#        del_p[0]+= 1 + del_p[0]
#        
#        del_p[4]+=1 + del_p[4]
#        
#        del_p = np.reshape(del_p , (2,3))
#        
#        del_p= np.vstack((del_p ,np.array([0,0,1])))
#
#        temp = np.matmul(temp, np.linalg.inv(del_p))

        
        del_p_norm = np.linalg.norm(del_p) **2

        i += 1
    
    
    return temp
Example #60
0
#importing the dataset
from sklearn.datasets import load_digits
digits = load_digits()
# Print to show there are 1797 images (8 by 8 images for a dimensionality of 64)
print("Image Data Shape" , digits.data.shape)
import numpy as np 
import matplotlib.pyplot as plt

plt.figure(figsize=(20,4))
for index, (image, label) in enumerate(zip(digits.data[0:5], digits.target[0:5])):
    plt.subplot(1, 5, index + 1)
    plt.imshow(np.reshape(image, (8,8)), cmap=plt.cm.gray)
    plt.title('Training: %i\n' % label, fontsize = 20)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.25, random_state=0)
from sklearn.linear_model import LogisticRegression
logisticRegr = LogisticRegression()
logisticRegr.fit(x_train, y_train)
predictions = logisticRegr.predict(x_test)
score = logisticRegr.score(x_test, y_test)
print(score)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
cm = metrics.confusion_matrix(y_test, predictions)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Pastel1');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score: {0}'.format(score)