Example #1
 def progress_all(self):
     """
     Competence progress of the overall tree.
     
     """
     return self.progress_idxs(range(np.shape(self.get_data_x())[0] - self.progress_win_size, 
                                     np.shape(self.get_data_x())[0]))
Example #2
def sum_to_shape(X, s):
    """
    Sum axes of the array such that the resulting shape is as given.

    Thus, the shape of the result will be s or an error is raised.
    """
    # First, sum and remove axes that are not in s
    if np.ndim(X) > len(s):
        axes = tuple(range(-np.ndim(X), -len(s)))
    else:
        axes = ()
    Y = np.sum(X, axis=axes)

    # Second, sum axes that are 1 in s but keep the axes
    axes = ()
    for i in range(-np.ndim(Y), 0):
        if s[i] == 1:
            if np.shape(Y)[i] > 1:
                axes = axes + (i,)
        else:
            if np.shape(Y)[i] != s[i]:
                raise ValueError("Shape %s can't be summed to shape %s" %
                                 (np.shape(X), s))
    Y = np.sum(Y, axis=axes, keepdims=True)
    
    return Y
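# Usage sketch (not part of the original source): sum a (2, 3, 4) array down
# to shape (3, 1). The leading axis is summed away first, then the trailing
# axis is summed while being kept as length 1.
import numpy as np
X = np.arange(24).reshape(2, 3, 4)
Y = sum_to_shape(X, (3, 1))
print(Y.shape)                                    # (3, 1)
print(np.allclose(Y[:, 0], X.sum(axis=(0, 2))))   # True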
Example #3
 def __init__(self, data, classes, tree_features, n_trees=100):
     self.n_features = np.shape(data)[1]
     n_rows = np.shape(data)[0]
     n_nans = np.sum(np.isnan(data), 0)
     data = data[:, n_nans < n_rows]
     self.n_features = np.shape(data)[1]
     
     n_nans = np.sum(np.isnan(data), 1)
     data = data[n_nans < self.n_features, :]
     self.n_rows = np.shape(data)[0]
     
     if (tree_features > self.n_features):
         tree_features = self.n_features
     
     self.col_list = np.zeros((n_trees, tree_features), dtype='int')
     self.n_trees = n_trees
     self.bags = []
     for i in range(n_trees):
         cols = sample(range(self.n_features), tree_features)
         cols.sort()
         self.col_list[i, :] = cols
         data_temp = data[:, cols]
         n_nans = np.sum(np.isnan(data_temp), 1)
         data_temp = data_temp[n_nans == 0, :]
         classes_temp = classes[n_nans == 0]
         #bag = BaggingClassifier(n_estimators=1, max_features=tree_features)
         bag = RandomForestClassifier(n_estimators=1, max_features=tree_features)
         bag.fit(data_temp, classes_temp)
         self.bags.append(bag)
         print(np.shape(data_temp))
Example #4
	def train( self, eta = 0.25, iterations = 1000, outtype = "logistic" ):
		""" Train the network. Used instead of method early_stopping().

		eta        -- Learning rate.
		iterations -- Number of iterations to do.
		outtype    -- Activation function to use: "linear", "logistic"
		"""

		self.eta = eta
		self.outtype = outtype
		shuffle = list( range( self.data_amount ) )

		# Init arrays for weight updates
		self.update_w1 = ny.zeros( ( ny.shape( self.weights_layer1 ) ) )
		self.update_w2 = ny.zeros( ( ny.shape( self.weights_layer2 ) ) )

		# Start training
		for n in range( iterations ):
			self.outputs = self._forward( self.inputs )

			# Compute error and update weights
			deltao, deltah = self._compute_errors()
			self._update_weights( deltao, deltah )

			# Randomise order of training data
			ny.random.shuffle( shuffle )
			self.inputs = self.inputs[shuffle,:]
			self.targets = self.targets[shuffle,:]
Example #5
def computeNumericalGradient(J,theta):
    # numgrad = computeNumericalGradient(J, theta)
    # theta: a vector of parameters
    # J: a function that outputs a scalar (real-valued) result.
    # Calling y = J(theta) will return the function value at theta. 
      
    # Initialize numgrad with zeros
    numgrad = np.zeros(np.shape(theta))

    ## ---------- YOUR CODE HERE --------------------------------------
    # Instructions: 
    # Implement numerical gradient checking, and return the result in numgrad.  
    # (See Section 2.3 of the lecture notes.)
    # You should write code so that numgrad(i) is (the numerical approximation to) the 
    # partial derivative of J with respect to the i-th input argument, evaluated at theta.  
    # I.e., numgrad(i) should be the (approximately) the partial derivative of J with 
    # respect to theta(i).
    #               
    # Hint: You will probably want to compute the elements of numgrad one at a time.
    for i in range(0,numgrad.shape[0]):
        k = np.zeros(np.shape(theta))
        k[i] = 0.0001
        y1 = J(theta+k)
        y2 = J(theta - k)
        numgrad[i] = (y1-y2)/0.0002

    ## ---------------------------------------------------------------
    return numgrad
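# Usage sketch (my own check, not part of the original exercise): for
# J(theta) = sum(theta**2) the analytic gradient is 2*theta, so the
# central-difference estimate should agree with it to about 1e-8.
import numpy as np
theta = np.array([1.0, -2.0, 3.0])
J = lambda t: np.sum(t ** 2)
numgrad = computeNumericalGradient(J, theta)
print(np.max(np.abs(numgrad - 2 * theta)))   # ~0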
Example #6
    def test_2d_array_parameters_2d_array_input(self):
        """
        When given an array input it must be broadcastable with all the
        parameters.
        """

        t = TModel_1_2([[1, 2], [3, 4]], [[10, 20], [30, 40]],
                          [[1000, 2000], [3000, 4000]])

        y1, z1 = t([[100, 200], [300, 400]])
        assert np.shape(y1) == np.shape(z1) == (2, 2)
        assert np.all(y1 == [[111, 222], [333, 444]])
        assert np.all(z1 == [[1111, 2222], [3333, 4444]])

        y2, z2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
        assert np.shape(y2) == np.shape(z2) == (2, 2, 2, 2)
        assert np.all(y2 == [[[[111, 122], [133, 144]],
                              [[211, 222], [233, 244]]],
                             [[[311, 322], [333, 344]],
                              [[411, 422], [433, 444]]]])
        assert np.all(z2 == [[[[1111, 2122], [3133, 4144]],
                              [[1211, 2222], [3233, 4244]]],
                             [[[1311, 2322], [3333, 4344]],
                              [[1411, 2422], [3433, 4444]]]])

        with pytest.raises(ValueError):
            # Doesn't broadcast
            y3, z3 = t([[100, 200, 300], [400, 500, 600]])
Example #7
def checkJointsNonLinearised(joints):
    """Check if joints are in the [[x,y],[x,y],...] or [[x,y,z],[x,y,z],...] format"""
    try:
        check = np.shape(joints)[0] > np.shape(joints)[1]
        return check
    except:
        return False
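# Usage sketch (not part of the original source): the check is purely
# shape-based, so more rows than columns means the joints are still in
# per-point [[x, y], ...] form.
joints = [[1, 2], [3, 4], [5, 6]]                    # shape (3, 2)
print(checkJointsNonLinearised(joints))              # True
print(checkJointsNonLinearised([1, 2, 3, 4, 5, 6]))  # False (1-D, no second axis)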
Example #8
    def test_scalar_parameters_1d_array_input(self):
        """
        The dimension of the input should match the number of models unless
        model_set_axis=False is given, in which case the input is copied across
        all models.
        """

        t = TModel_1_1([1, 2], [10, 20], n_models=2)

        with pytest.raises(ValueError):
            y = t(np.arange(5) * 100)

        y1 = t([100, 200])
        assert np.shape(y1) == (2,)
        assert np.all(y1 == [111, 222])

        y2 = t([100, 200], model_set_axis=False)
        # In this case the value [100, 200, 300] should be evaluated on each
        # model rather than evaluating the first model with 100 and the second
        # model  with 200
        assert np.shape(y2) == (2, 2)
        assert np.all(y2 == [[111, 211], [122, 222]])

        y3 = t([100, 200, 300], model_set_axis=False)
        assert np.shape(y3) == (2, 3)
        assert np.all(y3 == [[111, 211, 311], [122, 222, 322]])
Example #9
    def test_1d_array_parameters_1d_array_input(self):
        """
        When the input is an array, if model_set_axis=False then it must
        broadcast with the shapes of the parameters (excluding the
        model_set_axis).

        Otherwise all dimensions must be broadcastable.
        """

        t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
                          [[10, 20, 30], [40, 50, 60]], n_models=2)

        with pytest.raises(ValueError):
            y1 = t([100, 200, 300])

        y1 = t([100, 200])
        assert np.shape(y1) == (2, 3)
        assert np.all(y1 == [[111, 122, 133], [244, 255, 266]])

        with pytest.raises(ValueError):
            # Doesn't broadcast with the shape of the parameters, (3,)
            y2 = t([100, 200], model_set_axis=False)

        y2 = t([100, 200, 300], model_set_axis=False)
        assert np.shape(y2) == (2, 3)
        assert np.all(y2 == [[111, 222, 333],
                             [144, 255, 366]])
Example #10
def mlr_show4( clf, RMv, yEv, disp = True, graph = True):
	yEv_calc = clf.predict( RMv)

	if len( np.shape(yEv)) == 2 and len( np.shape(yEv_calc)) == 1:
		yEv_calc = np.mat( yEv_calc).T

	r_sqr, RMSE, MAE, DAE = estimate_accuracy4( yEv, yEv_calc, disp = disp)

	if graph:
		plt.figure()
		ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
		plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz)
		ax = plt.gca()
		lims = [
			np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
			np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
		]
		# now plot both limits against each other
		#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
		ax.plot(lims, lims, '-', color = 'pink')
		plt.xlabel('Experiment')
		plt.ylabel('Prediction')
		#plt.title( '$r^2$={0:.2e}, RMSE={1:.2e}, AAE={2:.2e}'.format( r_sqr, RMSE, aae))
		plt.title( r'$r^2$={0:.1e},$\sigma$={1:.1e},MAE={2:.1e},DAE={3:.1e}'.format( r_sqr, RMSE, MAE, DAE))
		plt.show()

	return r_sqr, RMSE, MAE, DAE
Example #11
    def test_mixed_array_parameters_1d_array_input(self):
        """
        When given an array input it must be broadcastable with all the
        parameters.
        """

        t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
                           [[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
                          [1, 2, 3])

        y1 = t([10, 20, 30])
        assert np.shape(y1) == (2, 2, 3)
        assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
                             [[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])

        y2 = t([[[[10]]], [[[20]]], [[[30]]]])
        assert np.shape(y2) == (3, 2, 2, 3)
        assert_allclose(y2, [[[[11.01, 12.02, 13.03],
                               [11.04, 12.05, 13.06]],
                              [[11.07, 12.08, 13.09],
                               [11.10, 12.11, 13.12]]],
                             [[[21.01, 22.02, 23.03],
                               [21.04, 22.05, 23.06]],
                              [[21.07, 22.08, 23.09],
                               [21.10, 22.11, 23.12]]],
                             [[[31.01, 32.02, 33.03],
                               [31.04, 32.05, 33.06]],
                              [[31.07, 32.08, 33.09],
                               [31.10, 32.11, 33.12]]]])
Example #12
    def mm_to_galvo_approx(x, y=None):
        """ Given one or many points in mm space, map them to galvo space.
            e.g.,
            >>> Printer.mm_to_galvo(0, 0) # -> galvo ticks for middle of build area.
            >>> Printer.mm_to_galvo([[0, 1, 2], [0, 0, 0]]) # -> A three-segment line along the x axis.
        """
        xy = x
        if y is not None:
            if np.shape(x) != np.shape(y):
                raise TypeError('x and y shapes must match. Got x.shape: {}, y.shape: {}'.format(np.shape(x), np.shape(y)))
            xy = np.array([x, y]) # Allows calling with just an x and a y.
        # These polynomials are a fit to all Form 1/1+s.
        Px = np.array([  3.27685507e+04,   4.80948842e+02,  -1.22079970e-01,
                         -2.88953161e-03,   6.08478254e-01,  -8.81889894e-02,
                         -2.20922460e-05,   4.41734858e-07,   6.76006698e-03,
                         -1.02093319e-05,  -1.43020804e-06,   2.03140758e-08,
                         -6.71090318e-06,  -4.36026159e-07,   2.62988209e-08,
                         8.32187652e-11])
        Py = np.array([  3.27661362e+04,   5.69452975e-01,  -2.39793282e-03,
                         9.83778919e-06,   4.79035581e+02,  -8.13031539e-02,
                         -2.66499770e-03,  -4.40219799e-07,  -1.06247442e-01,
                         5.18419181e-05,   1.47754740e-06,  -1.60049118e-09,
                         -2.44473912e-03,  -1.31398011e-06,   1.83452740e-08,
                         3.16943985e-10])

        xy = np.asarray(xy, dtype=float)
        if xy.shape[0] != 2:
            raise TypeError('xy must be a two-vector or 2xn or 2xmxn... not shape {}.'.format(xy.shape))
        shp = xy.shape[1:] # polyval2d wants vector inputs, not multidimensional.
        return np.array([polyval2d(P, *xy.reshape(2,-1)).reshape(shp) for P in (Px, Py)])
Example #13
def cv_show( yEv, yEv_calc, disp = True, graph = True, grid_std = None):

	# If the output is a vector and the original is a matrix,
	# the output is converted to a matrix.
	if len( np.shape(yEv_calc)) == 1:	
		yEv_calc = np.mat( yEv_calc).T
	if len( np.shape(yEv)) == 1:
		yEv = np.mat( yEv).T

	r_sqr, RMSE = jchem.estimate_accuracy( yEv, yEv_calc, disp = disp)
	if graph:
		#plt.scatter( yEv.tolist(), yEv_calc.tolist())	
		plt.figure()	
		ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
		plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz) # Change ms 
		ax = plt.gca()
		lims = [
			np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
			np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
		]
		# now plot both limits against each other
		#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
		ax.plot(lims, lims, '-', color = 'pink')
		plt.xlabel('Experiment')
		plt.ylabel('Prediction')
		if grid_std:
			plt.title( '($r^2$, std) = ({0:.2e}, {1:.2e}), RMSE = {2:.2e}'.format( r_sqr, grid_std, RMSE))
		else:
			plt.title( '$r^2$ = {0:.2e}, RMSE = {1:.2e}'.format( r_sqr, RMSE))
		plt.show()
	return r_sqr, RMSE
Example #14
def chooseBestSplit(dataSet, leafType=regLeaf, errType=regErr, ops=(1, 4)):
    tolS = ops[0]
    tolN = ops[1]
    if len(set(dataSet[:, -1].T.tolist()[0])) == 1:
        return None, leafType(dataSet)
    m, n = np.shape(dataSet)
    S = errType(dataSet)
    bestS = np.inf
    bestIndex = 0
    bestValue = 0
    for featIndex in range(n - 1):
        for splitVal in set(dataSet[:, featIndex].T.tolist()[0]):
            mat0, mat1 = binSplitData(dataSet, featIndex, splitVal)
            if np.shape(mat0)[0] < tolN or np.shape(mat1)[0] < tolN:
                continue
            newS = errType(mat0) + errType(mat1)
            if newS < bestS:
                bestIndex = featIndex
                bestValue = splitVal
                bestS = newS
    if S - bestS < tolS:
        return None, leafType(dataSet)
    mat0, mat1 = binSplitData(dataSet, bestIndex, bestValue)
    if np.shape(mat0)[0] < tolN or np.shape(mat1)[0] < tolN:
        return None, leafType(dataSet)
    return bestIndex, bestValue
Example #15
    def mm_to_galvo(self, x, y):
        """ Given one or many points in mm space, map them to galvo space.
            e.g.,
            >>> Printer.mm_to_galvo(0, 0) # -> galvo ticks for middle of build area.
            >>> Printer.mm_to_galvo([[0, 1, 2], [0, 0, 0]]) # -> A three-segment line along the x axis.
            The returned array is 2xN, where N is the number of source points
        """
        xshape = np.shape(x)
        if self._grid_table is None:
            grid = np.array(self.read_grid_table())
            assert grid.shape == (5, 5, 2)

            pts_mm = np.linspace(-64, 64, 5) # Grid positions in mm

            # Interpolators for X and Y values (mm to galvo ticks)
            fit_x = scipy.interpolate.interp2d(pts_mm, pts_mm, grid[:,:,0])
            fit_y = scipy.interpolate.interp2d(pts_mm, pts_mm, grid[:,:,1])
            self._grid_table = (fit_x, fit_y)

        if np.shape(x) != np.shape(y):
            raise TypeError('x and y shapes must match. Got x.shape: {}, y.shape: {}'.format(np.shape(x), np.shape(y)))

        x = np.atleast_1d(x)
        y = np.atleast_1d(y)

        x_ = [self._grid_table[0](a, b) for a, b in zip(x, y)]
        y_ = [self._grid_table[1](a, b) for a, b in zip(x, y)]

        result = np.hstack([x_, y_]).T
        if xshape == (): # If it's called with scalars, return a flat result.
            return result.flatten()
        return result
Example #16
    def plot_checkpoint(self,b):
        orig_filename = "/data/batch_check_"+str(b)+"_original.png"

        image_A = self.X_test_A[5]
        image_A = np.reshape(image_A, [self.W_A_test,self.H_A_test,self.C_A_test])
        print("Image_A shape: " +str(np.shape(image_A)))
        fake_B = self.generator_A_to_B.Generator.predict(image_A.reshape(1, self.W_A, self.H_A, self.C_A ))
        fake_B = np.reshape(fake_B, [self.W_A_test,self.H_A_test,self.C_A_test])
        print("fake_B shape: " +str(np.shape(fake_B)))
        reconstructed_A = self.generator_B_to_A.Generator.predict(fake_B.reshape(1, self.W_A, self.H_A, self.C_A ))
        reconstructed_A = np.reshape(reconstructed_A, [self.W_A_test,self.H_A_test,self.C_A_test])
        print("reconstructed_A shape: " +str(np.shape(reconstructed_A)))
        # from IPython import embed; embed()

        checkpoint_images = np.array([image_A, fake_B, reconstructed_A])

        # Rescale images 0 - 1
        checkpoint_images = 0.5 * checkpoint_images + 0.5

        titles = ['Original', 'Translated', 'Reconstructed']
        fig, axes = plt.subplots(1, 3)
        for i in range(3):
            image = checkpoint_images[i]
            image = np.reshape(image, [self.H_A_test,self.W_A_test,self.C_A_test])
            axes[i].imshow(image)
            axes[i].set_title(titles[i])
            axes[i].axis('off')
        fig.savefig("/data/batch_check_"+str(b)+".png")
        plt.close('all')
        return
Example #17
def load_xvg():															#DONE
	
	global nb_rows, nb_cols
	global first_col
	global label_xaxis
	global label_yaxis
	global f_data
	global f_legend
	global f_col_legend
	global nb_col_tot
	f_data = {}
	f_legend = {}
	label_xaxis = "x axis"
	label_yaxis = "y axis"
	tmp_nb_rows_to_skip = 0

	#get file content
	with open(args.xvgfilename) as f:
		lines = f.readlines()
			
	#determine legends and nb of lines to skip
	c_index = 0
	for l_index in range(0,len(lines)):
		line = lines[l_index]
		if line[-1] == '\n':
			line = line[:-1]
		if line[0] in args.comments:
			tmp_nb_rows_to_skip += 1
			if "legend \"" in line:
				try:
					tmp_col = int(int(line.split("@ s")[1].split(" ")[0]))
					tmp_name = line.split("legend \"")[1][:-1]
					f_legend[c_index] = tmp_name
					c_index += 1
				except:
					print("\nError: unexpected data format in line " + str(l_index) + " in file " + str(args.xvgfilename) + ".")
					print(" -> " + str(line))
					sys.exit(1)
			if "xaxis" in line and  "label " in line:
				label_xaxis = line.split("label ")[1]
			if "yaxis" in line and  "label " in line:
				label_yaxis = line.split("label ")[1]
			
	#get all data in the file
	tmp_f_data = np.loadtxt(args.xvgfilename, skiprows = tmp_nb_rows_to_skip)
										
	#get nb of rows and cols
	nb_rows = np.shape(tmp_f_data)[0]
	nb_cols = np.shape(tmp_f_data)[1] -1 

	#get first col
	first_col = tmp_f_data[:,0] * float(args.xaxis)

	#stock data
	if scale_x_only:
		f_data = tmp_f_data[:,1:]
	else:
		f_data = tmp_f_data[:,1:] * conv_factor
			
	return
Example #18
def solve_master(tree, num_node, Current_node, g_flag, thetaB_list, SUBD,  coefficients, xBar, thetaOpt, lamOpt, muOpt, y,  cons, iteration,  pool=None):
    '''we solve the relaxed master problems based on thetaB_list, then select the infimum of all minimum values.
       Parameters: About the tree : tree, Current_node
                   About the subproblem: SUBD, xBar, thetaOpt, lamOpt, muOpt, y
                   About the boundary: theta_L, theta_U''' 
                   
    (M, N) = np.shape(y)
    K = np.shape(xBar[-1])[0]
    
    x_stor = None
    Q_stor = np.inf
    next_node = -1
    
    #store all the MLBD
    MLBD_stor = [] 

    if pool is None:
        tree.nodes[Current_node].set_parameters_qualifying_constraint(lamOpt,  thetaOpt, muOpt,  xBar, SUBD,  g_flag,  coefficients)
        #check whether the coefficients are already stored into the parents or not.
        
        print('\n%d master problems are being solved...' % len(thetaB_list))

        for index in range(len(thetaB_list)):
            thetaB = thetaB_list[index].copy()
            status, objVal, xOpt,  thetaB, lagrangian_coefficient= solve_master_s(tree, Current_node, coefficients, thetaOpt, xBar, lamOpt, muOpt, thetaB.copy(), y, g_flag, cons)
            #print objVal, xOpt
            
            if status == 2 and objVal < SUBD - np.spacing(1):
                node = tree.add_node(num_node, 0, 1, Current_node)
                node.set_parameters_thetaB(thetaB,  xOpt, objVal, lagrangian_coefficient)
                MLBD_stor.append(objVal)
                if objVal < Q_stor:
                    Q_stor = objVal
                    next_node = num_node
                    x_stor = xOpt
                num_node = num_node + 1

    else:
        tree.nodes[Current_node].set_parameters_qualifying_constraint(lamOpt,  thetaOpt, muOpt,  xBar, SUBD,  g_flag,  coefficients)
        len_thetaB = len(thetaB_list)
        print('\n%d master problems are being solved...' % len_thetaB)
        results = [pool.apply_async(solve_master_s,  args = (tree, Current_node, coefficients, thetaOpt, xBar, lamOpt, muOpt, thetaB.copy(), y, g_flag, cons)) for thetaB in thetaB_list]

        #put all the result into the tree.
        for p in results:
            #result = [status, objVal, xOpt, thetaB]
            result = p.get() 
            if result[0] == 2 and result[1] < SUBD - np.spacing(1):
                node = tree.add_node(num_node,  0,  1,  Current_node)
                node.set_parameters_thetaB(result[3],  result[2],  result[1], result[4])
                #node.set_parameter(lamOpt,  thetaOpt, result[3],  muOpt,  xBar,  result[2],  SUBD,  result[1], g_flag,  coefficients)
                MLBD_stor.append(result[1])
                if result[1] < Q_stor:
                    Q_stor = result[1]
                    next_node = num_node
                    x_stor =  result[2]
                num_node += 1

    return x_stor, Q_stor, next_node, num_node, MLBD_stor
Example #19
def find_radial_centers(singleton):
    '''Intended to find areas where the rule-image is blue and return the center of such areas'''
    img=singleton.img
    Aufloesung=singleton.center_find_resolution
    quadrate=[]
    for x1 in range( np.shape(img)[0] //Aufloesung ):
        for x2 in range( np.shape(img)[1]//Aufloesung):
            quadrate.append([x1, x2])

    quadrate= [q for q in quadrate if np.argmax(img[q[0]*Aufloesung][q[1]*Aufloesung]) == 2] # keep only the blue squares

    areas=[]

    while len(quadrate)>0:   # find areas, i.e. group the squares into connected regions
        areas.append([quadrate[0]])
        quadrate.pop(0)
        h=0
        while h<len( areas[len(areas)-1] ):
            q=areas[len(areas)-1][h]
            for nachbar in[[q[0]-1, q[1]], [q[0]+1, q[1]], [q[0], q[1]-1], [q[0], q[1]+1]]:
                if nachbar in quadrate:
                    areas[len(areas)-1].append(nachbar)
                    quadrate.remove(nachbar)
            h+=1

    centers=   [ np.array([sum( [x[0] for x in y])/len(y)*Aufloesung, sum([x[1] for x in y])/len(y)*Aufloesung]) for y in areas]
    return centers
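# Smoke test (hypothetical, not part of the original source): the function
# only needs an object exposing .img (an RGB array whose blue regions have
# their maximum in channel 2) and .center_find_resolution.
import numpy as np
class _FakeSingleton:
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    center_find_resolution = 5
_FakeSingleton.img[20:40, 60:80, 2] = 255            # one solid blue patch
print(find_radial_centers(_FakeSingleton()))         # ~[array([27.5, 67.5])]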
Example #20
def null(A, eps=1e-12):
    '''Compute a base of the null space of A.'''
    u, s, vh = np.linalg.svd(A)
    padding = max(0,np.shape(A)[1]-np.shape(s)[0])
    null_mask = np.concatenate(((s <= eps), np.ones((padding,),dtype=bool)),axis=0)
    null_space = np.compress(null_mask, vh, axis=0)
    return np.transpose(null_space)
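# Usage sketch (not part of the original source): the second row is a
# multiple of the first, so this 2x3 matrix has rank 1 and a two-dimensional
# null space; every returned column v satisfies A @ v = 0.
import numpy as np
A = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0]])
ns = null(A)
print(ns.shape)                   # (3, 2)
print(np.allclose(A @ ns, 0.0))   # True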
Example #21
def count_clones_old(data):
    """
    Count the number of alive (cell count > 0) clones at each timestep.
    
    Variables type:
    data - numpy array
    
    Output type:
    numpy 1-D array
    
    Details about data:
    data stores information about colonies in format
            time: 0     time: 1     time: 2 ...    
    
    clone 1   23          23          24
    
    clone 2   45          44          44
    
    using numpy array
    >> data
    [[2, 1, 1, 0, 0],
     [0, 0, 1, 1, 0]]
     
    Usage example:
    >> data
    [[2,1,1,0,0],
     [0,0,1,1,0]]
    >> count_clones(data)
    [1,1,2,1,0]
    """
    count = []
    print("Shape of data array:", np.shape(data))
    for ind in range(np.shape(data)[1]):
        count.append(np.count_nonzero(data[:,ind]))
    return np.array(count)
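# Runnable version of the docstring example (added here for convenience):
import numpy as np
data = np.array([[2, 1, 1, 0, 0],
                 [0, 0, 1, 1, 0]])
print(count_clones_old(data))   # [1 1 2 1 0]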
Example #22
 def __init__(self, x, y):
             
     assert np.ndim(x)==2 and np.ndim(y)==2 and np.shape(x)==np.shape(y), \
         'x and y must be 2D arrays of the same size.'
     
     if np.any(np.isnan(x)) or np.any(np.isnan(y)):
         x = np.ma.masked_where( (np.isnan(x)) | (np.isnan(y)) , x)
         y = np.ma.masked_where( (np.isnan(x)) | (np.isnan(y)) , y)
         
     self.x_vert = x
     self.y_vert = y
     
     mask_shape = tuple([n-1 for n in self.x_vert.shape])
     self.mask_rho = np.ones(mask_shape, dtype='d')
     
      # If a masked array is given for the vertices, modify the mask such that
      # non-existent grid points are masked.  A cell requires all four
      # vertices to be defined as water points.
     if isinstance(self.x_vert, np.ma.MaskedArray):
         mask = (self.x_vert.mask[:-1,:-1] | self.x_vert.mask[1:,:-1] | \
                 self.x_vert.mask[:-1,1:] | self.x_vert.mask[1:,1:])
         self.mask_rho = np.asarray(~(~np.bool_(self.mask_rho) | mask), dtype='d')
     
     if isinstance(self.y_vert, np.ma.MaskedArray):
         mask = (self.y_vert.mask[:-1,:-1] | self.y_vert.mask[1:,:-1] | \
                 self.y_vert.mask[:-1,1:] | self.y_vert.mask[1:,1:])
         self.mask_rho = np.asarray(~(~np.bool_(self.mask_rho) | mask), dtype='d')
     
     self._calculate_subgrids()
     self._calculate_metrics()        
Example #23
def get_boundaries(img, color_img_blanks):
    """This function calculates the upper and lower boundary of text in the document."""
    img_height = np.shape(img)[0]
    blank_lines = []
    first_non_blank = False
    upper_boundary = 0
    lower_boundary = 0

    for idx in range(img_height // 2):
        line = img[idx,:]
        if len(np.where(line == 0)[0]) >= 50 and len(np.where(line == 0)[0]) < 0.1 * len(line):
            first_non_blank = True
        if len(np.where(line == 0)[0]) < 50 and first_non_blank:
            blank_lines.append(idx)
            cv2.line(color_img_blanks, (0, idx), (np.shape(img)[1], idx), (0,255,0), 1)
            
    first_non_blank = False
    for idx in reversed(range(img_height // 2, img_height)):
        line = img[idx,:]
        if len(np.where(line == 0)[0]) >= 50 and len(np.where(line == 0)[0]) < 0.1 * len(line):
            first_non_blank = True
        if len(np.where(line == 0)[0]) < 50 and first_non_blank:
            blank_lines.append(idx)
            cv2.line(color_img_blanks, (0, idx), (np.shape(img)[1], idx), (0,255,0), 1)

    upper_blank_lines = [item for item in blank_lines if item < img_height/2]
    if len(upper_blank_lines) > 0:
        upper_boundary = longest_increasing_run(upper_blank_lines)[0]
    
    lower_blank_lines = [item for item in blank_lines if item >= img_height/2]
    if len(lower_blank_lines) > 0:
        lower_boundary = longest_increasing_run(lower_blank_lines)[0]

    return upper_boundary, lower_boundary
Example #24
def equal(a, b, exact):
    if array_equal(a, b):
        return True

    if hasattr(a, 'dtype') and a.dtype in ['f4','f8']:
        nnans = isnan(a).sum()
        if nnans > 0:
            # For results containing NaNs, just check that the number
            # of NaNs is the same in both arrays.  This check could be
            # made more exhaustive, but checking element by element in
            # python space is very expensive in general.
            return nnans == isnan(b).sum()
        ninfs = isinf(a).sum()
        if ninfs > 0:
            # Ditto for Inf's
            return ninfs == isinf(b).sum()
    if exact:
        return (shape(a) == shape(b)) and alltrue(ravel(a) == ravel(b), axis=0)
    else:
        if hasattr(a, 'dtype') and a.dtype == 'f4':
            atol = 1e-5   # Relax precision for special opcodes, like fmod
        else:
            atol = 1e-8
        return (shape(a) == shape(b) and
                allclose(ravel(a), ravel(b), atol=atol))
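# Usage sketch (not part of the original source). equal() relies on NumPy
# names (array_equal, isnan, isinf, shape, ravel, alltrue, allclose) being
# available at module level, e.g. via `from numpy import *`. When either
# float array contains NaNs, only the NaN counts are compared.
import numpy as np
a = np.array([1.0, 2.0, np.nan])
b = np.array([1.0, 2.5, np.nan])
print(equal(a, b, exact=False))                        # True (same NaN count)
print(equal(np.arange(3), np.arange(3), exact=True))   # True (exact match)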
Example #25
def loadSamplePlanktons(numSamples=100, rotate=False, dim=28):
    if dim == 28:
        if not rotate:
            from pylearn2_plankton.planktonDataPylearn2 import PlanktonData
            ds = PlanktonData(which_set='train')
            designMatrix = ds.get_data()[0] # index 1 is the label
            print("Shape of Design Matrix", np.shape(designMatrix))
            designMatrix = np.reshape(designMatrix, 
                                      (ds.get_num_examples(), 1, MAX_PIXEL, MAX_PIXEL) )
            if numSamples != 'All':
                return np.array(designMatrix[:numSamples,...], dtype=np.float32)
            else:
                return np.array(designMatrix, dtype=np.float32)
        else:
            print("Loading Rotated Data")
            designMatrix = np.load(open(os.path.join(os.environ['PYLEARN2_DATA_PATH'] ,'planktonTrainRotatedX.p'), 'r'))
            return np.reshape(np.array(designMatrix[:numSamples,...], dtype=np.float32),
                              (numSamples,1,MAX_PIXEL,MAX_PIXEL))
    elif dim == 40:
        from pylearn2_plankton.planktonData40pixels import PlanktonData
        ds = PlanktonData(which_set='train')
        designMatrix = ds.get_data()[0] # index 1 is the label
        print("Shape of Design Matrix", np.shape(designMatrix))
        designMatrix = np.reshape(designMatrix, 
                                  (ds.get_num_examples(), 1, 40, 40) )
        if numSamples != 'All':
            return np.array(designMatrix[:numSamples,...], dtype=np.float32)
        else:
            return np.array(designMatrix, dtype=np.float32)
Example #26
 def test_shapes_scalarvalue_derivative(self):
     P = KroghInterpolator(self.xs,self.ys)
     n = P.n
     assert_array_equal(np.shape(P.derivatives(0)), (n,))
     assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
     assert_array_equal(np.shape(P.derivatives([0])), (n,1))
     assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
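# Usage sketch (my own example, not from the SciPy test suite): it mirrors the
# shapes asserted above. For a scalar-valued interpolator built from n points,
# derivatives() stacks all n derivative orders along the first axis.
import numpy as np
from scipy.interpolate import KroghInterpolator
P = KroghInterpolator([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])   # interpolates x**2
print(np.shape(P.derivatives(0.5)))           # (3,)
print(np.shape(P.derivatives([0.0, 1.0])))    # (3, 2)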
Example #27
def pop_plot(data, thresh = a + L + 10., save = False, fname = '0.png'):
    '''Plot percentage of population in target zone.'''
    t = np.arange(0,np.shape(data)[0])
    frac = 100*np.sum(data >= thresh, axis = 1)/float(np.shape(data)[1])
    fig = plt.figure("Population Plot", (9,6))
    ax = fig.add_subplot(111)
    #ax.scatter(t, frac, c = 'black', lw = 0, s = 5)
    ax.plot(t, frac, c = 'red', lw = 1)
    idx = (np.abs(frac - 75.0)).argmin()
    ax.hlines(75.0, t[0], t[-1], color = 'black', linestyle = 'dashed')
    ax.text(idx, 70.0, '$t_{75} = %d$' % idx, fontsize = 16)
    idx = (np.abs(frac - 50.0)).argmin()
    ax.hlines(50.0, t[0], t[-1], color = 'black', linestyle = 'dashed')
    ax.text(idx, 45.0, '$t_{50} = %d$' % idx, fontsize = 16)
    ax.grid(True)
    ax.axis([t[0],t[-1],0.0,100.0])
    ax.set_ylabel("Population (%)", size = 16)
    ax.set_xlabel("Time (s)", size = 16)
    ax.set_title("Population vs. Time")
    # Choose whether or not to save the file.
    if save == False:
        plt.show()
    else:
        plt.savefig(fname)
        plt.close()
    return None
Example #28
def test_array_sizes():
    """Check we have the right sizes for the arrays"""
    assert(np.shape(snap.dm['pos']) == (4096, 3))
    assert(np.shape(snap['vel']) == (8192, 3))
    assert(np.shape(snap.gas['rho']) == (4039,))
    assert(snap.gas['u'].dtype == np.float32)
    assert(snap.gas['iord'].dtype == np.int32)
Example #29
def border_mask(img, p1, p2, device, debug, color="black"):
  # Using rectangle_mask to mask the edge of the plotting region misses the border of the image by 1 pixel;
  # this function fills that region in.
  # Note that p1 = (0,0) is the top-left corner and p2 = (max-value(x), max-value(y)) is the bottom-right corner.
  # device = device number, used to count steps in the pipeline.
  # debug = True/False. If True, print the output image.
  if color=="black":
    ix, iy = np.shape(img)
    size = ix,iy
    bnk = np.zeros(size, dtype=np.uint8)
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (255,255,255))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(bnk, contour, -1 ,(255,255,255), 5)
    device +=1
  if color=="gray":
    ix, iy = np.shape(img)
    size = ix,iy
    bnk = np.zeros(size, dtype=np.uint8)
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (192,192,192))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(bnk, contour, -1 ,(192,192,192), 5)
    device +=1
  if debug:
    print_image(bnk, (str(device) + '_brd_mskd_' + '.png'))
  return device, bnk, contour, hierarchy
Example #30
 def mutual_information(self, x_index, y_index, log_base, debug=False):
     """
     Calculate and return Mutual information between two random variables
     """
     # Check if index are into the bounds
     assert (0 <= x_index <= self.n_rows)
     assert (0 <= y_index <= self.n_rows)
     # Variable to return MI
     summation = 0.0
     # Get uniques values of random variables
     values_x = set(self.data[x_index])
     values_y = set(self.data[y_index])
     # Print debug info
     if debug:
         print('MI between')
         print(self.data[x_index])
         print(self.data[y_index])
     # For each pair of values of the two random variables
     for value_x in values_x:
         for value_y in values_y:
             px = shape(where(self.data[x_index] == value_x))[1] / self.n_cols
             py = shape(where(self.data[y_index] == value_y))[1] / self.n_cols
             pxy = len(where(in1d(where(self.data[x_index] == value_x)[0],
                                  where(self.data[y_index] == value_y)[0]) == True)[0]) / self.n_cols
             if pxy > 0.0:
                 summation += pxy * math.log((pxy / (px * py)), log_base)
             if debug:
                 print('(%d,%d) px:%f py:%f pxy:%f' % (value_x, value_y, px, py, pxy))
     return summation
Example #31
def heat_map(model, char2int, unit_num=args.unit_number, song_path=args.heat_map_path, file_path='saves/heat_map',
             find_special=args.find_special):
    """
    Generate the heat map and show activation functions for the LSTM/GRU hidden cells
    :param model: model to pass in (pytorch model)
    :param char2int: dictionary to tokenize characters
    :type char2int: dict
    :param unit_num: which unit to display
    :type unit_num: int
    :param song_path: path to song file
    :type song_path: str
    :param file_path: path to where to save the heat map
    :type file_path: str
    :param find_special: Boolean to indicate whether to return body and header finders
    :type find_special: bool
    :return: None
    """
    generated_songs = utils.song_parser(song_path)
    num_songs = len(generated_songs)

    for song_ind in range(num_songs):

        generated_song = generated_songs[song_ind]
        song_length = len(generated_song)

        tensor_song = utils.string_to_tensor([list(generated_song)], char2int , 1, song_length)

        tensor_song = Variable(tensor_song)

        if gpu:
            tensor_song = tensor_song.cuda()

        if find_special:

            output = model(tensor_song[0])
            hidden, cell = model.hidden
            hidden = hidden.data.numpy()
            num_units = np.shape(hidden)[2]
            header_correlation = -99999999
            body_correlation = -999999999
            header_filter = np.ones(song_length)
            header_filter[song_length // 8:] = -1
            header_filter[:song_length // 8] = 6
            body_filter = np.ones(song_length)
            body_filter[:song_length // 8] = -6

            for unit in range(num_units):
                activations = []
                for index in range(song_length):
                    output = model(tensor_song[index])
                    hidden, cell = model.hidden
                    hidden = hidden.data.numpy()
                    activations.append(hidden[0,0,unit])
                activations = np.asarray(activations)
                correlation = np.dot(activations,header_filter)
                if correlation > header_correlation:
                    header_correlation = correlation
                    header = (activations,unit)
                correlation = np.dot(activations,body_filter)
                if correlation > body_correlation:
                    body_correlation = correlation
                    body = (activations,unit)
            activations = header[0]
            print("Header Detector for song "+str(song_ind)+" is Unit: "+str(header[1]))
            print("Body Detector for song "+str(song_ind)+" is Unit: " +str(body[1]))
        else:
            activations = []
            for index in range(song_length):
            #model.zero_grad()
                output = model(tensor_song[index])
                hidden, cell = model.hidden
                cell = cell.data.numpy()
                hidden = hidden.data.numpy()
                    # print cell[0,0,0]
                activations.append(hidden[0, 0, unit_num])
            activations = np.asarray(activations)


        print("Song Length: "+str(song_length))
        height = int(np.sqrt(song_length)) + 1
        width = int(song_length/height) + 1
        print("height %d"% height)
        print("width %d"% width)
        song_activations = np.zeros(height * width)
        song_activations[:song_length] = activations[:]
        song_activations = np.reshape(song_activations, (height, width))
        song_activations = [x for x in song_activations[::-1]]
        fig = plt.figure()
        heatmap = plt.pcolormesh(song_activations,cmap = 'coolwarm')

        countH = height-1
        countW = 0
        for index in range(len(generated_song)):
            char = generated_song[index]
            if char == '\n':
                char = 'nl'
            elif char == ' ':
                char = 'sp'
            plt.text(countW, countH, char)
            countW += 1
            if countW >= width:
                countH -= 1
                countW = 0

        plt.colorbar(heatmap)
        plt.show()
        fig.savefig(file_path+str(song_ind)+'.png')
Example #32
    for time in times:
        k=k+1
        if (k%packet == 0):
            sys.stdout.write('.')
        index = bisect.bisect_left(times, time)
        index = max(0, index)
        index = min(index, len(times) - 1)

        contact_info_source._time = times[index]

        # fix: should be called by contact_source?
        contact_info_source.method()

        id_t = numpy.where(pos_data[:, 0] == times[index])

        if numpy.shape(spos_data)[0] > 0:
            set_positionv(spos_data[:, 1], spos_data[:, 2],
                          spos_data[:, 3],
                          spos_data[:, 4], spos_data[:, 5],
                          spos_data[:, 6],
                          spos_data[:, 7], spos_data[:, 8])

        set_positionv(
            pos_data[id_t, 1], pos_data[id_t, 2], pos_data[id_t, 3],
            pos_data[id_t, 4], pos_data[id_t, 5], pos_data[id_t, 6],
            pos_data[id_t, 7], pos_data[id_t, 8])

        id_tv = numpy.where(velo_data[:, 0] == times[index])

        set_velocityv(
            velo_data[id_tv, 1],
Example #33
data_test = np.hstack((data_test,aug_test))
data_test = np.hstack((data_test,feat_test))
data_test = np.hstack((data_test,azure_test))
data_test = np.hstack((data_test,azure2_test))
data_test = np.hstack((data_test,chichi_test))
data_test = np.hstack((data_test,svd_test))

#Preprocessing
preprocess = StandardScaler()
preprocess.fit(np.vstack((data_train,data_test)))
data_train = preprocess.transform(data_train)
data_test = preprocess.transform(data_test)

#Adding User ID and Course ID
fuck_train = np.vstack((train_users_id,train_courses_id))
#fuck_train = np.vstack((fuck_train,bycourse_train))
#fuck_train = np.vstack((fuck_train,ps_train))
data_train = np.hstack((np.transpose(fuck_train),data_train))
print(np.shape(data_train))

fuck_test = np.vstack((test_users_id,test_courses_id))
#fuck_test = np.vstack((fuck_test,bycourse_test))
#fuck_test = np.vstack((fuck_test,ps_test))
data_test = np.hstack((np.transpose(fuck_test),data_test))

label_train[label_train==0]=-1

np.savetxt("data_train_forrgf", data_train, delimiter=" ",fmt="%s")
np.savetxt("data_test_forrgf", data_test, delimiter=" ",fmt="%s")

np.savetxt("label_train_forrgf", np.c_[label_train],delimiter=" ",fmt="%s")
Example #34
def Model():
    epoch=1
    LR=0.1
    Gamma=0.9
    s=0.1
    F1_1=np.random.normal(0,s,[3,5,5])
    F1_2=np.random.normal(0,s,[3,5,5])
    F1_3=np.random.normal(0,s,[3,5,5])
    F1_4=np.random.normal(0,s,[3,5,5])
    F1_5=np.random.normal(0,s,[3,5,5])
    F1_6=np.random.normal(0,s,[3,5,5])
    F1_7=np.random.normal(0,s,[3,5,5])
    F1_8=np.random.normal(0,s,[3,5,5])
    F1_9=np.random.normal(0,s,[3,5,5])
    F1_10=np.random.normal(0,s,[3,5,5])
    
    F2_1=np.random.normal(0,s,[10,3,3])
    F2_2=np.random.normal(0,s,[10,3,3])
    F2_3=np.random.normal(0,s,[10,3,3])
    F2_4=np.random.normal(0,s,[10,3,3])
    F2_5=np.random.normal(0,s,[10,3,3])
    F2_6=np.random.normal(0,s,[10,3,3])
    F2_7=np.random.normal(0,s,[10,3,3])
    F2_8=np.random.normal(0,s,[10,3,3])
    F2_9=np.random.normal(0,s,[10,3,3])
    F2_10=np.random.normal(0,s,[10,3,3])
    F2_11=np.random.normal(0,s,[10,3,3])
    F2_12=np.random.normal(0,s,[10,3,3])
    F2_13=np.random.normal(0,s,[10,3,3])
    F2_14=np.random.normal(0,s,[10,3,3])
    F2_15=np.random.normal(0,s,[10,3,3])

    W1=np.random.normal(0,s,[200,540])
    W2=np.random.normal(0,s,[10,200])
    
    Update1W=np.zeros([200,540])
    Update2W=np.zeros([10,200])
    
    Update1f=np.zeros([3,3])
    Update2f=np.zeros([3,3])
    Update3f=np.zeros([3,3])
    Update4f=np.zeros([3,3])
    Update5f=np.zeros([3,3])
    Update6f=np.zeros([3,3])
    Update7f=np.zeros([3,3])
    Update8f=np.zeros([3,3])
    Update9f=np.zeros([3,3])
    Update10f=np.zeros([3,3])
    Update11f=np.zeros([3,3])
    Update12f=np.zeros([3,3])
    Update13f=np.zeros([3,3])
    Update14f=np.zeros([3,3])
    Update15f=np.zeros([3,3])
    
    Update1ff=np.zeros([5,5])
    Update2ff=np.zeros([5,5])
    Update3ff=np.zeros([5,5])
    Update4ff=np.zeros([5,5])
    Update5ff=np.zeros([5,5])
    Update6ff=np.zeros([5,5])
    Update7ff=np.zeros([5,5])
    Update8ff=np.zeros([5,5])
    Update9ff=np.zeros([5,5])
    Update10ff=np.zeros([5,5])
    while(epoch<=10):
        correct=0
        for i in range(500):    
            image=x_train[i,:,:,:]/255
            R=image[:,:,0]
            B=image[:,:,1]
            G=image[:,:,2]
            c=np.concatenate((R,B,G))
            d_img=np.reshape(c,[3,32,32])
            Y=y_Train[i]
            target = np.zeros([10,1]) + 0.01
            target[Y]= 0.99
            #Input Ready
            #Forward Propagation
            First_conv=Conv_layer(d_img,F1_1,F1_2,F1_3,F1_4,F1_5,F1_6,F1_7,F1_8,F1_9,F1_10)           
            First_convS=Relu(First_conv)
            pool_1,mask1=Pooling_Layer(First_convS)
            
            Second_conv=Conv_layer1(pool_1,F2_1,F2_2,F2_3,F2_4,F2_5,F2_6,F2_7,F2_8,F2_9,F2_10,F2_11,F2_12,F2_13,F2_14,F2_15)           
            Second_convS=Relu(Second_conv)
            pool_2,mask2=Pooling_Layer(Second_convS)
            flat_layer=pool_2.flatten() # 540 values
            flat_layer=np.reshape(flat_layer,(540,1))
            
            Z1=np.dot(W1,flat_layer)
            A1=Relu(Z1)
            Z2=np.dot(W2,A1)
            A2=sigmoid(Z2)
            output=np.reshape(A2,(10,1))
            
             # Cost function calculation
            Error=target-output
            cost=0.5*sum((Error)**2)/10
            #Backward Propagation
             #dw2 ????
            sigmoido_grad=np.multiply(output,1-output)
            er_and_sigmd=np.multiply(Error,sigmoido_grad)
            A1_T=A1.T
            dW2=np.dot(er_and_sigmd,A1_T)
            #dW1 ??
            W2_T=W2.T
            er_flat=np.dot(W2_T,er_and_sigmd)
            sigmd_der_flat=dReLU(Z1)
            this=np.multiply(er_flat,sigmd_der_flat)
            flat_layer_T=flat_layer.T
            dW1=np.dot(this,flat_layer_T)
             #error propagated at flatten and pooling layer
            W1_T=W1.T
            err_flatten=np.dot(W1_T,this)
            FD=np.reshape(err_flatten,(15,6,6))
            repeat_2=FD.repeat(2,axis=1).repeat(2,axis=2)
            maxpol2_eror=repeat_2*mask2
            Sig_der_fil2=dReLU(Second_conv)
            err_Conv2=np.multiply(Sig_der_fil2,maxpol2_eror)
            F=[]
            cc=[]
            for n in range(15):
                F1=err_Conv2[n,:,:]
                dF2_1=np.zeros([10,3,3])
                for m in range(10):
                    b=Conv_layerB(pool_1[m,:,:],F1)
                    dF2_1[m,:,:]=b
                F=np.append(F,[dF2_1])
            dF2=np.reshape(F,(15,10,3,3))
            df1=dF2[0,:,:,:]
            df2=dF2[1,:,:,:]
            df3=dF2[2,:,:,:]
            df4=dF2[3,:,:,:]
            df5=dF2[4,:,:,:]
            df6=dF2[5,:,:,:]
            df7=dF2[6,:,:,:]
            df8=dF2[7,:,:,:]
            df9=dF2[8,:,:,:]
            df10=dF2[9,:,:,:]
            df11=dF2[10,:,:,:]
            df12=dF2[11,:,:,:]
            df13=dF2[12,:,:,:]
            df14=dF2[13,:,:,:]
            df15=dF2[14,:,:,:]
            F2_1R=np.rot90(F2_1,2, axes=(2,1))
            F2_2R=np.rot90(F2_2,2, axes=(2,1))
            F2_3R=np.rot90(F2_3,2, axes=(2,1))
            F2_4R=np.rot90(F2_4,2, axes=(2,1))
            F2_5R=np.rot90(F2_5,2, axes=(2,1))
            F2_6R=np.rot90(F2_6,2, axes=(2,1))
            F2_7R=np.rot90(F2_7,2, axes=(2,1))
            F2_8R=np.rot90(F2_8,2, axes=(2,1))
            F2_9R=np.rot90(F2_9,2, axes=(2,1))
            F2_10R=np.rot90(F2_10,2, axes=(2,1))
            F2_11R=np.rot90(F2_11,2, axes=(2,1))
            F2_12R=np.rot90(F2_12,2, axes=(2,1))
            F2_13R=np.rot90(F2_13,2, axes=(2,1))
            F2_14R=np.rot90(F2_14,2, axes=(2,1))
            F2_15R=np.rot90(F2_15,2, axes=(2,1))
            cc=np.append(cc,[F2_1R,F2_2R,F2_3R,F2_4R,F2_5R,F2_6R,F2_7R,F2_8R,F2_9R,F2_10R,F2_11R,F2_12R,F2_13R,F2_14R,F2_15R])
            A_rotated=np.reshape(cc,(15,10,3,3))
            #padding
            f=np.shape(err_Conv2)
            padded_err_Conv2=np.zeros([15,16,16])
            for t in range(f[0]):
                padded_err_Conv2[t,:,:]=np.pad(err_Conv2[t], (2, 2), 'constant', constant_values=(0,0))
            Accumulated_Error=np.zeros([10,14,14])
            for k in range(15):
                F_P=padded_err_Conv2[k,:,:]
                for n in range(10):
                    F_map=A_rotated[k,n,:,:]
                    ER=Conv_layerB(F_P,F_map)
                    Accumulated_Error[n,:,:]=Accumulated_Error[n,:,:]+ER
                
            repeat_1=Accumulated_Error.repeat(2,axis=1).repeat(2,axis=2)
            maxpol1_eror=repeat_1*mask1 
            
            Sig_der_fil1=dReLU(First_conv)
            err_Conv1=np.multiply(Sig_der_fil1,maxpol1_eror)
            #df1 calculation
            FF=[]
            for n in range(10):
                F1=err_Conv1[n,:,:]
                dF2_1=np.zeros([3,5,5])
                for m in range(3):
                    b=Conv_layerB(d_img[m,:,:],F1)
                    dF2_1[m,:,:]=b
                FF=np.append(FF,[dF2_1])
            dF1=np.reshape(FF,(10,3,5,5))
            
            dff1=dF1[0,:,:,:]
            dff2=dF1[1,:,:,:]
            dff3=dF1[2,:,:,:]
            dff4=dF1[3,:,:,:]
            dff5=dF1[4,:,:,:]
            dff6=dF1[5,:,:,:]
            dff7=dF1[6,:,:,:]
            dff8=dF1[7,:,:,:]
            dff9=dF1[8,:,:,:]
            dff10=dF1[9,:,:,:]
            
            Update1W=Gamma*Update1W+(1-Gamma)*dW1
            Update2W=Gamma*Update2W+(1-Gamma)*dW2
    
            Update1f=Gamma*Update1f+(1-Gamma)*df1
            Update2f=Gamma*Update2f+(1-Gamma)*df2
            Update3f=Gamma*Update3f+(1-Gamma)*df3
            Update4f=Gamma*Update4f+(1-Gamma)*df4
            Update5f=Gamma*Update5f+(1-Gamma)*df5
            Update6f=Gamma*Update6f+(1-Gamma)*df6
            Update7f=Gamma*Update7f+(1-Gamma)*df7
            Update8f=Gamma*Update8f+(1-Gamma)*df8
            Update9f=Gamma*Update9f+(1-Gamma)*df9
            Update10f=Gamma*Update10f+(1-Gamma)*df10
            Update11f=Gamma*Update11f+(1-Gamma)*df11
            Update12f=Gamma*Update12f+(1-Gamma)*df12
            Update13f=Gamma*Update13f+(1-Gamma)*df13
            Update14f=Gamma*Update14f+(1-Gamma)*df14
            Update15f=Gamma*Update15f+(1-Gamma)*df15
            
            
            Update1ff=Gamma*Update1ff+(1-Gamma)*dff1
            Update2ff=Gamma*Update2ff+(1-Gamma)*dff2
            Update3ff=Gamma*Update3ff+(1-Gamma)*dff3
            Update4ff=Gamma*Update4ff+(1-Gamma)*dff4
            Update5ff=Gamma*Update5ff+(1-Gamma)*dff5
            Update6ff=Gamma*Update6ff+(1-Gamma)*dff6
            Update7ff=Gamma*Update7ff+(1-Gamma)*dff7
            Update8ff=Gamma*Update8ff+(1-Gamma)*dff8
            Update9ff=Gamma*Update9ff+(1-Gamma)*dff9
            Update10ff=Gamma*Update10ff+(1-Gamma)*dff10

           
            
            W2=W2+LR*Update2W
            W1=W1+LR*Update1W
            
            F1_1=F1_1+LR*Update1ff
            F1_2=F1_2+LR*Update2ff
            F1_3=F1_3+LR*Update3ff
            F1_4=F1_4+LR*Update4ff
            F1_5=F1_5+LR*Update5ff
            F1_6=F1_6+LR*Update6ff
            F1_7=F1_7+LR*Update7ff
            F1_8=F1_8+LR*Update8ff
            F1_9=F1_9+LR*Update9ff
            F1_10=F1_10+LR*Update10ff
            
            F2_1=F2_1+LR*Update1f
            F2_2=F2_2+LR*Update2f
            F2_3=F2_3+LR*Update3f
            F2_4=F2_4+LR*Update4f
            F2_5=F2_5+LR*Update5f
            F2_6=F2_6+LR*Update6f
            F2_7=F2_7+LR*Update7f
            F2_8=F2_8+LR*Update8f
            F2_9=F2_9+LR*Update9f
            F2_10=F2_10+LR*Update10f
            F2_11=F2_11+LR*Update11f
            F2_12=F2_12+LR*Update12f
            F2_13=F2_13+LR*Update13f
            F2_14=F2_14+LR*Update14f
            F2_15=F2_15+LR*Update15f
            #print(Z2)
        cost_f.append(cost)
        print("Cost Function After ", epoch , " Epoch : ",cost)     
        #accuracy calculation
        for j in range(200):
            image1=x_test[j,:,:,:]/255
            R=image1[:,:,0]
            B=image1[:,:,1]
            G=image1[:,:,2]
            c=np.concatenate((R,B,G))
            d_img=np.reshape(c,[3,32,32])
            y=y_Test[j]
            First_conv=Conv_layer(d_img,F1_1,F1_2,F1_3,F1_4,F1_5,F1_6,F1_7,F1_8,F1_9,F1_10)           
            First_convS=Relu(First_conv)
            pool_1,mask1=Pooling_Layer(First_convS)
            
            Second_conv=Conv_layer1(pool_1,F2_1,F2_2,F2_3,F2_4,F2_5,F2_6,F2_7,F2_8,F2_9,F2_10,F2_11,F2_12,F2_13,F2_14,F2_15)           
            Second_convS=Relu(Second_conv)
            pool_2,mask2=Pooling_Layer(Second_convS)
            flat_layer=pool_2.flatten() # 540 values
            flat_layer=np.reshape(flat_layer,(540,1))
            Z1=np.dot(W1,flat_layer)
            A1=Relu(Z1)
            Z2=np.dot(W2,A1)
            A2=sigmoid(Z2)
            output=np.reshape(A2,(10,1))
            index = np.argmax(output,axis=0)
            #print(index,y)
            #print(y,index)
            if (index==y):
                correct+=1
            accuracy=(correct/200)
        acc.append(accuracy)
        print("Test Accuracy After ", epoch , " Epoch : ",accuracy)
        correct = 0
        for j in range(500):
            image1=x_train[j,:,:,:]/255
            R=image1[:,:,0]
            B=image1[:,:,1]
            G=image1[:,:,2]
            c=np.concatenate((R,B,G))
            d_img=np.reshape(c,[3,32,32])
            y=y_Train[j]
            First_conv=Conv_layer(d_img,F1_1,F1_2,F1_3,F1_4,F1_5,F1_6,F1_7,F1_8,F1_9,F1_10)           
            First_convS=Relu(First_conv)
            pool_1,mask1=Pooling_Layer(First_convS)
            
            Second_conv=Conv_layer1(pool_1,F2_1,F2_2,F2_3,F2_4,F2_5,F2_6,F2_7,F2_8,F2_9,F2_10,F2_11,F2_12,F2_13,F2_14,F2_15)           
            Second_convS=Relu(Second_conv)
            pool_2,mask2=Pooling_Layer(Second_convS)
            flat_layer=pool_2.flatten() # 540 values
            flat_layer=np.reshape(flat_layer,(540,1))
            Z1=np.dot(W1,flat_layer)
            A1=Relu(Z1)
            Z2=np.dot(W2,A1)
            A2=sigmoid(Z2)
            output=np.reshape(A2,(10,1))
            index = np.argmax(output,axis=0)
            #print(index,y)
            #print(y,index)
            if (index==y):
                correct+=1
            accuracy1=(correct/500)
        acctr.append(accuracy1)
        print("Train  Accuracy After ", epoch , " Epoch : ",accuracy1)
        epoch =epoch + 1
    return output
Example #35
ax.set_title("Countdown Network")
ax.set_xlabel("Epoch")
ax.set_ylabel("Time step")
cb = fig.colorbar(im)

# Now: try to inspect output of LSTM neurons at intermediate times. This is also a nice example of how to use some smart keras functionality.

from tensorflow.keras import Model
# get a function that represents the mapping from the 
# network inputs to the neuron output values of the first LSTM layer:
neuron_values = Model([rnn.inputs], [firstLSTMlayer.output])

batchsize=1
test_observations,test_target=produce_batch_counting(batchsize, timesteps)

print(test_observations)

the_values=neuron_values.predict_on_batch([test_observations])

np.shape(the_values)

fig,ax=plt.subplots(figsize=(7,5))
im=ax.imshow(the_values[0,:,:],origin='lower',interpolation='nearest',aspect='auto')
ax.set_title("Neuron Value")
ax.set_xlabel("1st LSTM Index")
ax.set_ylabel("Time step")
cb = fig.colorbar(im)
ax.set_xticks([0,1])


Example #36
def generate_SeqFile_SpiralDiffusion(gx, gy, tr, n_shots, mg, ms, fA, n_slices,
                                     reps, st, tPlot, tReport, b_values,
                                     n_dirs, fov, Nx):

    #%% --- 1 - Create new Sequence Object + Parameters
    seq = Sequence()

    # =========
    # Parameters
    # =========
    i_raster_time = 100000
    assert 1 / i_raster_time == seq.grad_raster_time, "Manually entered inverse raster time does not match the actual value."

    # =========
    # Code parameters
    # =========
    fatsat_enable = 0  # Fat saturation
    kplot = 0

    # =========
    # Acquisition Parameters
    # =========
    TR = tr  # Spin-Echo parameters - TR in [s]
    n_TR = math.ceil(
        TR * i_raster_time)  # Spin-Echo parameters - number of points TR
    bvalue = b_values  # b-value [s/mm2]
    nbvals = np.shape(bvalue)[0]  # b-value parameters
    ndirs = n_dirs  # b-value parameters
    Ny = Nx
    slice_thickness = st  # Acquisition Parameters in [m]
    Nshots = n_shots

    # =========
    # Gradient Scaling
    # =========
    gscl = np.zeros(nbvals + 1)
    gscl[1:] = np.sqrt(bvalue / np.max(bvalue))
    gdir, nb0s = difunc.get_dirs(ndirs)

    # =========
    # Create system
    # =========
    system = Opts(max_grad=mg,
                  grad_unit='mT/m',
                  max_slew=ms,
                  slew_unit='T/m/s',
                  rf_ringdown_time=20e-6,
                  rf_dead_time=100e-6,
                  adc_dead_time=10e-6)

    #%% --- 2 - Fat saturation
    if fatsat_enable:
        fatsat_str = "_fatsat"
        b0 = 1.494
        sat_ppm = -3.45
        sat_freq = sat_ppm * 1e-6 * b0 * system.gamma
        rf_fs, _, _ = make_gauss_pulse(flip_angle=110 * math.pi / 180,
                                       system=system,
                                       duration=8e-3,
                                       bandwidth=abs(sat_freq),
                                       freq_offset=sat_freq)
        gz_fs = make_trapezoid(channel='z',
                               system=system,
                               delay=calc_duration(rf_fs),
                               area=1 / 1e-4)
    else:
        fatsat_str = ""

    #%% --- 3 - Slice Selection
    # =========
    # Create 90 degree slice selection pulse and gradient
    # =========
    flip90 = fA * pi / 180
    rf, gz, _ = make_sinc_pulse(flip_angle=flip90,
                                system=system,
                                duration=3e-3,
                                slice_thickness=slice_thickness,
                                apodization=0.5,
                                time_bw_product=4)

    # =========
    # Refocusing pulse with spoiling gradients
    # =========
    rf180, gz180, _ = make_sinc_pulse(flip_angle=math.pi,
                                      system=system,
                                      duration=5e-3,
                                      slice_thickness=slice_thickness,
                                      apodization=0.5,
                                      time_bw_product=4)
    rf180.phase_offset = math.pi / 2
    gz_spoil = make_trapezoid(channel='z',
                              system=system,
                              area=6 / slice_thickness,
                              duration=3e-3)

    #%% --- 4 - Gradients
    # =========
    # Spiral trajectory
    # =========
    G = gx + 1J * gy

    #%% --- 5 - ADCs / Readouts
    delta_k = 1 / fov
    adc_samples = math.floor(
        len(G) / 4
    ) * 4 - 2  # Apparently, on Siemens the number of samples needs to be divisible by 4...
    adc = make_adc(num_samples=adc_samples,
                   system=system,
                   duration=adc_samples / i_raster_time)

    # =========
    # Pre-phasing gradients
    # =========
    pre_time = 1e-3
    n_pre_time = math.ceil(pre_time * i_raster_time)
    gz_reph = make_trapezoid(channel='z',
                             system=system,
                             area=-gz.area / 2,
                             duration=pre_time)

    #%% --- 6 - Obtain TE and diffusion-weighting gradient waveform
    # For S&T monopolar waveforms
    # From an initial TE, check we satisfy all constraints -> otherwise increase TE.
    # Once all constraints are okay -> check b-value, if it is lower than the target one -> increase TE
    # Looks time-inefficient but it is fast enough to make it user-friendly.
    # TODO: Re-scale the waveform to the exact b-value because increasing the TE might produce slightly higher ones.

    # Calculate some durations that stay constant throughout the process.
    # We need to compute the exact time sequence. For the normal SE-MONO-EPI sequence, microsecond differences
    # are not important; however, if we want to import external gradients, the time allocated for them needs to
    # be the same, and thus exact timing is mandatory. With this in mind, we establish the following rounding rules:
    # Duration of RFs + spoiling, and EPI time to the center of k-space, is always math.ceil().

    # The time(gy) refers to the number of blips, thus we subtract 0.5 since the number of lines is always even.
    # The time(gx) refers to the time needed to read each line of k-space. Thus, if Ny is even, it would take half of the lines plus another half.
    n_duration_center = 0  # The spiral starts right at 0 -- or at ADC dead time??
    rf_center_with_delay = rf.delay + calc_rf_center(rf)[0]

    n_rf90r = math.ceil((calc_duration(gz) - rf_center_with_delay + pre_time) /
                        seq.grad_raster_time)
    n_rf180r = math.ceil((calc_duration(rf180) + 2 * calc_duration(gz_spoil)) /
                         2 / seq.grad_raster_time)
    n_rf180l = math.floor(
        (calc_duration(rf180) + 2 * calc_duration(gz_spoil)) / 2 /
        seq.grad_raster_time)

    # =========
    # Find minimum TE considering the readout times.
    # =========
    n_TE = math.ceil(20e-3 / seq.grad_raster_time)
    n_delay_te1 = -1
    while n_delay_te1 <= 0:
        n_TE = n_TE + 2

        n_tINV = math.floor(n_TE / 2)
        n_delay_te1 = n_tINV - n_rf90r - n_rf180l

    # =========
    # Find minimum TE for the target b-value
    # =========
    bvalue_tmp = 0
    while bvalue_tmp < np.max(bvalue):
        n_TE = n_TE + 2

        n_tINV = math.floor(n_TE / 2)
        n_delay_te1 = n_tINV - n_rf90r - n_rf180l
        delay_te1 = n_delay_te1 / i_raster_time
        n_delay_te2 = n_tINV - n_rf180r - n_duration_center
        delay_te2 = n_delay_te2 / i_raster_time

        # Waveform Ramp time
        n_gdiff_rt = math.ceil(system.max_grad / system.max_slew /
                               seq.grad_raster_time)

        # Select the shortest available time
        n_gdiff_delta = min(n_delay_te1, n_delay_te2)
        n_gdiff_Delta = n_delay_te1 + 2 * math.ceil(
            calc_duration(gz_spoil) / seq.grad_raster_time) + math.ceil(
                calc_duration(gz180) / seq.grad_raster_time)

        gdiff = make_trapezoid(channel='x',
                               system=system,
                               amplitude=system.max_grad,
                               duration=n_gdiff_delta / i_raster_time)

        # delta only corresponds to the rectangle.
        n_gdiff_delta = n_gdiff_delta - 2 * n_gdiff_rt

        bv = difunc.calc_bval(system.max_grad, n_gdiff_delta / i_raster_time,
                              n_gdiff_Delta / i_raster_time,
                              n_gdiff_rt / i_raster_time)
        bvalue_tmp = bv * 1e-6

    # =========
    # Show final TE and b-values:
    # =========
    print("TE:", round(n_TE / i_raster_time * 1e3, 2), "ms")
    for bv in range(1, nbvals + 1):
        print(
            round(
                difunc.calc_bval(system.max_grad * gscl[bv], n_gdiff_delta /
                                 i_raster_time, n_gdiff_Delta / i_raster_time,
                                 n_gdiff_rt / i_raster_time) * 1e-6, 2),
            "s/mm2")

    TE = n_TE / i_raster_time
    TR = n_TR / i_raster_time

    #%% --- 7 - Crusher gradients
    gx_crush = make_trapezoid(channel='x',
                              area=2 * Nx * delta_k,
                              system=system)
    gz_crush = make_trapezoid(channel='z',
                              area=4 / slice_thickness,
                              system=system)

    # TR delay - Takes everything into account
    # Distance between the center of the RF90s must be TR
    # The n_pre_time here is the time used to drive the Gx and Gy spiral gradients to zero.
    n_spiral_time = adc_samples
    n_tr_per_slice = math.ceil(TR / n_slices * i_raster_time)
    if fatsat_enable:
        n_tr_delay = n_tr_per_slice - (n_TE - n_duration_center + n_spiral_time) \
                            - math.ceil(rf_center_with_delay * i_raster_time) \
                            - n_pre_time \
                            - math.ceil(calc_duration(gx_crush, gz_crush) * i_raster_time) \
                            - math.ceil(calc_duration(rf_fs, gz_fs) * i_raster_time)
    else:
        n_tr_delay = n_tr_per_slice - (n_TE - n_duration_center + n_spiral_time) \
                        - math.ceil(rf_center_with_delay * i_raster_time) \
                        - n_pre_time \
                        - math.ceil(calc_duration(gx_crush, gz_crush) * i_raster_time)
    tr_delay = n_tr_delay / i_raster_time

    #%% --- 8 - Checks
    # =========
    # Check TR delay time
    # =========
    assert n_tr_delay > 0, "This parameter configuration requires a longer TR."

    # =========
    # Delay time
    # =========
    # Time between the gradient and the RF180. This time might sometimes be zero, although that is not typical.
    if n_delay_te1 > n_delay_te2:
        n_gap_te1 = n_delay_te1 - n_delay_te2
        gap_te1 = n_gap_te1 / i_raster_time
        gap_te2 = 0
    else:
        n_gap_te2 = n_delay_te2 - n_delay_te1
        gap_te2 = n_gap_te2 / i_raster_time
        gap_te1 = 0

    #%% --- 9 - b-zero acquisition
    for r in range(reps):
        for d in range(nb0s):
            for nshot in range(Nshots):
                for s in range(n_slices):
                    # Fat saturation
                    if fatsat_enable:
                        seq.add_block(rf_fs, gz_fs)

                    # RF90
                    rf.freq_offset = gz.amplitude * slice_thickness * (
                        s - (n_slices - 1) / 2)
                    seq.add_block(rf, gz)
                    seq.add_block(gz_reph)

                    # Delay for RF180
                    seq.add_block(make_delay(delay_te1))

                    # RF180
                    seq.add_block(gz_spoil)
                    rf180.freq_offset = gz180.amplitude * slice_thickness * (
                        s - (n_slices - 1) / 2)
                    seq.add_block(rf180, gz180)
                    seq.add_block(gz_spoil)

                    # Delay for spiral
                    seq.add_block(make_delay(delay_te2))

                    # Read k-space
                    # Imaging Gradient waveforms
                    gx = make_arbitrary_grad(channel='x',
                                             waveform=np.squeeze(
                                                 G[:, nshot].real),
                                             system=system)
                    gy = make_arbitrary_grad(channel='y',
                                             waveform=np.squeeze(
                                                 G[:, nshot].imag),
                                             system=system)
                    seq.add_block(gx, gy, adc)

                    # Make the spiral finish at zero - pre_time is used because it is known to be long enough.
                    # Furthermore, this is after the readout and TR is supposed to be long.
                    amp_x = [G[:, nshot].real[-1], 0]
                    amp_y = [G[:, nshot].imag[-1], 0]
                    gx_to_zero = make_extended_trapezoid(channel='x',
                                                         amplitudes=amp_x,
                                                         times=[0, pre_time],
                                                         system=system)
                    gy_to_zero = make_extended_trapezoid(channel='y',
                                                         amplitudes=amp_y,
                                                         times=[0, pre_time],
                                                         system=system)
                    seq.add_block(gx_to_zero, gy_to_zero)

                    seq.add_block(gx_crush, gz_crush)

                    # Wait TR
                    if tr_delay > 0:
                        seq.add_block(make_delay(tr_delay))

    #%% --- 10 - DWI acquisition
    for r in range(reps):
        for bv in range(1, nbvals + 1):
            for d in range(ndirs):
                for nshot in range(Nshots):
                    for s in range(n_slices):
                        # Fat saturation
                        if fatsat_enable:
                            seq.add_block(rf_fs, gz_fs)

                        # RF90
                        rf.freq_offset = gz.amplitude * slice_thickness * (
                            s - (n_slices - 1) / 2)
                        seq.add_block(rf, gz)
                        seq.add_block(gz_reph)

                        # Diffusion-weighting gradient
                        gdiffx = make_trapezoid(channel='x',
                                                system=system,
                                                amplitude=system.max_grad *
                                                gscl[bv] * gdir[d, 0],
                                                duration=calc_duration(gdiff))
                        gdiffy = make_trapezoid(channel='y',
                                                system=system,
                                                amplitude=system.max_grad *
                                                gscl[bv] * gdir[d, 1],
                                                duration=calc_duration(gdiff))
                        gdiffz = make_trapezoid(channel='z',
                                                system=system,
                                                amplitude=system.max_grad *
                                                gscl[bv] * gdir[d, 2],
                                                duration=calc_duration(gdiff))

                        seq.add_block(gdiffx, gdiffy, gdiffz)

                        # Delay for RF180
                        seq.add_block(make_delay(gap_te1))

                        # RF180
                        seq.add_block(gz_spoil)
                        rf180.freq_offset = gz180.amplitude * slice_thickness * (
                            s - (n_slices - 1) / 2)
                        seq.add_block(rf180, gz180)
                        seq.add_block(gz_spoil)

                        # Diffusion-weighting gradient
                        seq.add_block(gdiffx, gdiffy, gdiffz)

                        # Delay for spiral
                        seq.add_block(make_delay(gap_te2))

                        # Read k-space
                        # Imaging Gradient waveforms
                        gx = make_arbitrary_grad(channel='x',
                                                 waveform=np.squeeze(
                                                     G[:, nshot].real),
                                                 system=system)
                        gy = make_arbitrary_grad(channel='y',
                                                 waveform=np.squeeze(
                                                     G[:, nshot].imag),
                                                 system=system)
                        seq.add_block(gx, gy, adc)

                        # Make the spiral finish at zero - pre_time is used because it is known to be long enough.
                        # Furthermore, this is after the readout and TR is supposed to be long.
                        amp_x = [G[:, nshot].real[-1], 0]
                        amp_y = [G[:, nshot].imag[-1], 0]
                        gx_to_zero = make_extended_trapezoid(
                            channel='x',
                            amplitudes=amp_x,
                            times=[0, pre_time],
                            system=system)
                        gy_to_zero = make_extended_trapezoid(
                            channel='y',
                            amplitudes=amp_y,
                            times=[0, pre_time],
                            system=system)
                        seq.add_block(gx_to_zero, gy_to_zero)

                        seq.add_block(gx_crush, gz_crush)

                        # Wait TR
                        if tr_delay > 0:
                            seq.add_block(make_delay(tr_delay))

    if tPlot:
        seq.plot()

    if tReport:
        print(seq.test_report())
        seq.check_timing()

    return seq, TE, TR, fatsat_str
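
A hedged usage sketch for the generator above: every numeric value is an illustrative placeholder, and gx/gy are assumed to be precomputed spiral gradient waveforms with one column per shot (as required by the G = gx + 1j*gy step inside the function). The output file name is also arbitrary.

import numpy as np

# gx, gy: assumed spiral waveforms of shape (n_points, n_shots), produced elsewhere
b_values = np.array([500.0, 1000.0])                 # target b-values in s/mm2 (placeholders)
seq, TE, TR, fatsat_str = generate_SeqFile_SpiralDiffusion(
    gx, gy, tr=3.0, n_shots=gx.shape[1], mg=32, ms=130, fA=90,
    n_slices=1, reps=1, st=3e-3, tPlot=False, tReport=False,
    b_values=b_values, n_dirs=6, fov=0.22, Nx=220)

print("TE = %.2f ms, TR = %.2f s" % (TE * 1e3, TR))
seq.write('spiral_diffusion' + fatsat_str + '.seq')  # export the Pulseq file
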
Example #37
0
def generate_SeqFile_EPIDiffusion(FOV, nx, ny, ns, mg, ms, reps, st, tr, fA,
                                  b_values, n_dirs, partialFourier, tPlot,
                                  tReport):

    #%% --- 1 - Create new Sequence Object + Parameters
    seq = Sequence()

    # =========
    # Parameters
    # =========
    i_raster_time = 100000
    assert 1 / i_raster_time == seq.grad_raster_time, "Manually input inverse raster time does not match the actual value."

    # =========
    # Code parameters
    # =========
    fatsat_enable = 0  # Fat saturation
    kplot = 0

    # =========
    # Acquisition Parameters
    # =========
    fov = FOV
    Nx = nx
    Ny = ny
    n_slices = ns
    TR = tr  # Spin-Echo parameters - TR in [s]
    n_TR = math.ceil(
        TR * i_raster_time)  # Spin-Echo parameters - number of points TR
    bvalue = b_values  # b-value [s/mm2]
    nbvals = np.shape(bvalue)[0]  # b-value parameters
    ndirs = n_dirs  # b-value parameters
    slice_thickness = st  # Acquisition Parameters in [m]

    # =========
    # Partial Fourier
    # =========
    pF = partialFourier
    Nyeff = int(pF * Ny)  # Number of Ny samples acquired
    if pF != 1:
        pF_str = "_" + str(pF) + "pF"
    else:
        pF_str = ""

    # =========
    # Gradient Scaling
    # =========
    gscl = np.zeros(nbvals + 1)
    gscl[1:] = np.sqrt(bvalue / np.max(bvalue))
    gdir, nb0s = difunc.get_dirs(ndirs)

    # =========
    # Create system
    # =========
    system = Opts(max_grad=mg,
                  grad_unit='mT/m',
                  max_slew=ms,
                  slew_unit='T/m/s',
                  rf_ringdown_time=20e-6,
                  rf_dead_time=100e-6,
                  adc_dead_time=10e-6)

    #%% --- 2 - Fat saturation
    if fatsat_enable:
        fatsat_str = "_fatsat"
        b0 = 1.494
        sat_ppm = -3.45
        sat_freq = sat_ppm * 1e-6 * b0 * system.gamma
        rf_fs, _, _ = make_gauss_pulse(flip_angle=110 * math.pi / 180,
                                       system=system,
                                       duration=8e-3,
                                       bandwidth=abs(sat_freq),
                                       freq_offset=sat_freq)
        gz_fs = make_trapezoid(channel='z',
                               system=system,
                               delay=calc_duration(rf_fs),
                               area=1 / 1e-4)
    else:
        fatsat_str = ""

    #%% --- 3 - Slice Selection
    # =========
    # Create 90 degree slice selection pulse and gradient
    # =========
    flip90 = fA * pi / 180
    rf, gz, _ = make_sinc_pulse(flip_angle=flip90,
                                system=system,
                                duration=3e-3,
                                slice_thickness=slice_thickness,
                                apodization=0.5,
                                time_bw_product=4)

    # =========
    # Refocusing pulse with spoiling gradients
    # =========
    rf180, gz180, _ = make_sinc_pulse(flip_angle=math.pi,
                                      system=system,
                                      duration=5e-3,
                                      slice_thickness=slice_thickness,
                                      apodization=0.5,
                                      time_bw_product=4)
    rf180.phase_offset = math.pi / 2
    gz_spoil = make_trapezoid(channel='z',
                              system=system,
                              area=6 / slice_thickness,
                              duration=3e-3)

    #%% --- 4 - Define other gradients and ADC events
    delta_k = 1 / fov
    k_width = Nx * delta_k
    dwell_time = seq.grad_raster_time  # Full receiver bandwith
    readout_time = Nx * dwell_time  # T_acq (acquisition time)
    flat_time = math.ceil(
        readout_time / seq.grad_raster_time) * seq.grad_raster_time
    gx = make_trapezoid(channel='x',
                        system=system,
                        amplitude=k_width / readout_time,
                        flat_time=flat_time)
    adc = make_adc(num_samples=Nx,
                   duration=readout_time,
                   delay=gx.rise_time + flat_time / 2 -
                   (readout_time - dwell_time) / 2)

    # =========
    # Pre-phasing gradients
    # =========
    pre_time = 1e-3
    gx_pre = make_trapezoid(channel='x',
                            system=system,
                            area=-gx.area / 2,
                            duration=pre_time)
    gz_reph = make_trapezoid(channel='z',
                             system=system,
                             area=-gz.area / 2,
                             duration=pre_time)
    gy_pre = make_trapezoid(
        channel='y',
        system=system,
        area=-(Ny / 2 - 0.5 - (Ny - Nyeff)) * delta_k,
        duration=pre_time
    )  # It is -0.5 and not +0.5 because you have to think in terms of areas, not lines!

    # =========
    # Phase blip in shortest possible time
    # =========
    gy = make_trapezoid(channel='y', system=system, area=delta_k)
    dur = math.ceil(
        calc_duration(gy) / seq.grad_raster_time) * seq.grad_raster_time

    #%% --- 5 - Obtain TE and diffusion-weighting gradient waveform
    # =========
    # Calculate some durations that stay constant throughout the process
    # =========
    duration_center = math.ceil(
        (calc_duration(gx) * (Ny / 2 + 0.5 -
                              (Ny - Nyeff)) + calc_duration(gy) *
         (Ny / 2 - 0.5 -
          (Ny - Nyeff))) / seq.grad_raster_time) * seq.grad_raster_time
    rf_center_with_delay = rf.delay + calc_rf_center(rf)[0]
    rf180_center_with_delay = rf180.delay + calc_rf_center(rf180)[0]

    # =========
    # Find minimum TE considering the readout times.
    # =========
    TE = 40e-3  # [s]
    delay_te2 = -1
    while delay_te2 <= 0:
        TE = TE + 0.02e-3  # [ms]
        delay_te2 = math.ceil((TE / 2 - calc_duration(rf180) + rf180_center_with_delay - calc_duration(gz_spoil) - \
                               calc_duration(gx_pre,
                                             gy_pre) - duration_center) / seq.grad_raster_time) * seq.grad_raster_time

    # =========
    # Find minimum TE for the target b-value
    # =========
    bvalue_tmp = 0
    while bvalue_tmp < np.max(bvalue):
        TE = TE + 2 * seq.grad_raster_time  # [ms]
        delay_te1 = math.ceil((TE / 2 - calc_duration(gz) + rf_center_with_delay - pre_time - calc_duration(gz_spoil) - \
                               rf180_center_with_delay) / seq.grad_raster_time) * seq.grad_raster_time
        delay_te2 = math.ceil((TE / 2 - calc_duration(rf180) + rf180_center_with_delay - calc_duration(gz_spoil) - \
                               calc_duration(gx_pre,
                                             gy_pre) - duration_center) / seq.grad_raster_time) * seq.grad_raster_time

        # Waveform Ramp time
        gdiff_rt = math.ceil(system.max_grad / system.max_slew /
                             seq.grad_raster_time) * seq.grad_raster_time

        # Select the shortest available time
        gdiff_delta = min(delay_te1, delay_te2)
        gdiff_Delta = math.ceil(
            (delay_te1 + 2 * calc_duration(gz_spoil) + calc_duration(gz180)) /
            seq.grad_raster_time) * seq.grad_raster_time

        gdiff = make_trapezoid(channel='x',
                               system=system,
                               amplitude=system.max_grad,
                               duration=gdiff_delta)

        # delta only corresponds to the rectangle.
        gdiff_delta = math.ceil((gdiff_delta - 2 * gdiff_rt) /
                                seq.grad_raster_time) * seq.grad_raster_time

        bv = difunc.calc_bval(system.max_grad, gdiff_delta, gdiff_Delta,
                              gdiff_rt)
        bvalue_tmp = bv * 1e-6

    # =========
    # Show final TE and b-values:
    # =========
    print("TE:", round(TE * 1e3, 2), "ms")
    for bv in range(1, nbvals + 1):
        print(
            round(
                difunc.calc_bval(system.max_grad * gscl[bv], gdiff_delta,
                                 gdiff_Delta, gdiff_rt) * 1e-6, 2), "s/mm2")

    # =========
    # Crusher gradients
    # =========
    gx_crush = make_trapezoid(channel='x',
                              area=2 * Nx * delta_k,
                              system=system)
    gz_crush = make_trapezoid(channel='z',
                              area=4 / slice_thickness,
                              system=system)

    #%% --- 6 - Delays
    # =========
    # TR delay - Takes everything into account
    # EPI reading time:
    # Distance between the center of the RF90s must be TR
    # =========
    EPI_time = calc_duration(gx) * Nyeff + calc_duration(gy) * (Nyeff - 1)
    if fatsat_enable:
        tr_delay = math.floor(
            (TR - (TE - duration_center + EPI_time) - rf_center_with_delay - calc_duration(gx_crush, gz_crush) \
             - calc_duration(rf_fs, gz_fs)) \
            / seq.grad_raster_time) * seq.grad_raster_time
    else:
        tr_delay = math.floor(
            (TR - (TE - duration_center + EPI_time) - rf_center_with_delay - calc_duration(gx_crush, gz_crush)) \
            / seq.grad_raster_time) * seq.grad_raster_time

    # =========
    # Check TR delay time
    # =========
    assert tr_delay > 0, "This parameter configuration requires a longer TR."

    # =========
    # Delay time
    # =========

    # =========
    # Time between the gradient and the RF180. This time might sometimes be zero, although that is not typical.
    # =========
    gap_te1 = math.ceil((delay_te1 - calc_duration(gdiff)) /
                        seq.grad_raster_time) * seq.grad_raster_time

    # =========
    # Time between the gradient and the locate k-space gradients.
    # =========
    gap_te2 = math.ceil((delay_te2 - calc_duration(gdiff)) /
                        seq.grad_raster_time) * seq.grad_raster_time

    #%% --- 9 - b-zero acquisition
    for d in range(nb0s):
        for s in range(n_slices):
            # Fat saturation
            if fatsat_enable:
                seq.add_block(rf_fs, gz_fs)

            # RF90
            rf.freq_offset = gz.amplitude * slice_thickness * (
                s - (n_slices - 1) / 2)
            seq.add_block(rf, gz)
            seq.add_block(gz_reph)

            # Delay for RF180
            seq.add_block(make_delay(delay_te1))

            # RF180
            seq.add_block(gz_spoil)
            rf180.freq_offset = gz180.amplitude * slice_thickness * (
                s - (n_slices - 1) / 2)
            seq.add_block(rf180, gz180)
            seq.add_block(gz_spoil)

            # Delay for EPI
            seq.add_block(make_delay(delay_te2))

            # Locate k-space
            seq.add_block(gx_pre, gy_pre)

            for i in range(Nyeff):
                seq.add_block(gx, adc)  # Read one line of k-space
                if i != Nyeff - 1:
                    seq.add_block(gy)  # Phase blip
                gx.amplitude = -gx.amplitude  # Reverse polarity of read gradient

            seq.add_block(gx_crush, gz_crush)

            # Wait TR
            if tr_delay > 0:
                seq.add_block(make_delay(tr_delay))

    #%% --- 10 - DWI acquisition
    for bv in range(1, nbvals + 1):
        for d in range(ndirs):
            for s in range(n_slices):
                # Fat saturation
                if fatsat_enable:
                    seq.add_block(rf_fs, gz_fs)

                # RF90
                rf.freq_offset = gz.amplitude * slice_thickness * (
                    s - (n_slices - 1) / 2)
                seq.add_block(rf, gz)
                seq.add_block(gz_reph)

                # Diffusion-weighting gradient
                gdiffx = make_trapezoid(channel='x',
                                        system=system,
                                        amplitude=system.max_grad * gscl[bv] *
                                        gdir[d, 0],
                                        duration=calc_duration(gdiff))
                gdiffy = make_trapezoid(channel='y',
                                        system=system,
                                        amplitude=system.max_grad * gscl[bv] *
                                        gdir[d, 1],
                                        duration=calc_duration(gdiff))
                gdiffz = make_trapezoid(channel='z',
                                        system=system,
                                        amplitude=system.max_grad * gscl[bv] *
                                        gdir[d, 2],
                                        duration=calc_duration(gdiff))

                seq.add_block(gdiffx, gdiffy, gdiffz)

                # Delay for RF180
                seq.add_block(make_delay(gap_te1))

                # RF180
                seq.add_block(gz_spoil)
                rf180.freq_offset = gz180.amplitude * slice_thickness * (
                    s - (n_slices - 1) / 2)
                seq.add_block(rf180, gz180)
                seq.add_block(gz_spoil)

                # Diffusion-weighting gradient
                seq.add_block(gdiffx, gdiffy, gdiffz)

                # Delay for EPI
                seq.add_block(make_delay(gap_te2))

                # Locate k-space
                seq.add_block(gx_pre, gy_pre)

                for i in range(Nyeff):
                    seq.add_block(gx, adc)  # Read one line of k-space
                    if i != Nyeff - 1:
                        seq.add_block(gy)  # Phase blip
                    gx.amplitude = -gx.amplitude  # Reverse polarity of read gradient

                seq.add_block(gx_crush, gz_crush)

                # Wait TR
                if tr_delay > 0:
                    seq.add_block(make_delay(tr_delay))

    if tPlot:
        seq.plot()

    if tReport:
        print(seq.test_report())
        seq.check_timing()

    return seq, TE, TR, fatsat_str
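
Both sequence generators above increase TE until `difunc.calc_bval` reaches the target b-value. That helper is not shown here; a plausible stand-in, assuming the standard expression for a pair of trapezoidal Stejskal-Tanner gradients with amplitude G in Hz/m (the unit pypulseq's Opts stores internally) and all times in seconds, is sketched below. Multiplying the result by 1e-6, as the loops above do, converts s/m2 to s/mm2.

import math

def calc_bval_trapezoid(G, delta, Delta, ramp):
    """Approximate b-value [s/m^2] of two trapezoids: flat-top delta, separation Delta, ramp time ramp."""
    return (2 * math.pi * G) ** 2 * (delta ** 2 * (Delta - delta / 3)
                                     + ramp ** 3 / 30
                                     - delta * ramp ** 2 / 6)
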
Example #38
0
 def predict(self, X):
     prob_matrix = np.zeros((np.shape(X)[0], self.num_classes))
     for i in range(self.num_classes):
         prob_matrix[:, i] = self._softmax_score(X, i)
     print(prob_matrix)
     # Return the index of the highest-scoring class for each sample
     return np.argmax(prob_matrix, axis=1)
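
The `_softmax_score` helper used above is not part of this example. Purely for illustration, a self-contained sketch of a softmax-based prediction, assuming one weight vector per class, could look like this (the weights and data are synthetic):

import numpy as np

def softmax_predict(X, weights):
    # weights: (num_classes, num_features); X: (num_samples, num_features)
    scores = X @ weights.T                        # linear score per class
    scores -= scores.max(axis=1, keepdims=True)   # stabilise the exponentials
    probs = np.exp(scores)
    probs /= probs.sum(axis=1, keepdims=True)     # rows now sum to 1
    return np.argmax(probs, axis=1)               # highest-probability class per sample

X = np.random.rand(5, 3)
weights = np.random.rand(4, 3)
print(softmax_predict(X, weights))                # five predicted class indices in 0..3
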
Example #39
0
 def __init__(self, data, thr=0.2):
     self.dtype = data.dtype
     self.shape = np.shape(data)
     self.data = data.astype(np.float64)  # np.float is deprecated; use the explicit 64-bit type
     self.thr = thr
 
                
            batch_x_val = batch_x_val.cpu().data.numpy()
            batch_y_val = batch_y_val.cpu().data.numpy()
            output_val = output_val.cpu().data.numpy()
            output_val = np.moveaxis(output_val, 1, -1)
            seg_val = np.argmax(output_val[0], axis=-1)

            input_3D = batch_x_val[0][0]
            seed_3D = batch_x_val[0][1]
            truth_3D = batch_y_val[0]
            seg_3D = seg_val
            intersect = truth_3D + seg_3D

            combined = np.zeros(np.shape(seg_3D))
            combined[truth_3D > 0] = 1
            combined[seg_3D > 0] = 2
            combined[intersect > 1] = 3

            # plt.figure();
            # ma = np.amax(combined, axis=0)
            # plt.imshow(ma, cmap='magma')

            """ Get sklearn metric """
            from sklearn.metrics import jaccard_score
            #jacc_new = jaccard_score(truth_3D.flatten(), seg_3D.flatten())
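
The commented-out jaccard_score call above hints at the intended metric. A self-contained sketch of that step, with synthetic stand-ins for the truth_3D and seg_3D volumes (sklearn's jaccard_score expects flat binary label vectors), could look like this:

import numpy as np
from sklearn.metrics import jaccard_score

truth_3D = np.zeros((4, 8, 8)); truth_3D[:, 2:6, 2:6] = 1   # synthetic ground truth
seg_3D = np.zeros((4, 8, 8));   seg_3D[:, 3:7, 3:7] = 1     # synthetic segmentation

y_true = (truth_3D > 0).astype(int).flatten()
y_pred = (seg_3D > 0).astype(int).flatten()
print('Jaccard / IoU:', jaccard_score(y_true, y_pred))      # overlap of the foreground class
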
            
Example #41
0
def main():
  # MAIN -- TRADES + EMCEE
  # READ COMMAND LINE ARGUMENTS
  cli = get_args()

  # STARTING TIME
  start = time.time()

  # RENAME 
  working_path = cli.full_path
  nthreads=cli.nthreads
  np.random.seed(cli.seed)  # seed the global NumPy random state
  
  # INITIALISE TRADES WITH SUBROUTINE WITHIN TRADES_LIB -> PARAMETER NAMES, MINMAX, INTEGRATION ARGS, READ DATA ...
  pytrades_lib.pytrades.initialize_trades(working_path, cli.sub_folder, nthreads)

  # RETRIEVE DATA AND VARIABLES FROM TRADES_LIB MODULE
  
  #global n_bodies, n_planets, ndata, npar, nfit, dof, inv_dof
  n_bodies = pytrades_lib.pytrades.n_bodies # NUMBER OF TOTAL BODIES OF THE SYSTEM
  n_planets = n_bodies - 1 # NUMBER OF PLANETS IN THE SYSTEM
  ndata = pytrades_lib.pytrades.ndata # TOTAL NUMBER OF DATA AVAILABLE
  npar  = pytrades_lib.pytrades.npar # NUMBER OF TOTAL PARAMETERS ~n_planets X 6
  nfit  = pytrades_lib.pytrades.nfit # NUMBER OF PARAMETERS TO FIT
  nfree  = pytrades_lib.pytrades.nfree # NUMBER OF FREE PARAMETERS (ie nrvset)
  dof   = pytrades_lib.pytrades.dof # NUMBER OF DEGREES OF FREEDOM = NDATA - NFIT
  global inv_dof
  #inv_dof = np.float64(1.0 / dof)
  inv_dof = pytrades_lib.pytrades.inv_dof

  # READ THE NAMES OF THE PARAMETERS FROM THE TRADES_LIB AND CONVERT IT TO PYTHON STRINGS
  #reshaped_names = pytrades_lib.pytrades.parameter_names.reshape((10,nfit), order='F').T
  #parameter_names = [''.join(reshaped_names[i,:]).strip() for i in range(0,nfit)]
  
  #parameter_names = anc.convert_fortran2python_strarray(pytrades_lib.pytrades.parameter_names, nfit, str_len=10)
  #trades_names = anc.convert_fortran2python_strarray(pytrades_lib.pytrades.parameter_names,
                                                     #nfit, str_len=10
                                                    #)
  ##parameter_names = anc.trades_names_to_emcee(trades_names)
  str_len = pytrades_lib.pytrades.str_len
  temp_names = pytrades_lib.pytrades.get_parameter_names(nfit,str_len)
  trades_names = anc.convert_fortran_charray2python_strararray(temp_names)
  parameter_names = trades_names
  
  
  if(cli.trades_previous is not None):
    temp_names, trades_parameters = anc.read_fitted_file(cli.trades_previous)
    if(nfit != np.shape(trades_parameters)[0]):
      anc.print_both(' NUMBER OF PARAMETERS (%d) IN TRADES-PREVIOUS FILE DOES NOT' \
                 ' MATCH THE CURRENT CONFIGURATION nfit=%d\nSTOP' \
                 %(np.shape(trades_parameters)[0], nfit)
                )
      sys.exit()
    del temp_names
  else:
    # INITIAL PARAMETER SET (NEEDED ONLY TO HAVE THE PROPER ARRAY/VECTOR)
    #fitting_parameters = pytrades_lib.pytrades.fitting_parameters
    trades_parameters = pytrades_lib.pytrades.fitting_parameters
  
  # save initial_fitting parameters into array  
  original_fit_parameters = trades_parameters.copy()
  #fitting_parameters = anc.e_to_sqrte_fitting(trades_parameters, trades_names)
  fitting_parameters = trades_parameters
  
  trades_minmax = pytrades_lib.pytrades.parameters_minmax # PARAMETER BOUNDARIES
  parameters_minmax = trades_minmax.copy()
  #parameters_minmax[:,0] = anc.e_to_sqrte_fitting(parameters_minmax[:,0], trades_names)
  #parameters_minmax[:,1] = anc.e_to_sqrte_fitting(parameters_minmax[:,1], trades_names)

  # RADIAL VELOCITIES SET
  n_rv = pytrades_lib.pytrades.nrv
  n_set_rv = pytrades_lib.pytrades.nrvset

  # TRANSITS SET
  n_t0 = pytrades_lib.pytrades.nt0
  #n_t0_sum = np.sum(n_t0)
  n_t0_sum = pytrades_lib.pytrades.ntts
  n_set_t0 = 0
  for i in range(0, n_bodies):
    #if (np.sum(n_t0[i]) > 0): n_set_t0 += 1
    if (n_t0[i] > 0): n_set_t0 += 1

  # compute global constant for the loglhd
  global ln_err_const

  #try:
    ## fortran variable RV in python will be rv!!!
    #e_RVo = np.array(pytrades_lib.pytrades.ervobs[:], dtype=np.float64)
  #except:
    #e_RVo = np.array([0.], dtype=np.float64)
  #try:
    #e_T0o = np.array(pytrades_lib.pytrades.et0obs[:,:], dtype=np.float64).reshape((-1))
  #except:
    #e_T0o = np.array([0.], dtype=np.float64)
  #ln_err_const = anc.compute_ln_err_const(dof, e_RVo, e_T0o, cli.ln_flag)
  ln_err_const = pytrades_lib.pytrades.ln_err_const

  # SET EMCEE PARAMETERS:
  nwalkers, nruns, nsave, npost = get_emcee_arguments(cli,nfit)

  # INITIALISE SCRIPT FOLDER/LOG FILE
  working_folder, run_log, of_run = init_folder(working_path, cli.sub_folder)

  anc.print_both('',of_run)
  anc.print_both(' ======== ',of_run)
  anc.print_both(' pyTRADES' ,of_run)
  anc.print_both(' ======== ',of_run)
  anc.print_both('',of_run)
  anc.print_both(' WORKING PATH = %s' %(working_path),of_run)
  anc.print_both(' NUMBER OF THREADS = %d' %(nthreads),of_run)
  anc.print_both(' dof = ndata(%d) - nfit(%d) - nfree(%d) = %d' %(ndata, nfit, nfree, dof),of_run)
  anc.print_both(' Total N_RV = %d for %d set(s)' %(n_rv, n_set_rv),of_run)
  anc.print_both(' Total N_T0 = %d for %d out of %d planet(s)' %(n_t0_sum, n_set_t0, n_planets),of_run)
  anc.print_both(' %s = %.7f' %('log constant error', ln_err_const),of_run)
  anc.print_both(' %s = %.7f' %('IN FORTRAN log constant error', pytrades_lib.pytrades.ln_err_const),of_run)
  anc.print_both(' seed = %s' %(str(cli.seed)), of_run)

  if(cli.trades_previous is not None):
    anc.print_both('\n ******\n INITIAL FITTING PARAMETERS FROM PREVIOUS' \
              ' TRADES-EMCEE SIM IN FILE:\n %s\n ******\n' %(cli.trades_previous),
              of_run
              )
    
  anc.print_both(' ORIGINAL PARAMETER VALUES -> 0000', of_run)
  fitness_0000, lgllhd_0000, check_0000 = pytrades_lib.pytrades.write_summary_files(0, original_fit_parameters)
  anc.print_both(' ', of_run)
  #anc.print_both(' TESTING LNPROB_SQ ...', of_run)
  
  lgllhd_zero = lnprob(trades_parameters)
  #lgllhd_sq_zero = lnprob(fitting_parameters, parameter_names)

  anc.print_both(' ', of_run)
  anc.print_both(' %15s %23s %23s' %('trades_names', 'original_trades', 'trades_par'), of_run)
  for ifit in range(0, nfit):
    anc.print_both(' %15s %23.16e %23.16e' %(trades_names[ifit], original_fit_parameters[ifit], trades_parameters[ifit]), of_run)
  anc.print_both(' ', of_run)
  anc.print_both(' %15s %23.16e %23.16e' %('lnprob', lgllhd_0000,lgllhd_zero), of_run)
  anc.print_both(' ', of_run)
  
  # INITIALISES THE WALKERS
  if(cli.emcee_previous is not None):
    anc.print_both(' Use a previous emcee simulation: %s' %(cli.emcee_previous), of_run)
    last_p0, old_nwalkers, last_done = anc.get_last_emcee_iteration(cli.emcee_previous, nwalkers)
    if(not last_done):
      anc.print_both('**STOP: USING A DIFFERENT NUMBER OF WALKERS (%d) W.R.T. PREVIOUS EMCEE SIMULATION (%d).' %(nwalkers, old_nwalkers), of_run)
      sys.exit()
    p0 = last_p0
  else:
    p0 = compute_initial_walkers(nfit, nwalkers, fitting_parameters, parameters_minmax, parameter_names, cli.delta_sigma, of_run)

  anc.print_both(' emcee chain: nwalkers = %d nruns = %d' %(nwalkers, nruns), of_run)
  anc.print_both(' sampler ... ',of_run)
  
  # old version with threads
  #sampler = emcee.EnsembleSampler(nwalkers, nfit, lnprob, threads=nthreads)
  #sampler = emcee.EnsembleSampler(nwalkers, nfit, lnprob_sq, threads=nthreads, args=[parameter_names])
  
  threads_pool = emcee.interruptible_pool.InterruptiblePool(nthreads)
  sampler = emcee.EnsembleSampler(nwalkers, nfit, lnprob, pool=threads_pool)
  
  anc.print_both(' ready to go', of_run)
  anc.print_both(' with nsave = %s' %(str(nsave)), of_run)
  sys.stdout.flush()

  #sys.exit()

  if (nsave != False):
    # save temporary sampling during emcee every nruns*10%
    #if(os.path.exists(os.path.join(working_folder, 'emcee_temp.hdf5')) and os.path.isfile(os.path.join(working_folder, 'emcee_temp.hdf5'))):
      #os.remove(os.path.join(working_folder, 'emcee_temp.hdf5'))
    if(os.path.exists(os.path.join(working_folder, 'emcee_summary.hdf5')) and os.path.isfile(os.path.join(working_folder, 'emcee_summary.hdf5'))):
      os.remove(os.path.join(working_folder, 'emcee_summary.hdf5'))
    f_hdf5 = h5py.File(os.path.join(working_folder, 'emcee_summary.hdf5'), 'a')
    f_hdf5.create_dataset('parameter_names', data=parameter_names, dtype='S10')
    f_hdf5.create_dataset('boundaries', data=parameters_minmax, dtype=np.float64)
    temp_dset = f_hdf5.create_dataset('chains', (nwalkers, nruns, nfit), dtype=np.float64)
    f_hdf5['chains'].attrs['nwalkers'] = nwalkers
    f_hdf5['chains'].attrs['nruns'] = nruns
    f_hdf5['chains'].attrs['nfit'] = nfit
    f_hdf5['chains'].attrs['nfree'] = nfree
    temp_lnprob = f_hdf5.create_dataset('lnprobability', (nwalkers, nruns), dtype=np.float64)
    temp_lnprob.attrs['ln_err_const'] = ln_err_const
    temp_acceptance = f_hdf5.create_dataset('acceptance_fraction', data=np.zeros((nwalkers)), dtype=np.float64)  # one value per walker
    temp_acor = f_hdf5.create_dataset('autocor_time', data=np.zeros((nfit)), dtype=np.float64)
    f_hdf5.close()
    pos = p0
    nchains = int(nruns/nsave)
    state=None
    anc.print_both(' Running emcee with temporary saving', of_run)
    sys.stdout.flush()
    for i in range(0, nchains):
      anc.print_both('', of_run)
      anc.print_both(' iter: %6d ' %(i+1), of_run)
      aaa = i*nsave
      bbb = aaa+nsave
      pos, prob, state = sampler.run_mcmc(pos, N=nsave, rstate0=state)
      anc.print_both('completed %d steps of %d' %(bbb, nruns), of_run)
      f_hdf5 = h5py.File(os.path.join(working_folder, 'emcee_summary.hdf5'), 'a')
      temp_dset = f_hdf5['chains'] #[:,:,:]
      temp_dset[:,aaa:bbb,:] = sampler.chain[:, aaa:bbb, :]
      temp_dset.attrs['completed_steps'] = bbb

      temp_lnprob = f_hdf5['lnprobability'] #[:,:]
      temp_lnprob[:, aaa:bbb] = sampler.lnprobability[:, aaa:bbb]
      shape_lnprob = sampler.lnprobability.shape
      
      acceptance_fraction = sampler.acceptance_fraction
      temp_acceptance = f_hdf5['acceptance_fraction']
      temp_acceptance[...] = acceptance_fraction  # write into the HDF5 dataset
      #f_hdf5.create_dataset('acceptance_fraction', data=acceptance_fraction, dtype=np.float64)
      mean_acceptance_fraction = np.mean(acceptance_fraction)
    
      #temp_chains_T = np.zeros((bbb, nwalkers, nfit))
      #for ifit in range(0,nfit):
        #temp_chains_T[:,:,ifit] = sampler.chain[:, :bbb, ifit].T
      #acor_time = anc.compute_autocor_time(temp_chains_T, walkers_transposed=True)
      acor_time = anc.compute_acor_time(sampler, steps_done=bbb)
      temp_acor = f_hdf5['autocor_time']
      temp_acor[...] = acor_time
      
      #f_hdf5.create_dataset('autocor_time', data=np.array(acor_temp, dtype=np.float64), dtype=np.float64)
      #f_hdf5.create_dataset('autocor_time', data=np.array(sampler.acor, dtype=np.float64), dtype=np.float64) # not working
      #print 'aaa = %6d bbb = %6d -> sampler.lnprobability.shape = (%6d , %6d)' %(aaa, bbb, shape_lnprob[0], shape_lnprob[1])
      f_hdf5.close()
      sys.stdout.flush()
    anc.print_both('', of_run)
    anc.print_both('...done with saving temporary total shape = %s' %(str(np.shape(sampler.chain))), of_run)
    anc.print_both('', of_run)
    sys.stdout.flush()

  # RUN EMCEE AND RESET AFTER REMOVE BURN-IN
  #pos, prob, state = sampler.run_mcmc(p0, npost)
  #sampler.reset()
  #sampler.run_mcmc(pos, nruns, rstate0=state)
  else:
    # GOOD COMPLETE SINGLE RUNNING OF EMCEE, WITHOUT REMOVING THE BURN-IN
    anc.print_both(' Running full emcee ...', of_run)
    sys.stdout.flush()
    sampler.run_mcmc(p0, nruns)
    anc.print_both('done', of_run)
    anc.print_both('', of_run)
    sys.stdout.flush()
    flatchains = sampler.chain[:, :, :].reshape((nwalkers*nruns, nfit)) # full chain values
    acceptance_fraction = sampler.acceptance_fraction
    mean_acceptance_fraction = np.mean(acceptance_fraction)
    #autocor_time = sampler.acor
    temp_chains_T = np.zeros((nruns, nwalkers, nfit))  # the full run contains nruns steps
    for ifit in range(0,nfit):
      temp_chains_T[:,:,ifit] = sampler.chain[:, :, ifit].T
    #acor_time = anc.compute_autocor_time(temp_chains_T, walkers_transposed=True)
    acor_time = anc.compute_acor_time(sampler)
    lnprobability = sampler.lnprobability
    # save chains with original shape as hdf5 file
    f_hdf5 = h5py.File(os.path.join(working_folder, 'emcee_summary.hdf5'), 'w')
    f_hdf5.create_dataset('chains', data=sampler.chain, dtype=np.float64)
    f_hdf5['chains'].attrs['nwalkers'] = nwalkers
    f_hdf5['chains'].attrs['nruns'] = nruns
    f_hdf5['chains'].attrs['nfit'] = nfit
    f_hdf5['chains'].attrs['nfree'] = nfree
    f_hdf5['chains'].attrs['completed_steps'] = nruns
    f_hdf5.create_dataset('parameter_names', data=parameter_names, dtype='S10')
    f_hdf5.create_dataset('boundaries', data=parameters_minmax, dtype=np.float64)
    f_hdf5.create_dataset('acceptance_fraction', data=acceptance_fraction, dtype=np.float64)
    f_hdf5.create_dataset('autocor_time', data=acor_time, dtype=np.float64)
    f_hdf5.create_dataset('lnprobability', data=lnprobability, dtype=np.float64)
    f_hdf5['lnprobability'].attrs['ln_err_const'] = ln_err_const
    f_hdf5.close()

  anc.print_both(" Mean_acceptance_fraction should be between [0.25-0.5] = %.6f" %(mean_acceptance_fraction), of_run)
  anc.print_both('', of_run)

  # close the pool of threads
  threads_pool.close()
  threads_pool.terminate()
  threads_pool.join()

  anc.print_both('COMPLETED EMCEE', of_run)

  elapsed = time.time() - start
  elapsed_d, elapsed_h, elapsed_m, elapsed_s = anc.computation_time(elapsed)

  anc.print_both('', of_run)
  anc.print_both(' pyTRADES: EMCEE FINISHED in %2d day %02d hour %02d min %.2f sec - bye bye' %(int(elapsed_d), int(elapsed_h), int(elapsed_m), elapsed_s), of_run)
  anc.print_both('', of_run)
  of_run.close()
  pytrades_lib.pytrades.deallocate_variables()

  return
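
For reference, the HDF5 summary written by main() can be read back as sketched below; the dataset names match those created above, but the file path is a placeholder.

import h5py
import numpy as np

with h5py.File('emcee_summary.hdf5', 'r') as f:                # placeholder path
    chains = f['chains'][...]                                  # (nwalkers, nruns, nfit)
    lnprob = f['lnprobability'][...]                           # (nwalkers, nruns)
    names = [n.decode() if isinstance(n, bytes) else n
             for n in f['parameter_names'][...]]
    done = f['chains'].attrs.get('completed_steps', chains.shape[1])

flat = chains[:, :done, :].reshape(-1, chains.shape[2])        # flatten walkers x steps
print(names)
print('posterior medians:', np.median(flat, axis=0))
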
Example #42
0
def execute(vec, dx, dy, quan=1.0):
    """Calculate the divergence of Vector from dx and dy.\n\
    for each output cell,              \n
        u = Vector[0]                  \n
        v = Vector[1]                  \n
        diff_u = u[i+1,j]*q[i+1,j] - u[i-1,j]*q[i-1,j]   \n
        diff_v = v[i,j+1]*q[i,j+1] - v[i,j-1]*q[i,j-1]   \n
        dudx = diff_u/dx[i,j]          \n
        dvdy = diff_v/dy[i,j]          \n
        diverg[i,j] = (dudx + dvdy)/2  \n 
        """
    vec_U, vec_V = vec

    vshape = shape(vec_U)

    if len(vshape) < 2:
        raise TypeError, "Divergence: Vector must be at least 2-D."

    if vshape[0] < 3:
        raise ValueError, "Divergence: Vector's first dimension must be at least 3."

    if vshape[1] < 3:
        raise ValueError, "Divergence: Vector's second dimension must be at least 3."

    rslt = empty(vshape, dtype=vec_U.dtype)

    rslt[0, :] = NaN
    rslt[-1, :] = NaN
    rslt[1:-1, 0] = NaN
    rslt[1:-1, -1] = NaN

    if shape(quan) == ():
        quan_rl = quan
        quan_tb = quan
    else:
        quan_rl = quan[1:-1]
        quan_tb = quan[:, 1:-1]

    QU = quan_rl * vec_U[1:-1, :]
    QV = quan_tb * vec_V[:, 1:-1]
    diff_U = QU[:, 2:] - QU[:, 0:-2]
    diff_V = QV[0:-2, :] - QV[2:, :]

    # if dx is a constant or one-element array, just divide by it.
    # otherwise, divide each cell in diff_U by the corresponding cell in dx.
    dxshape = shape(dx)
    ldxs = len(dxshape)
    if ldxs == 0 or sum(dxshape) == ldxs:
        dudx = diff_U / dx
    elif dxshape == vshape:
        dudx = diff_U / dx[1:-1, 1:-1]
    else:
        raise TypeError, "Divergence: dx must be a scalar or the same shape as Vector."

    # if dy is a constant or one-element array, just divide by it.
    # otherwise, divide each cell in diff_V by the corresponding cell in dy.
    dyshape = shape(dy)
    ldys = len(dyshape)
    if ldys == 0 or sum(dyshape) == ldys:
        dvdy = diff_V / dy
    elif dyshape == vshape:
        dvdy = diff_V / dy[1:-1, 1:-1]
    else:
        raise TypeError, "Divergence: dy must be a scalar or the same shape as Vector."

    rslt[1:-1, 1:-1] = (dudx + dvdy) / 2

    return rslt
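
A minimal usage sketch for execute(), assuming the surrounding module does a star import from numpy (the function body relies on shape, empty and NaN from that namespace). The field below is synthetic, with u growing linearly in x and v constant, so the interior divergence should be about (1 + 0)/2 = 0.5.

from numpy import arange, tile, zeros

ny, nx = 5, 6
u = tile(arange(nx) * 1.0, (ny, 1))     # u = x  -> centred difference over two cells is 2
v = zeros((ny, nx))                     # v = 0

div = execute((u, v), dx=2.0, dy=2.0)   # dx spans the two-cell centred difference
print(div[1:-1, 1:-1])                  # interior cells ~0.5; the border stays NaN
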
   # ... (rows of float32 values truncated in the source; the opening of the `baseball` array literal is missing)
], "float32")
print np.shape(baseball)
file_path="/Users/thomasaref/Dropbox/Current stuff/Logbook/TA210715A46_cooldown1/Data_1008/TA46_refll_fluxpowswp_4p2GHz4pGHz.hdf5"

with File(file_path, 'r') as f:
    print f["Traces"].keys()
    print f.attrs["comment"]
    print f["Instrument config"].keys()
    #ctl_frq=f["Instrument config"]['PXI Aeroflex 302x Signal Generator - GPIB: PXI3::13::INSTR, PXI SigGen at localhost'].attrs["Frequency"] #["Power"]
    probe_frq=f["Instrument config"]['Rohde&Schwarz Network Analyzer - IP: 169.254.107.192,  at localhost'].attrs["Start frequency"]
    probe_pwr=f["Instrument config"]['Rohde&Schwarz Network Analyzer - IP: 169.254.107.192,  at localhost'].attrs["Output power"]

    #print probe_frq, probe_pwr, ctl_frq        
    print f["Data"]["Channel names"][:]
    Magvec=f["Traces"]["Rohde&Schwarz Network Analyzer - S12"]#[:]
    data=f["Data"]["Data"]
    #pwr2=data[:,0,:].astype(float64)
    print shape(data)

    yoko=data[:,0,0].astype(float64)
    pwr=data[0,1,:].astype(float64)
    print pwr
    fstart=f["Traces"]['Rohde&Schwarz Network Analyzer - S12_t0dt'][0][0]
    fstep=f["Traces"]['Rohde&Schwarz Network Analyzer - S12_t0dt'][0][1]
    print shape(Magvec)
    sm=shape(Magvec)[0]
    sy=shape(data)
    s=(sm, sy[0], sy[2]) 
    print s
    Magcom=Magvec[:,0,:]+1j*Magvec[:,1,:]
    Magcom=reshape(Magcom, s, order="F")
    freq=linspace(fstart, fstart+fstep*(sm-1), sm)
#    
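A possible continuation of the snippet above (illustrative only): plot the magnitude of the reshaped S12 trace against flux bias and probe frequency for the first power setting.

from numpy import absolute
from matplotlib.pyplot import pcolormesh, colorbar, show

pcolormesh(yoko, freq / 1e9, absolute(Magcom[:, :, 0]))   # frequency in GHz on the y-axis
colorbar()
show()
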
Example #45
0
    def err_band_sz1(self,
                     orth=False,
                     svar=False,
                     repl=1000,
                     signif=0.05,
                     seed=None,
                     burn=100,
                     component=None):
        """
        IRF Sims-Zha error band method 1. Assumes symmetric error bands around
        mean.

        Parameters
        ----------
        orth : bool, default False
            Compute orthogonalized impulse responses
        repl : int, default 1000
            Number of MC replications
        signif : float (0 < signif < 1)
            Significance level for error bars, defaults to 95% CI
        seed : int, default None
            np.random seed
        burn : int, default 100
            Number of initial simulated obs to discard
        component : neqs x neqs array, default to largest for each
            Index of column of eigenvector/value to use for each error band
            Note: period of impulse (t=0) is not included when computing
            principal component

        References
        ----------
        Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
        Response". Econometrica 67: 1113-1155.
        """

        model = self.model
        periods = self.periods
        irfs = self._choose_irfs(orth, svar)
        neqs = self.neqs
        irf_resim = model.irf_resim(orth=orth,
                                    repl=repl,
                                    steps=periods,
                                    seed=seed,
                                    burn=burn)
        q = util.norm_signif_level(signif)

        W, eigva, k = self._eigval_decomp_SZ(irf_resim)

        if component is not None:
            if np.shape(component) != (neqs, neqs):
                raise ValueError("Component array must be " + str(neqs) +
                                 " x " + str(neqs))
            if np.argmax(component) >= neqs * periods:
                raise ValueError(
                    "Atleast one of the components does not exist")
            else:
                k = component

        # here take the kth column of W, which we determine by finding the largest eigenvalue of the covariance matrix
        lower = np.copy(irfs)
        upper = np.copy(irfs)
        for i in range(neqs):
            for j in range(neqs):
                lower[1:, i,
                      j] = irfs[1:, i, j] + W[i, j, :, k[i, j]] * q * np.sqrt(
                          eigva[i, j, k[i, j]])
                upper[1:, i,
                      j] = irfs[1:, i, j] - W[i, j, :, k[i, j]] * q * np.sqrt(
                          eigva[i, j, k[i, j]])

        return lower, upper
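
A hedged usage sketch: err_band_sz1 is reached through the impulse-response object returned by VARResults.irf() in statsmodels; the data below is synthetic and the options are illustrative.

import numpy as np
from statsmodels.tsa.api import VAR

data = np.random.randn(200, 3)                    # synthetic 3-variable system
res = VAR(data).fit(maxlags=2)
irf = res.irf(periods=10)
lower, upper = irf.err_band_sz1(orth=True, repl=200, signif=0.05, seed=0)
print(lower.shape, upper.shape)                   # one band per period, equation and shock
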
Example #46
0
def average_y_over_kxky(y_vs_kxky, vec_time, t_range, interpolate, vec_kx,
                        vec_ky):
    from stellapy.decorators.verbose_wrapper import indent
    # phi2_vs_kx_ky has dimensions (ntime, nakx, naky)
    # Here, we remove all values of phi2_vs_kx_ky of times out of the selected interval.
    y_to_avrg = y_vs_kxky[(vec_time > t_range[0]) &
                          (vec_time < t_range[1]), :, :]

    # Each tile is the average of |phi(kx,ky)^2| over the time range
    y_avrg = np.nanmean(y_to_avrg, axis=0)

    # Remove broken tiles
    print(
        indent,
        'Warning: all tiles with phi2<E-30 or phi2>100 are removed and shown in black in phi'
        + str(np.shape(y_avrg)) + '.')
    print(indent, '   Tiles lower than E-30 are at:',
          np.argwhere(y_avrg[:, :] < 1.E-30))
    print(indent, '   Their values are:', y_avrg[y_avrg[:, :] < 1.E-30])
    print(indent, '   Tiles bigger than 100 are at:  ',
          np.argwhere(y_avrg[:, :] > 100))
    print(indent, '   Their values are:', y_avrg[y_avrg[:, :] > 100])
    filter_low = y_avrg[:, :] < 1.E-30  # kx,ky = 0,0 is 2.58485853e-32
    filter_high = y_avrg[:, :] > 100  # kx,ky = left right,0 is 28

    # Interpolate the data
    if interpolate:
        y_avrg[filter_low] = np.nan
        y_avrg[filter_high] = np.nan
        for index in np.argwhere(
                np.isnan(y_avrg)):  # At ky,kx=0,0 the values are nan's
            if (index[0] <= 27 or index[0]
                    == len(y_avrg[:, 1]) - 1) and (not index[0] == 0):
                y_avrg[index[0], index[1]] = y_avrg[index[0] - 1, index[1]]
            if (index[0] >= 28 or index[0]
                    == 0) and (not index[0] == len(y_avrg[:, 1]) - 1):
                y_avrg[index[0], index[1]] = y_avrg[index[0] + 1, index[1]]
        function = interp2d(vec_ky, vec_kx, y_avrg, kind='linear')
        xnew = np.linspace(vec_ky[0], vec_ky[-1],
                           int(len(vec_ky)) * interpolate)
        ynew = np.linspace(vec_kx[0], vec_kx[-1],
                           int(len(vec_kx)) * interpolate)
        y_avrg_interp = function(xnew, ynew)
        vec_ky_interp, vec_kx_interp = np.meshgrid(xnew, ynew)
        vec_ky_interp = vec_ky_interp[0, :]
        vec_kx_interp = vec_kx_interp[:, 0]

    # Remove broken tiles
    if interpolate:
        y_avrg_interp[np.repeat(np.repeat(filter_low, interpolate, axis=1),
                                interpolate,
                                axis=0)] = np.nan
        y_avrg_interp[np.repeat(np.repeat(filter_high, interpolate, axis=1),
                                interpolate,
                                axis=0)] = np.nan
    if not interpolate:
        y_avrg[filter_low] = np.nan
        y_avrg[filter_high] = np.nan

    if not interpolate: return vec_kx, vec_ky, y_avrg
    if interpolate: return vec_kx_interp, vec_ky_interp, y_avrg_interp
Example #47
0
def main():
    """
    Read input video and process it, the output video will be exported output_video path
     which can be set by input arguments.
    Example: python inference_video.py --config configs/config.json --input_video_path data/video/sample.mov
     --output_video data/videos/output.avi
    """
    argparse = ArgumentParser()
    argparse.add_argument('--config', type=str, help='json config file path')
    argparse.add_argument('--input_video_path',
                          type=str,
                          help='the path of input video',
                          default='')
    argparse.add_argument('--output_video',
                          type=str,
                          help='the name of output video file',
                          default='face_mask_output.avi')
    args = argparse.parse_args()

    config_path = args.config
    cfg = Config(path=config_path)

    if args.input_video_path == '':
        input_path = cfg.APP_VIDEO_PATH
    else:
        input_path = args.input_video_path

    print("INFO: Input video is: ", input_path)
    output_path = args.output_video
    file_name_size = len(output_path.split('/')[-1])
    output_dir = output_path[:-file_name_size]

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    print("INFO: The output video will be exported at: ", output_path)

    detector_input_size = (cfg.DETECTOR_INPUT_SIZE[0],
                           cfg.DETECTOR_INPUT_SIZE[1], 3)
    classifier_img_size = (cfg.CLASSIFIER_INPUT_SIZE,
                           cfg.CLASSIFIER_INPUT_SIZE, 3)

    device = cfg.DEVICE
    detector = None
    classifier = None
    output_vidwriter = None

    if device == "x86":
        from libs.detectors.x86.detector import Detector
        from libs.classifiers.x86.classifier import Classifier

    elif device == "EdgeTPU":
        from libs.detectors.edgetpu.detector import Detector
        from libs.classifiers.edgetpu.classifier import Classifier
    elif device == "Jetson":
        from libs.detectors.jetson.detector import Detector
        from libs.classifiers.jetson.classifier import Classifier
    else:
        raise ValueError('Not supported device named: ', device)

    detector = Detector(cfg)
    classifier_model = Classifier(cfg)
    input_cap = cv.VideoCapture(input_path)

    print("INFO: Start inferencing")
    frame_id = 0
    while (input_cap.isOpened()):
        ret, raw_img = input_cap.read()
        if ret == False:
            break
        if output_vidwriter is None:
            output_vidwriter = cv.VideoWriter(
                output_path, cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 24,
                (raw_img.shape[1], raw_img.shape[0]))
            height, width = raw_img.shape[:2]

        # Run detection on the same frame that the boxes are drawn on
        cv_image = raw_img
        if np.shape(cv_image) != ():
            resized_image = cv.resize(cv_image, tuple(detector_input_size[:2]))
            rgb_resized_image = cv.cvtColor(resized_image, cv.COLOR_BGR2RGB)
            objects_list = detector.inference(rgb_resized_image)
            faces = []
            cordinates = []
            cordinates_head = []
            for obj in objects_list:
                if 'bbox' in obj.keys():
                    face_bbox = obj['bbox']  # [ymin, xmin, ymax, xmax]
                    xmin, xmax = np.multiply([face_bbox[1], face_bbox[3]],
                                             width)
                    ymin, ymax = np.multiply([face_bbox[0], face_bbox[2]],
                                             height)
                    croped_face = cv_image[int(ymin):int(ymin) +
                                           (int(ymax) - int(ymin)),
                                           int(xmin):int(xmin) +
                                           (int(xmax) - int(xmin))]
                    # Resizing input image
                    croped_face = cv.resize(croped_face,
                                            tuple(classifier_img_size[:2]))
                    croped_face = cv.cvtColor(croped_face, cv.COLOR_BGR2RGB)
                    # Normalizing input image to [0.0-1.0]
                    croped_face = croped_face / 255.0
                    faces.append(croped_face)
                    cordinates.append(
                        [int(xmin), int(ymin),
                         int(xmax), int(ymax)])
                if 'bbox_head' in obj.keys():
                    head_bbox = obj['bbox_head']  # [ymin, xmin, ymax, xmax]
                    xmin, xmax = np.multiply([head_bbox[1], head_bbox[3]],
                                             width)
                    ymin, ymax = np.multiply([head_bbox[0], head_bbox[2]],
                                             height)
                    cordinates_head.append(
                        [int(xmin), int(ymin),
                         int(xmax), int(ymax)])

            faces = np.array(faces)
            face_mask_results, scores = classifier_model.inference(faces)
            for i, cor in enumerate(cordinates):
                if face_mask_results[i] == 1:
                    color = (0, 0, 255)
                elif face_mask_results[i] == 0:
                    color = (0, 255, 0)
                else:
                    color = (0, 0, 0)

                cv.rectangle(raw_img, (cor[0], cor[1]), (cor[2], cor[3]),
                             color, 2)

            for cor in cordinates_head:
                cv.rectangle(raw_img, (cor[0], cor[1]), (cor[2], cor[3]),
                             (200, 200, 200), 2)
            output_vidwriter.write(raw_img)
            print('Processed frame {} ({} fps)'.format(frame_id, detector.fps))
            frame_id = frame_id + 1
        else:
            continue

    input_cap.release()
    output_vidwriter.release()
    print('INFO: Finished. Output video exported at:', output_path)
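The normalized-to-pixel bounding-box handling inside the loop above can be isolated into a small helper. A hedged sketch (the name and signature are illustrative, not part of the original script):

import numpy as np

def crop_from_normalized_bbox(frame, bbox):
    """Crop a region given a [ymin, xmin, ymax, xmax] box in 0-1 coordinates.

    Sketch of the coordinate handling used in the loop above; not part of the
    original script.
    """
    height, width = frame.shape[:2]
    ymin, xmin, ymax, xmax = bbox
    x0, x1 = int(xmin * width), int(xmax * width)
    y0, y1 = int(ymin * height), int(ymax * height)
    return frame[y0:y1, x0:x1]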
Example #48
0
    def err_band_sz2(self,
                     orth=False,
                     svar=False,
                     repl=1000,
                     signif=0.05,
                     seed=None,
                     burn=100,
                     component=None):
        """
        IRF Sims-Zha error band method 2.

        This method does not assume symmetric error bands around the mean.

        Parameters
        ----------
        orth : bool, default False
            Compute orthogonalized impulse responses
        repl : int, default 1000
            Number of MC replications
        signif : float (0 < signif < 1)
            Significance level for error bars, defaults to 95% CI
        seed : int, default None
            np.random seed
        burn : int, default 100
            Number of initial simulated obs to discard
        component : neqs x neqs array, default to largest for each
            Index of column of eigenvector/value to use for each error band
            Note: period of impulse (t=0) is not included when computing
            principal component

        References
        ----------
        Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse
        Response". Econometrica 67: 1113-1155.
        """
        model = self.model
        periods = self.periods
        irfs = self._choose_irfs(orth, svar)
        neqs = self.neqs
        irf_resim = model.irf_resim(orth=orth,
                                    repl=repl,
                                    T=periods,
                                    seed=seed,
                                    burn=burn)

        W, eigva, k = self._eigval_decomp_SZ(irf_resim)

        if component is not None:
            if np.shape(component) != (neqs, neqs):
                raise ValueError("Component array must be " + str(neqs) +
                                 " x " + str(neqs))
            if np.argmax(component) >= neqs * periods:
                raise ValueError(
                    "Atleast one of the components does not exist")
            else:
                k = component

        gamma = np.zeros((repl, periods + 1, neqs, neqs))
        for p in range(repl):
            for i in range(neqs):
                for j in range(neqs):
                    gamma[p, 1:, i,
                          j] = W[i, j, k[i, j], :] * irf_resim[p, 1:, i, j]

        gamma_sort = np.sort(gamma, axis=0)  #sort to get quantiles
        indx = round(signif / 2 * repl) - 1, round((1 - signif / 2) * repl) - 1

        lower = np.copy(irfs)
        upper = np.copy(irfs)
        for i in range(neqs):
            for j in range(neqs):
                lower[:, i, j] = irfs[:, i, j] + gamma_sort[indx[0], :, i, j]
                upper[:, i, j] = irfs[:, i, j] + gamma_sort[indx[1], :, i, j]

        return lower, upper
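The Sims-Zha bands above are normally reached through a VAR impulse-response analysis object. A hedged usage sketch, assuming this method lives on an IRF analysis class like the one in statsmodels (data, lag order, and replication counts are illustrative):

import numpy as np
import pandas as pd
from statsmodels.tsa.api import VAR

rng = np.random.default_rng(0)
data = pd.DataFrame(rng.standard_normal((200, 2)), columns=['y1', 'y2'])

results = VAR(data).fit(maxlags=2)      # fit a small VAR
irf = results.irf(periods=10)           # impulse-response analysis object

# Sims-Zha method 2: asymmetric bands around the point IRFs
lower, upper = irf.err_band_sz2(orth=True, repl=500, signif=0.05, seed=0)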
def histogram_enhancement(im, etype='linear2', target=None, maxCount=255):
    """
    Function to run histogram enhancement and histogram matching for a given
    image. The function returns an image after the histogram matching or
    enhancement has been performed.

    Args:
        im (array): image to be modified based on the etype or target
        etype (optional[string]): type of enhancement to perform
            Can be 'linear2', 'linear3', etc..., to do a linear enhancement
            based on the CDF.
            Can be 'equalize' to do enhancement based on a flat histogram.
            Can be 'match', to do an image or histogram (PDF) based enhancement.
        target (optional[image, histogram]): image or histogram to match against
            Can be None, to do enhancement based on other etypes.
        maxCount (optional[int]): the maximum value for a digital count.
    Returns:
        the enhanced image
    Raises:
        TypeError: if image is not a numpy ndarray
        TypeError: if target is not a numpy ndarray when performing a match
        ValueError: if a linear etype does not end in a digit (e.g. 'linear3' is valid)
        ValueError: if etype is not 'linear', 'match', or 'equalize'
    """

    outputImage = np.zeros(np.shape(im))

    # get int type for the image (to return to that later)
    dtype = im.dtype

    # type checking, look at the above Raises section
    if (not isinstance(im, np.ndarray)):
        raise TypeError("image is not a numpy ndarray; use openCV's imread")

    # use the etype to determine type of transform
    if (not isinstance(etype, str)):
        raise TypeError("etype is not a string value")

    if (etype.find("linear") == 0):
        # linear was at index 0, so we're doing a linear transformation
        linearValue = etype.split("linear")[1]
        if (not linearValue.isdigit()):
            # check if "linear" was not followed by a digit
            raise ValueError("linear etype should contain a digit")

        # linear percentage to enhance by
        linPct = int(linearValue)

        # perform linear enhancement
        if (len(np.shape(im)) == 3):
            # color image, build special lookup table
            lut = build_color_linear_lookup_table(im, linPct, maxCount + 1)
            outputImage[:, :, 0] = lut[0][im[:, :, 0]]
            outputImage[:, :, 1] = lut[1][im[:, :, 1]]
            outputImage[:, :, 2] = lut[2][im[:, :, 2]]
        else:
            lut = build_linear_lookup_table(im, linPct, maxCount + 1)
            outputImage = lut[im]

    elif (etype == "match"):
        if (not isinstance(target, np.ndarray)):
            raise TypeError("target is not a numpy ndarray")
        else:
            # perform match enhancement
            if (len(np.shape(im)) == 3):
                # color image, build special lookup table
                lut = build_color_match_lookup_table(im, target, maxCount)
                outputImage[:, :, 0] = lut[0][im[:, :, 0]]
                outputImage[:, :, 1] = lut[1][im[:, :, 1]]
                outputImage[:, :, 2] = lut[2][im[:, :, 2]]
            else:
                lut = build_match_lookup_table(im, target, maxCount)
                outputImage = lut[im]

    elif (etype == "equalize"):
        # build pdf to match against
        equalizePDF = np.zeros(maxCount)
        equalizePDF.fill(1 / maxCount)

        # perform match enhancement
        if (len(np.shape(im)) == 3):
            # color image, build special lookup table
            lut = build_color_match_lookup_table(im, equalizePDF, maxCount)
            outputImage[:, :, 0] = lut[0][im[:, :, 0]]
            outputImage[:, :, 1] = lut[1][im[:, :, 1]]
            outputImage[:, :, 2] = lut[2][im[:, :, 2]]
        else:
            lut = build_match_lookup_table(im, equalizePDF, maxCount)
            outputImage = lut[im]

    else:
        raise ValueError("etype must be 'linear', 'match', or 'equalize'")

    outputImage = np.array(outputImage, dtype)
    """
    # Compare Histograms ======================================================#
    print("plot original image")
    plotImgHist(im)
    print("plot output image")
    plotImgHist(outputImage)
    """

    return outputImage
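A brief usage sketch of the function above (loading via OpenCV and the file path are assumptions; any uint8 numpy image works):

import cv2 as cv

im = cv.imread('photo.jpg')                                    # BGR uint8 ndarray (path is illustrative)
stretched = histogram_enhancement(im, etype='linear2')         # linear enhancement, digit taken from the etype
equalized = histogram_enhancement(im, etype='equalize')        # match against a flat histogram
matched = histogram_enhancement(im, etype='match', target=stretched)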
Example #50
0
import numpy as np 
import csv
import tensorflow as tf

test_abs_move = False

data = list(csv.reader(open("/home/lilach/MarketStudies/TestConverter_scaled.csv")))
data = np.array(data)
rowSize = np.shape(data)[0]
colSize = np.shape(data)[1]
print('RowSize:', rowSize, 'ColSize:', colSize)
lastRow = 50001
X = (data[1:lastRow,0:colSize-1])
Y = (data[1:lastRow,colSize-1:colSize])

X_TEST = (data[lastRow:lastRow+1000,0:colSize-1])
Y_TEST = (data[lastRow:lastRow+1000,colSize-1:colSize])


X = X.astype(np.float64)
Y = Y.astype(np.float64)
if test_abs_move:
  Y = np.abs(Y)  
#print(X)
#print(Y)
tf_x = tf.constant(dtype=tf.float32,value=X)
y_true = tf.constant(dtype=tf.float32,value=Y)

#Build layers
l1 = tf.layers.Dense(units=20, activation=tf.sigmoid)
y1 = l1(tf_x)
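The graph above stops after the first dense layer. A hedged sketch of one way such a TF1-style script is often completed (an assumed continuation, not part of the original):

l2 = tf.layers.Dense(units=1)                     # linear output for the scalar target
y_pred = l2(y1)

loss = tf.losses.mean_squared_error(labels=y_true, predictions=y_pred)
train_op = tf.train.AdamOptimizer(0.01).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        _, loss_val = sess.run([train_op, loss])
        if step % 100 == 0:
            print('step', step, 'loss', loss_val)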
Example #51
0
for i in range(0, epochs):
    for j in range(0, len(training_images), batch_size):
        print(j)
        if args.use_ref:
            fc1, fc1b, fc2, fc2b, fc3, fc3b, c1, c1b, c2, c2b, c3, c3b, c4, c4b, c5, c5b, pred, _total_correct, g, e, _ = sess.run([get_fc1_weights, get_fc1_bias, \
                                                                        get_fc2_weights, get_fc2_bias, \
                                                                        get_fc3_weights, get_fc3_bias, \
                                                                        get_conv1_weights, get_conv1_bias, \
                                                                        get_conv2_weights, get_conv2_bias, \
                                                                        get_conv3_weights, get_conv3_bias, \
                                                                        get_conv4_weights, get_conv4_bias, \
                                                                        get_conv5_weights, get_conv5_bias, \
                                                                        predict, total_correct, grads, error, optimizer])

            print("fc1: ")
            print(np.shape(fc1), np.average(fc1), np.std(fc1))

            print("fc2: ")
            print(np.shape(fc2), np.average(fc2), np.std(fc2))

            print("fc3: ")
            print(np.shape(fc3), np.average(fc3), np.std(fc3))

            # print ("E: ")
            # print (e[0] / 128.)

            # pretty sure the 2nd dim here is the bias
            # print (np.shape(g[7][0]), np.shape(g[7][1]))

            # print ("G: ")
            # print (g[7][0][0])
Example #52
0
def binomial(n, k):  # function header reconstructed; the original def line was cut off
	"""
	A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
	"""
	if 0 <= k <= n:
		ntok = 1
		ktok = 1
		for t in range(1, min(k, n - k) + 1):
			ntok *= n
			ktok *= t
			n -= 1
		return ntok // ktok
	else:
		return 0
#=======================================================================
infile=sys.argv[1]
dataframe=pandas.read_csv(infile)
(a,b)= numpy.shape(dataframe)
print (a)
print (b)
feature = dataframe.values[:,0:b-1]
target = dataframe.values[:,b-1]

data=feature
n=b-1
omega=3

storedval=numpy.full((n,n),-2.0) #using the fact that normalized_mutual_info_score lies b/w 0 and 1
for i in range(n):
	print(i)
	var1=data[:,i]
	for j in range(n):
		#this mutual_info_score : we can use any other criteria here
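The inner loop is cut off at this point. A hedged completion, inferred from the -2.0 sentinel in storedval and the comment above (the sklearn import and the exact scoring call are assumptions):

from sklearn.metrics import normalized_mutual_info_score

for i in range(n):
	var1 = data[:, i]
	for j in range(n):
		# this mutual_info_score: any other criterion could be used here
		if storedval[i, j] == -2.0:                 # not computed yet
			score = normalized_mutual_info_score(var1, data[:, j])
			storedval[i, j] = score
			storedval[j, i] = score                 # NMI is symmetric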
    def setLibor(self, libor):
        self.libor = libor / libor.loc[self.referencedate]
        #self.ntimes = np.shape(self.datelist)[0]
        self.ntrajectories = np.shape(self.libor)[1]
        self.ones = np.ones(shape=[self.ntrajectories])
    def _get_masks(self,output, utt_info):
        '''estimate the masks

        Args:
            output: the output of a single utterance of the neural network
                    tensor of dimension [Txfeature_dimension*emb_dim]

        Returns:
            the estimated masks'''

        embeddings = output['bin_emb']
        noise_filter = output['noise_filter']
        #only the non-silence bins will be used for the clustering
        mix_to_mask, _ = self.mix_to_mask_reader(self.pos)

        [T,F] = np.shape(mix_to_mask)
        emb_dim = np.shape(embeddings)[1] // F  # integer embedding dimension
        N = T*F
        if np.shape(embeddings)[0] != T:
            raise ValueError('Number of frames in the embeddings does not match the sequence length')
        if np.shape(noise_filter)[0] != T:
            raise ValueError('Number of frames in the noise filter does not match the sequence length')
        if np.shape(noise_filter)[1] != F:
            raise ValueError('Number of noise filter outputs does not match the number of frequency bins')
        #reshape the outputs
        emb_vec = embeddings[:T,:]
        emb_vec_resh = np.reshape(emb_vec,[T*F,emb_dim])

        X_hat_clean = np.multiply(mix_to_mask,noise_filter[:T,:])
        maxbin = np.max(X_hat_clean)
        floor=maxbin/self.usedbin_threshold

        #apply floor to get the used bins
        usedbins=np.greater(X_hat_clean,floor)
        noise_filter_reshape = np.reshape(noise_filter[:T,:],[T*F,1])


        usedbins_resh = np.reshape(usedbins, T*F)

        #Only keep the active bins (above threshold) for clustering
        output_speech_resh = emb_vec_resh[usedbins_resh] # dim:N' x embdim (N' is number of bins that are used N'<N)
        if np.shape(output_speech_resh)[0] < 2:
            print('insufficient bins with energy')
            return np.zeros([self.nrS,T,F])
        #apply kmeans clustering and assign each bin to a clustering
        kmeans_model=KMeans(n_clusters=self.nrS, init='k-means++', n_init=10, max_iter=100, n_jobs=-1)
        for _ in range(5):
            # k-means occasionally fails with an IndexError for unclear reasons; retry up to 5 times.
            try:
                kmeans_model.fit(output_speech_resh)
            except IndexError:
                continue
            break

        A = kmeans_model.cluster_centers_ # dim: nrS x embdim


        prod_1 = np.matmul(A,emb_vec_resh.T) # dim: nrS x N
        numerator = np.exp(prod_1-np.max(prod_1,axis=0))
        denominator = np.sum(numerator,axis=0)
        M = numerator/denominator
        M_final = np.multiply(M,np.transpose(noise_filter_reshape))

        #reconstruct the masks from the cluster labels
        masks = np.reshape(M_final,[self.nrS,T,F])
        np.save(os.path.join(self.center_store_dir,utt_info['utt_name']),kmeans_model.cluster_centers_)
        return masks
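The soft masks above come from a softmax over dot products between the k-means centers and every bin embedding. A tiny standalone numeric sketch of that step (shapes and values are illustrative):

import numpy as np

nrS, emb_dim, N = 2, 4, 6                        # speakers, embedding dim, T*F bins
A = np.random.randn(nrS, emb_dim)                # cluster centers
emb_vec_resh = np.random.randn(N, emb_dim)       # one embedding per time-frequency bin

prod = np.matmul(A, emb_vec_resh.T)              # nrS x N affinities
numerator = np.exp(prod - np.max(prod, axis=0))  # numerically stable softmax over speakers
M = numerator / np.sum(numerator, axis=0)        # soft masks; columns sum to 1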
Example #55
0
def train_net(model):
	f = open('data.pickle', 'wb')
	'''
		VISUALIZE


	'''
	desiredLayers = [0]
	desiredOutputs = [model.layers[i].output for i in desiredLayers]
	newModel = Model(model.inputs, desiredOutputs)


	normalise = 32
	train_frames = 10000
	t = 0
	dataset = load_dataset()
	
	while t < train_frames:
		t += 1
		# time.sleep(1)
		batch = random.sample(dataset,batch_size)

		inputs = []
		labels = []

		for i in range(batch_size):
			inputs.append(batch[i][0])
			# inputs.append( process_img_cnn(batch[i][0] ))  
			labels.append(batch[i][1])
			

		inputs = np.asarray(inputs)
		# print(dataset[0])
		# plt.imshow(inputs[0])
		# plt.show()
		# print(inputs,file = f)
		pickle.dump(inputs,f)
		# f.close()
		# np.savetxt('text.txt',inputs)
		labels = np.asarray(labels)
		# print(np.shape(inputs[1]))
		# for layer in model.layers:
		# 	print(layer.get_weights())

		# s

		# plt.savefig('Images/'+str(1)+'.png')
		# state = inputs[0]
		# misc.imshow(state)
		# state = np.expand_dims(state, axis=0)
		# arr = newModel.predict(state)
		# print(np.shape(arr))

		# # print(np.shape(arr))
		# # 	# print('count = ',np.count_nonzero(arr))
		# for filter_ in range(arr.shape[3]):
		# 	# Get the 5x5x1 filter:
		# 	extracted_filter = arr[:, :, :, filter_]
		# 	print(np.shape(extracted_filter))
		# 	# Get rid of the last dimension (hence get 5x5):
		# 	extracted_filter = np.squeeze(extracted_filter)

		# # 	# 	# display the filter (might be very small - you can resize the window)
		# 	misc.imshow(extracted_filter)
		# # 	# 	plt.imshow(arr[0,:,:,1])
		# # 	# 	plt.savefig('Images/'+str(1)+'.png')

		print(np.shape(inputs))
		print("Predicted = ",model.predict( inputs, batch_size=batch_size, verbose=0))
		print("Labels = ",labels)
		model.fit(
			inputs,labels, batch_size=batch_size,
			nb_epoch=1, verbose=1
		)

			
			# plt.savefig('Images/'+str(game_state.num_steps)+'.png')
			
		# Save the model every 500 frames.
		if t % 500 == 0:
			model.save_weights('saved-models/' +
							   str(t) + '.h5',
							   overwrite=True)
			print("Saving model %d" % (t))
Example #56
0
    def evaluate(self, X, *args, return_values_of="auto", return_as_dictionary=False, **kwargs):
        '''
        Evaluate the given problem.

        The function values are set as defined in the function.
        The constraint values are meant to be positive if infeasible. A higher positive value means "more" infeasible.
        If they are 0 or negative, they are considered feasible whatever their value is.

        Parameters
        ----------
        X : np.array
            A two dimensional matrix where each row is a point to evaluate and each column a variable.
        return_as_dictionary : bool, default=False
            If this is true, then only one object, a dictionary, is returned. This contains all the results
            that are defined by return_values_of. Otherwise, by default, a tuple as defined is returned.
        return_values_of : list of strings, default='auto'
            You can provide a list of strings which defines the values that are returned. By default it is set to
            "auto" which means depending on the problem the function values or additional the constraint violation (if
            the problem has constraints) are returned. Otherwise, you can provide a list of values to be returned.\n
            Allowed is ["F", "CV", "G", "dF", "dG", "dCV", "feasible"] where the d stands for
            derivative and h stands for hessian matrix.

        Returns
        -------
            A dictionary, if return_as_dictionary enabled, or a list of values as defined in return_values_of.
        '''
        assert self.surrogate_model is not None, 'surrogate model must be set first before evaluation'

        # call the callback of the problem
        if self.callback is not None:
            self.callback(X)

        # make the array at least 2-d - even if only one row should be evaluated
        only_single_value = len(np.shape(X)) == 1
        X = np.atleast_2d(X)

        # check the dimensionality of the problem and the given input
        if X.shape[1] != self.n_var:
            raise Exception('Input dimension %s are not equal to n_var %s!' % (X.shape[1], self.n_var))

        # automatic return the function values and CV if it has constraints if not defined otherwise
        if type(return_values_of) == str and return_values_of == "auto":
            return_values_of = ["F"]
            if self.n_constr > 0:
                return_values_of.append("CV")

        # all values that are set in the evaluation function
        values_not_set = [val for val in return_values_of if val not in self.evaluation_of]

        # have a look if gradients are not set and try to use autograd and calculate grading if implemented using it
        gradients_not_set = [val for val in values_not_set if val.startswith("d")]

        # whether gradient calculation is necessary or not
        calc_gradient = (len(gradients_not_set) > 0)

        # handle hF (hessian) computation, which is not supported by Pymoo
        calc_hessian = (type(return_values_of) == list and 'hF' in return_values_of)

        # set in the dictionary if the output should be calculated - can be used for the gradient
        out = {}
        for val in return_values_of:
            out[val] = None

        # calculate the output array - either elementwise or not. also consider the gradient
        self._evaluate(X, out, *args, calc_gradient=calc_gradient, calc_hessian=calc_hessian, **kwargs)
        at_least2d(out)

        calc_gradient_of = [key for key, val in out.items()
                            if "d" + key in return_values_of and
                            out.get("d" + key) is None and
                            (type(val) == autograd.numpy.numpy_boxes.ArrayBox)]

        if len(calc_gradient_of) > 0:
            deriv = self._calc_gradient(out, calc_gradient_of)
            out = {**out, **deriv}

        # convert back to conventional numpy arrays - no array box as return type
        for key in out.keys():
            if type(out[key]) == autograd.numpy.numpy_boxes.ArrayBox:
                out[key] = out[key]._value

        # if constraint violation should be returned as well
        if self.n_constr == 0:
            CV = np.zeros([X.shape[0], 1])
        else:
            CV = Problem.calc_constraint_violation(out["G"])

        if "CV" in return_values_of:
            out["CV"] = CV

        # if an additional boolean flag for feasibility should be returned
        if "feasible" in return_values_of:
            out["feasible"] = (CV <= 0)

        # if asked for a value but not set in the evaluation set to None
        for val in return_values_of:
            if val not in out:
                out[val] = None

        # remove the first dimension of the output - in case input was a 1d- vector
        if only_single_value:
            for key in out.keys():
                if out[key] is not None:
                    out[key] = out[key][0, :]

        if return_as_dictionary:
            return out
        else:
            # if just a single value do not return a tuple
            if len(return_values_of) == 1:
                return out[return_values_of[0]]
            else:
                return tuple([out[val] for val in return_values_of])
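A hedged usage sketch of the evaluate API above (problem stands for an instance of a concrete subclass with n_var=2, no constraints, and a fitted surrogate model; all of that is assumed here):

import numpy as np

X = np.array([[0.1, 0.2],
              [0.3, 0.4]])

F = problem.evaluate(X)                                  # objective values only
out = problem.evaluate(X,
                       return_values_of=["F", "CV"],
                       return_as_dictionary=True)        # dict with "F" and "CV"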
Example #57
0
def timeIntegration(params):
    """Sets up the parameters for time integration

    :param params: Parameter dictionary of the model
    :type params: dict
    :return: Integrated activity variables of the model
    :rtype: (numpy.ndarray,)
    """

    dt = params["dt"]  # Time step for the Euler intergration (ms)
    duration = params["duration"]  # imulation duration (ms)
    RNGseed = params["seed"]  # seed for RNG

    # ------------------------------------------------------------------------
    # local parameters
    # See Papadopoulos et al., Relations between large-scale brain connectivity and effects of regional stimulation
    # depend on collective dynamical state, arXiv, 2020
    tau_exc = params["tau_exc"]  #
    tau_inh = params["tau_inh"]  #
    c_excexc = params["c_excexc"]  #
    c_excinh = params["c_excinh"]  #
    c_inhexc = params["c_inhexc"]  #
    c_inhinh = params["c_inhinh"]  #
    a_exc = params["a_exc"]  #
    a_inh = params["a_inh"]  #
    mu_exc = params["mu_exc"]  #
    mu_inh = params["mu_inh"]  #

    # external input parameters:
    # Parameter of the Ornstein-Uhlenbeck process for the external input(ms)
    tau_ou = params["tau_ou"]
    # Parameter of the Ornstein-Uhlenbeck (OU) process for the external input ( mV/ms/sqrt(ms) )
    sigma_ou = params["sigma_ou"]
    # Mean external excitatory input (OU process) (mV/ms)
    exc_ou_mean = params["exc_ou_mean"]
    # Mean external inhibitory input (OU process) (mV/ms)
    inh_ou_mean = params["inh_ou_mean"]

    # ------------------------------------------------------------------------
    # global coupling parameters

    # Connectivity matrix
    # Interareal relative coupling strengths (values between 0 and 1), Cmat(i,j) connection from jth to ith
    Cmat = params["Cmat"]
    N = len(Cmat)  # Number of nodes
    K_gl = params["K_gl"]  # global coupling strength
    # Interareal connection delay
    lengthMat = params["lengthMat"]
    signalV = params["signalV"]

    if N == 1:
        Dmat = np.zeros((N, N))
    else:
        # Interareal connection delays, Dmat(i,j) connection from jth node to ith (ms)
        Dmat = dp.computeDelayMatrix(lengthMat, signalV)
        Dmat[np.eye(len(Dmat)) == 1] = np.zeros(len(Dmat))
    Dmat_ndt = np.around(Dmat / dt).astype(
        int)  # delay matrix in multiples of dt
    params["Dmat_ndt"] = Dmat_ndt
    # ------------------------------------------------------------------------
    # Initialization
    # Floating point issue in np.arange() workaround: use integers in np.arange()
    t = np.arange(1, round(duration, 6) / dt + 1) * dt  # Time variable (ms)

    sqrt_dt = np.sqrt(dt)

    max_global_delay = np.max(Dmat_ndt)
    startind = int(max_global_delay + 1)  # timestep to start integration at

    exc_ou = params["exc_ou"]
    inh_ou = params["inh_ou"]

    exc_ext = params["exc_ext"]
    inh_ext = params["inh_ext"]

    # state variable arrays, have length of t + startind
    # they store initial conditions AND simulated data
    excs = np.zeros((N, startind + len(t)))
    inhs = np.zeros((N, startind + len(t)))

    # ------------------------------------------------------------------------
    # Set initial values
    # if initial values are just a Nx1 array
    if np.shape(params["exc_init"])[1] == 1:
        exc_init = np.dot(params["exc_init"], np.ones((1, startind)))
        inh_init = np.dot(params["inh_init"], np.ones((1, startind)))
    # if initial values are a Nxt array
    else:
        exc_init = params["exc_init"][:, -startind:]
        inh_init = params["inh_init"][:, -startind:]

    # xsd = np.zeros((N,N))  # delayed activity
    exc_input_d = np.zeros(N)  # delayed input to x
    inh_input_d = np.zeros(N)  # delayed input to y

    if RNGseed:
        np.random.seed(RNGseed)

    # Save the noise in the activity array to save memory
    excs[:, startind:] = np.random.standard_normal((N, len(t)))
    inhs[:, startind:] = np.random.standard_normal((N, len(t)))

    excs[:, :startind] = exc_init
    inhs[:, :startind] = inh_init

    noise_exc = np.zeros((N, ))
    noise_inh = np.zeros((N, ))

    # ------------------------------------------------------------------------

    return timeIntegration_njit_elementwise(
        startind,
        t,
        dt,
        sqrt_dt,
        N,
        Cmat,
        K_gl,
        Dmat_ndt,
        excs,
        inhs,
        exc_input_d,
        inh_input_d,
        exc_ext,
        inh_ext,
        tau_exc,
        tau_inh,
        a_exc,
        a_inh,
        mu_exc,
        mu_inh,
        c_excexc,
        c_excinh,
        c_inhexc,
        c_inhinh,
        noise_exc,
        noise_inh,
        exc_ou,
        inh_ou,
        exc_ou_mean,
        inh_ou_mean,
        tau_ou,
        sigma_ou,
    )
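The delay handling above converts the interareal delay matrix from milliseconds into integer multiples of the integration step. A small standalone sketch of that conversion (values are illustrative):

import numpy as np

dt = 0.1                                      # integration step (ms)
Dmat = np.array([[0.0, 2.34],
                 [1.87, 0.0]])                # delays in ms

Dmat_ndt = np.around(Dmat / dt).astype(int)   # delays in multiples of dt
max_global_delay = np.max(Dmat_ndt)
startind = int(max_global_delay + 1)          # first index at which integration can start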
Example #58
0
def visualize_2D_AE(model,
                    training_df,
                    validation_df,
                    example_data,
                    num_examples,
                    batch_size,
                    num_gpus,
                    dims,
                    iter_,
                    n_cols=4,
                    std_to_plot=2.5,
                    summary_density=50,
                    save_loc=False,
                    n_samps_per_dim=8):
    """
    Visualization of AE as it trains in 2D space
    """
    # choose a color palette
    current_palette = sns.color_palette()
    # summarize training
    bins = [0] + np.unique(
        np.logspace(0,
                    np.log2(np.max(training_df.batch + [100])),
                    num=summary_density,
                    base=2).astype('int'))
    training_df['batch_bin'] = pd.cut(training_df.batch + 1,
                                      bins,
                                      labels=bins[:-1])
    training_summary = training_df.groupby(['batch_bin']).describe()
    validation_df['batch_bin'] = pd.cut(validation_df.batch + 1,
                                        bins,
                                        labels=bins[:-1])
    validation_summary = validation_df.groupby(['batch_bin']).describe()
    validation_df[:3]

    # get reconstructions of example data
    example_recon, z = model.sess.run((model.x_tilde, model.z_x),
                                      {model.x_input: example_data})
    # get Z representations of data
    z = np.array(
        generate_manifold(model, dims, iter_, num_examples, batch_size,
                          num_gpus))

    if np.shape(z)[1] == 2:
        # generate volumetric data
        x_bounds = [
            -inv_z_score(std_to_plot, z[:, 0]),
            inv_z_score(std_to_plot, z[:, 0])
        ]
        y_bounds = [
            -inv_z_score(std_to_plot, z[:, 1]),
            inv_z_score(std_to_plot, z[:, 1])
        ]
        maxx, maxy, hx, hy, pts = make_grid(x_bounds,
                                            y_bounds,
                                            maxx=int(n_samps_per_dim),
                                            maxy=int(n_samps_per_dim))

        dets = metric_and_volume(model, maxx, maxy, hx, hy, pts, dims,
                                 batch_size)

    fig = plt.figure(figsize=(10, 10))
    outer = gridspec.GridSpec(2, 2, wspace=0.2, hspace=0.2)

    scatter_ax = plt.Subplot(fig, outer[0])
    scatter_ax.scatter(z_score(z[:, 0]),
                       z_score(z[:, 1]),
                       alpha=.1,
                       s=3,
                       color='k')
    scatter_ax.axis('off')
    scatter_ax.set_xlim([-std_to_plot, std_to_plot])
    scatter_ax.set_ylim([-std_to_plot, std_to_plot])
    fig.add_subplot(scatter_ax)

    if np.shape(z)[1] == 2:
        volume_ax = plt.Subplot(fig, outer[1])
        volume_ax.axis('off')
        volume_ax.matshow(np.log2(dets), cmap=plt.cm.viridis)
        fig.add_subplot(volume_ax)

    recon_ax = gridspec.GridSpecFromSubplotSpec(int(n_cols),
                                                int(n_cols / 2),
                                                subplot_spec=outer[2],
                                                wspace=0.1,
                                                hspace=0.1)

    for axi in range(int(n_cols) * int(n_cols / 2)):
        recon_sub_ax = gridspec.GridSpecFromSubplotSpec(
            1, 2, subplot_spec=recon_ax[axi], wspace=0.1, hspace=0.1)
        orig_ax = plt.Subplot(fig, recon_sub_ax[0])
        orig_ax.matshow(np.squeeze(example_data[axi].reshape(dims)),
                        origin='lower')
        orig_ax.axis('off')
        rec_ax = plt.Subplot(fig, recon_sub_ax[1])
        rec_ax.matshow(np.squeeze(example_recon[axi].reshape(dims)),
                       origin='lower')
        rec_ax.axis('off')
        fig.add_subplot(orig_ax)
        fig.add_subplot(rec_ax)

    error_ax = plt.Subplot(fig, outer[3])
    #error_ax.plot(training_df.batch, training_df.recon_loss)
    training_plt, = error_ax.plot(
        training_summary.recon_loss['mean'].index.astype('int').values,
        training_summary.recon_loss['mean'].values,
        alpha=1,
        color=current_palette[0],
        label='training')

    error_ax.fill_between(
        training_summary.recon_loss['mean'].index.astype('int').values,
        training_summary.recon_loss['mean'].values -
        training_summary.recon_loss['std'].values,
        training_summary.recon_loss['mean'].values +
        training_summary.recon_loss['std'].values,
        alpha=.25,
        color=current_palette[0])

    error_ax.fill_between(
        validation_summary.recon_loss['mean'].index.astype('int').values,
        validation_summary.recon_loss['mean'].values -
        validation_summary.recon_loss['std'].values,
        validation_summary.recon_loss['mean'].values +
        validation_summary.recon_loss['std'].values,
        alpha=.25,
        color=current_palette[1])
    validation_plt, = error_ax.plot(
        validation_summary.recon_loss['mean'].index.astype('int').values,
        validation_summary.recon_loss['mean'].values,
        alpha=1,
        color=current_palette[1],
        label='validation')

    error_ax.legend(handles=[validation_plt, training_plt], loc=1)
    error_ax.set_yscale("log")
    error_ax.set_xscale("log")
    fig.add_subplot(error_ax)
    if save_loc != False:
        if not os.path.exists('/'.join(save_loc.split('/')[:-1])):
            os.makedirs('/'.join(save_loc.split('/')[:-1]))
        plt.savefig(save_loc)
    plt.show()
Example #59
0
def _array_like(x, x0):
    """Return ndarray `x` as same array subclass and shape as `x0`"""
    x = np.reshape(x, np.shape(x0))
    wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
    return wrap(x)
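A quick usage sketch of the helper above (plain numpy; the arrays are illustrative):

import numpy as np

x0 = np.zeros((2, 3))              # reference array whose shape and subclass are restored
flat = np.arange(6.0)              # e.g. a flattened optimizer result
restored = _array_like(flat, x0)   # shape (2, 3), same array subclass as x0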
Example #60
0
def train_model():  # enclosing def reconstructed; train_model() is called below but its header was cut off
   # Create the model
   model = Sequential()

   # INPUT LAYER
   model.add(Dropout(0.0, input_shape=(inp_dim,)))
   model.add(Dense(inp_dim,activation='selu'))

   # HIDDEN 1
   model.add(Dense(encoding_dim,  activation='selu'))

   # HIDDEN 2
   model.add(Dense(256,  activation='selu'))


   # HIDDEN 3
   model.add(Dense(encoding_dim,  activation='selu'))


   model.add(Dense(out_dim,  activation='selu'))

   # Compile the model
   sgd = SGD(lr=0.1, momentum=0.2, decay=1e-6, nesterov=False)
   model.compile(optimizer=sgd, loss='mse')
   model.summary()
   model.fit(train_input,train_output,epochs=30, batch_size=32, shuffle=True,callbacks=[LoggingCallback(logging.info)])

print("input shape is ------------------------------------------------------------------------------>",numpy.shape(train_input))
train_model()
valid_model()