def evaluate_kl_divergence_wd_graph_kernel_density(self, Kii, Kjj, Kij):
     #
     # evaluate KL divergence between two graph distributions using graph kernel density
     #
     # Kii is graph kernel matrix on graph samples in distribution i
     # Kjj is graph kernel matrix on graph samples in distribution j
     # Kij is a matrix of graph kernels between samples of distribution i and samples of distribution j
     #
     assert Kii.shape[0] == Kii.shape[1] and len(Kii.shape) == 2
     assert Kjj.shape[0] == Kjj.shape[1] and len(Kjj.shape) == 2
     assert len(Kij.shape) == 2 and Kii.shape[0] == Kij.shape[0] and Kjj.shape[0] == Kij.shape[1]
     #
     start_time = time.time()
     const_epsilon = 1e-30
     kl_ii_jj = np.log(np.divide(Kii.mean(1)+const_epsilon, Kij.mean(1)+const_epsilon)).mean()
     kl_jj_ii = np.log(np.divide(Kjj.mean(1)+const_epsilon, Kij.transpose().mean(1)+const_epsilon)).mean()
     kl = kl_ii_jj + kl_jj_ii
     if kl < 0:
         if -self.div_tol < kl < 0:
             kl = 0
         else:
             print('Kii.mean(1)+const_epsilon', Kii.mean(1)+const_epsilon)
             print('Kjj.mean(1)+const_epsilon', Kjj.mean(1)+const_epsilon)
             print('Kij.mean(1)+const_epsilon', Kij.mean(1)+const_epsilon)
             print('Kij.transpose().mean(1)+const_epsilon', Kij.transpose().mean(1)+const_epsilon)
             print('kl_ii_jj', kl_ii_jj)
             print('kl_jj_ii', kl_jj_ii)
             print('kl', kl)
             raise AssertionError
     if config.coarse_debug:
         print('time to compute kl divergence with kernel density estimation was {}'.format(time.time()-start_time))
     return kl
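# Usage sketch (not part of the original snippet): the same symmetrized kernel-density
# KL estimate as a standalone function, exercised with a hypothetical RBF kernel on
# 1-D samples standing in for a graph kernel.
import numpy as np

def kde_kl_symmetric(Kii, Kjj, Kij, eps=1e-30):
    """Symmetrized KL estimate from kernel matrices, mirroring the method above."""
    kl_i_j = np.log((Kii.mean(1) + eps) / (Kij.mean(1) + eps)).mean()
    kl_j_i = np.log((Kjj.mean(1) + eps) / (Kij.T.mean(1) + eps)).mean()
    return kl_i_j + kl_j_i

rng = np.random.default_rng(0)
xi, xj = rng.normal(0.0, 1.0, 50), rng.normal(1.0, 1.0, 60)
rbf = lambda a, b: np.exp(-0.5 * (a[:, None] - b[None, :])**2)
print(kde_kl_symmetric(rbf(xi, xi), rbf(xj, xj), rbf(xi, xj)))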
Example #2
def trainer(model, data, epochs, validate_period, model_path, prob_lm=0.1, runid=''):
    def valid_loss():
        result = dict(lm=[], visual=[])
        for item in data.iter_valid_batches():
            result['lm'].append(model.lm.loss_test(*model.lm.args(item)))
            result['visual'].append(model.visual.loss_test(*model.visual.args(item)))
        return result
    costs = Counter(dict(cost_v=0.0, N_v=0.0, cost_t=0.0, N_t=0.0))
    print "LM: {} parameters".format(count_params(model.lm.params()))
    print "Vi: {} parameters".format(count_params(model.visual.params()))
    for epoch in range(1,epochs+1):
        for _j, item in enumerate(data.iter_train_batches()):
            j = _j +1
            if random.random() <= prob_lm:
                cost_t = model.lm.train(*model.lm.args(item))
                costs += Counter(dict(cost_t=cost_t, N_t=1))
            else:
                cost_v = model.visual.train(*model.visual.args(item))
                costs += Counter(dict(cost_v=cost_v, N_v=1))
            print(epoch, j, j*data.batch_size, "train",
                  numpy.divide(costs['cost_v'], costs['N_v']),
                  numpy.divide(costs['cost_t'], costs['N_t']))
            if j % validate_period == 0:
                result = valid_loss()
                print(epoch, j, 0, "valid",
                      numpy.mean(result['visual']),
                      numpy.mean(result['lm']))
                sys.stdout.flush()
        model.save(path='model.r{}.e{}.zip'.format(runid, epoch))
    model.save(path='model.zip')
Example #3
def run_sim(R_star, transit_duration, bodies):
    """Run 3-body sim and convert results to TTV + TDV values in [minutes]"""

    # Run 3-body sim for one full orbit of the outermost moon
    loop(bodies, orbit_duration)
    

    # Move resulting data from lists to numpy arrays
    ttv_array = numpy.array(ttv_list)
    tdv_array = numpy.array(tdv_list)

    # Zeropoint correction
    middle_point =  numpy.amin(ttv_array) + numpy.amax(ttv_array)
    ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
    ttv_array = numpy.divide(ttv_array, 1000)  # km/s

    # Compensate for barycenter offset of planet at start of simulation:
    planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
    stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
    ttv_array = numpy.divide(ttv_array, stretch_factor)

    # Convert to time units, TTV
    ttv_array = numpy.divide(ttv_array, R_star)
    ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24)  # minutes

    # Convert to time units, TDV
    oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60  # m/sec
    newspeed = oldspeed - numpy.amax(tdv_array)
    difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
    conversion_factor = difference / numpy.amax(tdv_array)
    tdv_array = numpy.multiply(tdv_array, conversion_factor)

    return ttv_array, tdv_array
Example #4
def multistate_distribution(data, parameters, limit, 
                            normalize_likelihood_level_cell_counts = True):

    data_grandpa, data_parent, data_children = data
    sigma, b, a_grandpa, a_parent, a_children = parameters
    
    normalization_factor = normalize(sigma, a_grandpa, b, limit)
    grandpa_dist = [steady_state_distribution(x, sigma, a_grandpa, b, normalization_factor) for x in data_grandpa]
    
    normalization_factor = normalize(sigma, a_parent, b, limit)
    parent_dist = [steady_state_distribution(x, sigma, a_parent, b, normalization_factor) for x in data_parent]
    
    normalization_factor = normalize(sigma, a_children, b, limit)
    children_dist = [steady_state_distribution(x, sigma, a_children, b, normalization_factor) for x in data_children]
    
    grandpa_dist = np.array(grandpa_dist, dtype = float)
    parent_dist = np.array(parent_dist, dtype = float)
    children_dist = np.array(children_dist, dtype = float)
    
    if normalize_likelihood_level_cell_counts:
        grandpa_dist = np.divide(grandpa_dist, float(data_grandpa.size))
        parent_dist = np.divide(parent_dist, float(data_parent.size))
        children_dist = np.divide(children_dist, float(data_children.size))
        
    return grandpa_dist, parent_dist, children_dist
def HS(im1, im2, alpha, ite,):

	#set up initial velocities
	uInitial = np.zeros([im1.shape[0],im1.shape[1]])
	vInitial = np.zeros([im1.shape[0],im1.shape[1]])

	# Set initial value for the flow vectors
	u = uInitial
	v = vInitial

	# Estimate derivatives
	[fx, fy, ft] = computeDerivatives(im1, im2)

	# Averaging kernel (float entries so the weights are not truncated by integer division)
	kernel = np.array([[1/12., 1/6., 1/12.], [1/6., 0., 1/6.], [1/12., 1/6., 1/12.]])

	print(fx[100, 100], fy[100, 100], ft[100, 100])

	# Iteration to reduce error
	for i in range(ite):
		# Compute local averages of the flow vectors
		uAvg = cv2.filter2D(u,-1,kernel)
		vAvg = cv2.filter2D(v,-1,kernel)

		# Horn-Schunck update: elementwise products of the derivative images
		uNumer = fx * (fx*uAvg + fy*vAvg + ft)
		uDenom = alpha + fx**2 + fy**2
		u = uAvg - np.divide(uNumer, uDenom)

		# print np.linalg.norm(u)

		vNumer = fy * (fx*uAvg + fy*vAvg + ft)
		vDenom = alpha + fx**2 + fy**2
		v = vAvg - np.divide(vNumer, vDenom)
	return (u,v)
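# computeDerivatives is referenced above but not defined in this snippet. A minimal
# stand-in (an assumption, not the original helper) using the usual 2x2 Horn-Schunck
# finite-difference kernels:
import numpy as np
import cv2

def computeDerivatives(im1, im2):
    # Spatial (fx, fy) and temporal (ft) derivatives averaged over both frames
    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)
    kx = 0.25 * np.array([[-1., 1.], [-1., 1.]])
    ky = 0.25 * np.array([[-1., -1.], [1., 1.]])
    kt = 0.25 * np.ones((2, 2))
    fx = cv2.filter2D(im1, -1, kx) + cv2.filter2D(im2, -1, kx)
    fy = cv2.filter2D(im1, -1, ky) + cv2.filter2D(im2, -1, ky)
    ft = cv2.filter2D(im2, -1, kt) - cv2.filter2D(im1, -1, kt)
    return fx, fy, ft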
Example #6
def Likelihood(MeanVec,VarVec,wVec,X):

    LLH = 0
    for a in range(X.shape[0]):
        summ = 0
        for i in range(NoM):
            power = np.square(np.subtract(X[a],MeanVec[i]))
##            print 'poweris \n',power
##            print 'v o\n',VarVec[i]
            denp = np.multiply(-2,VarVec[i])
##            print denp
            power = np.divide(power,denp)
##            print power
            power = np.sum(power)
##            print 'power is \n',power

            power = math.exp(power)
##            print power
            prodVarVec = np.prod(VarVec[i])
            den = 1/(2*math.pi)**(NoM/2)*np.sqrt(prodVarVec)
##            print den
            sigma = wVec[i]*np.divide(power,den)
##            print gamma
            summ = summ + sigma
##            print gamma[i][a]
        LLH = LLH + math.log(summ)
        
    return LLH
def ratio_err(top,bottom,top_low,top_high,bottom_low,bottom_high):
    #uses simple propagation of errors (partial derivatives)
    #note it returns errorbars, not interval

    #-make sure input is numpy arrays-
    top = np.array(top)
    top_low = np.array(top_low)
    top_high = np.array(top_high)
    bottom = np.array(bottom)
    bottom_low = np.array(bottom_low)
    bottom_high = np.array(bottom_high)

    #-calculate errorbars-
    top_errlow = np.subtract(top,top_low)
    top_errhigh = np.subtract(top_high,top)
    bottom_errlow = np.subtract(bottom,bottom_low)
    bottom_errhigh = np.subtract(bottom_high,bottom)

    #-calculate ratio_low-
    ratio_low  = np.sqrt( np.square(np.divide(top_errlow,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errlow)) )
    #-calculate ratio_high-
    ratio_high = np.sqrt( np.square(np.divide(top_errhigh,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errhigh)) )
#    ratio_high = ((top_errhigh/bottom)**2.0 + (top/(bottom**2.0))*bottom_errhigh)**2.0)**0.5

    # return two vectors, err_low and err_high
    return ratio_low,ratio_high
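# Usage sketch with hypothetical measurements and asymmetric confidence bounds
# (not part of the original snippet):
import numpy as np

top = [10.0, 20.0]
top_low, top_high = [9.0, 18.5], [11.5, 21.0]
bottom = [5.0, 4.0]
bottom_low, bottom_high = [4.5, 3.8], [5.5, 4.3]

err_low, err_high = ratio_err(top, bottom, top_low, top_high, bottom_low, bottom_high)
print(err_low, err_high)  # error bars on top/bottom, not interval endpoints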
Example #8
    def normalize(self, mode='integral'):
        """
        Normalize the filter kernel.

        Parameters
        ----------
        mode : {'integral', 'peak'}
            One of the following modes:
                * 'integral' (default)
                    Kernel is normalized such that its integral = 1.
                * 'peak'
                    Kernel is normalized such that its peak = 1.
        """

        if mode == 'integral':
            normalization = self._array.sum()
        elif mode == 'peak':
            normalization = self._array.max()
        else:
            raise ValueError("invalid mode, must be 'integral' or 'peak'")

        # Warn the user for kernels that sum to zero
        if normalization == 0:
            warnings.warn('The kernel cannot be normalized because it '
                          'sums to zero.', AstropyUserWarning)
        else:
            np.divide(self._array, normalization, self._array)

        self._kernel_sum = self._array.sum()
Example #9
def CalcGamma(MeanVec,VarVec,wVec,X):

    gamma = np.zeros((NoM, X.shape[0]))  # responsibilities, one row per mixture component
    for a in range(X.shape[0]):
        summ = 0
        for i in range(NoM):
            power = np.square(np.subtract(X[a],MeanVec[i]))
##            print 'poweris \n',power
##            print 'v o\n',VarVec[i]
            denp = np.multiply(-2,VarVec[i])
##            print denp
            power = np.divide(power,denp)
##            print power
            power = np.sum(power)
##            print 'power is \n',power

            power = math.exp(power)
##            print power
            prodVarVec = np.prod(VarVec[i])
            den = 1/(2*math.pi)**(NoM/2)*np.sqrt(prodVarVec)
##            print den
            gamma[i][a] = wVec[i]*np.divide(power,den)
##            print gamma
            summ = summ + gamma[i][a]
##            print gamma[i][a]
        for i in range(NoM):
            gamma[i][a] = gamma[i][a]/summ
        
    return gamma
Example #10
    def sampleNextInternal_bak(self, variables):

        y_tilde = self.samplerEngine.getVariable('nrl').varYtilde
        beta = (y_tilde * y_tilde).sum(0)/2
        gammaSamples = np.random.gamma((self.ny - 1.)/2, 1, self.nbVox)

        np.divide(beta, gammaSamples, self.currentValue)
def  visResults (m, result_dir, varying_para_values = r_values, xlabel= 'Radius Scale Factor', basenum = 0):

        # plot the raw results
        plotResults(m,result_dir, varying_para_values,xlabel, 'Fitted')

        # plot the normalized results
        norm_m = np.zeros(m.shape)
        for i in range(len(varying_para_values)):
            norm_m[:,:,i] = np.divide(m[:,:,i], m[:,:,basenum]) # r=1.0 is the original model fitting results
        plotResults(norm_m,result_dir,varying_para_values,xlabel,'Normalized')

        for i in range(len(varying_para_values)):
            norm_m[:,:,i] = 100 * np.divide(m[:,:,i]- m[:,:,basenum], m[:,:,basenum]) # r=1.0 is the original model fitting results
        plotResults(norm_m,result_dir,varying_para_values,xlabel,'Difference')



        CmTotal = np.zeros((sample_size, len(varying_para_values)))
        for i in range(len(varying_para_values)):
            CmTotal[:,i] = m[:,4,i] * (m[:,1,i]+ m[:,2,i])  # Cm * (A1+A2) p=0.0 is the original model fitting results
        df_CmTotal = pd.DataFrame(CmTotal, columns = varying_para_values)
        my_box_plot(df_CmTotal, result_dir+ "/total_capacitance.png", xlabel,'Total Capacitance')



        RmUnit = np.zeros((sample_size, len(varying_para_values)))
        for i in range(len(varying_para_values)):
            RmUnit[:,i] = m[:,5,i] / (m[:,1,i]+ m[:,2,i])  # Rm / (A1+A2)
        df_RmUnit = pd.DataFrame(RmUnit, columns = varying_para_values)
        my_box_plot(df_RmUnit, result_dir+ "/total_Rm.png", xlabel,'Unit Membrane Resistance')
Example #12
def basicconn(skf,X,y):
    total_score = 0
    for train_index, test_index in skf:
        #print("TRAIN:", train_index, "TEST:", test_index)
        # Feature selection
        #selectf = SelectFpr().fit(X[train_index],y[train_index])
        #selectf = SelectKBest(f_classif, k=750).fit(X[train_index],y[train_index])
        #tmp_x = selectf.transform(X[train_index])
        # Train
        #clf = RandomForestClassifier(n_estimators=20)
        #clf = clf.fit(tmp_x, y[train_index])
        #clf.feature_importances_
        # SVM
        #clf = svm.LinearSVC()
        #clf = svm.SVC()
        #clf.fit(tmp_x, y[train_index])
        clf = plib.classif(X[train_index], y[train_index])
        #clf.support_vec()
        # Test
        #pred = clf.predict(selectf.transform(X[test_index]))
        pred = clf.predict(X[test_index])
        print "Target     : ", y[test_index]
        print "Prediction : ", pred
        matchs = np.equal(pred, y[test_index])
        score = np.divide(np.sum(matchs), np.float64(matchs.size))
        total_score = score + total_score
    return np.divide(total_score, skf.n_folds)
 def sw_sums(a, b):
     abw = apply_scale(w, a, b)
     np.divide(abw, 1 + abw, out = abw)
     abw[np.isnan(abw)] = 1
     swr = abw.sum(1, keepdims = True)
     swc = abw.sum(0, keepdims = True)
     return swr, swc
Example #14
 def minimum_pension(self, trim_wages_reg, trim_wages_all, pension_reg, pension_all):
     ''' MICO (minimum contributif) of the general scheme: a differential allowance.
     Note: the ASPA and the minimum vieillesse are handled by OF.
     It is granted regardless of any income the retiree has on top of their pensions: rent, capital income, professional activity...
     + apportioning mechanism when contributions were paid into several schemes
     TODO: code all the changes and twists of 2004/2008'''
     P = reduce(getattr, self.param_name.split('.'), self.P)
     # pension_RG, pension, trim_RG, trim_cot, trim
     trimesters = trim_wages_reg['trimesters']
     trim_regime = trimesters['regime'].sum() + sum(trim_wages_reg['maj'].values())
     coeff = minimum(1, divide(trim_regime, P.prorat.n_trim))
     if P.mico.dispositif == 0:
         # Before 1 January 1983, compared with the AVTS
         min_pension = self.P.common.avts
         return maximum(min_pension - pension_reg,0)*coeff
     elif P.mico.dispositif == 1:
         # TODO: see how to handle the fairly complex cumulation limit (COR Doc No. 5)
         mico = P.mico.entier
         return maximum(mico - pension_reg,0)*coeff
     elif P.mico.dispositif == 2:
         # From 1 January 2004 onwards, contribution periods are taken into account (+ 2004 transitional mechanism)
         nb_trim = P.prorat.n_trim
         trim_regime = trimesters['regime'].sum() #+ sum(trim_wages_regime['maj'].values())
         trim_cot_regime = sum(trimesters[key].sum() for key in trimesters.keys() if 'cot' in key)
         mico_entier = P.mico.entier*minimum(divide(trim_regime, nb_trim), 1)
         maj = (P.mico.entier_maj - P.mico.entier)*divide(trim_cot_regime, nb_trim)
         mico = mico_entier + maj*(trim_cot_regime >= P.mico.trim_min)
         return (mico - pension_reg)*(mico > pension_reg)*(pension_reg>0)
Example #15
    def __EM(self):
        old_log_like = -np.inf
        threshold = 1e-15
        probability = 0
        while True:
            # E step
            probability = self.__probability()
            expectation = np.multiply(probability, self.prior)
            expectation = np.divide(expectation, expectation.sum(axis=1))

            # M step: update parameters
            sumk = expectation.sum(axis=0)
            self.prior = sumk / self.x.shape[0]
            self.mean = np.diag(np.array(np.divide(1, sumk)).flatten()) * \
                        expectation.T * self.x
            for i in range(self.k):
                x_shift = self.x - self.mean[i, :]
                self.sigma[:, :, i] = x_shift.T * \
                    np.diag(np.array(expectation[:, i]).flatten()) * x_shift /\
                    sumk[0, i]

            new_log_like = np.log(probability * self.prior.T).sum()
            if np.abs(new_log_like - old_log_like) < threshold:
                break
            old_log_like = new_log_like
        return probability
def plot_a_func_time(aes, times, which_are_final_bodies=None,year_unit='kyr',title=None):
    """This function takes a list of each individual body's semimajor axes,
    which itself can be stored as a list, as well as a list of lists of the 
    times corresponding to semimajor axes passed with the first argument
    and then plots semi-major axis as a function of time for the objects 
    passed to me.
    which_are_final_bodies: pass the index of the final bodies if you want 
    those lines plotted thicker
    """  
    year_unit_dict  = {"Myr":1.e6,"kyr":1.e3}

    fig = pp.figure()

    for i in range(len(aes)):
        pp.plot(np.divide(times[i],year_unit_dict[year_unit]),aes[i],color='blue',linewidth=0.5)

    if which_are_final_bodies is not None:
        for i in range(len(which_are_final_bodies)):
            #print "   final body plotting as red: " + str(i)
            pp.plot(np.divide(times[which_are_final_bodies[i]],year_unit_dict[year_unit]),aes[which_are_final_bodies[i]],color='red')#,color='blue',linewidth=1.5)

    pp.xscale(u'log')
    
    if title is not None:
        pp.title(title)

    pp.xlabel("Time ("+year_unit+")")
    pp.ylabel("Semimajor axis (AU)")


    return fig
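# Usage sketch (hypothetical toy data; assumes pp is matplotlib.pyplot and np is numpy,
# matching the names used above):
import numpy as np
import matplotlib.pyplot as pp

aes = [[1.0, 1.02, 1.05], [2.0, 1.98, 1.97]]            # semimajor axes per body (AU)
times = [[1.0e3, 5.0e3, 1.0e4], [1.0e3, 5.0e3, 1.0e4]]  # matching times (yr)

fig = plot_a_func_time(aes, times, which_are_final_bodies=[1],
                       year_unit='kyr', title='toy accretion run')
fig.savefig('a_vs_t.png')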
def _generate(l, k, g, beta, M, e, A, mu, intercept):

    p = beta.shape[0]

    if intercept:
        gradL1 = grad_l1(beta[1:, :])
        gradL2 = grad_l2_squared(beta[1:, :])
        gradGLmu = grad_glmu(beta[1:, :], A, mu)
    else:
        gradL1 = grad_l1(beta)
        gradL2 = grad_l2_squared(beta)
        gradGLmu = grad_glmu(beta, A, mu)

    alpha = -(l * gradL1 + k * gradL2 + g * gradGLmu)
    Mte = np.dot(M.T, e)
    if intercept:
        alpha = np.divide(alpha, Mte[1:, :])
    else:
        alpha = np.divide(alpha, Mte)

    X = np.ones(M.shape)
    if intercept:
        for i in range(p - 1):
            X[:, i + 1] = M[:, i + 1] * alpha[i, 0]
    else:
        for i in range(p):
            X[:, i] = M[:, i] * alpha[i, 0]

    y = np.dot(X, beta) - e

    return X, y
def salespersonSummary(raw_data):
    aggFuncs = {'Invoice':len_unique, 'Dollars':np.sum, 
                'OffDayDeliveries':np.max}
    grpCols = ['Warehouse','Date','Weekday','DeliveryDays','OffWeek','SalespersonId','Salesperson','CustomerId','Customer']
    slspplOffdayByDate = pd.DataFrame(raw_data.groupby(grpCols).agg(aggFuncs)).reset_index(drop=False)
    slspplOffdayByDate['Deliveries'] = 1
    
    aggFuncs = {'OffDayDeliveries':{'TotalDelivered':np.sum},
                'Deliveries':{'TotalDelivered':len},
                'Dollars':{'AvgPerDelivery':np.mean, 'TotalDelivered':np.sum},
                'Invoice':{'AvgPerDelivery':np.mean, 'TotalDelivered':np.sum}}
    grpCols = ['Warehouse','SalespersonId','Salesperson']
    slspplOffday = pd.DataFrame(slspplOffdayByDate.groupby(grpCols).agg(aggFuncs)).reset_index(drop=False)
    slspplOffday.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in slspplOffday.columns]
    slspplOffday['Deliveries|PercentOffday'] = np.divide(slspplOffday['OffDayDeliveries|TotalDelivered'], slspplOffday['Deliveries|TotalDelivered'])
    
    aggFuncs = {'Dollars':{'AvgPerOffdayDelivery':np.mean, 'TotalOffdayDelivered':np.sum},
                'Invoice':{'AvgPerOffdayDelivery':np.mean, 'TotalOffdayDelivered':np.sum}}
    slspplOffdayOnly = pd.DataFrame(slspplOffdayByDate[slspplOffdayByDate.OffDayDeliveries==1].groupby('SalespersonId').agg(aggFuncs)).reset_index(drop=False)
    slspplOffdayOnly.columns = ['%s%s' % (a, '|%s' % b if b else '') for a, b in slspplOffdayOnly.columns]
    
    slspplSummary = slspplOffday.merge(slspplOffdayOnly, on='SalespersonId', how='outer')
    slspplSummary['Dollars|PercentOffday'] = np.divide(slspplSummary['Dollars|TotalOffdayDelivered'], slspplSummary['Dollars|TotalDelivered'])
    slspplSummary['Invoice|PercentOffday'] = np.divide(slspplSummary['Invoice|TotalOffdayDelivered'], slspplSummary['Invoice|TotalDelivered'])
    
    orderedCols = ['Warehouse','SalespersonId','Salesperson',
                   'OffDayDeliveries|TotalDelivered','Deliveries|TotalDelivered',
                   'Deliveries|PercentOffday','Dollars|PercentOffday','Invoice|PercentOffday',
                   'Dollars|AvgPerDelivery','Dollars|TotalDelivered',
                   'Invoice|TotalDelivered','Invoice|AvgPerDelivery','Dollars|TotalDelivered']
    slspplSummary = slspplSummary[orderedCols]
    slspplSummary.sort_values('Deliveries|PercentOffday', ascending=False, inplace=True)
    slspplSummary.reset_index(drop=True, inplace=True)

    return slspplSummary
Example #19
def eventsPerInterval(times,start_date,end_date,interval='day'):
    daycount = (end_date-start_date).days
    timeaxis = []
    if interval=='day':
        timebins = np.zeros( daycount )
        for n in range( daycount ):
            timeaxis.append( start_date + timedelta(n) )
        for t in times:
            timebins[ (t - start_date).days ] += 1
            
    elif interval=='hour':
        timebins = np.zeros( 24*daycount )
        for n in range( 24*daycount ):
            timeaxis.append( start_date + n*timedelta(0,3600) )
        for t in times:
            timebins[ int(np.floor(np.divide((t - start_date).total_seconds(),3600))) ] += 1
    
    elif interval=='halfhour':
        timebins = np.zeros( 48*daycount )
        for n in range( 48*daycount ):
            timeaxis.append( start_date + n*timedelta(0,1800) )
        for t in times:
            timebins[ int(np.floor(np.divide((t - start_date).total_seconds(),1800))) ] += 1

    else:
        print('Options are day, hour, or halfhour')
        return
    
    return timebins, timeaxis
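# Usage sketch with a few hypothetical event times (not part of the original snippet):
from datetime import datetime, timedelta
import numpy as np

start = datetime(2020, 1, 1)
end = datetime(2020, 1, 3)
events = [start + timedelta(hours=h) for h in (1, 5, 5.5, 30)]

bins, axis = eventsPerInterval(events, start, end, interval='hour')
print(int(bins.sum()), len(axis))  # 4 events spread over 48 hourly bins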
def getRadiationAtLatLong(path, latitude, longitude):
	suitableLatLong = True

	#open the file and get the header info such as coords, cell size, numrows and cols.
	
	bottomLeftLatLong = getBottomLeftLatLong(path)
	cellSize = getCellSize(path)
	colsRows = getColsRows(path)
	minLatitude = float(bottomLeftLatLong[0] - np.multiply(cellSize, float(colsRows[0])))
	maxLongitude = float(bottomLeftLatLong[1] + np.multiply(cellSize, float(colsRows[1])))
	maxLatitude = bottomLeftLatLong[0]
	minLongitude = bottomLeftLatLong[1]
	topRightLatitude = bottomLeftLatLong[0] + np.multiply(cellSize,(colsRows[1]- 1))
	topRightLongitude = bottomLeftLatLong[1]

	print "Minimum lat, long = ("+str(minLatitude)+","+str(minLongitude)+")"
	print "Top right lat, long = ("+str(topRightLatitude)+","+str(topRightLongitude)+")"
	print "Requested lat, long = ("+str(latitude)+","+str(longitude)+")"
	# Verify that lat and long are reasonable so we don't explode the file.
	if(latitude >= minLatitude and latitude <= maxLatitude and longitude >= minLongitude and longitude <= maxLongitude):
		xcoord = int(np.round(np.divide(float(topRightLongitude - longitude), cellSize)))
		ycoord = int(np.round(np.divide(float(topRightLatitude - latitude), cellSize)))
		data = np.loadtxt(path, dtype='float', skiprows=6, usecols=None, unpack=False)
		print("Table Coordinates: " + str(ycoord) + "," + str(xcoord))
		print("Size of Table: " + str(data.shape[0]) + "," + str(data.shape[1]))
		radiation = data[ycoord, xcoord]

	else:
		print "Latitude and longitude are invalid for this file."
		radiation = -1

	return radiation
def getPercentages(summary):
    summaryPercentages = pd.DataFrame(summary).reset_index(drop=False)
    aggFuncs = {'Deliveries':np.sum, 'OffDayDeliveries':np.sum, 'AdditionalDeliveries':np.sum}
    summaryPercentages = summaryPercentages.groupby('Warehouse').agg(aggFuncs)
    summaryPercentages['PercentNonOffday'] = 1 - np.divide(summaryPercentages.OffDayDeliveries, summaryPercentages.Deliveries)
    summaryPercentages['PercentNonAdditionalDay'] = 1 -  np.divide(summaryPercentages.AdditionalDeliveries, summaryPercentages.Deliveries)
    return summaryPercentages
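# Usage sketch with a small hypothetical summary table (not part of the original snippet):
import numpy as np
import pandas as pd

summary = pd.DataFrame({
    'Warehouse': ['North', 'North', 'South'],
    'Deliveries': [10, 12, 8],
    'OffDayDeliveries': [2, 1, 4],
    'AdditionalDeliveries': [1, 0, 2],
})
print(getPercentages(summary))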
Example #22
def mk_stochastic(T):
    """
    % MK_STOCHASTIC Ensure the argument is a stochastic matrix, i.e., the sum over the last dimension is 1.
    % [T,Z] = mk_stochastic(T)
    %
    % If T is a vector, it will sum to 1.
    % If T is a matrix, each row will sum to 1.
    % If T is a 3D array, then sum_k T(i,j,k) = 1 for all i,j.
    
    % Set zeros to 1 before dividing
    % This is valid since S(j) = 0 iff T(i,j) = 0 for all j
    """

    T = np.asfarray(T)

    if T.ndim == 1 or (T.ndim == 2 and (T.shape[0] == 1 or T.shape[1] == 1)):  # isvector
        T, Z = normalise(T)
    elif T.ndim == 2:  # matrix
        T = np.asmatrix(T)
        Z = np.sum(T, 1)
        S = Z + (Z == 0)
        norm = np.tile(S, (1, T.shape[1]))
        T = np.divide(T, norm)
    else:  # multi-dimensional array
        ns = T.shape
        T = np.asmatrix(np.reshape(T, (np.prod(ns[0:-1]), ns[-1])))
        Z = np.sum(T, 1)
        S = Z + (Z == 0)
        norm = np.tile(S, (1, ns[-1]))
        T = np.divide(T, norm)
        T = np.reshape(np.asarray(T), ns)

    return T, Z
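# Usage sketch for the matrix case (the vector branch relies on a separate
# normalise() helper that is not shown in this snippet):
import numpy as np

T = np.array([[1.0, 3.0],
              [0.0, 0.0],
              [2.0, 2.0]])
T_norm, Z = mk_stochastic(T)
print(T_norm)  # rows sum to 1; the all-zero row stays zero
print(Z)       # original row sums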
Example #23
    def normalize(self, mode="integral"):
        """
        Force normalization of filter kernel.

        Parameters
        ----------
        mode : {'integral', 'peak'}
            One of the following modes:
                * 'integral' (default)
                    Kernel normalized such that its integral = 1.
                * 'peak'
                    Kernel normalized such that its peak = 1.
        """
        # There are kernel that sum to zero and
        # the user should be warned in this case
        if np.isinf(self._normalization):
            warnings.warn(
                "Kernel cannot be normalized because the " "normalization factor is infinite.", AstropyUserWarning
            )
            return
        if np.abs(self._normalization) > MAX_NORMALIZATION:
            warnings.warn(
                "Normalization factor of kernel is " "exceptionally large > {0}.".format(MAX_NORMALIZATION),
                AstropyUserWarning,
            )
        if mode == "integral":
            self._array *= self._normalization
        if mode == "peak":
            np.divide(self._array, self._array.max(), self.array)
            self._normalization = 1.0 / self._array.sum()
Example #24
def ndcg_multi(X, Y, Ks):
	assert(X.size == Y.size and all(X.indices == Y.indices) and all(X.indptr == Y.indptr))
	n = Y.shape[1]
	res = zeros(len(Ks))
	nvalid = 0
	Xdata = X.data
	Ydata = Y.data
	indices = Y.indices
	indptr = Y.indptr
	for i in range(n):
		[j0, j1] = [indptr[i], indptr[i + 1]]
		if j0 == j1: # skip empty column
			continue
		nvalid += 1
		Xi = Xdata[j0:j1]
		Yi = Ydata[j0:j1]
		I = argsort(-Xi)
		Yi_pred = numpy.exp(Yi[I])-1.0
		Yi_best = numpy.exp(-(sort(-Yi)))-1.0
		Wi = numpy.log(numpy.exp(1) + arange(j1 - j0))
		Yi_pred = numpy.divide(Yi_pred, Wi)
		Yi_best = numpy.divide(Yi_best, Wi)
		for k in range(len(Ks)):
			K = Ks[k]
			Ki = min([K, j1 - j0])
			res[k] += sum(Yi_pred[0:Ki]) / sum(Yi_best[0:Ki])
	assert(nvalid > 0)
	res /= nvalid
	return res
Example #25
    def get_summed_cohp_by_label_and_orbital_list(self, label_list, orbital_list, divisor=1):
        """
        Returns a COHP object that includes a summed COHP divided by divisor

        Args:
            label_list: list of labels for the COHP that should be included in the summed cohp
            orbital_list: list of orbitals for the COHPs that should be included in the summed cohp (same order as label_list)
            divisor: float/int, the summed cohp will be divided by this divisor
        Returns:
            Returns a COHP object including a summed COHP
        """
        # check if cohps are spinpolarized or not
        first_cohpobject = self.get_orbital_resolved_cohp(label_list[0], orbital_list[0])
        summed_cohp = first_cohpobject.cohp.copy()
        summed_icohp = first_cohpobject.icohp.copy()
        for ilabel, label in enumerate(label_list[1:], 1):
            cohp_here = self.get_orbital_resolved_cohp(label, orbital_list[ilabel])
            summed_cohp[Spin.up] = np.sum([summed_cohp[Spin.up], cohp_here.cohp.copy()[Spin.up]], axis=0)
            if Spin.down in summed_cohp:
                summed_cohp[Spin.down] = np.sum([summed_cohp[Spin.down], cohp_here.cohp.copy()[Spin.down]], axis=0)
            summed_icohp[Spin.up] = np.sum([summed_icohp[Spin.up], cohp_here.icohp.copy()[Spin.up]], axis=0)
            if Spin.down in summed_icohp:
                summed_icohp[Spin.down] = np.sum([summed_icohp[Spin.down], cohp_here.icohp.copy()[Spin.down]], axis=0)

        divided_cohp = {}
        divided_icohp = {}
        divided_cohp[Spin.up] = np.divide(summed_cohp[Spin.up], divisor)
        divided_icohp[Spin.up] = np.divide(summed_icohp[Spin.up], divisor)
        if Spin.down in summed_cohp:
            divided_cohp[Spin.down] = np.divide(summed_cohp[Spin.down], divisor)
            divided_icohp[Spin.down] = np.divide(summed_icohp[Spin.down], divisor)

        return Cohp(efermi=first_cohpobject.efermi, energies=first_cohpobject.energies, cohp=divided_cohp,
                    are_coops=first_cohpobject.are_coops,
                    icohp=divided_icohp)
Example #26
def getGammaAngle(appf,cAtom,oAtom,hAtom):
    # first determine the nAtom
    aminoGroup = appf.select('resnum ' + str(cAtom.getResnum()))
    for at in aminoGroup:
        if(at.getName() == 'N'):
            nAtom = at
        # get coordinates
    cCoords = cAtom.getCoords()
    oCoords = oAtom.getCoords()
    hCoords = hAtom.getCoords()
    nCoords = nAtom.getCoords()
    # get necessary vectors
    oc = np.subtract(oCoords,cCoords)
    nc = np.subtract(nCoords,cCoords)
    ho = np.subtract(hCoords,oCoords)
    n1 = np.cross(oc,nc)
    n1_unit = np.divide(n1,np.linalg.norm(n1))
    # get projection of H-O in O-C direction
    oc_unit = np.divide(oc,np.linalg.norm(oc))
    #print oc_unit
    hproj = np.dot(ho,oc_unit)
    # get projection of H-O onto N-C-O plane
    out = np.dot(ho,n1_unit)
    n2 = np.cross(np.multiply(n1_unit,out),oc)
    #print n2
    ho_ip = np.subtract(ho,np.multiply(n1_unit,out))
    test = np.dot(n2,ho_ip)
    #print test
    ang = hproj/np.linalg.norm(ho_ip)
    ang = math.acos(ang)
    ang = ang*180/math.pi
    #if(test < 0):
    #    ang = ang * -1
    return ang
Example #27
    def _process_sample (self, ap1, ap2, ap3, triple, tflags):
        """We have computed one independent phase closure triple in one timeslot.

        """
        # Frequency-resolved:
        np.divide (triple, np.abs (triple), triple)
        phase = np.angle (triple)

        self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap1, phase, tflags + 0.)
        self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap2, phase, tflags + 0.)
        self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap3, phase, tflags + 0.)

        # Frequency-averaged:
        triple = np.dot (triple, tflags) / tflags.sum ()
        phase = np.angle (triple)

        self.global_stats_by_time.accum (self.cur_time, phase)

        self.ap_stats_by_ddid[self.cur_ddid].accum (ap1, phase)
        self.ap_stats_by_ddid[self.cur_ddid].accum (ap2, phase)
        self.ap_stats_by_ddid[self.cur_ddid].accum (ap3, phase)
        self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap2), phase)
        self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap3), phase)
        self.bp_stats_by_ddid[self.cur_ddid].accum ((ap2, ap3), phase)

        self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap1, phase)
        self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap2, phase)
        self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap3, phase)
Example #28
def get_gaussian_weight_patch(gauss_shape=(19, 19), gauss_sigma_frac=.3,
                              gauss_norm_01=True):
    r"""
    2d gaussian image useful for plotting

    Returns:
        ndarray: patch

    CommandLine:
        python -m vtool.coverage_kpts --test-get_gaussian_weight_patch

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.coverage_kpts import *  # NOQA
        >>> # build test data
        >>> # execute function
        >>> patch = get_gaussian_weight_patch()
        >>> # verify results
        >>> result = str(patch)
        >>> print(result)
    """
    # Perdoch uses roughly .95 of the radius
    radius = gauss_shape[0] / 2.0
    sigma = gauss_sigma_frac * radius
    # Similar to SIFT's computeCircularGaussMask in helpers.cpp
    # uses smmWindowSize=19 in hesaff for patch size. and 1.6 for sigma
    # Create gaussian image to warp
    patch = ptool.gaussian_patch(shape=gauss_shape, sigma=sigma)
    if gauss_norm_01:
        np.divide(patch, patch.max(), out=patch)
    return patch
def compile_stats():
    logging.info('Loading data...')

    for i, filename in enumerate(os.listdir(DATA_DIR)):
        if i > MAX_FILES:
            break
        full_name = DATA_DIR + "/" + filename
        print(full_name)
        with open(full_name) as f:
            data = np.load(f)
            if len(data["input"].shape) == 3 and len(data["output"].shape) == 3:
                X = data["input"]
                y = data["output"]
                if COLLECT_ACTION_PCT:
                    probs = np.divide(np.sum(y, (0,1)).astype(np.float64),
                                      np.sum(y, (0,1,2)))
                    player_to_stats[filename] = probs
                elif COLLECT_VPIP_PFR:
                    # Assumes actions are (fold, check, call, raise)
                    actions = X[:,:,11:15]
                    if (np.sum(actions, (0,1,2)) < 100):
                        continue
                    probs = np.divide(np.sum(actions, (0,1)).astype(np.float64),
                                      np.sum(actions, (0,1,2)))
                    pfr = probs[3]
                    vpip = probs[3] + probs[2]
                    player_to_stats[filename] = np.array([vpip, pfr])
Example #30
def mc2mvsk(args):
    '''convert central moments to mean, variance, skew, kurtosis
    '''
    mc, mc2, mc3, mc4 = args
    skew = np.divide(mc3, mc2**1.5)
    kurt = np.divide(mc4, mc2**2.0) - 3.0
    return (mc, mc2, skew, kurt)
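# Usage sketch: central moments of a hypothetical normal sample
# (not part of the original snippet):
import numpy as np

x = np.random.default_rng(0).normal(size=1000)
mc = x.mean()
mc2, mc3, mc4 = ((x - mc)**2).mean(), ((x - mc)**3).mean(), ((x - mc)**4).mean()

mean, var, skew, kurt = mc2mvsk((mc, mc2, mc3, mc4))
print(mean, var, skew, kurt)  # skew and excess kurtosis should both be near 0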
 def redraw(self, event=None):
    # acquire Gaussian parameters
    p = np.array([float(self.p1.get()), 1.0-float(self.p1.get())])
    mu1 = np.array([float(self.mu1x.get()), float(self.mu1y.get())])
    mu2 = np.array([float(self.mu2x.get()), float(self.mu2y.get())])
    s1 = np.array([[float(self.s1x.get()), float(self.s1xy.get())],
                   [float(self.s1xy.get()), float(self.s1y.get())]])
    s2 = np.array([[float(self.s2x.get()), float(self.s2xy.get())],
                   [float(self.s2xy.get()), float(self.s2y.get())]])
    # create Multivariate Gaussian objects
    try:
       rv1 = multivariate_normal(mu1, s1)
    except (ValueError, np.linalg.LinAlgError) as e:
       messagebox.showerror("Error!", "Covariance matrix must be positive definite (Gaussian 1)")
       return
    try:
       rv2 = multivariate_normal(mu2, s2)
    except (ValueError, np.linalg.LinAlgError) as e:
       messagebox.showerror("Error!", "Covariance matrix must be positive definite (Gaussian 2)")
       return
    # Compute PDF for a certain range of x and y
    #xlim = [float(self.xmin.get()), float(self.xmax.get())]
    #ylim = [float(self.ymin.get()), float(self.ymax.get())]
    zoom = float(self.zoom.get())
    center = (mu1+mu2)/2
    distance = np.abs(mu1-mu2).max()
    if distance == 0:
       distance = 1.0
    xlim = [center[0]-distance*zoom, center[0]+distance*zoom]
    ylim = [center[1]-distance*zoom, center[1]+distance*zoom]
    x, y = np.mgrid[xlim[0]:xlim[1]:(xlim[1]-xlim[0])/500.0, ylim[0]:ylim[1]:(ylim[1]-ylim[0])/500.0]
    pos = np.dstack((x, y))
    rv1g = p[0]*rv1.pdf(pos)
    rv2g = p[1]*rv2.pdf(pos)
    sum12 = rv1g+rv2g
    post1 = np.divide(rv1g, sum12)
    post2 = np.divide(rv2g, sum12)
    self.fig.clf()
    #plt.set_cmap('seismic')
    ax = self.fig.add_subplot(111)
    # plot Decision Boundary or Difference of PDFs
    plotType = self.drawType.get()
    if plotType == 'Decision Boundary':
       ax.imshow((post1>post2).T, origin='lower', extent=[xlim[0], xlim[1], ylim[0], ylim[1]], cmap='bwr')
       self.fig.suptitle(plotType)
    elif plotType == 'Log-likelihood ratio':
       maxdata = np.max(np.abs(np.log(rv1.pdf(pos))-np.log(rv2.pdf(pos))))
       cax = ax.imshow((np.log(rv1.pdf(pos)) - np.log(rv2.pdf(pos))).T, origin='lower', extent=[xlim[0], xlim[1], ylim[0], ylim[1]], cmap='Spectral_r', vmin=-maxdata, vmax=maxdata)
       self.fig.colorbar(cax)
       self.fig.suptitle('log[p(x|y1)/p(x|y2)]')
    elif plotType == 'Scaled Posterior difference':
       maxdata = np.max(np.abs(rv1g-rv2g))
       cax = ax.imshow((rv1g - rv2g).T, origin='lower', extent=[xlim[0], xlim[1], ylim[0], ylim[1]], cmap='Spectral_r', vmin=-maxdata, vmax=maxdata)
       self.fig.colorbar(cax)
       self.fig.suptitle('P(y1)p(x|y1) - P(y2)p(x|y2)')
    elif plotType == 'Posterior':
       maxdata = np.max(np.abs(post1))
       cax = ax.imshow((post1).T, origin='lower', extent=[xlim[0], xlim[1], ylim[0], ylim[1]], cmap='Spectral_r', vmin=0, vmax=1)
       self.fig.colorbar(cax)
       self.fig.suptitle('P(y1|x) ( = 1 - P(y2|x) )')
    else:
       messagebox.showerror("Error!", "Plot type not supported")
    ax.text(mu1[0], mu1[1], '+', color='white', horizontalalignment='center', verticalalignment='center')
    ax.text(mu2[0], mu2[1], 'o', color='white', horizontalalignment='center', verticalalignment='center')
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    # plot contours for each PDF
    if self.drawPDFContour.get():
       ax.contour(x, y, rv1g, colors='w')
       ax.contour(x, y, rv2g, colors='w')
       #plt.contour(x, y, rv1g.reshape(x.shape), norm=LogNorm(vmin=1.0, vmax=40.0),levels=np.logspace(0, 3, 10))
       #plt.contour(x, y, rv2g.reshape(x.shape), norm=LogNorm(vmin=1.0, vmax=40.0),levels=np.logspace(0, 3, 10))
    self.canvas.draw()
Example #32
def normalize(image,mean_image,std_image):
    return np.divide((image-mean_image),std_image)
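# Usage sketch with hypothetical per-pixel statistics (not part of the original snippet):
import numpy as np

image = np.random.rand(4, 4)
mean_image = np.full((4, 4), 0.5)
std_image = np.full((4, 4), 0.25)

z = normalize(image, mean_image, std_image)
print(z.mean(), z.std())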
import numpy
from scipy import signal
from LoadMnistData_kjw_v1_0_0 import *
from Softmax_kjw_v1_0_0 import *
from ReLU_kjw_v1_0_0 import *
from Conv_kjw_v1_0_1 import *
from Pool_kjw_v1_0_0 import *
from MnistConv_kjw_v1_0_0 import *

# Learn
#
Images, Labels = LoadMnistData_kjw('MNIST\\t10k-images-idx3-ubyte.gz', 'MNIST\\t10k-labels-idx1-ubyte.gz')

## Divide Images by 255, presumably to scale the values into the 0-1 range
Images = numpy.divide(Images, 255)

## numpy.random.randn: draws from the standard normal distribution (mean 0, standard deviation 1)
W1 = 1e-2 * numpy.random.randn(9, 9, 20)

## numpy.random.uniform: draws from a uniform distribution; numpy.sqrt: square root
W5 = numpy.random.uniform(-1, 1, (100, 2000)) * numpy.sqrt(6) / numpy.sqrt(360 + 2000)
Wo = numpy.random.uniform(-1, 1, (10, 100)) * numpy.sqrt(6) / numpy.sqrt(10 + 100)

X = Images[0:8000, :, :]
D = Labels[0:8000]

for _epoch in range(3):
    print(_epoch)
    W1, W5, Wo = MnistConv_kjw(W1, W5, Wo, X, D)
def eddy_sections(bwl:np.ndarray, a_1:np.ndarray, a_3:np.ndarray, sigma:np.ndarray, H0:np.ndarray, Ts:np.ndarray,
         OG:float, R:float, wE:float, fi_a:float, ra=1000.0):
    """
    Calculation of eddy damping according to Ikeda.
    This implementation is a translation from Carl-Johans Matlab implementation.

    Parameters
    ----------
    bwl
        sectional b water line [m]
    a_1
        sectional lewis coefficients
    a_3
        sectional lewis coefficients
    sigma
        sectional coefficient
    H0
        sectional coefficient
    Ts
        sectional draft [m]
    OG
        vertical distance water line to cg [m]
    R
        bilge radius [m]
    ra
        water density [kg/m3]
    wE
        roll requency [rad/s]
    fi_a
        roll amplitude [rad]

    Returns
    -------
    Bp44E0s
        Eddy damping per unit length for the sections at zero speed.
    """

    #H0=np.array(H0)/2  # ...strange...
    H0 = np.array(H0) # ...strange...

    N=len(bwl)
    #M = bwl / (2 * (1 + a_1 + a_3));
    M = np.divide(bwl, (2 * (1 + a_1 + a_3)), out=np.zeros_like(bwl), where=(2 * (1 + a_1 + a_3))!=0)

    fi1 = 0;
    #fi2 = 0.5 * arccos(a_1 * (1 + a_3)) / (4 * a_3);
    fi2 = np.divide(0.5 * arccos(a_1 * (1 + a_3)), (4 * a_3), out=np.zeros_like(a_1), where=(4 * a_3)!=0)
    rmax_fi1 = M*sqrt(((1+a_1)*sin(fi1)-a_3*sin(fi1))**2+((1-a_1)*cos(fi1)-a_3*cos(fi1))**2)
    rmax_fi2 = M*sqrt(((1+a_1)*sin(fi2)-a_3*sin(fi2))**2+((1-a_1)*cos(fi2)-a_3*cos(fi2))**2)

    mask=rmax_fi2 > rmax_fi1
    fi=np.zeros(N)
    fi[mask] = fi2[mask]
    fi[~mask] = fi1

    B0 = -2 * a_3 * sin(5 * fi) + a_1 * (1 - a_3) * sin(3 * fi) + (
                (6 + 3 * a_1) * a_3 ** 2 + (3 * a_1 + a_1 ** 2) * a_3 + a_1 ** 2) * sin(fi)

    A0 = -2 * a_3 * cos(5 * fi) + a_1 * (1 - a_3) * cos(3 * fi) + (
                (6 - 3 * a_1) * a_3 ** 2 + (a_1 ** 2 - 3 * a_1) * a_3 + a_1 ** 2) * cos(fi)

    H = 1 + a_1 ** 2 + 9 * a_3 ** 2 + 2 * a_1 * (1 - 3 * a_3) * cos(2 * fi) - 6 * a_3 * cos(4 * fi)

    f3 = 1 + 4 * exp(-1.65 * 10 ** 5 * (1 - sigma) ** 2);

    x_ = np.array([rmax_fi1, rmax_fi2]).transpose()
    rmax = max(x_, axis=1)

    H0_prim = H0*Ts/(Ts-OG)
    sigma_prim = (sigma*Ts-OG)/(Ts-OG)
    gamma=sqrt(pi)*f3*(rmax+2*M/H*sqrt(B0**2*A0**2))/((2*Ts*(1-OG/Ts)*sqrt(H0_prim*sigma_prim)))

    f1 = 0.5 * (1 + tanh(20 * (sigma - 0.7)));
    f2 = 0.5 * (1 - cos(pi * sigma)) - 1.5 * (1 - exp(-5 * (1 - sigma))) * (sin(pi * sigma)) ** 2

    Cp = 0.5 * (0.87 * exp(-gamma) - 4 * exp(-0.187 * gamma) + 3);

    M_re = 1 / 2 * ra * rmax ** 2 * Ts ** 2 * Cp * (
                (1 - f1 * R / Ts) * (1 - OG / Ts - f1 * R / Ts) + f2 * (H0 - f1 * R / Ts) ** 2)
    Cr = M_re / (1 / 2 * ra * Ts ** 4)


    #WE, CR = np.meshgrid(wE, Cr)
    Cr=np.array(Cr)
    wE=np.array(wE)

    try:
        len_Cr = len(Cr)
    except TypeError:
        len_Cr = 1

    try:
        len_wE = len(wE)
    except TypeError:
        len_wE = 1

    WE=np.tile(wE, (len_Cr, 1))
    FI_a=np.tile(fi_a, (len_Cr, 1))

    Tss = np.tile(Ts, (len_wE, 1)).transpose()
    CR = np.tile(Cr, (len_wE, 1)).transpose()

    Bp44E0s = 4 * ra * Tss ** 4 * WE * FI_a * CR / (3 * pi)

    mask = np.isnan(Bp44E0s)
    Bp44E0s[mask] = 0

    return Bp44E0s
Example #35
def parallel_tempering_plus(init_toric, Nc=None, p=0.1, SEQ=5, TOPS=10, tops_burn=2, eps=0.001, n_tol=1e-4, steps=1000000, iters=10, conv_criteria='error_based'):
    size = init_toric.system_size
    Nc = Nc or size

    # create the different chains in an array
    # number of chains in ladder, must be odd
    if not Nc % 2:
        print('Number of chains was not odd.')

    if tops_burn >= TOPS:
        print('tops_burn has to be smaller than TOPS')

    ladder = []  # ladder to store all chains
    p_end = 0.75  # p at top chain as per high-threshold paper
    tops0 = 0
    resulting_burn_in = 0
    since_burn = 0
    nbr_errors_bottom_chain = np.zeros(steps)
    eq = np.zeros([steps, 16], dtype=np.uint32)  # list of class counts after burn in

    # used in error_based/majority_based instead of setting tops0 = TOPS
    tops_change = 0

    convergence_reached = False

    # add and copy state for all chains in ladder
    for i in range(Nc):
        p_i = p + ((p_end - p) / (Nc - 1)) * i
        ladder.append(Chain(size, p_i))
        ladder[i].toric = copy.deepcopy(init_toric)  # give all the same initial state
    ladder[Nc - 1].p_logical = 0.5  # set probability of application of logical operator in top chain

    count = -1
    for j in range(steps):
        # run mcmc for each chain [steps] times
        for i in range(Nc):
            ladder[i].update_chain(iters)
        # current_eq attempt flips from the top down
        ladder[-1].flag = 1
        for i in reversed(range(Nc - 1)):
            if r_flip(ladder[i].toric.qubit_matrix, ladder[i].p, ladder[i + 1].toric.qubit_matrix, ladder[i + 1].p):
                ladder[i].toric, ladder[i + 1].toric = ladder[i + 1].toric, ladder[i].toric
                ladder[i].flag, ladder[i + 1].flag = ladder[i + 1].flag, ladder[i].flag
        if ladder[0].flag == 1:
            tops0 += 1
            ladder[0].flag = 0

        current_eq = define_equivalence_class(ladder[0].toric.qubit_matrix)

        if tops0 >= tops_burn:
            since_burn = j - resulting_burn_in

            eq[since_burn] = eq[since_burn - 1]
            eq[since_burn][current_eq] += 1
            nbr_errors_bottom_chain[since_burn] = np.count_nonzero(ladder[0].toric.qubit_matrix)

        else:
            # number of steps until tops0 = 2
            resulting_burn_in += 1

        if not convergence_reached and tops0 >= TOPS and not since_burn % 10:
            if conv_criteria == 'error_based':
                tops_accepted = tops0 - tops_change
                accept, convergence_reached = conv_crit_error_based(nbr_errors_bottom_chain, since_burn, tops_accepted, SEQ, eps)
                if not accept:
                    tops_change = tops0

            if conv_criteria == 'distr_based':
                tops_accepted = tops0 - tops_change
                accept, convergence_reached = conv_crit_distr_based(eq, since_burn, tops_accepted, SEQ, n_tol)

                # Reset if difference (norm) between Q2 and Q4 is too different
                if not accept:
                    tops_change = tops0

            if conv_criteria == 'majority_based':
                # returns the majority class that becomes obvious right when convergence is reached
                tops_accepted = tops0 - tops_change
                accept, convergence_reached = conv_crit_majority_based(eq, since_burn, SEQ)

                # reset if majority classes in Q2 and Q4 are different
                if not accept:
                    tops_change = tops0

        if convergence_reached:
            count=j
            break

    distr = (np.divide(eq[since_burn], since_burn + 1) * 100).astype(np.uint8)
    return distr, count
Example #36
def parallel_tempering_analysis(init_toric, Nc=None, p=0.1, SEQ=5, TOPS=10, tops_burn=2, eps=0.01, n_tol=1e-4, tvd_tol=0.05, kld_tol=0.5, steps=1000, iters=10, conv_criteria=None):
    size = init_toric.system_size
    Nc = Nc or size

    # create the different chains in an array
    # number of chains in ladder, must be odd
    if not Nc % 2:
        print('Number of chains was not odd.')

    # Warning if TOPS is too small
    if tops_burn >= TOPS:
        print('tops_burn has to be smaller than TOPS')

    ladder = []  # ladder to store all chains
    p_end = 0.75  # p at top chain as per high-threshold paper
    tops0 = 0  # number of error chains that have traveled from top chain to bottom chain
    resulting_burn_in = 0  # Number of steps taken for tops0 to reach tops_burn
    since_burn = 0  # Number of steps taken since tops0 reached top_burn
    nbr_errors_bottom_chain = np.zeros(steps)  # number of errors in bottom chain
    eq = np.zeros([steps, 16], dtype=np.uint32)  # list of class counts after burn in
    eq_full = np.zeros([steps, 16], dtype=np.uint32)  # list of class counts from start
    # might only want one of these, as (eq_full[j] - eq[j - resulting_burn_in]) is constant

    # List of convergence criteria. Add any new ones to list
    conv_criteria = conv_criteria or ['error_based', 'distr_based', 'majority_based', 'tvd_based', 'kld_based']
    # Dictionary to hold the converged distribution and the number of steps to converge, according to each criteria
    crits_distr = {}
    tops_distr = {}
    for crit in conv_criteria:
        # every criteria gets an empty list, a number and a bool.
        # The empty list represents eq_class_distr, the number is the step where convergence is reached, and the bool is whether convergence has been reached
        crits_distr[crit] = [np.zeros(16), -1, False]
        # How much tops0 has increased while crit has remained fulfilled
        tops_distr[crit] = TOPS

    # plot initial error configuration
    init_toric.plot_toric_code(init_toric.next_state, 'Chain_init', define_equivalence_class(init_toric.qubit_matrix))

    # add and copy state for all chains in ladder
    for i in range(Nc):
        p_i = p + ((p_end - p) / (Nc - 1)) * i
        ladder.append(Chain(size, p_i))
        ladder[i].toric = copy.deepcopy(init_toric)  # give all the same initial state
    ladder[Nc - 1].p_logical = 0.5  # set probability of application of logical operator in top chain

    for j in range(steps):
        # run mcmc for each chain [steps] times
        for i in range(Nc):
            ladder[i].update_chain(iters)
        # attempt flips from the top down
        ladder[-1].flag = 1
        for i in reversed(range(Nc - 1)):
            if r_flip(ladder[i].toric.qubit_matrix, ladder[i].p, ladder[i + 1].toric.qubit_matrix, ladder[i + 1].p):
                ladder[i].toric, ladder[i + 1].toric = ladder[i + 1].toric, ladder[i].toric
                ladder[i].flag, ladder[i + 1].flag = ladder[i + 1].flag, ladder[i].flag
        if ladder[0].flag == 1:
            tops0 += 1
            ladder[0].flag = 0

        # Equivalence class of bottom chain
        current_eq = define_equivalence_class(ladder[0].toric.qubit_matrix)

        # current class count is previous class count + the current class
        # edge case j = 0 is ok. eq_full[-1] picks last element, which is initiated as zeros
        eq_full[j] = eq_full[j - 1]
        eq_full[j][current_eq] += 1

        # Check if burn in phase is complete
        if tops0 >= tops_burn:
            # Update since_burn
            since_burn = j - resulting_burn_in
            # Increment counts of equivalence classes according to current_eq
            eq[since_burn] = eq[since_burn - 1]
            eq[since_burn][current_eq] += 1
            # Update number of errors in bottom chain
            nbr_errors_bottom_chain[since_burn] = np.count_nonzero(ladder[0].toric.qubit_matrix)

        else:
            # number of steps until tops0 >= tops_burn
            resulting_burn_in += 1

        # Evaluate convergence criteria every tenth step
        if tops0 >= TOPS and not since_burn % 10:
            # Evaluate error_based
            if 'error_based' in conv_criteria and not crits_distr['error_based'][2]:
                tops_accepted = tops0 - tops_distr['error_based']
                accept, crits_distr['error_based'][2] = conv_crit_error_based(nbr_errors_bottom_chain, since_burn, tops_accepted, SEQ, eps)

                # Reset if difference in nbr_errors between Q2 and Q4 is too different
                if not accept:
                    tops_distr['error_based'] = tops0

                # Converged
                if crits_distr['error_based'][2]:
                    crits_distr['error_based'][1] = since_burn

            # Evaluate distr_based
            if 'distr_based' in conv_criteria and not crits_distr['distr_based'][2]:
                tops_accepted = tops0 - tops_distr['distr_based']
                accept, crits_distr['distr_based'][2] = conv_crit_distr_based(eq, since_burn, tops_accepted, SEQ, n_tol)

                # Reset if difference (norm) between Q2 and Q4 is too different
                if not accept:
                    tops_distr['distr_based'] = tops0

                # Converged
                if crits_distr['distr_based'][2]:
                    crits_distr['distr_based'][1] = since_burn

            # Evaluate majority_based
            if 'majority_based' in conv_criteria and not crits_distr['majority_based'][2]:
                # returns the majority class that becomes obvious right when convergence is reached
                tops_accepted = tops0 - tops_distr['majority_based']
                accept, crits_distr['majority_based'][2] = conv_crit_majority_based(eq, since_burn, tops_accepted, SEQ)

                # reset if majority classes in Q2 and Q4 are different
                if not accept:
                    tops_distr['majority_based'] = tops0

                # Converged
                if crits_distr['majority_based'][2]:
                    crits_distr['majority_based'][1] = since_burn

            # Evaluate tvd_based
            if 'tvd_based' in conv_criteria and not crits_distr['tvd_based'][2]:
                tops_accepted = tops0 - tops_distr['tvd_based']
                accept, crits_distr['tvd_based'][2] = conv_crit_tvd_based(eq, since_burn, tops_accepted, SEQ, tvd_tol)

                # Reset if difference (norm) between Q2 and Q4 is too different
                if not accept:
                    tops_distr['tvd_based'] = tops0

                # Converged
                if crits_distr['tvd_based'][2]:
                    crits_distr['tvd_based'][1] = since_burn

            # Evaluate kld_based
            if 'kld_based' in conv_criteria and not crits_distr['kld_based'][2]:
                tops_accepted = tops0 - tops_distr['kld_based']
                accept, crits_distr['kld_based'][2] = conv_crit_kld_based(eq, since_burn, tops_accepted, SEQ, kld_tol)

                # Reset if difference (norm) between Q2 and Q4 is too different
                if not accept:
                    tops_distr['kld_based'] = tops0

                # Converged
                if crits_distr['kld_based'][2]:
                    crits_distr['kld_based'][1] = since_burn

    # plot all chains
    for i in range(Nc):
        ladder[i].plot('Chain_' + str(i), define_equivalence_class(ladder[i].toric.qubit_matrix))

    # Convert resulting final distribution to 8 bit int
    distr = (np.divide(eq[since_burn], since_burn + 1) * 100).astype(np.uint8)

    for crit in conv_criteria:
        # Check if converged
        if crits_distr[crit][2]:
            # Calculate converged distribution from converged class count
            crits_distr[crit][0] = np.divide(eq[crits_distr[crit][1]], crits_distr[crit][1] + 1)  # Divide by "index+1" since first index is 0

    # Return resulting parameters
    return [distr, eq, eq_full, ladder[0], resulting_burn_in, crits_distr]
Example #37
def normalize_range_0_to_1(x):
    x = np.add(x, -x.min())
    x = np.divide(x, x.max())
    return x
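# Usage sketch (not part of the original snippet):
import numpy as np

x = np.array([2.0, 5.0, 8.0])
print(normalize_range_0_to_1(x))  # [0.  0.5 1. ]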
    def energy_full_vs_energy_in_red():
        global_filename = PB.global_filename

        freq_class = PB.x_axis_frequency()
        freq_array = freq_class.get_freq_unrounded()
        angle_array = PB.get_angles_unrounded()

        pathname = os.path.dirname(global_filename)
        filename_extension = os.path.splitext(global_filename)[-1]
        # two passes: first the mean over 1 mJ energy sub-ranges,
        # then the deviation from that mean
        max_energy_mj = 30
        energy_index = np.arange(1, max_energy_mj + 1)
        number_of_samples = np.zeros(max_energy_mj)
        sum_of_energy = np.zeros(max_energy_mj)
        mean_of_energy = np.zeros(max_energy_mj)
        sd_of_energy = np.zeros(max_energy_mj)
        sum_of_energy_red = np.zeros(max_energy_mj)
        mean_of_energy_red = np.zeros(max_energy_mj)
        sd_of_energy_red = np.zeros(max_energy_mj)

        if (pathname):
            for file in os.listdir(pathname):
                if file.endswith(filename_extension):
                    energy = float(file[:file.find("_")])
                    if (energy > 1):
                        if file.endswith(".png"):
                            PB.do_image_to_array('', pathname + "/" + file)
                        elif file.endswith(".dat"):
                            PB.do_data_to_array('', pathname + "/" + file)
                            PB.preprocessing_plot()
                            subarray = PB.array[PB.angle_from:PB.angle_to, :]
                            number_of_samples[round(energy)] += 1
                            sum_of_energy[round(energy)] += subarray.sum()
                            x_from = freq_class.index(840)
                            subarray = PB.array[PB.angle_from:PB.angle_to, x_from:]
                            sum_of_energy_red[round(energy)] += subarray.sum()
        mean_of_energy = np.divide(sum_of_energy, number_of_samples)
        mean_of_energy_red = np.divide(sum_of_energy_red, number_of_samples)

        if (pathname):
            for file in os.listdir(pathname):
                if file.endswith(filename_extension):
                    energy = float(file[:file.find("_")])
                    if (energy > 1):
                        if file.endswith(".png"):
                            PB.do_image_to_array('', pathname + "/" + file)
                        elif file.endswith(".dat"):
                            PB.do_data_to_array('', pathname + "/" + file)
                            PB.preprocessing_plot()
                            subarray = PB.array[PB.angle_from:PB.angle_to, :]
                            sd_of_energy[round(energy)] += np.power(
                                subarray.sum() - mean_of_energy[round(energy)], 2)
                            x_from = freq_class.index(820)
                            subarray = PB.array[PB.angle_from:PB.angle_to, x_from:]
                            sd_of_energy_red[round(energy)] += np.power(
                                subarray.sum() - mean_of_energy_red[round(energy)], 2)
        # sd = sqrt(sum[(xi-<x>)^2]/(n-1))
        number_of_samples_minus_1 = np.zeros(max_energy_mj)
        number_of_samples_minus_1[number_of_samples > 1] = number_of_samples[
            number_of_samples > 1] - 1
        number_of_samples_minus_1[number_of_samples <= 1] = number_of_samples[
            number_of_samples <= 1]
        sd_of_energy = np.sqrt(
            np.divide(sd_of_energy, number_of_samples_minus_1))
        sd_of_energy_red = np.sqrt(
            np.divide(sd_of_energy_red, number_of_samples_minus_1))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        '''
        ax.bar(energy_index, mean_of_energy, yerr=sd_of_energy,
               error_kw={'ecolor': '0.1', 'capsize': 6}, label='Full')
        ax.bar(energy_index, mean_of_energy_red,  yerr=sd_of_energy_red,
               error_kw={'ecolor': 'tab:purple', 'capsize': 6}, label='Red')
        '''
        energy_ratio = np.zeros(max_energy_mj)
        energy_ratio[number_of_samples > 1] = np.divide(
            mean_of_energy_red, mean_of_energy)[number_of_samples > 1]
        div = np.divide(energy_ratio * sd_of_energy, mean_of_energy)
        div_red = np.divide(energy_ratio * sd_of_energy_red,
                            mean_of_energy_red)
        # print(div)
        # print("!\n")
        # print(div_red)
        # print("!\n")
        # print(np.divide(sd_of_energy, mean_of_energy))
        # print("!\n")
        energy_ratio_error = (np.sqrt(np.power(div, 2) + np.power(div_red, 2)))
        # print(energy_ratio_error)
        ax.errorbar(energy_index[number_of_samples > 1],
                    energy_ratio[number_of_samples > 1],
                    yerr=energy_ratio_error[number_of_samples > 1],
                    fmt='o',
                    capsize=10)
        ax.set_ylim(0, 1)
        ax.set_xlabel('Pulse energy, mJ')
        ax.set_ylabel('On-axis energy')
        plt.title('Energy deposited into the red wing')
        plt.legend(loc=2)
        fig.show()
def normalization(x,mu,sigma):
    x = np.subtract(x, mu)
    x = np.divide(x, sigma)
    return x
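# Usage sketch for normalization (assumes numpy is available as np, as the function
# above does): standardize data with statistics estimated from a training set.
# The arrays below are purely illustrative.
train = np.array([[1.0, 10.0], [3.0, 30.0], [5.0, 50.0]])
mu, sigma = train.mean(axis=0), train.std(axis=0)
print(normalization(np.array([[2.0, 20.0]]), mu, sigma))  # roughly [[-0.61 -0.61]]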
Beispiel #40
0
 def observation(self, obs):
     slope = np.divide( self.goal_position[1] - self.agent_pos[1] ,  self.goal_position[0] - self.agent_pos[0])
     obs['goal_direction'] = np.arctan( slope ) if self.type == 'angle' else slope
     return obs
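# An alternative sketch (not part of the original wrapper class): np.arctan2 yields the
# goal direction over the full circle and avoids the division by zero that the slope
# above hits when the goal and agent share an x coordinate. Positions are assumed to
# be (x, y) pairs.
import numpy as np

def goal_direction_angle(goal_position, agent_pos):
    dx = goal_position[0] - agent_pos[0]
    dy = goal_position[1] - agent_pos[1]
    return np.arctan2(dy, dx)  # angle in radians, defined even when dx == 0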
Beispiel #41
0
def trace_ratio(X, y, n_selected_features, **kwargs):
    """
    This function implements the trace ratio criterion for feature selection

    Input
    -----
    X: {numpy array}, shape (n_samples, n_features)
        input data
    y: {numpy array}, shape (n_samples,)
        input class labels
    n_selected_features: {int}
        number of features to select
    kwargs: {dictionary}
        style: {string}
            style == 'fisher', build between-class matrix and within-class affinity matrix in a fisher score way
            style == 'laplacian', build between-class matrix and within-class affinity matrix in a laplacian score way
        verbose: {boolean}
            True if the user wants to print out the objective function value at each iteration, False otherwise

    Output
    ------
    feature_idx: {numpy array}, shape (n_features,)
        the ranked (descending order) feature index based on subset-level score
    feature_score: {numpy array}, shape (n_features,)
        the feature-level score
    subset_score: {float}
        the subset-level score

    Reference
    ---------
    Feiping Nie et al. "Trace Ratio Criterion for Feature Selection." AAAI 2008.
    """

    # if 'style' is not specified, use the fisher score way to build the two affinity matrices
    if 'style' not in kwargs.keys():
        kwargs['style'] = 'fisher'
    # get the way to build affinity matrix, 'fisher' or 'laplacian'
    style = kwargs['style']
    n_samples, n_features = X.shape

    # if 'verbose' is not specified, do not output the value of objective function
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
    verbose = kwargs['verbose']

    if style == 'fisher':
        kwargs_within = {
            "neighbor_mode": "supervised",
            "fisher_score": True,
            'y': y
        }
        # build within class and between class laplacian matrix L_w and L_b
        W_within = construct_W(X, **kwargs_within)
        L_within = np.eye(n_samples) - W_within
        L_tmp = np.eye(n_samples) - np.ones([n_samples, n_samples]) / n_samples
        L_between = L_within - L_tmp

    if style == 'laplacian':
        kwargs_within = {
            "metric": "euclidean",
            "neighbor_mode": "knn",
            "weight_mode": "heat_kernel",
            "k": 5,
            't': 1
        }
        # build within class and between class laplacian matrix L_w and L_b
        W_within = construct_W(X, **kwargs_within)
        D_within = np.diag(np.array(W_within.sum(1))[:, 0])
        L_within = D_within - W_within
        W_between = np.dot(np.dot(D_within, np.ones([n_samples, n_samples])),
                           D_within) / np.sum(D_within)
        D_between = np.diag(np.array(W_between.sum(1)))
        L_between = D_between - W_between

    # build X'*L_within*X and X'*L_between*X
    L_within = (np.transpose(L_within) + L_within) / 2
    L_between = (np.transpose(L_between) + L_between) / 2
    S_within = np.array(np.dot(np.dot(np.transpose(X), L_within), X))
    S_between = np.array(np.dot(np.dot(np.transpose(X), L_between), X))

    # reflect the within-class or local affinity relationship encoded on the graph, Sw = X'*Lw*X
    S_within = (np.transpose(S_within) + S_within) / 2
    # reflect the between-class or global affinity relationship encoded on the graph, Sb = X'*Lb*X
    S_between = (np.transpose(S_between) + S_between) / 2

    # take the absolute values of diagonal
    s_within = np.absolute(S_within.diagonal())
    s_between = np.absolute(S_between.diagonal())
    s_between[s_between == 0] = 1e-14  # this number is from the authors' code

    # preprocessing
    fs_idx = np.argsort(np.divide(s_between, s_within), 0)[::-1]
    k = np.sum(s_between[0:n_selected_features]) / np.sum(
        s_within[0:n_selected_features])
    s_within = s_within[fs_idx[0:n_selected_features]]
    s_between = s_between[fs_idx[0:n_selected_features]]

    # iterate until convergence
    count = 0
    while True:
        score = np.sort(s_between - k * s_within)[::-1]
        I = np.argsort(s_between - k * s_within)[::-1]
        idx = I[0:n_selected_features]
        old_k = k
        k = np.sum(s_between[idx]) / np.sum(s_within[idx])
        if verbose:
            print('obj at iter ' + str(count + 1) + ': ' + str(k))
        count += 1
        if abs(k - old_k) < 1e-3:
            break

    # get feature index, feature-level score and subset-level score
    feature_idx = fs_idx[I]
    feature_score = score
    subset_score = k

    return feature_idx, feature_score, subset_score
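# Usage sketch for trace_ratio (purely illustrative data; assumes the construct_W
# helper used above is importable, e.g. from the scikit-feature package this snippet
# appears to follow):
import numpy as np

X = np.random.rand(50, 20)             # 50 samples, 20 features
y = np.random.randint(0, 2, size=50)   # binary class labels
feature_idx, feature_score, subset_score = trace_ratio(X, y, 5, style='fisher')
print(feature_idx[:5], subset_score)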
Beispiel #42
0
    def calculateWeights(self):
        """
        Finds flat-cal factors as median/pixelSpectra for each pixel and normalizes these weights at each wavelength bin.
        Trims the beginning and end off the sorted weights for each wavelength for each pixel, to exclude extremes from the averages.
        """
        self.flatWeightsList = []
        for iCube, cube in enumerate(self.spectralCubes):
            cubeWeightsList = []
            self.averageSpectra = []
            deltaWeightsList = []
            effIntTime = self.cubeEffIntTimes[iCube]
            # for each time chunk
            wvlAverages = np.zeros(self.nWvlBins)
            spectra2d = np.reshape(cube, [self.nxpix * self.nypix, self.nWvlBins])
            for iWvl in range(self.nWvlBins):
                wvlSlice = spectra2d[:, iWvl]
                goodPixelWvlSlice = np.array(wvlSlice[wvlSlice != 0])
                # dead pixels need to be taken out before calculating averages
                wvlAverages[iWvl] = np.median(goodPixelWvlSlice)
            weights = np.divide(wvlAverages, cube)
            weights[weights == 0] = np.nan
            weights[weights == np.inf] = np.nan
            cubeWeightsList.append(weights)
            deltaWeights = weights / np.sqrt(effIntTime * cube)
            deltaWeightsList.append(deltaWeights)
            self.averageSpectra.append(wvlAverages)

            cubeWeights = np.array(cubeWeightsList)
            deltaCubeWeights = np.array(deltaWeightsList)
            cubeWeightsMask = np.isnan(cubeWeights)
            self.maskedCubeWeights = np.ma.array(cubeWeights, mask=cubeWeightsMask, fill_value=1.)
            self.maskedCubeDeltaWeights = np.ma.array(deltaCubeWeights, mask=cubeWeightsMask)

            # sort maskedCubeWeights and rearange spectral cubes the same way
            sortedIndices = np.ma.argsort(self.maskedCubeWeights, axis=0)
            identityIndices = np.ma.indices(np.shape(self.maskedCubeWeights))

            sortedWeights = self.maskedCubeWeights[
                sortedIndices, identityIndices[1], identityIndices[2], identityIndices[3]]
            countCubesReordered = self.countCubes[
                sortedIndices, identityIndices[1], identityIndices[2], identityIndices[3]]
            cubeDeltaWeightsReordered = self.maskedCubeDeltaWeights[
                sortedIndices, identityIndices[1], identityIndices[2], identityIndices[3]]

            nCubes = np.shape(self.maskedCubeWeights)[0]
            # use integer trim bounds so the slice drops fractionOfChunksToTrim of the chunks at each end
            trim_lo = int(self.fractionOfChunksToTrim * nCubes)
            trim_hi = int((1 - self.fractionOfChunksToTrim) * nCubes)
            trimmedWeights = sortedWeights[trim_lo:trim_hi, :, :, :]
            trimmedCountCubesReordered = countCubesReordered[trim_lo:trim_hi, :, :, :]

            self.totalCube = np.ma.sum(trimmedCountCubesReordered, axis=0)
            self.totalFrame = np.ma.sum(self.totalCube, axis=-1)

            trimmedCubeDeltaWeightsReordered = cubeDeltaWeightsReordered[trim_lo:trim_hi, :, :, :]
            """
            Uncertainty in weighted average is sqrt(1/sum(averagingWeights))
            Normalize weights at each wavelength bin
            """
            self.flatWeights, summedAveragingWeights = np.ma.average(trimmedWeights, axis=0,
                                                                     weights=trimmedCubeDeltaWeightsReordered ** -2.,
                                                                     returned=True)
            self.countCubesToSave = np.ma.average(trimmedCountCubesReordered, axis=0)
            self.deltaFlatWeights = np.sqrt(summedAveragingWeights ** -1.)
            self.flatFlags = self.flatWeights.mask

            wvlWeightMedians = np.ma.median(np.reshape(self.flatWeights, (-1, self.nWvlBins)), axis=0)
            self.flatWeights = np.divide(self.flatWeights, wvlWeightMedians)
            self.flatWeightsforplot = np.ma.sum(self.flatWeights, axis=-1)
            self.indexweights = iCube
            flatcal.writeWeights()
            if self.verbose:
                self.pbar_iter += 1
                self.pbar.update(self.pbar_iter)
            if self.save_plots:
                self.indexplot = iCube
                if iCube == 0 or iCube == int((self.expTime / self.intTime) / 2) or iCube == (
                        int(self.expTime / self.intTime) - 1):
                    flatcal.plotWeightsWvlSlices()
                    flatcal.plotWeightsByPixelWvlCompare()
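# A small standalone numeric sketch of the relation noted in calculateWeights:
# with inverse-variance averaging weights w_i = 1 / sigma_i**2, the weighted mean has
# uncertainty sqrt(1 / sum(w_i)). Values here are illustrative.
import numpy as np

values = np.array([1.0, 1.2, 0.9])
sigmas = np.array([0.1, 0.2, 0.1])
weights = sigmas ** -2.0
mean, weight_sum = np.average(values, weights=weights, returned=True)
print(mean, np.sqrt(1.0 / weight_sum))  # uncertainty ~ 0.067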
Beispiel #43
0
    def _partition_active_and_storage_layers(self, **kwds):
        """For each parcel in the network, determines whether it is in the
        active or storage layer during this timestep, then updates node
        elevations.

        """
        self._vol_tot = self._parcels.calc_aggregate_value(
            xr.Dataset.sum,
            "volume",
            at="link",
            filter_array=self._this_timesteps_parcels,
            fill_value=0.0,
        )

        if self._active_layer_method == "WongParker":
            # Wong et al. (2007) approximation for active layer thickness.
            # NOTE: calculated using grain size and grain density calculated for
            # the active layer grains in each link at the **previous** timestep.
            # This circumvents the need for an iterative scheme to determine grain
            # size of the active layer before determining which grains are in the
            # active layer.

            # calculate tau
            tau = (
                self._fluid_density
                * self._g
                * self._grid.at_link["channel_slope"]
                * self._grid.at_link["flow_depth"]
            )

            # calculate taustar
            # taustar = tau / (
            #     (self._rhos_mean_active - self._fluid_density)
            #     * self._g
            #     * self._d_mean_active
            # )
            taustar = np.zeros_like(tau)
            np.divide(
                tau,
                (self._rhos_mean_active - self._fluid_density)
                * self._g
                * self._d_mean_active,
                where=self._rhos_mean_active > self._fluid_density,
                out=taustar,
            )

            # calculate active layer thickness
            self._active_layer_thickness = (
                0.515
                * self._d_mean_active
                * (3.09 * (taustar - 0.0549).clip(0.0, None) ** 0.56)
            )  # in units of m

        elif self._active_layer_method == "GrainSizeDependent":
            # Set all active layers to a multiple of the link mean grain size
            self._active_layer_thickness = (
                self._d_mean_active * self._active_layer_d_multiplier
            )

        elif self._active_layer_method == "Constant10cm":
            # Set all active layers to 10 cm thickness.
            self._active_layer_thickness = 0.1 * np.ones_like(self._d_mean_active)

        # If links have no parcels, we still need to assign them an active layer
        # thickness.
        links_with_no_active_layer = np.isnan(self._active_layer_thickness)
        self._active_layer_thickness[links_with_no_active_layer] = np.mean(
            self._active_layer_thickness[links_with_no_active_layer == 0]
        )  # assign links with no parcels an average value

        if np.sum(np.isfinite(self._active_layer_thickness)) == 0:
            self._active_layer_thickness.fill(_INIT_ACTIVE_LAYER_THICKNESS)
            # handles the case of the first timestep -- assigns a modest value

        capacity = (
            self._grid.at_link["channel_width"]
            * self._grid.at_link["reach_length"]
            * self._active_layer_thickness
        )  # in units of m^3

        active_inactive = _INACTIVE * np.ones(self._num_parcels)

        current_link = self._parcels.dataset.element_id.values[:, -1].astype(int)
        time_arrival = self._parcels.dataset.time_arrival_in_link.values[:, -1]
        volumes = self._parcels.dataset.volume.values[:, -1]

        for i in range(self._grid.number_of_links):

            if (
                self._vol_tot[i] > 0
            ):  # only check capacity if parcels are in the link

                # First In Last Out.

                # Find parcels on this link.
                this_links_parcels = np.where(current_link == i)[0]

                # sort them by arrival time.
                time_arrival_sort = np.flip(
                    np.argsort(
                        time_arrival[this_links_parcels],
                        0,
                    )
                )
                parcel_id_time_sorted = this_links_parcels[time_arrival_sort]

                # calculate the cumulative volume (in sorted order).
                cumvol = np.cumsum(volumes[parcel_id_time_sorted])

                # determine which parcels are within capacity and set those to
                # active.
                make_active = parcel_id_time_sorted[cumvol <= capacity[i]]

                active_inactive[make_active] = _ACTIVE

        self._parcels.dataset.active_layer[:, -1] = active_inactive

        # set active here. reference it below in wilcock crowe
        self._active_parcel_records = (
            self._parcels.dataset.active_layer == _ACTIVE
        ) * (self._this_timesteps_parcels)

        self._vol_act = self._parcels.calc_aggregate_value(
            xr.Dataset.sum,
            "volume",
            at="link",
            filter_array=self._active_parcel_records,
            fill_value=0.0,
        )

        self._vol_stor = (self._vol_tot - self._vol_act) / (1 - self._bed_porosity)
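# A standalone numeric sketch (illustrative values, not the component's API) of the
# "WongParker" branch above: Shields stress from the depth-slope product, then the
# Wong et al. (2007)-style active-layer thickness.
fluid_density = 1000.0     # kg / m^3
sediment_density = 2650.0  # kg / m^3
g = 9.81                   # m / s^2
slope = 0.01
flow_depth = 1.0           # m
d_mean = 0.05              # m, mean active-layer grain size

tau = fluid_density * g * slope * flow_depth
taustar = tau / ((sediment_density - fluid_density) * g * d_mean)
thickness = 0.515 * d_mean * (3.09 * max(taustar - 0.0549, 0.0) ** 0.56)  # m
print(tau, taustar, thickness)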
Beispiel #44
0
def load_grid(fformat=None, fname=None, ngridx=None, ngridy=None, ngridz=None):
    """
    Produce a Grid object using either analytic definitions or the output file of an MHD simulation.

    Parameters
    ----------
    fformat : string
        The type of input grid to be used. Options are 'analytic_grid', 'FLASH', 'HYDRA', or 'LSP'.
    fname : string, optional
        The name of the file to be read in, if fformat is something other than analytic_grid.
        If the file is not in the runtime directory, fname should include file path.
    ngridx : int, optional
        Desired number of grid points along x dimension. Must be either passed as an argument
        OR defined in the input file. Can be left undefined for FLASH grids.
    ngridy : int, optional
        Desired number of grid points along y dimension. Must be either passed as an argument
        OR defined in the input file. Can be left undefined for FLASH grids.
    ngridz : int, optional
        Desired number of grid points along z dimension. Must be either passed as an argument
        OR defined in the input file. Can be left undefined for FLASH grids.

    Returns
    -------
    grid : Grid object populated with field values, grid spacings, grid offset, and grid dimensions.

    """

    start_time = timer()

    # If grid dimensions not passed as arguments, try to get them from input file
    if ngridx is None:
        try:
            ngridx = params.ngridx
        except AttributeError:
            pass
    if ngridy is None:
        try:
            ngridy = params.ngridy
        except AttributeError:
            pass
    if ngridz is None:
        try:
            ngridz = params.ngridz
        except AttributeError:
            pass

    grid = None

    if fformat is None:
        try:
            fformat = params.fformat
        except AttributeError:
            print("ERROR: No file format supplied. Aborting.")
            return None

    if fname is None and (fformat != 'analytic_grid' and fformat != 'LSP'):
        try:
            fname = params.fname
        except AttributeError:
            print("ERROR: File name required for fformat='" + fformat +
                  "'. Aborting.")

    if fformat == "analytic_grid":
        # Load user-defined analytic fields

        print("Generating " + str(ngridx) + "x" + str(ngridy) + "x" +
              str(ngridz) + " grid from analytic fields...")

        cyl_coords = False
        try:
            cyl_coords = params.cyl_coords
        except AttributeError:
            pass

        lx, ly, lz = params.lx, params.ly, params.lz
        xoffset, yoffset, zoffset = params.gridcorner

        X_coords = np.linspace(xoffset, xoffset + lx, ngridx)
        Y_coords = np.linspace(yoffset, yoffset + ly, ngridy)
        Z_coords = np.linspace(zoffset, zoffset + lz, ngridz)

        # Which grid indices to populate. Omitted indices will be left as zeros.
        # All indices are populated unless specified otherwise in the input file.
        # Leaving out indices that the user knows will have zero fields can speed things up.
        field_xrange_idx = range(ngridx)
        field_yrange_idx = range(ngridy)
        field_zrange_idx = range(ngridz)

        try:
            field_xrange_idx = params.field_xrange_idx
        except AttributeError:
            pass
        try:
            field_yrange_idx = params.field_yrange_idx
        except AttributeError:
            pass
        try:
            field_zrange_idx = params.field_zrange_idx
        except AttributeError:
            pass

        gridvals = np.zeros((ngridx, ngridy, ngridz, NUM_FIELDS))

        try:
            # If grid_nthreads is defined in input file, initialize the specified grid elements in parallel.
            # Using Python's multiprocessing library rather than mpi, so actual parallelism is currently limited
            # to the number of processors on a single node.
            p = Pool(params.grid_nthreads)
            print("Using " + str(params.grid_nthreads) +
                  " threads to initialize grid")
            # only the itertools.product coordinates are used below
            coords = itertools.product(X_coords[field_xrange_idx],
                                       Y_coords[field_yrange_idx],
                                       Z_coords[field_zrange_idx])
            initialized_vals = np.array(p.map(params.fields, coords))
            initialized_vals = initialized_vals.reshape(
                len(field_xrange_idx), len(field_yrange_idx),
                len(field_zrange_idx), NUM_FIELDS)
            idx1, idx2, idx3 = np.meshgrid(field_xrange_idx,
                                           field_yrange_idx,
                                           field_zrange_idx,
                                           indexing='ij')
            gridvals[idx1, idx2, idx3] = initialized_vals
        except AttributeError:
            print(
                "'params.grid_nthreads' not specified. Initializing grid in serial."
            )
            for i in field_xrange_idx:
                for j in field_yrange_idx:
                    for k in field_zrange_idx:
                        x = X_coords[i]
                        y = Y_coords[j]
                        z = Z_coords[k]
                        gridvals[i, j, k, :] = params.fields((x, y, z))

        gridspacings = (lx / ngridx, ly / ngridy, lz / ngridz)

        grid = Grid(gridvals,
                    gridspacings, (xoffset, yoffset, zoffset), (lx, ly, lz),
                    cyl_coords=cyl_coords)

    elif fformat == "FLASH":
        print("Loading FLASH grid...")
        try:
            import yt
        except ImportError:
            print(
                "ERROR: You need the yt module installed to load FLASH grids.")
            print(
                "See instructions at http://yt-project.org/doc/installing.html"
            )
            return None
        # Load the dataset using yt
        ds = yt.load(fname)

        # Sample the data onto a uniform grid, taking the coarsest resolution (i.e. averaging out any AMR)
        uniform_data = ds.covering_grid(level=0,
                                        left_edge=ds.domain_left_edge,
                                        dims=ds.domain_dimensions)
        magx = uniform_data[u'magx'].in_cgs().to_ndarray()
        magy = uniform_data[u'magy'].in_cgs().to_ndarray()
        magz = uniform_data[u'magz'].in_cgs().to_ndarray()

        right_edge = ds.domain_right_edge.in_cgs().to_ndarray()
        left_edge = ds.domain_left_edge.in_cgs().to_ndarray()
        gridspacings = (right_edge - left_edge) / magx.shape

        ngridx, ngridy, ngridz = magx.shape

        gridvals = np.zeros((ngridx, ngridy, ngridz, NUM_FIELDS))

        for i in range(ngridx):
            for j in range(ngridy):
                for k in range(ngridz):
                    # TODO: Calculate electric fields too
                    gridvals[i, j, k, 3:6] = [
                        magx[i, j, k], magy[i, j, k], magz[i, j, k]
                    ]

        grid = Grid(gridvals, gridspacings, left_edge, right_edge - left_edge)

    elif fformat == "HYDRA":
        print("Loading HYDRA grid...")
        try:
            import std_yorick as stdY
            import libraries.pydjs.hyddjs as DJH

        except ImportError:
            print(
                "ERROR: You need to install and configure Yorick and the hyddjs tools to load HYDRA grids."
            )
            print("Contact Dave Strozzi ([email protected]) for info.")
            return None

        H = stdY.h2open(fname)
        varL = [
            'x', 'y', 'z', 'Bx', 'By', 'Bz', 'p', 'eden', 'zb', 'ireg', 'tmat'
        ]
        stg = DJH.h2stgchk(H, varL)
        x = stg['x'][0]
        y = stg['y'][0]
        z = stg['z'][0]
        #zb = stg['zb'][0]

        xmin, xmax = 0.0, np.amax(x)
        zmin, zmax = np.amin(z), np.amax(z)

        try:
            xmin, xmax = params.hyd_xrange
        except AttributeError:
            pass
        try:
            zmin, zmax = params.hyd_zrange
        except AttributeError:
            pass

        R = np.linspace(0, xmax, ngridx)
        Theta = np.linspace(0, 2 * np.pi - 2 * np.pi / ngridy, ngridy)
        Z = np.linspace(zmin, zmax, ngridz)

        dz = Z[1] - Z[0]
        dR = R[1] - R[0]
        dTheta = Theta[1] - Theta[0]

        xz_vals = np.zeros((ngridx, ngridz, NUM_FIELDS))
        X_2D, Z_2D = np.meshgrid(R, Z, indexing='ij')

        # Calculate electric field from electron temperature and density gradient
        edens = DJH.h2interp(H, stg, 'eden', X_2D, Z_2D) * 1e6  # m^-3
        epres = DJH.h2interp(H, stg, 'p', X_2D, Z_2D) * 1e11  # Pa
        #etemp = DJH.h2interp(H,stg,'tmat',X_2D,Z_2D)*1e-3 # eV
        np.seterr(
            divide='ignore', invalid='ignore'
        )  # Temporarily ignore errors because we could have divide by zeros
        #Exz = (etemp/proton_charge)*np.nan_to_num(np.divide(np.gradient(epres,dR,dz),epres))
        Exz = -np.nan_to_num(
            np.divide(np.gradient(epres, dR / 100.0, dz / 100.0),
                      charge_SI * edens)) * E_SItoCGS
        np.seterr(divide='warn',
                  invalid='warn')  # Restore to normal error behavior

        xz_vals[:, :, 0], xz_vals[:, :, 2] = Exz
        xz_vals[:, :, 3] = DJH.h2interp(H, stg, 'Bx', X_2D, Z_2D)
        xz_vals[:, :, 4] = DJH.h2interp(H, stg, 'By', X_2D, Z_2D)
        xz_vals[:, :, 5] = DJH.h2interp(H, stg, 'Bz', X_2D, Z_2D)
        #xz_vals[:,:,7] = DJH.h2interp(H,stg,'zb',X_2D,Z_2D)

        gridvals = np.zeros((ngridx, ngridy, ngridz, NUM_FIELDS))

        for j in range(ngridy):
            theta = Theta[j]
            gridvals[:, j, :, 0] = xz_vals[:, :, 0] * np.cos(theta)
            gridvals[:, j, :, 1] = xz_vals[:, :, 0] * np.sin(theta)
            gridvals[:, j, :, 2] = xz_vals[:, :, 2]
            gridvals[:, j, :, 3] = xz_vals[:, :, 3] * np.cos(
                theta) - xz_vals[:, :, 4] * np.sin(theta)
            gridvals[:, j, :, 4] = xz_vals[:, :, 3] * np.sin(
                theta) + xz_vals[:, :, 4] * np.cos(theta)
            gridvals[:, j, :, 5] = xz_vals[:, :, 5]

        grid = Grid(gridvals, (dR, dTheta, dz), (0.0, 0.0, 0.0),
                    (2 * xmax, 2 * xmax, zmax - zmin),
                    cyl_coords=True)

    elif fformat == "LSP":
        print("Loading LSP grid...")
        try:
            import libraries.read_xdr as lsp
        except ImportError:
            print(
                "ERROR: You need the xdrlib tool to read LSP grids. Contact Drew Higginson for access."
            )
            return None

        FILE = lsp.flds(params.lsp_dirname, step=int(params.lsp_step))
        (X, Y, Z, t) = FILE.get_XYZt()

        (Ex, Name, Unit) = FILE.get_VarNameUnit(name='E', xyz='x')
        (Ey, Name, Unit) = FILE.get_VarNameUnit(name='E', xyz='y')
        (Ez, Name, Unit) = FILE.get_VarNameUnit(name='E', xyz='z')
        (Bx, Name, Unit) = FILE.get_VarNameUnit(name='B', xyz='x')
        (By, Name, Unit) = FILE.get_VarNameUnit(name='B', xyz='y')
        (Bz, Name, Unit) = FILE.get_VarNameUnit(name='B', xyz='z')

        ngridx = len(X)
        ngridy = len(Y)
        ngridz = len(Z)

        gridvals = np.zeros((ngridx, ngridy, ngridz, NUM_FIELDS))

        dx = X[1] - X[0]
        dz = Z[1] - Z[0]

        if ngridy == 1:
            # 2D grid

            # Extrude distance dy based on the wavelength of periodic features in x
            # (probably will not want to do this in general)
            By_fft = np.fft.fft(By[:, 0])
            x_periods = 1.0 / np.fft.fftfreq(len(By_fft), dx)
            dy = abs(x_periods[np.argmax(By_fft)])

            #gridvals[:,0,:,0] = Ex*1e5*E_SItoCGS
            #gridvals[:,0,:,1] = Ey*1e5*E_SItoCGS
            #gridvals[:,0,:,2] = Ez*1e5*E_SItoCGS
            gridvals[:, 0, :, 3] = Bx
            gridvals[:, 0, :, 4] = By
            gridvals[:, 0, :, 5] = Bz

            # If params.lsp_ntile is defined, tile the grid that number of times in x and z
            try:
                gridvals = np.tile(gridvals,
                                   (params.lsp_ntile, 1, params.lsp_ntile, 1))
            except AttributeError:
                pass

        else:
            # 3D grid
            # TODO: Test 3D LSP grid

            dy = Y[1] - Y[0]

            #gridvals[:,:,:,0] = Ex*1e5*E_SItoCGS
            #gridvals[:,:,:,1] = Ey*1e5*E_SItoCGS
            #gridvals[:,:,:,2] = Ez*1e5*E_SItoCGS
            gridvals[:, :, :, 3] = Bx
            gridvals[:, :, :, 4] = By
            gridvals[:, :, :, 5] = Bz

        lx = dx * len(gridvals[:, 0, 0, 0])
        ly = dy * len(gridvals[0, :, 0, 0])
        lz = dz * len(gridvals[0, 0, :, 0])

        grid = Grid(gridvals, (dx, dy, dz), (-lx / 2.0, -ly / 2.0, 0.0),
                    (lx, ly, lz))

    else:
        print('"' + fformat + '"' +
              'is not a recognized file format. Aborting.')
        return None

    end_time = timer()
    print("Time elapsed during grid generation: " +
          str(end_time - start_time) + " s")

    if grid.cyl_coords:
        print("Grid dR, dTheta, dz: " + str(grid.dx) + " cm, " + str(grid.dy) +
              " rad, " + str(grid.dz) + " cm")
        print("Grid nR, nTheta, nz: " + str(grid.nx) + ", " + str(grid.ny) +
              ", " + str(grid.nz))
        print("Grid lR, lTheta, lz: " + str(grid.lx) + " cm, " + str(grid.ly) +
              " rad, " + str(grid.lz) + " cm")
    else:
        print("Grid dx, dy, dz: " + str(grid.dx) + " cm, " + str(grid.dy) +
              " cm, " + str(grid.dz) + " cm")
        print("Grid nx, ny, nz: " + str(grid.nx) + ", " + str(grid.ny) + ", " +
              str(grid.nz))
        print("Grid lx, ly, lz: " + str(grid.lx) + " cm, " + str(grid.ly) +
              " cm, " + str(grid.lz) + " cm")

    return grid
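# Usage sketch for load_grid (the checkpoint filename is hypothetical; for FLASH
# files the grid dimensions are read from the file, so ngridx/ngridy/ngridz can be
# omitted):
grid = load_grid(fformat='FLASH', fname='lasslab_hdf5_chk_0010')
if grid is not None:
    print(grid.nx, grid.ny, grid.nz)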
Beispiel #45
0
scd_arrayNR = np.zeros((r0slen,heightlen,slen,slen))
vcd_arrayNR = np.zeros((r0slen,heightlen,slen,slen))
scd_arrayB = np.zeros((r0slen,heightlen,slen,slen))
vcd_arrayB = np.zeros((r0slen,heightlen,slen,slen))
scd_arrayNL = np.zeros((r0slen,heightlen,slen,slen))
vcd_arrayNL = np.zeros((r0slen,heightlen,slen,slen))

fbz_arrayB = np.zeros((r0slen,heightlen,slen,slen), dtype=complex)

for j in range(len(r0s)):

    # theta = 2*np.arctan(r0s[j]/(r + 1e-25))
    theta = pi * np.exp(-r/r0s[j]) / np.sqrt((2*r/r0s[j])+1)

    mz = np.cos(theta)
    mxB = np.sqrt(1-mz**2)*np.divide(-ygrid, np.sqrt(xgrid**2 + ygrid**2 + 1e-20))
    myB = np.sqrt(1-mz**2)*np.divide(xgrid, np.sqrt(xgrid**2 + ygrid**2 + 1e-20))
    mxNL = np.sqrt(1-mz**2)*np.divide(xgrid, np.sqrt(xgrid**2 + ygrid**2 + 1e-20))
    myNL = np.sqrt(1-mz**2)*np.divide(ygrid, np.sqrt(xgrid**2 + ygrid**2 + 1e-20))
    mxNR = np.sqrt(1-mz**2)*np.divide(-xgrid, np.sqrt(xgrid**2 + ygrid**2 + 1e-20))
    myNR = np.sqrt(1-mz**2)*np.divide(-ygrid, np.sqrt(xgrid**2 + ygrid**2 + 1e-20))

    for i in range(len(heights)):
        hNR, scdNR, vcdNR, _ = sfct.stray_field_calc_thick(mxNR,myNR,mz,Ms,t,simSize,heights[i])
        hB, scdB, vcdB, fhB = sfct.stray_field_calc_thick(mxB,myB,mz,Ms,t,simSize,heights[i])
        hNL, scdNL, vcdNL, _ = sfct.stray_field_calc_thick(mxNL,myNL,mz,Ms,t,simSize,heights[i])

        bz_arrayNR[j,i] = hNR[2]
        bz_arrayB[j,i] = hB[2]
        bz_arrayNL[j,i] = hNL[2]
def predict_datapoint(input_sound, input_annotation):
    '''
    loads one audio file and predicts its coutinuous valence

    '''
    sr, samples = uf.wavread(input_sound)  #load
    e_samples = uf.preemphasis(samples, sr)  #apply preemphasis
    predictors = fa.extract_features(e_samples)  #compute power law spectrum
    #normalize by training mean and std
    predictors = np.subtract(predictors, ref_mean)
    predictors = np.divide(predictors, ref_std)
    #load target
    target = pandas.read_csv(input_annotation)
    target = target.values
    target = np.reshape(target,(target.shape[0]))
    final_pred = []
    #compute prediction until last frame
    start = 0
    while start < (len(target)-SEQ_LENGTH):
        start_features = int(start * frames_per_annotation)
        stop_features = int((start + SEQ_LENGTH) * frames_per_annotation)
        predictors_temp = predictors[start_features:stop_features]
        predictors_temp = predictors_temp.reshape(1,predictors_temp.shape[0], predictors_temp.shape[1])
        #predictors_temp = predictors_temp.reshape(1,predictors_temp.shape[0], predictors_temp.shape[1], 1)

        prediction = valence_model.predict(predictors_temp)
        for i in range(prediction.shape[1]):
            final_pred.append(prediction[0][i])
        perc = int(float(start)/(len(target)-SEQ_LENGTH) * 100)
        print "Computing prediction: " + str(perc) + "%"
        start += SEQ_LENGTH
    #compute prediction for last frame
    predictors_temp = predictors[-int(SEQ_LENGTH*frames_per_annotation):]
    predictors_temp = predictors_temp.reshape(1,predictors_temp.shape[0], predictors_temp.shape[1])
    prediction = valence_model.predict(predictors_temp)
    missing_samples = len(target) - len(final_pred)
    #last_prediction = prediction[0][-missing_samples:]
    reverse_index = np.add(list(reversed(range(missing_samples))),1)
    for i in reverse_index:
        final_pred.append(prediction[0][-i])
    final_pred = np.array(final_pred)



    '''
    #compute best prediction shift
    shifted_cccs = []
    time = np.add(1,range(200))
    print "Computing best optimization parameters"
    for i in time:
        t = target.copy()
        p = final_pred.copy()
        t = t[i:]
        p = p[:-i]
        #print t.shape, p.shape

        temp_ccc = ccc2(t, p)
        shifted_cccs.append(temp_ccc)


    best_shift = np.argmax(shifted_cccs)
    best_ccc = np.max(shifted_cccs)
    if best_shift > 0:
        best_target = target[best_shift:]
        best_pred = final_pred[:-best_shift]
    else:
        best_target = target
        best_pred = final_pred
    #print 'LEN BEST PRED: ' + str(len(best_pred))

    #compute best parameters for the filter
    test_freqs = []
    test_orders = []
    test_cccs = []
    freqs = np.arange(0.01,0.95,0.01)
    orders = np.arange(1,10,1)
    print "Finding best optimization parameters..."
    for freq in freqs:
        for order in orders:
            test_signal = best_pred.copy()
            b, a = butter(order, freq, 'low')
            filtered = filtfilt(b, a, test_signal)
            temp_ccc = ccc2(best_target, filtered)
            test_freqs.append(freq)
            test_orders.append(order)
            test_cccs.append(temp_ccc)
    best_filter = np.argmax(test_cccs)
    best_order = test_orders[best_filter]
    best_freq = test_freqs[best_filter]
    '''
    #POSTPROCESSING
    #normalize between -1 and 1
    final_pred = np.multiply(final_pred, 2.)
    final_pred = np.subtract(final_pred, 1.)

    #apply f_trick
    ann_folder = '../dataset/Training/Annotations'
    # target_mean, target_std = uf.find_mean_std(ann_folder)
    train_labels = np.load('../matrices/training_2A_S_target.npy')
    target_mean = np.mean(train_labels)
    target_std = np.std(train_labels)
    final_pred = uf.f_trick(final_pred, target_mean, target_std)

    #apply butterworth filter
    b, a = butter(3, 0.01, 'low')
    final_pred = filtfilt(b, a, final_pred)

    ccc = ccc2(final_pred, target)  #compute ccc
    print "CCC = " + str(ccc)

    '''
    plt.plot(target)
    plt.plot(final_pred, alpha=0.7)
    plt.legend(['target','prediction'])
    plt.show()
    '''

    return ccc
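# ccc2 above is defined elsewhere in this project; a minimal sketch of the concordance
# correlation coefficient it presumably computes (Lin, 1989) is:
import numpy as np

def ccc_sketch(a, b):
    mean_a, mean_b = np.mean(a), np.mean(b)
    cov = np.mean((a - mean_a) * (b - mean_b))
    return 2 * cov / (np.var(a) + np.var(b) + (mean_a - mean_b) ** 2)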
Beispiel #47
0
    nii_data = nii_file.get_fdata()

    list_cubes = glob.glob("../pytorch-CycleGAN-and-pix2pix/results/" +
                           name_dataset + "/test_latest/images/*_fake*.npy")
    list_cubes.sort()
    num_cubes = len(list_cubes)
    fake_value = np.zeros(
        (nii_data.shape[0], nii_data.shape[1], nii_data.shape[2]))
    fake_count = np.zeros(
        (nii_data.shape[0], nii_data.shape[1], nii_data.shape[2]))
    for idx, path_cube in enumerate(list_cubes):
        print(idx, "/", num_cubes, path_cube)
        data_cube = np.load(path_cube)
        start_x, start_y, start_z = get_cube_idx(path_cube, edge_length)
        print(start_x, start_y, start_z, np.mean(data_cube))
        fake_value[start_x:start_x + edge_length,
                   start_y:start_y + edge_length,
                   start_z:start_z + edge_length] += data_cube[1, :, :, :]
        fake_count[start_x:start_x + edge_length,
                   start_y:start_y + edge_length,
                   start_z:start_z + edge_length] += count_cube

    # assert (not 0 in fake_count), ("Each pixel should be generated at least once.")

    pred_fake = np.divide(fake_value, fake_count)
    factor_f = np.sum(nii_file.get_fdata()) / np.sum(pred_fake)
    file_fake = nib.Nifti1Image(pred_fake * factor_f, nii_file.affine,
                                nii_file.header)
    nib.save(file_fake, "../" + nii_name + "_fake_value_" + date_tag + ".nii")
    file_fake = nib.Nifti1Image(fake_count, nii_file.affine, nii_file.header)
    nib.save(file_fake, "../" + nii_name + "_fake_count_" + date_tag + ".nii")
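# A minimal sketch of the accumulate-and-normalize scheme used above: every
# overlapping cube adds its prediction into a value volume and ones into a count
# volume, and dividing the two gives the per-voxel average. Shapes are illustrative.
import numpy as np

value = np.zeros((8, 8, 8))
count = np.zeros((8, 8, 8))
edge = 4
for sx, sy, sz in [(0, 0, 0), (2, 2, 2), (4, 4, 4)]:
    cube = np.ones((edge, edge, edge))  # stand-in for a predicted cube
    value[sx:sx + edge, sy:sy + edge, sz:sz + edge] += cube
    count[sx:sx + edge, sy:sy + edge, sz:sz + edge] += 1
average = np.divide(value, count, out=np.zeros_like(value), where=count > 0)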
Beispiel #48
0
target_path = file['external_reconstruct_general']['rlnExtReconsResult']

regularizer = AdversarialRegulariser(SAVES_PATH)

complex_data = data_real + 1j * data_im

complex_data_norm = np.mean(irfft(complex_data, scaling=NUM_VOX**2))
complex_data /= complex_data_norm
kernel /= complex_data_norm
tikhonov_kernel = kernel + TIKHONOV_REGULARIZATION

#precond = np.abs(np.divide(1, tikhonov_kernel))
#precond /= precond.max()
precond = 1
tikhonov = np.divide(complex_data, tikhonov_kernel)
reco = np.copy(tikhonov)

for k in range(150):
    STEP_SIZE = STEP_SIZE_NOMINAL / np.sqrt(1 + k / 20)

    ###############
    # DOWNSAMPLING
    reco_ds = np.copy(reco)
    reco_ds = np.fft.fftshift(reco_ds, axes=(0, 1))
    reco_ds = reco_ds[NUM_VOX // 2 - TARGET_NUM_VOX // 2:NUM_VOX // 2 +
                      TARGET_NUM_VOX // 2, NUM_VOX // 2 -
                      TARGET_NUM_VOX // 2:NUM_VOX // 2 + TARGET_NUM_VOX // 2,
                      0:(TARGET_NUM_VOX // 2) + 1]
    reco_ds = np.fft.ifftshift(reco_ds, axes=(0, 1))
    ###############
Beispiel #49
0
def running_normalize(vid_path,
                      save_folder='./norm_images/',
                      order=3,
                      dark=None,
                      return_images=False):
    #get first frame of background
    vidObj = cv2.VideoCapture(vid_path)

    success, img0 = vidObj.read()
    if not success:
        print('Video not found')
        return

    nframes = int(vidObj.get(cv2.CAP_PROP_FRAME_COUNT))
    print(nframes, 'frames')

    if dark is None:
        print('Computing dark count')
        #get dark count
        samplecount = 100  #how many frames to sample (at random)
        subtract = 5  #offset dark count
        min_cand = []
        positions = np.random.choice(
            nframes, samplecount, replace=False)  #get random frames to sample
        for i in range(samplecount):
            vidObj.set(cv2.CAP_PROP_POS_FRAMES, positions[i])
            success, image = vidObj.read()
            if success:
                min_cand.append(image.min())
            else:
                print('Something went wrong')
        dark = min(min_cand) - subtract
    print('dark count:{}'.format(dark))

    #make save folder if it doesn't exist
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    success, img0 = vidObj.read()
    if not success:
        print('Video not found')
        return
    img0 = img0[:, :, 0]

    img_return = []
    success = 1
    count = 0
    vidObj.set(cv2.CAP_PROP_POS_FRAMES, count)
    frame = vidObj.get(cv2.CAP_PROP_POS_FRAMES)

    #instantiate vmedian object
    v = vmedian(order=order, shape=img0.shape)
    v.add(img0)
    while success:
        success, image = vidObj.read()
        if success:
            image = image[:, :, 0]
            if not v.initialized:
                v.add(image)
                continue
            bg = v.get()
            numer = image - dark
            denom = np.clip((bg - dark), 1, 255)
            testimg = np.divide(numer, denom) * 100.
            testimg = np.clip(testimg, 0, 255)
            filename = os.path.dirname(save_folder) + '/image' + str(
                count).zfill(4) + '.png'
            cv2.imwrite(filename, testimg)
            testimg = np.stack((testimg, ) * 3, axis=-1)
            if return_images:
                img_return.append(testimg)
            print(filename, end='\r')
            v.add(image)
            count += 1
    return img_return
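# Usage sketch for running_normalize (the video path is hypothetical): writes
# background-normalized frames into ./norm_images/ and, here, also returns them.
frames = running_normalize('sample_video.avi', save_folder='./norm_images/',
                           order=3, return_images=True)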
Beispiel #50
0
def exp_rv(rate):
    return (-1.0 / rate) * math.log(random())
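# exp_rv draws an exponential variate by inverse-transform sampling: if
# U ~ Uniform(0, 1), then -ln(U) / rate ~ Exponential(rate), with mean 1 / rate.
# A quick illustrative check (assumes the imports used by this script):
check = np.mean([exp_rv(0.1) for _ in range(10000)])
print(check)  # should be close to 1 / 0.1 = 10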


def generate(rate, count):
    return np.vectorize(lambda _: exp_rv(rate))(np.arange(count))


samples = generate(0.1, 100)
print(samples)
bincount = len(samples)
cdf = stats.cumfreq(samples, numbins=bincount)

x = cdf.lowerlimit + np.linspace(0, cdf.binsize * cdf.cumcount.size, cdf.cumcount.size)
fig, ax = plt.subplots()
ax.plot(x, np.divide(cdf.cumcount, bincount))
ax.set(xlabel='x', ylabel='P(X ≤ x)', title='Cumulative Distribution')
ax.set_xlim([x.min(), x.max()])
ax.set_ylim([0, 1.5])
ax.grid()
fig.savefig("cdf.png")


t = np.arange(0, 100, 0.01)
y1 = np.vectorize(lambda l: 1.0 - math.exp(-l * 0.05))(t)
y2 = np.vectorize(lambda l: 1.0 - math.exp(-l * 0.1))(t)
y3 = np.vectorize(lambda l: 1.0 - math.exp(-l * 0.15))(t)

fig1, ax2 = plt.subplots()
ax2.plot(x, np.divide(cdf.cumcount, bincount), label='raw data')
ax2.plot(t, y1, label='λ=0.05')
    def GetPascalVOCMetrics(self,
                            boundingboxes,
                            IOUThreshold=0.5,
                            method=MethodAveragePrecision.EveryPointInterpolation):
        """Get the metrics used by the VOC Pascal 2012 challenge.
        Get
        Args:
            boundingboxes: Object of the class BoundingBoxes representing ground truth and detected
            bounding boxes;
            IOUThreshold: IOU threshold indicating which detections will be considered TP or FP
            (default value = 0.5);
            method (default = EveryPointInterpolation): It can be calculated as the implementation
            in the official PASCAL VOC toolkit (EveryPointInterpolation), or by applying the 11-point
            interpolation described in the paper "The PASCAL Visual Object Classes (VOC) Challenge"
            (ElevenPointInterpolation);
        Returns:
            A list of dictionaries. Each dictionary contains information and metrics of each class.
            The keys of each dictionary are:
            dict['class']: class representing the current dictionary;
            dict['precision']: array with the precision values;
            dict['recall']: array with the recall values;
            dict['AP']: average precision;
            dict['interpolated precision']: interpolated precision values;
            dict['interpolated recall']: interpolated recall values;
            dict['total positives']: total number of ground truth positives;
            dict['total TP']: total number of True Positive detections;
            dict['total FP']: total number of False Positive detections;
        """
        ret = []  # list containing metrics (precision, recall, average precision) of each class
        # List with all ground truths (Ex: [imageName,class,confidence=1, (bb coordinates XYX2Y2)])
        groundTruths = []
        # List with all detections (Ex: [imageName,class,confidence,(bb coordinates XYX2Y2)])
        detections = []
        # Get all classes
        classes = []
        # Loop through all bounding boxes and separate them into GTs and detections
        for bb in boundingboxes.getBoundingBoxes():
            # [imageName, class, confidence, (bb coordinates XYX2Y2)]
            if bb.getBBType() == BBType.GroundTruth:
                groundTruths.append([
                    bb.getImageName(),
                    bb.getClassId(), 1,
                    bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
                ])
            else:
                detections.append([
                    bb.getImageName(),
                    bb.getClassId(),
                    bb.getConfidence(),
                    bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
                ])
            # get class
            if bb.getClassId() not in classes:
                classes.append(bb.getClassId())
        classes = sorted(classes)
        # Precision x Recall is obtained individually by each class
        # Loop through by classes
        for c in classes:
            # Get only detections of class c
            dects = [d for d in detections if d[1] == c]
            # Get only ground truths of class c
            gts = [g for g in groundTruths if g[1] == c]
            npos = len(gts)
            # sort detections by decreasing confidence
            dects = sorted(dects, key=lambda conf: conf[2], reverse=True)
            TP = np.zeros(len(dects))
            FP = np.zeros(len(dects))
            # create dictionary with amount of gts for each image
            det = Counter([cc[0] for cc in gts])
            for key, val in det.items():
                det[key] = np.zeros(val)
            # print("Evaluating class: %s (%d detections)" % (str(c), len(dects)))
            # Loop through detections
            for d in range(len(dects)):
                # print('dect %s => %s' % (dects[d][0], dects[d][3],))
                # Find ground truth image
                gt = [gt for gt in gts if gt[0] == dects[d][0]]
                iouMax = sys.float_info.min
                for j in range(len(gt)):
                    # print('Ground truth gt => %s' % (gt[j][3],))
                    iou = Evaluator.iou(dects[d][3], gt[j][3])
                    if iou > iouMax:
                        iouMax = iou
                        jmax = j
                # Assign detection as true positive/don't care/false positive
                if iouMax >= IOUThreshold:
                    if det[dects[d][0]][jmax] == 0:
                        TP[d] = 1  # count as true positive
                        det[dects[d][0]][jmax] = 1  # flag as already 'seen'
                        # print("TP")
                    else:
                        FP[d] = 1  # count as false positive
                        # print("FP")
                # - A detected "cat" is overlapped with a GT "cat" with IOU >= IOUThreshold.
                else:
                    FP[d] = 1  # count as false positive
                    # print("FP")
            # compute precision, recall and average precision
            acc_FP = np.cumsum(FP)
            acc_TP = np.cumsum(TP)

            if npos == 0: 
                rec = np.ones(acc_TP.size)
            elif acc_TP.size == 0:
                rec = [0.]
            else:
                rec = acc_TP / npos

            if acc_FP.size == 0:
                prec = [1.]
            else: 
                prec = np.divide(acc_TP, (acc_FP + acc_TP))

            # Depending on the method, call the right implementation
            if method == MethodAveragePrecision.EveryPointInterpolation:
                [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)
            else:
                [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)
            # add class result in the dictionary to be returned
            r = {
                'class': c,
                'precision': prec,
                'recall': rec,
                'AP': ap,
                'interpolated precision': mpre,
                'interpolated recall': mrec,
                'total positives': npos,
                'total TP': np.sum(TP),
                'total FP': np.sum(FP)
            }
            ret.append(r)
        return ret
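# A tiny worked example (illustrative) of the precision/recall accumulation above:
# per-detection TP/FP flags sorted by confidence are cumulated, then
# precision = cumTP / (cumTP + cumFP) and recall = cumTP / npos.
import numpy as np

TP = np.array([1, 0, 1, 1, 0])  # true-positive flags in confidence order
FP = 1 - TP
npos = 4                        # number of ground-truth positives
acc_TP, acc_FP = np.cumsum(TP), np.cumsum(FP)
print(np.divide(acc_TP, acc_TP + acc_FP))  # [1.   0.5  0.667 0.75 0.6 ]
print(acc_TP / npos)                       # [0.25 0.25 0.5   0.75 0.75]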
 def _collect_grads(flat_grad):
     self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
     np.divide(buf, float(num_tasks), out=buf)
     return buf
Beispiel #53
0
    def get_matching_score(self, query_data, exemplar_data):
        # query data --
        query_shape = np.empty_like(query_data['comp_shape'])
        np.copyto(query_shape, query_data['comp_shape'])
        q_h = np.size(query_shape, 0)
        q_w = np.size(query_shape, 1)

        if (q_h <= 1) or (q_w <= 1):
            return np.array([])

        query_shape_rs = cv2.resize(query_shape, dsize=(self.WIN_SIZE, self.WIN_SIZE),
                                    interpolation=cv2.INTER_NEAREST)
        query_shape_rs[query_shape_rs == 0] = -1

        query_label = query_data['shape_label']
        query_ar = query_data['ar']

        # get the relevant labels from the exemplar data
        synth_data = np.zeros((len(exemplar_data), 8), dtype='float')
        for j in range(0, len(exemplar_data)):

            jth_label = exemplar_data[j]['shape_label']
            if(jth_label != query_label):
                continue

            jth_shape = np.empty_like(exemplar_data[j]['comp_shape'])
            np.copyto(jth_shape, exemplar_data[j]['comp_shape'])

            jth_h = np.size(jth_shape, 0)
            jth_w = np.size(jth_shape, 1)
            if (jth_h < (self.RES_F * q_h)) or (jth_w < (self.RES_F * q_w)):
                continue

            jth_ar = exemplar_data[j]['ar']
            ar12 = np.divide(query_ar, float(jth_ar) + sys.float_info.epsilon)

            if (ar12 < 0.5) or (ar12 > 2.0):
                continue

            jth_search_shape = cv2.resize(jth_shape, dsize=(self.WIN_SIZE, self.WIN_SIZE),
                                          interpolation=cv2.INTER_NEAREST)
            jth_search_shape[jth_search_shape == 0] = -1
            jth_score = np.divide((query_shape_rs.flatten() * jth_search_shape.flatten()).sum(),
                                  float(np.size(query_shape_rs, 0)*np.size(query_shape_rs, 1)) + sys.float_info.epsilon)
            synth_data[j, :] = [1, 1, np.size(query_shape_rs, 1), np.size(query_shape_rs, 0),
                                jth_score, 0, j, 1]

        synth_data = synth_data[synth_data[:, 7] == 1, :]
        if synth_data.size == 0:
            return synth_data

        # find the exmples better than SHAPE_THRESH
        val_examples = synth_data[:, 4] >= self.SHAPE_THRESH
        if(val_examples.sum() == 0):
            Is = np.argmax(synth_data[:, 4])
            score = np.tile(synth_data[Is, :], [self.TOP_K, 1])
            return score

        # if there are more examples
        score = synth_data[val_examples, :]
        Is = np.argsort(score[:, 4])
        rev_Is = Is[::-1]
        score = score[rev_Is, :]
        num_ex = np.minimum(np.size(score, 0), self.TOP_K)
        score = score[0:num_ex, :]
        if(np.size(score, 0) < self.TOP_K):
            score = np.tile(score, [self.TOP_K, 1])
            score = score[0:self.TOP_K, :]

        return score
Beispiel #54
0
 def testRealDiv(self):
   nums, divs = self.floatTestData()
   tf_result = math_ops.realdiv(nums, divs)
   np_result = np.divide(nums, divs)
   self.assertAllClose(tf_result, np_result)
Beispiel #55
0
def parse_heatpaf(oriImg, heatmap_avg, paf_avg):
    '''
    0: top of head
    1: neck
    2: right shoulder
    3: right elbow
    4: right wrist

    '''

    param = {}

    param['thre1'] = 0.2
    param['thre2'] = 0.1
    param['mid_num'] = 7

    import scipy

    #plt.imshow(heatmap_avg[:,:,2])
    from scipy.ndimage.filters import gaussian_filter
    all_peaks = []
    peak_counter = 0

    for part in range(15 - 1):
        x_list = []
        y_list = []
        map_ori = heatmap_avg[:, :, part]
        map = gaussian_filter(map_ori, sigma=3)
        #map = map_ori
        map_left = np.zeros(map.shape)
        map_left[1:, :] = map[:-1, :]
        map_right = np.zeros(map.shape)
        map_right[:-1, :] = map[1:, :]
        map_up = np.zeros(map.shape)
        map_up[:, 1:] = map[:, :-1]
        map_down = np.zeros(map.shape)
        map_down[:, :-1] = map[:, 1:]

        peaks_binary = np.logical_and.reduce(
            (map >= map_left, map >= map_right, map >= map_up, map >= map_down,
             map > param['thre1']))
        peaks = list(zip(np.nonzero(peaks_binary)[1],
                         np.nonzero(peaks_binary)[0]))  # note reverse
        peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
        id = range(peak_counter, peak_counter + len(peaks))
        peaks_with_score_and_id = [
            peaks_with_score[i] + (id[i], ) for i in range(len(id))
        ]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)
    # find connection in the specified sequence, center 29 is in the position 15
    limbSeq = [[13, 14], [14, 1], [14, 4], [1, 2], [2, 3], [4, 5], [5, 6],
               [1, 7], [7, 8], [8, 9], [4, 10], [10, 11], [11, 12]]
    # the middle joints heatmap correspondence
    mapIdx = [(i * 2, i * 2 + 1) for i in range(numoflinks)]
    assert (len(limbSeq) == numoflinks)

    connection_all = []
    special_k = []
    special_non_zero_index = []
    mid_num = param['mid_num']
    #     if debug:
    #     pydevd.settrace("127.0.0.1", True, True, 5678, True)
    for k in range(len(mapIdx)):
        score_mid = paf_avg[:, :, [x for x in mapIdx[k]]]
        candA = all_peaks[limbSeq[k][0] - 1]
        candB = all_peaks[limbSeq[k][1] - 1]
        # print(k)
        # print(candA)
        # print('---------')
        # print(candB)
        nA = len(candA)
        nB = len(candB)
        indexA, indexB = limbSeq[k]
        if (nA != 0 and nB != 0):
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    # print('vec: ',vec)
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # print('norm: ', norm)
                    vec = np.divide(vec, norm)
                    # print('normalized vec: ', vec)
                    startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                np.linspace(candA[i][1], candB[j][1], num=mid_num))
                    # print('startend: ', startend)
                    vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                                    for I in range(len(startend))])
                    # print('vec_x: ', vec_x)
                    vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                                    for I in range(len(startend))])
                    # print('vec_y: ', vec_y)
                    score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(
                        vec_y, vec[1])
                    # print(score_midpts)
                    # print('score_midpts: ', score_midpts)
                    try:
                        score_with_dist_prior = sum(score_midpts) / len(
                            score_midpts) + min(
                                0.5 * oriImg.shape[0] / norm - 1, 0)
                    except ZeroDivisionError:
                        score_with_dist_prior = -1
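                    # score_with_dist_prior is the mean PAF projection along the sampled
                    # line, minus a penalty that kicks in once the candidate limb is
                    # longer than half the image height (0.5 * oriImg.shape[0] / norm < 1)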
                    ##print('score_with_dist_prior: ', score_with_dist_prior)
                    criterion1 = len(
                        np.nonzero(score_midpts > param['thre2'])
                        [0]) > 0.8 * len(score_midpts)
                    # print('score_midpts > param["thre2"]: ', len(np.nonzero(score_midpts > param['thre2'])[0]))
                    criterion2 = score_with_dist_prior > 0

                    if criterion1 and criterion2:
                        # print('match')
                        # print(i, j, score_with_dist_prior, score_with_dist_prior+candA[i][2]+candB[j][2])
                        connection_candidate.append([
                            i, j, score_with_dist_prior,
                            score_with_dist_prior + candA[i][2] + candB[j][2]
                        ])
                    # print('--------end-----------')
            connection_candidate = sorted(connection_candidate,
                                          key=lambda x: x[2],
                                          reverse=True)
            # print('-------------connection_candidate---------------')
            # print(connection_candidate)
            # print('------------------------------------------------')
            connection = np.zeros((0, 5))
            for c in range(len(connection_candidate)):
                i, j, s = connection_candidate[c][0:3]
                if (i not in connection[:, 3] and j not in connection[:, 4]):
                    connection = np.vstack(
                        [connection, [candA[i][3], candB[j][3], s, i, j]])
                    # print('----------connection-----------')
                    # print(connection)
                    # print('-------------------------------')
                    if (len(connection) >= min(nA, nB)):
                        break

            connection_all.append(connection)
        elif (nA != 0 or nB != 0):
            special_k.append(k)
            special_non_zero_index.append(indexA if nA != 0 else indexB)
            connection_all.append([])
    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    subset = -1 * np.ones((0, 20))
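    # each subset row describes one person: the part columns hold global candidate ids
    # (-1 when a part is unassigned), column -2 accumulates the configuration score and
    # column -1 counts the assigned parts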

    candidate = np.array([item for sublist in all_peaks for item in sublist])

    for k in range(len(mapIdx)):
        if k not in special_k:
            try:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(limbSeq[k]) - 1
            except IndexError as e:
                row = -1 * np.ones(20)
                subset = np.vstack([subset, row])
                continue
            except TypeError as e:
                row = -1 * np.ones(20)
                subset = np.vstack([subset, row])
                continue
            for i in range(len(connection_all[k])):  #= 1:size(temp,1)
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):  #1:size(subset,1):
                    if subset[j][indexA] == partAs[i] or subset[j][
                            indexB] == partBs[i]:
                        subset_idx[found] = j
                        found += 1

                if found == 1:
                    j = subset_idx[0]
                    if (subset[j][indexB] != partBs[i]):
                        subset[j][indexB] = partBs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partBs[i].astype(int),
                                                   2] + connection_all[k][i][2]
                elif found == 2:  # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    print "found = 2"
                    membership = ((subset[j1] >= 0).astype(int) +
                                  (subset[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:  #merge
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)
                    else:  # as like found == 1
                        subset[j1][indexB] = partBs[i]
                        subset[j1][-1] += 1
                        subset[j1][-2] += candidate[
                            partBs[i].astype(int), 2] + connection_all[k][i][2]

                # if find no partA in the subset, create a new subset
                elif not found and k < 17:
                    row = -1 * np.ones(20)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(
                        candidate[connection_all[k][i, :2].astype(int),
                                  2]) + connection_all[k][i][2]
                    subset = np.vstack([subset, row])

    # drop subset rows with too few assigned parts or a low average part score
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)

    ## Show human part keypoints

    # visualize
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
            [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
            [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]

    # cmap = matplotlib.cm.get_cmap('hsv')

    # canvas = cv.imread(test_image) # B,G,R order
    # print len(all_peaks)
    # for i in range(15):
    #     rgba = np.array(cmap(1 - i/18. - 1./36))
    #     rgba[0:3] *= 255
    #     for j in range(len(all_peaks[i])):
    #         cv.circle(canvas, all_peaks[i][1][0:2], 4, colors[i], thickness=-1)

    # to_plot = cv.addWeighted(oriImg, 0.3, canvas, 0.7, 0)
    # plt.imshow(to_plot[:,:,[2,1,0]])
    # fig = matplotlib.pyplot.gcf()
    # fig.set_size_inches(11, 11)
    # # visualize 2
    canvas = oriImg
    img_ori = canvas.copy()

    for n in range(len(subset)):
        for i in range(numofparts - 1):
            index_head = subset[n][i]
            if index_head == -1:
                # skip part slots that were never assigned to this person
                continue
            x = int(candidate[index_head.astype(int), 0])
            y = int(candidate[index_head.astype(int), 1])
            coo = (x, y)
            cv2.circle(
                img_ori,
                coo,
                3,
                colors[n],
                thickness=3,
            )
    img_ori = img_ori[:, :, (2, 1, 0)]
    plt.imshow(img_ori)
    plt.show()
Beispiel #56
0
def getProfile(sample, excluded=None, cromwell=1):
    return np.divide(getCounts(sample, excluded, cromwell=cromwell), 4*(1+cromwell))
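# note: getCounts is not shown in this snippet; getProfile simply rescales its output by
# 1/(4*(1+cromwell)), presumably converting pseudocount-augmented counts into relative
# frequencies (an assumption, not something the source states)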
def fu1(x, dx):
    # central-difference approximation of the derivative of fu at x
    f1 = float(np.divide(fu(x + dx) - fu(x - dx), np.multiply(2, dx)))
    return f1
    #xwidth = [0.5]*len(param_list)
    xwidth = np.subtract(param_list[1:], param_list[:-1])/2.
    xwidth_left = np.append(xwidth[0], xwidth)
    xwidth_right = np.append(xwidth, xwidth[-1])
    print("xwidth : ", xwidth)
    fig = plt.figure()
    ax = fig.add_axes([0.2, 0.15, 0.75, 0.8])

    if True:
        for ml_classifier_index, ml_classifier in enumerate(ml_classifiers):
            ml_classifiers_dict[ml_classifier] = []
            for param in param_list:
                p_values = np.loadtxt(os.environ['learningml']+"/GoF/optimisation_and_evaluation/"+ml_folder_name+"/"+ml_classifier+"/"+ml_file_name.format(param,ml_classifier,ml_classifiers_bin)).tolist()
                p_values_in_CL = sum(i < (1-CL) for i in p_values)
                ml_classifiers_dict[ml_classifier].append(p_values_in_CL)
            ml_classifiers_dict[ml_classifier] = np.divide(ml_classifiers_dict[ml_classifier], 100.)

        ax.errorbar(param_list, ml_classifiers_dict['nn'], yerr=binomial_error(ml_classifiers_dict['nn']), linestyle='-', marker='s', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[0], label=r'$ANN$', clip_on=False)
        print("bdt : ", ml_classifiers_dict['bdt'])
        ax.errorbar(param_list, ml_classifiers_dict['bdt'], yerr=binomial_error(ml_classifiers_dict['bdt']), linestyle='-', marker='o', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[1], label=r'$BDT$', clip_on=False)

    for chi2_split_index, chi2_split in enumerate(chi2_splits):
        chi2_splits_dict[str(chi2_split)] = []

    chi2_best = []
    for param in param_list:
        chi2_best_dim = []
        for chi2_split_index, chi2_split in enumerate(chi2_splits):
            p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_file_name.format(param,chi2_split)).tolist()
def fu(x):
    # f(x) = x**2 + 54/x
    f = float(np.add(np.power(x, 2), np.divide(54, x)))
    return f
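# --- a small usage sketch (illustrative values): fu1 is a central-difference estimate of
# --- fu's derivative, f'(x) = 2*x - 54/x**2, which vanishes at x = 3
print fu1(3.0, 1e-5)   # -> approximately 0.0
print fu1(1.0, 1e-5)   # -> approximately -52.0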
Beispiel #60
0
def nomlize(array):
    # rescale the array so its maximum entry becomes 1 (function name kept as in the source)
    return np.divide(array, np.max(array))
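# --- a short usage sketch with made-up input
import numpy as np
print nomlize(np.array([1.0, 2.0, 4.0]))   # -> [ 0.25  0.5   1.  ]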