Example #1
def get_reconstructed(
        S0, S1, D0, D1, L,
        U0, U1, lam0, lam1, XQ,
        ):
    """
    Return the reconstructed matrix given a spectral form.
    """
    R11 = ndot(
            np.diag(np.reciprocal(np.sqrt(D0))),
            U0,
            np.diag(lam0),
            U0.T,
            np.diag(np.reciprocal(D0)),
            )
    R22 = ndot(
            np.diag(np.reciprocal(np.sqrt(D1))),
            U1,
            np.diag(lam1),
            U1.T,
            np.diag(np.reciprocal(D1)),
            )
    Q_reconstructed = build_block_2x2([
        [R11, ndot(R11, XQ) - ndot(XQ, R22)],
        [np.zeros_like(np.diag(L)), R22],
        ])
    return Q_reconstructed
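Note: `ndot` and `build_block_2x2` are not NumPy functions; they are helpers assumed by this example. A minimal sketch of what they are taken to do (chained matrix products and 2x2 block assembly):

from functools import reduce
import numpy as np

def ndot(*matrices):
    # Chained matrix product: ndot(A, B, C) == A @ B @ C (assumed helper).
    return reduce(np.dot, matrices)

def build_block_2x2(blocks):
    # Assemble a 2x2 grid of conformable blocks into one matrix (assumed helper).
    return np.vstack([np.hstack(row) for row in blocks])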
Example #2
 def torque_raise(self,F,fs,dm,lead,dc,fc,alpha=2):
     if self.type == 'Square':
         return F*dm/2.0*((lead+np.pi*fs*dm)/(np.pi*dm-fs*lead)) + F*fc*dc/2.0
     elif self.type == 'ACME':#beep beep
         return F*dm/2.0*((lead+np.pi*fs*dm*np.reciprocal(np.cos(np.radians(14.5))))/(np.pi*dm-fs*lead*np.reciprocal(np.cos(np.radians(14.5))))) + F*fc*dc/2
     else:
         return F*dm/2.0*((lead+np.pi*fs*dm*np.reciprocal(np.cos(np.radians(alpha))))/(np.pi*dm-fs*lead*np.reciprocal(np.cos(np.radians(alpha))))) + F*fc*dc/2
Example #3
def invert_sart(csc_A,csc_b,max_iterations=50,lam_start=1.0):
    # This code has been checked against Scott Silburn's Matlab code

    shap = csc_A.shape
    lam = lam_start
    colsum = (csc_A.transpose()).dot(sps.csc_matrix(np.ones(shap[0])).transpose())
    lamda = colsum
    #lamda = lamda.multiply(colsum != 0)
    np.reciprocal(lamda.data,out=lamda.data)
    np.multiply(lamda.data,lam,out=lamda.data)
    
    # Initialise output
    sol = sps.csc_matrix(np.zeros((shap[1],1))+np.exp(-1))
    # Create an array to monitor the convergence
    conv = np.zeros(max_iterations)
    
    for i in range(max_iterations):
        # Calculate sol_new = sol+lambda*(x'*(b-Ax))
        tmp = csc_b.transpose()-csc_A.dot(sol)
        tmp2 = csc_A.transpose().dot(tmp)
        #newsol = sol+tmp2*lamda
        newsol = sol+tmp2.multiply(lamda)
        # Eliminate negative values
        newsol = newsol.multiply(newsol > 0.0)
        newsol.eliminate_zeros()
        # Calculate how quickly the code is converging
        conv[i] = (sol.multiply(sol).sum()-newsol.multiply(newsol).sum())/sol.multiply(sol).sum()
        # Set the new solution to be the old solution and repeat
        sol = newsol
        
    return newsol.todense(), conv
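A hedged usage sketch for invert_sart, assuming `sps` above is `scipy.sparse`. The small system below is made up, and `csc_b` is passed as a sparse row vector because the function transposes it internally:

import numpy as np
import scipy.sparse as sps

# Tiny over-determined system A x = b with a non-negative solution.
A = sps.csc_matrix(np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]]))
b = sps.csc_matrix(np.array([[1.0, 2.0, 2.0]]))

x, conv = invert_sart(A, b, max_iterations=20)
print(x)     # dense column vector with the reconstructed solution
print(conv)  # per-iteration relative change of the squared solution norm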
Example #4
def directions(degrees):
	if degrees == 0:
		return "You are going East"
	elif degrees == 90:
		return "You are going North"
	elif degrees == 180:
		return "You are going West"
	elif degrees == 270:
		return "You are going South"
	directions = [["North ","East "],["North ","West "],["South ", "West "],["South ", "East "]]
	main_dir = int(degrees/90)
	if degrees<90 or (degrees>180 and degrees<270):
		if degrees%90>45:
			num_say = int(numpy.reciprocal(round((90-(degrees%90)))/90)/2)
			if num_say == 0:
				num_say = 1
			return "You are going " + directions[main_dir][0] * num_say + directions[main_dir][1]
		else:
			num_say = int(numpy.reciprocal(round((degrees%90))/90)/2)
			if num_say == 0:
				num_say = 1
			return "You are going " + directions[main_dir][0] + directions[main_dir][1] * num_say
	else:
		if degrees%90>45:
			num_say = int(numpy.reciprocal(round((90-(degrees%90)))/90)/2)
			if num_say == 0:
				num_say = 1
			return "You are going " + directions[main_dir][0] + directions[main_dir][1] * num_say
		else:
			num_say = int(numpy.reciprocal(round((degrees%90))/90)/2)
			if num_say == 0:
				num_say = 1
			return "You are going " + directions[main_dir][0] * num_say + directions[main_dir][1]
Example #5
 def calMPRC_Fileter_scores(self):
     '''Calculate the weight of every term per document, using the PubMed Related Citation (PRC) algorithm (Jimmy Lin and John Wilbur, 2007).
        input: idf vector, docLen vector, occurrence count matrix (n documents, all terms in the vocabulary)
        output: a matrix of PRC scores.
     '''
     la = 0.022
     mu = 0.013
     score_threshold = 0.5 # the PRC weight threshold 
     div = mu/la
     ## generate m1
     reciSqrtIdf = np.reciprocal(np.sqrt(np.log(len(self.stemmed_corpus)*2.0/(self.df+1)))) # dim 1*19, conversion verified
     expDoclen = np.exp(self.doclen*(la-mu)) # dim 10*1, conversion verified
     m1 = np.dot(expDoclen,reciSqrtIdf) # dim 10*19, product verified
     ## generate m2: matrix
     matrix = np.power(div,self.doc_term_matrix)/div
     ## Hadamard product
     matrix = np.multiply(matrix,m1)
     ## offset
     offset = np.dot(np.ones((matrix.shape[0],1)),reciSqrtIdf)
     ## matrix+offset
     matrix = matrix+offset
     ## reciprocal of recWt
     raw_prc_matrix = np.reciprocal(matrix)
     ## reset scores for the terms that do not occur
     label = (self.doc_term_matrix>0)
     self.prc_matrix = np.multiply(label, raw_prc_matrix)
     
     ## modify the score matrix, remove terms with low scores
     keyword_index_vec = np.where(self.prc_matrix.A[self.pmidList.index(self.query),:]>score_threshold)[0].tolist()
     self.prc_matrix = self.prc_matrix.A[:,keyword_index_vec]
Example #6
    def predict_proba(self, X):
        """Probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in ``self.classes_``.
        """
        # 1. / (1. + np.exp(-scores)), computed in-place
        prob = self.decision_function(X)
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if len(prob.shape) == 1:
            return np.vstack([1 - prob, prob]).T
        else:
            # OvR, not softmax, like Liblinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
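The in-place chain above (negate, exponentiate, add one, take the reciprocal) is a common way to evaluate the logistic sigmoid 1 / (1 + exp(-scores)) without temporaries; a small standalone check with made-up scores:

import numpy as np

scores = np.array([-2.0, 0.0, 3.0])

prob = scores.copy()
prob *= -1
np.exp(prob, out=prob)
prob += 1
np.reciprocal(prob, out=prob)

assert np.allclose(prob, 1.0 / (1.0 + np.exp(-scores)))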
Example #7
    def predict(self):

        m, n = self.A.shape # m observations

        convgraph = np.zeros(self.maxiter // 25)
        prevdist = 0.
        converged = False

        eps = 1e-6

        dd = np.array(self.A.sum(1))[:,0]
        D = diags(dd,0, format="csr")

        m, n = self.A.shape


        # random initialization, will initialize with K-means if told to
        H = csr_matrix(np.random.rand(m, self.k))

        EPS = csr_matrix(np.ones(H.shape) * eps)

        if self._embedding:
            # Apply eigenspace embedding K-means for initialization (Ng Weiss Jordan)

            Dz = diags(1 / (np.sqrt(dd) + eps), 0, format="csr")
            DAD = Dz.dot(self.A).dot(Dz)

            V = eigs(DAD, self.k)[1].real
            km_data = V / (np.linalg.norm(V, 2, axis=1).T * np.ones((self.k,1))).T

            km_predict = KMeans(n_clusters=self.k).fit_predict(km_data)

            indices = km_predict
            indptr = range(len(indices)+1)
            data = np.ones(len(indices))
            H = csr_matrix((data, indices, indptr))

        # Run separately for sparse and dense versions

        for i in range(self.maxiter):

            AH = self.A.dot(H)
            alpha = H.T.dot(AH)

            M1 = AH + EPS
            M2 = D.dot(H).dot(alpha) + EPS

            np.reciprocal(M2.data, out=M2.data)
            d1 = M1.multiply(M2).sqrt()

            H = H.multiply(d1)

            if i % 25 == 0:
                dist = sptrace(alpha)
                convgraph[i // 25] = dist

                diff = dist / prevdist - 1
                prevdist = dist

        return NMFResult((H.toarray(),), convgraph, prevdist)
Example #8
    def forwardTrain(self, layer):
        forwardSink = layer.forward
        '''
        The logistic neuron applies the following transformation to the
        linear combination of the incoming signals:
        1 / (1 + e^(-x))

        This is done in place through the following logic:
        0. x = x + tiny (in place)
        1. x = x * -1 (in place)
        2. x = e^x (in place)
        3. x = 1 + x + tiny (in place)
        4. x = 1/x (in place)

        In this case, x is the "forwardSink" of this layer.

        The original data is "lost", but is no longer needed (technically
        it could be recovered because each of the applied operations has
        an inverse.)
        '''
        np.add(forwardSink, tiny, out=forwardSink)
        np.multiply(forwardSink, -1, out=forwardSink)
        np.exp(forwardSink, out=forwardSink)
        np.add(forwardSink, 1 + tiny, out=forwardSink)
        np.reciprocal(forwardSink,out=forwardSink)
Example #9
 def torque_lower(self,F,fs,dm,lead,dc,fc,alpha=2):
     if self.type == 'Square':
         return F*dm/2.0*((-lead+np.pi*fs*dm)/(np.pi*dm+fs*lead)) + F*fc*dc/2.0
     elif self.type == 'ACME':#beep beep
         return F*dm/2.0*((-lead+np.pi*fs*dm*np.reciprocal(np.cos(np.radians(14.5))))/(np.pi*dm+fs*lead*np.reciprocal(np.cos(np.radians(14.5))))) + F*fc*dc/2
     else:
         return F*dm/2.0*((-lead+np.pi*fs*dm*np.reciprocal(np.cos(np.radians(alpha))))/(np.pi*dm+fs*lead*np.reciprocal(np.cos(np.radians(alpha))))) + F*fc*dc/2
Example #10
def _normalize_by_index(workspace, index):
    """
    Normalize each spectra of the specified workspace by the
    y-value at the specified index in that spectra.

    @param workspace    The workspace to normalize.
    @param index        The index of the y-value to normalize by.
    """
    number_of_histograms = workspace.getNumberHistograms()

    for idx in range(0, number_of_histograms):
        y_values = workspace.readY(idx)
        y_errors = workspace.readE(idx)

        # Avoid divide by zero
        if y_values[index] == 0.0:
            scale = np.reciprocal(1.0e-8)
        else:
            scale = np.reciprocal(y_values[index])

        # Normalise y values
        y_values_normalised = scale * y_values

        # Propagate y errors: C = A / B ; dC = sqrt( (dA/B)^2 + (A*dB/B^2)^2 )
        a = (y_errors*scale)
        b = (y_values*y_errors[index]*(scale ** 2))
        y_errors_propagated = np.sqrt(a ** 2 + b ** 2)

        workspace.setY(idx, y_values_normalised)
        workspace.setE(idx, y_errors_propagated)
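The error propagation above follows C = A / B with dC = sqrt( (dA/B)^2 + (A*dB/B^2)^2 ); the same arithmetic can be checked on plain arrays outside the workspace API (the numbers below are illustrative only):

import numpy as np

y = np.array([2.0, 4.0, 8.0])
e = np.array([0.2, 0.4, 0.8])
index = 1                                 # normalise by the second bin

scale = np.reciprocal(y[index])
y_norm = scale * y
e_norm = np.sqrt((e * scale) ** 2 + (y * e[index] * scale ** 2) ** 2)
print(y_norm)
print(e_norm)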
Example #11
def evaluation(dat , parent , p_order , trial , t_order):
    sum_dist = 0
    sum_dist_2 = 0

    # Evaluate the parent
    for i in range(len(parent) - 1):
        sum_dist += distance(dat.ix[parent[i]] , dat.ix[parent[i + 1]])

    sum_dist += distance(dat.ix[parent[len(parent) - 1]] , dat.ix[parent[0]])

    fitness_parent = np.reciprocal(sum_dist)     # shorter tours are fitter, so use the reciprocal

    # Evaluate the trial (child)
    for i in range(len(trial) - 1):
        sum_dist_2 += distance(dat.ix[trial[i]] , dat.ix[trial[i + 1]])

    sum_dist_2 += distance(dat.ix[trial[len(trial) - 1]] , dat.ix[trial[0]])

    fitness_trial = np.reciprocal(sum_dist_2)     # shorter tours are fitter, so use the reciprocal

    # Compare parent and trial
    if fitness_parent > fitness_trial:
        return fitness_parent , parent , p_order
    elif fitness_parent < fitness_trial:
        return fitness_trial , trial , t_order
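Note: `distance` and the DataFrame `dat` are assumed by this example (the tours index rows of `dat`); a minimal sketch of the distance helper for 2-D city coordinates:

import numpy as np

def distance(p1, p2):
    # Euclidean distance between two coordinate rows (assumed helper).
    return np.sqrt(np.sum((np.asarray(p1, dtype=float) - np.asarray(p2, dtype=float)) ** 2))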
Example #12
def estimate_dirichlet_param(samples, param):
    """
    Uses a Newton-Raphson scheme to estimate the parameter of a
    K-dimensional Dirichlet distribution

    :param samples: an NxK matrix of K-dimensional vectors drawn from
    a Dirichlet distribution
    :param param: the old value of the parameter. This is overwritten
    :return: a K-dimensional vector which is the new parameter estimate
    """

    N, K = samples.shape
    p = np.sum(np.log(samples), axis=0)

    for _ in range(60):
        g = -N * fns.digamma(param)
        g += N * fns.digamma(param.sum())
        g += p

        q = -N * fns.polygamma(1, param)
        np.reciprocal(q, out=q)

        z = N * fns.polygamma(1, param.sum())

        b = np.sum(g * q)
        b /= 1 / z + q.sum()

        param -= (g - b) * q

        print("%.2f" % param.mean(), end=" --> ")
    print()

    return param
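A hedged usage sketch, assuming `fns` above is `scipy.special`; the true parameter and sample size below are illustrative:

import numpy as np
from scipy import special as fns

rng = np.random.default_rng(0)
true_param = np.array([2.0, 5.0, 3.0])
samples = rng.dirichlet(true_param, size=1000)   # N x K draws

param = np.ones_like(true_param)                 # starting guess, updated in place
param = estimate_dirichlet_param(samples, param)
print(param)                                     # should approach true_param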
Example #13
 def predict_proba(self, X):
     """ estimate probability """
     prob = -(np.dot(X, self.coef_.T) + self.intercept_)
     np.exp(prob, prob)
     prob += 1
     np.reciprocal(prob, prob)
     prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
     return prob
Example #14
 def forward(self, bottom_blobs, top_blobs):
   x = bottom_blobs[0].vals
   y = top_blobs[0].vals
   # Compute y = 1 / (1 + exp(-x))
   np.multiply(x, -1.0, out=y)
   np.exp(y, out=y)
   np.add(y, 1, out=y)
   np.reciprocal(y, out=y)
Example #15
 def forward_cpu(self, inputs):
     self.retain_inputs((0, 1))
     x, gy = inputs
     gx = utils.force_array(numpy.square(x))
     gx += 1
     numpy.reciprocal(gx, out=gx)
     gx *= gy
     return gx,
Example #16
 def backward_cpu(self, x, gy):
     gx = utils.force_array(numpy.square(x[0]))
     numpy.negative(gx, out=gx)
     gx += 1
     numpy.sqrt(gx, out=gx)
     numpy.reciprocal(gx, out=gx)
     gx *= gy[0]
     return gx,
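Examples #15 and #16 look like Chainer-style CPU kernels for the derivative factors of arctan (1 / (1 + x^2)) and arcsin (1 / sqrt(1 - x^2)) respectively; a quick standalone check of the second chain with made-up values:

import numpy

x = numpy.array([0.0, 0.5, -0.5])
gy = numpy.array([1.0, 1.0, 1.0])

gx = numpy.square(x)
numpy.negative(gx, out=gx)
gx += 1
numpy.sqrt(gx, out=gx)
numpy.reciprocal(gx, out=gx)
gx *= gy

assert numpy.allclose(gx, gy / numpy.sqrt(1 - x ** 2))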
Example #17
    def learn(self, features, labels):
        """Train the logistic regression model.
        :param features: The training instances' feature vectors
        :param labels: The training instances' labels
        """

        f = self.add_bias(features)
        w = np.random.normal(size=f.shape[1])

        regularization = self.regularization
        num_epochs = self.num_epochs

        # Optimize F_alpha for a suitable alpha (according to beta)
        alpha = None if self.f_beta_sq is None else np.reciprocal(self.f_beta_sq + 1.0)

        lr_init = self.learning_rate
        lr = lr_init

        batch_size = self.batch_size

        for epoch in xrange(num_epochs):
            if batch_size is None:
                f_batch = f
                labels_batch = labels
            else:
                batch = np.random.randint(0, f.shape[0], size=batch_size)
                f_batch = f[batch]
                labels_batch = labels[batch]

            predictions = np.array(map(sigmoid, f_batch.dot(w.T).T))

            if alpha is None:
                # Optimize Accuracy
                errors = labels_batch - predictions
                dw = f_batch.T.dot(errors.T).T

            else:
                # Optimize F_beta^2 
                dp = (f_batch.T.multiply(predictions * (1.0 - predictions))).T    # Matrix of the same shape as "features"
                sum_dp = dp.sum(axis=0)                                     # Vector (len = num features)
                sum_g_dp = dp.T.dot(labels_batch.T)                         # Vector (len = num features)
                sum_p = predictions.sum()                                   # Scalar
                sum_g = labels_batch.sum()                                  # Scalar
                sum_g_p = labels_batch.dot(predictions)                     # Scalar

                # Update rule for optimizing F_beta^2 - see paper for details
                denominator = np.reciprocal(alpha*sum_p + (1 - alpha)*sum_g)

                dw = denominator * sum_g_dp - alpha * denominator * denominator * sum_g_p * sum_dp
                dw *= len(labels_batch)
                if type(dw) != np.ndarray:
                    dw = np.array(dw)[0]

            w += lr * (dw - regularization*w)

            lr = lr_init * (1.0 - (float(epoch) / num_epochs))

        self.weights = w
Example #18
 def numpy_run(self):
     """Forward propagation from batch on CPU only.
     """
     super(All2AllSigmoid, self).numpy_run()
     self.output.map_write()
     mem = self.output.mem
     # 1 / (1 + numpy.exp(-mem))
     numpy.exp(-mem, mem)
     numpy.reciprocal(mem + 1, mem)
Example #19
    def learn(self, path_sets, labels):
        """Train the weighted edge model
        :param path_sets -- the term-pairs represented as path-sets
        :param labels -- the term-pairs gold standard annotations
        """
        regularization = self.regularization
        num_epochs = self.num_epochs

        # Compute alpha = 1/(1+beta^2) - for F measure
        alpha = None if self.f_beta_sq is None else np.reciprocal(self.f_beta_sq + 1.0)

        lr_init = self.learning_rate
        lr = lr_init

        num_pairs = len(labels)

        pair_to_paths, path_features = prebuild_data_structures(path_sets)

        # Initialize the edge types weights randomly
        w = np.random.normal(size=path_features.shape[1])

        # Choose a maximum-score path for each term-pair randomly
        max_paths = np.zeros(num_pairs, dtype=np.int)
        for i, (path_set, label) in enumerate(zip(pair_to_paths, labels)):
            max_paths[i] = np.random.choice(path_set)

        for epoch in xrange(num_epochs):

            # "M-step": Use F_beta derivative to update w
            f = path_features[max_paths]

            predictions = probabilities(w, f)

            dp = (f.T * (predictions * (1.0 - predictions))).T    # Matrix of the same shape as "features"
            sum_dp = np.sum(dp, axis=0)                           # Vector (len = num features)
            sum_g_dp = labels.dot(dp)                             # Vector (len = num features)
            sum_p = np.sum(predictions)                           # Scalar
            sum_g = np.sum(labels)                                # Scalar
            sum_g_p = labels.dot(predictions)                     # Scalar

            denominator = np.reciprocal(alpha*sum_p + (1 - alpha)*sum_g)

            dw = denominator * sum_g_dp - alpha * denominator * denominator * sum_g_p * sum_dp
            dw *= num_pairs

            w += lr * (dw - regularization*w)

            # Reduce the learning rate
            lr = lr_init * (1.0 - (float(epoch) / num_epochs))

            # "E-step": Give a score to each path according to the current weights,
            # and choose the highest-scored path for each pair
            path_probabilities = probabilities(w, path_features)
            for i, (path_set, label) in enumerate(zip(pair_to_paths, labels)):
                max_paths[i] = path_set[np.argmax(path_probabilities[path_set])]

        self.weights = w
Example #20
    def _compute_lu(self):
        if self._L is None:
            self._L, self._U, self._P, self._Q, self._R, do_recip = self.umf.lu(self._A)
            if do_recip:
                with np.errstate(divide='ignore'):
                    np.reciprocal(self._R, out=self._R)

            # Conform to scipy.sparse.splu convention on permutation matrices
            self._P = self._P[self._P]
Example #21
def extract_recipcontact(project, close, stride, far=None):
    A,B,C = triplets(project, close, stride, far)
    m = metrics.ContinuousContact(contacts='all', scheme='CA')
    pA, pB, pC = map(m.prepare_trajectory, [A, B, C])

    # reciprocate the maps
    pA, pB, pC = np.reciprocal(pA), np.reciprocal(pB), np.reciprocal(pC)

    return pA - pB, pA - pC
Example #22
def generate_unit_phase_shifts(shape, float_type=float):
    """
        Computes the complex phase shift's angle due to a unit spatial shift.

        This is meant to be a helper function for ``register_mean_offsets``. It
        does this by computing a table of the angle of the phase of a unit
        shift in each dimension (with a factor of :math:`2\pi`).

        This allows arbitrary phase shifts to be made in each dimension by
        multiplying these angles by the size of the shift and adding them to the
        existing angle to induce the proper phase shift in Fourier space, which
        is equivalent to the spatial translation.

        Args:
            shape(tuple of ints):       shape of the data to be shifted.

            float_type(real type):      phase type (default numpy.float64)

        Returns:
            (numpy.ndarray):            an array containing the angle of the
                                        complex phase shift to use for each
                                        dimension.

        Examples:
            >>> generate_unit_phase_shifts((2,4))
            array([[[-0.        , -0.        , -0.        , -0.        ],
                    [-3.14159265, -3.14159265, -3.14159265, -3.14159265]],
            <BLANKLINE>
                   [[-0.        , -1.57079633, -3.14159265, -4.71238898],
                    [-0.        , -1.57079633, -3.14159265, -4.71238898]]])
    """

    # Convert to `numpy`-based type if not done already.
    float_type = numpy.dtype(float_type).type

    # Must be of type float.
    assert issubclass(float_type, numpy.floating)
    assert numpy.dtype(float_type).itemsize >= 4

    # Get the negative wave vector
    negative_wave_vector = numpy.asarray(shape, dtype=float_type)
    numpy.reciprocal(negative_wave_vector, out=negative_wave_vector)
    negative_wave_vector *= 2*numpy.pi
    numpy.negative(negative_wave_vector, out=negative_wave_vector)

    # Get the indices for each point in the selected space.
    indices = xnumpy.cartesian_product([numpy.arange(_) for _ in shape])

    # Determine the phase offset for each point in space.
    complex_angle_unit_shift = indices * negative_wave_vector
    complex_angle_unit_shift = complex_angle_unit_shift.T.copy()
    complex_angle_unit_shift = complex_angle_unit_shift.reshape(
        (len(shape),) + shape
    )

    return(complex_angle_unit_shift)
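A hedged usage sketch of the Fourier-shift idea described in the docstring, assuming the function above and its `xnumpy` dependency are importable: multiplying the FFT of an image by exp(1j * shift * angles) and inverting translates the image by that shift. The image and shift below are made up:

import numpy

image = numpy.zeros((8, 8))
image[2, 3] = 1.0
shift = (1, 2)                      # move one row down, two columns right

angles = generate_unit_phase_shifts(image.shape)
phase = numpy.exp(1j * sum(s * a for s, a in zip(shift, angles)))

shifted = numpy.fft.ifftn(numpy.fft.fftn(image) * phase).real
assert numpy.isclose(shifted[3, 5], 1.0)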
Example #23
    def fit(self, params, twotheta, data, std=None, maxfev=200):
        """
        make least squares fit with parameters supplied by the user

        Parameters
        ----------
        params :    lmfit.Parameters
            object with all parameters set as intended by the user
        twotheta :  array-like
            angular values for the fit
        data :      array-like
            experimental intensities for the fit
        std :       array-like
            standard deviation of the experimental data. If 'None' the sqrt of
            the data will be used
        maxfev:     int
            maximal number of simulations during the least squares refinement

        Returns
        -------
        lmfit.MinimizerResult
        """
        lmfit = utilities.import_lmfit('XU.PowderModel')

        def residual(pars, tt, data, weight):
            """
            residual function for lmfit Minimizer routine

            Parameters
            ----------
            pars :      lmfit.Parameters
                fit Parameters
            tt :        array-like
                array of twotheta angles
            data :      array-like
                experimental data, same shape as tt
            weight :    array-like
                fit weights (reciprocal of the experimental error bars), same shape as tt
            """
            # set parameters in this instance
            self.set_lmfit_parameters(pars)

            # run simulation
            model = self.simulate(tt)
            return (model - data) * weight

        if std is None:
            weight = numpy.reciprocal(numpy.sqrt(data))
        else:
            weight = numpy.reciprocal(std)
        weight[numpy.isinf(weight)] = 1
        self.minimizer = lmfit.Minimizer(residual, params,
                                         fcn_args=(twotheta, data, weight))
        fitres = self.minimizer.minimize(maxfev=maxfev)
        self.set_lmfit_parameters(fitres.params)
        return fitres
Example #24
def create_stratified_partition(hdf5, partition_save_prefix, train_prop=0.5, valid_prop=0.25, test_prop=0.25, tframes=1, compute_std=True, compute_pca=False):
    
    nfolds = int(np.reciprocal(test_prop))

    if np.linalg.norm(np.sum((train_prop, valid_prop, test_prop))-1)>1e-6:
        raise ValueError('train_prop + valid_prop + test_prop must add up to 1')    
    
    if np.linalg.norm(nfolds - np.reciprocal(test_prop))>1e-6:
        raise ValueError('Increase precision of test_prop')

    # extract metadata from dataset
    hdf5_file = tables.open_file(hdf5, mode='r')
    param     = hdf5_file.get_node('/', 'Param')    
    file_dict = param.file_dict[0]
    
    train_list = [[] for i in xrange(nfolds)]
    valid_list = [[] for i in xrange(nfolds)]
    test_list  = [[] for i in xrange(nfolds)]

    rng = np.random.RandomState(111)
    for key, files in file_dict.iteritems(): # for all files that share a given label
        nfiles = len(files)     
        ntest  = nfiles // nfolds
        ntrain = int(nfiles * train_prop)
        nvalid = nfiles - ntest - ntrain
        
        perm = rng.permutation(nfiles)      
        for fold in range(nfolds):
            sup         = fold*ntest + np.arange(ntest)
            test_index  = perm[sup]                     
            rest_index  = np.setdiff1d(perm, test_index)
            train_index = rest_index[:ntrain]
            valid_index = rest_index[ntrain:]
            
            train_list[fold].append([files[i] for i in train_index])
            valid_list[fold].append([files[i] for i in valid_index])
            test_list[fold].append([files[i] for i in test_index])

    # flatten lists
    for fold in xrange(nfolds):
        train_list[fold] = sum(train_list[fold],[])
        valid_list[fold] = sum(valid_list[fold],[])
        test_list[fold]  = sum(test_list[fold],[])

    for fold, (train, valid, test) in enumerate(zip(train_list, valid_list, test_list)):
        partition_save_name = os.path.splitext(partition_save_prefix)[0] + '-fold-%d_of_%d.pkl' % (fold+1, nfolds)
        
        if os.path.exists(partition_save_name):
            warnings.warn('partition file {} already exists, new file will not be created'.format(partition_save_name))
            continue
        else:       
            create_partition(hdf5, partition_save_name, train, valid, test, tframes, compute_std, compute_pca)
            print 'Created stratified partition %s' % partition_save_name
    
    hdf5_file.close()
Example #25
 def _predict_proba_lr(self, X):
     self.decision_function = self._predict
     prob = self.decision_function(X)
     prob *= -1
     np.exp(prob, prob)
     prob += 1
     np.reciprocal(prob, prob)
     if len(prob.shape) == 1:
         return np.vstack([1 - prob, prob]).T
     else:
         # OvR normalization, like LibLinear's predict_probability
         prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
         return prob
Example #26
def test_numpy_functions_change_dimensions():
    '''
    Test some numpy functions that change the dimensions of the quantity.
    '''
    unit_values = [np.array([1, 2]) * mV,
                   np.ones((3, 3)) * 2 * mV]
    for value in unit_values:
        assert_quantity(np.var(value), np.var(np.array(value)), volt ** 2)
        assert_quantity(np.square(value), np.square(np.array(value)),
                        volt ** 2)        
        assert_quantity(np.sqrt(value), np.sqrt(np.array(value)), volt ** 0.5)
        assert_quantity(np.reciprocal(value), np.reciprocal(np.array(value)),
                        1.0 / volt)
Example #27
def calc_pmi(counts, cds):
    sum_w = np.array(counts.sum(axis=1))[:, 0]
    sum_c = np.array(counts.sum(axis=0))[0, :]
    if cds != 1:
        sum_c = sum_c ** cds
    sum_total = sum_c.sum()
    sum_w = np.reciprocal(sum_w)
    sum_c = np.reciprocal(sum_c)
    
    pmi = csr_matrix(counts)
    pmi = multiply_by_rows(pmi, sum_w)
    pmi = multiply_by_columns(pmi, sum_c)
    pmi = pmi * sum_total
    return pmi
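Note: `multiply_by_rows` and `multiply_by_columns` are helpers assumed by this example; a minimal sketch that scales the rows and columns of a sparse matrix via diagonal matrices:

from scipy.sparse import spdiags

def multiply_by_rows(matrix, row_coefs):
    # Scale row i of `matrix` by row_coefs[i] (assumed helper).
    normalizer = spdiags(row_coefs, 0, len(row_coefs), len(row_coefs), format="csr")
    return normalizer.dot(matrix)

def multiply_by_columns(matrix, col_coefs):
    # Scale column j of `matrix` by col_coefs[j] (assumed helper).
    normalizer = spdiags(col_coefs, 0, len(col_coefs), len(col_coefs), format="csr")
    return matrix.dot(normalizer)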
Example #28
def sqr_diagonal_prec(A, sigma):
   """
   Return the square diagonal preconditioner.
   """

   if sigma is None: sigma = 0
   with warnings.catch_warnings():
      warnings.simplefilter("error")
      while True:
         try:
            return {'precAHA': np.diag(np.reciprocal(np.diag(A.T.conj().dot(A)) - sigma)),
                    'precAAH': np.diag(np.reciprocal(np.diag(A.dot(A.T.conj())) - sigma))}
         except RuntimeWarning:
            sigma = sigma*1.001 if sigma != 0.0 else 0.1
Example #29
 def returnLMS(self, msr):
     '''
     This function returns the LMS parameters for a given sample of measurements.
     The following is the first step of Appendix A of the LMS method for growth standards.
     '''
     mlogs = np.log(msr) # logs of the measurements
     meanOfmlogs = np.mean(mlogs)
     sdOflmlogs = np.std(mlogs)
     gMean = np.exp(meanOfmlogs)
     gSD = sdOflmlogs
     
     '''
     The following is the second step of Appendix A of the paper "LMS method for growth standards".
     '''
     aMean = np.mean(msr)
     aSD = np.divide(np.std(msr), gMean)    
     
     '''
     The following is the third step of Appendix A.
     '''
     recmsr = np.reciprocal(msr)
     recMean = np.mean(recmsr)
     hMean  = np.reciprocal(recMean) # harmonic mean
     hSD = np.multiply(np.std(recmsr), gMean) # harmonic CV or harmonic SD
     
     '''
     The following is the fourth step of Appendix A.
     '''
     print aSD, ' ' ,hSD, ' ', gSD
     A = np.log(np.divide(aSD, hSD))
     B = np.log(np.multiply(aSD, hSD) / np.power(gSD, 2))
     L  = np.multiply(-1 , np.divide(A , np.multiply(B,2)))
     errorL = np.divide(1, np.sqrt(np.multiply(len(msr), B)))
     print A , ' ', B
     '''
     The following is the fifth step of Appendix A.
     '''
     S = np.multiply(gSD, np.exp(np.divide(np.multiply(A,L), 4)))    
     
     '''
     The following is the sixth step of Appendix A of the paper "LMS method for
     growth standards".
     '''
     M = gMean + np.divide(np.multiply((aMean - hMean), L), 2) + \
         (((aMean - (2 * gMean) + hMean) * np.power(L,2))/ 2)            
     errorM = (M * S) / (np.sqrt(len(msr)))
     return (L,M,S)
         
               
Example #30
    def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                # skip true divide for ints
                if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
                    assert_almost_equal(np.reciprocal(inp2),
                                        np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                np.add(inp1, 2, out=out)
                assert_almost_equal(out, exp1 + 2, err_msg=msg)
                inp2[...] = np.ones_like(inp2)
                np.add(2, inp2, out=out)
                assert_almost_equal(out, exp1 + 2, err_msg=msg)
Example #31
 def test_reciprocal_array(self):
     assert np.all(
         np.reciprocal(np.array([1., 2., 4.]) *
                       u.m) == np.array([1., 0.5, 0.25]) / u.m)
Example #32
    def gradient_x(self, x, X_train):
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        length_scale = np.asarray(self.length_scale)

        # diff = (x - X_train) / length_scale
        # size = (n_train_samples, n_dimensions)
        diff = x - X_train
        diff /= length_scale

        # dist_sq = \sum_{i=1}^d (diff ^ 2)
        # dist = sqrt(dist_sq)
        # size = (n_train_samples,)
        dist_sq = np.sum(diff**2, axis=1)
        dist = np.sqrt(dist_sq)

        if self.nu == 0.5:
            # e = -np.exp(-dist) / dist
            # size = (n_train_samples, 1)
            scaled_exp_dist = -dist
            scaled_exp_dist = np.exp(scaled_exp_dist, scaled_exp_dist)
            scaled_exp_dist *= -1

            # grad = (e * diff) / length_scale
            # For all i in [0, D) if x_i equals y_i.
            # 1. e -> -1
            # 2. (x_i - y_i) / \sum_{j=1}^D (x_i - y_i)**2 approaches 1.
            # Hence the gradient when for all i in [0, D),
            # x_i equals y_i is -1 / length_scale[i].
            gradient = -np.ones((X_train.shape[0], x.shape[0]))
            mask = dist != 0.0
            scaled_exp_dist[mask] /= dist[mask]
            scaled_exp_dist = np.expand_dims(scaled_exp_dist, axis=1)
            gradient[mask] = scaled_exp_dist[mask] * diff[mask]
            gradient /= length_scale
            return gradient

        elif self.nu == 1.5:
            # grad(fg) = f'g + fg'
            # where f = 1 + sqrt(3) * euclidean((X - Y) / length_scale)
            # where g = exp(-sqrt(3) * euclidean((X - Y) / length_scale))
            sqrt_3_dist = sqrt(3) * dist
            f = np.expand_dims(1 + sqrt_3_dist, axis=1)

            # When all of x_i equals y_i, f equals 1.0, (1 - f) equals
            # zero, hence from below
            # f * g_grad + g * f_grad (where g_grad = -g * f_grad)
            # -f * g * f_grad + g * f_grad
            # g * f_grad * (1 - f) equals zero.
            # sqrt_3_by_dist can be set to any value since diff equals
            # zero for this corner case.
            sqrt_3_by_dist = np.zeros_like(dist)
            nzd = dist != 0.0
            sqrt_3_by_dist[nzd] = sqrt(3) / dist[nzd]
            dist_expand = np.expand_dims(sqrt_3_by_dist, axis=1)

            f_grad = diff / length_scale
            f_grad *= dist_expand

            sqrt_3_dist *= -1
            exp_sqrt_3_dist = np.exp(sqrt_3_dist, sqrt_3_dist)
            g = np.expand_dims(exp_sqrt_3_dist, axis=1)
            g_grad = -g * f_grad

            # f * g_grad + g * f_grad (where g_grad = -g * f_grad)
            f *= -1
            f += 1
            return g * f_grad * f

        elif self.nu == 2.5:
            # grad(fg) = f'g + fg'
            # where f = (1 + sqrt(5) * euclidean((X - Y) / length_scale) +
            #            5 / 3 * sqeuclidean((X - Y) / length_scale))
            # where g = exp(-sqrt(5) * euclidean((X - Y) / length_scale))
            sqrt_5_dist = sqrt(5) * dist
            f2 = (5.0 / 3.0) * dist_sq
            f2 += sqrt_5_dist
            f2 += 1
            f = np.expand_dims(f2, axis=1)

            # For i in [0, D) if x_i equals y_i
            # f = 1 and g = 1
            # Grad = f'g + fg' = f' + g'
            # f' = f_1' + f_2'
            # Also g' = -g * f1'
            # Grad = f'g - g * f1' * f
            # Grad = g * (f' - f1' * f)
            # Grad = f' - f1'
            # Grad = f2' which equals zero when x = y
            # Since for this corner case, diff equals zero,
            # dist can be set to anything.
            nzd_mask = dist != 0.0
            nzd = dist[nzd_mask]
            dist[nzd_mask] = np.reciprocal(nzd, nzd)

            dist *= sqrt(5)
            dist = np.expand_dims(dist, axis=1)
            diff /= length_scale
            f1_grad = dist * diff
            f2_grad = (10.0 / 3.0) * diff
            f_grad = f1_grad + f2_grad

            sqrt_5_dist *= -1
            g = np.exp(sqrt_5_dist, sqrt_5_dist)
            g = np.expand_dims(g, axis=1)
            g_grad = -g * f1_grad
            return f * g_grad + g * f_grad
Example #33
def chatterjeeMachlerHadi(X, y, **kwargs):
    # basic info
    options = parseKeywords(kwargs)

    # for the distances, will use absX - do this before adding intercept term
    # a column of all ones will cause problems with non full rank covariance matrices
    absX = np.absolute(X)

    # now calculate p and n
    n = absX.shape[0]
    p = absX.shape[1]

    # we treat the X matrix as a multivariate matrix with n observations and p variables
    # first need to find a basic subset free of outliers
    correctionFactor = 1 + (1.0 * (p + 1) / (n - p)) + (2.0 / (n - 1 - 3 * p))
    chi = stats.chi2(p, 0)
    alpha = 0.05
    chi2bound = correctionFactor * chi.pdf(alpha / n)
    # calculate h, this is the size of the first basic subset
    # note that this is the value h, the index of the hth element is h-1
    h = int(1.0 * (n + p + 1) / 2)  # here, only want the integer part of this
    # need to get the coordinatewise medians - this is the median of the columns
    medians = np.median(absX)
    # now compute the matrix to help calculate the distance
    A = np.zeros(shape=(p, p))
    for i in xrange(0, n):
        tmp = absX[i, :] - medians
        A += np.outer(tmp, tmp)
    A = 1.0 / (n - 1) * A

    # now calculate initial distances
    dInit = calculateDistCMH(n, absX, medians, A)

    # now get the h smallest values of d
    sortOrder = np.argsort(dInit)
    indices = sortOrder[0:h]
    means = np.average(absX[indices, :])
    covariance = np.cov(
        absX[indices],
        rowvar=False)  # observations in rows, columns are variables
    dH = calculateDistCMH(n, absX, means, covariance)

    # rearrange into n observations into order and partition into two initial subsets
    # one subset p+1, the n-p-1
    sortOrder = np.argsort(dH)
    indicesBasic = sortOrder[:p + 1]
    # there is a rank issue here, but ignore for now - natural observations will presumably be full rank
    means = np.average(absX[indicesBasic, :])
    covariance = np.cov(absX[indicesBasic], rowvar=False)
    dist = calculateDistCMH(n, absX, means, covariance)

    # create the basic subset
    r = p + 2
    increment = (h - r) / 100
    if increment < 1:
        increment = 1  # here, limiting to 100 iterations of this
    while r <= h:
        sortOrder = np.argsort(dist)
        indices = sortOrder[:r]  # indices start from zero, hence the - 1
        means = np.average(absX[indices])
        covariance = np.cov(absX[indices], rowvar=False)
        dist = calculateDistCMH(n, absX, means, covariance)
        if h - r > 0 and h - r < increment:
            r = h
        else:
            r += increment

    # now the second part = add more points and exclude outliers to basic set
    # all distances above r+1 = outliers
    #r = p + 1
    #increment = (n - 1 - r)/100
    while r < n:
        sortOrder = np.argsort(dist)
        dist2 = np.power(dist, 2)
        if dist2[sortOrder[r]] > chi2bound:
            break  # then leave, everything else is an outlier - it would be good if this could be saved somehow
        # otherwise, continue adding points
        sortOrder = np.argsort(dist)
        indices = sortOrder[:r]
        means = np.average(absX[indices])
        covariance = np.cov(absX[indices], rowvar=False)
        dist = calculateDistCMH(n, absX, means, covariance)
        if n - 1 - r > 0 and n - 1 - r < increment:
            r = n - 1
        else:
            r += increment

    # now with the Hadi distances calculated, can proceed to do the robust regression
    # normalise and manipulate Hadi distances
    dist = dist / np.max(dist)
    # for the median, use the basic subset
    # indicesBasic = sortOrder[:r]
    # distMedian = np.median(dist[indicesBasic]) # I am using on indicesBasic
    distMedian = np.median(
        dist)  # the paper suggests using the median of the complete
    tmp = np.maximum(dist, np.ones(shape=(n)) * distMedian)
    dist = np.reciprocal(tmp)
    dist2 = np.power(dist, 2)
    dist = dist2 / np.sum(dist2)

    # calculate first set of weights - this is simply dist
    weights = dist

    # now add the additional constant intercept column if required
    if options["intercept"] == True:
        # add column of ones for constant term
        X = np.hstack((np.ones(shape=(X.shape[0], 1), dtype="complex"), X))

    n = X.shape[0]
    p = X.shape[1]

    # iteratively weighted least squares
    iteration = 0
    while iteration < options["maxiter"]:
        # do the weighted least-squares
        Anew, ynew = weightLS(X, y, weights)
        paramsNew, squareResidNew, rankNew, sNew = linalg.lstsq(Anew, ynew)
        residsNew = y - np.dot(X, paramsNew)
        # check residsNew to make sure not all zeros (i.e. will happen in undetermined or equally determined system)
        if np.sum(np.absolute(residsNew)) < eps():
            # then return everything here
            return paramsNew, residsNew, weights

        residsAbs = np.absolute(residsNew)
        residsSquare = np.power(residsAbs, 2)
        residsNew = residsSquare / np.sum(residsSquare)
        residsMedian = np.median(residsAbs)

        # calculate the new weights
        tmpDenom = np.maximum(residsNew,
                              np.ones(shape=(n), dtype="float") * residsMedian)
        tmp = (1 - dist) / tmpDenom
        weightsNew = np.power(tmp, 2) / np.sum(np.power(tmp, 2))

        # increment iteration
        iteration = iteration + 1
        weights = weightsNew
        params = paramsNew

        if iteration > 1:
            # check to see whether the change is smaller than the tolerance
            changeResids = linalg.norm(residsNew -
                                       resids) / linalg.norm(residsNew)
            if changeResids < eps():
                # update resids
                resids = residsNew
                break
        # update resids
        resids = residsNew

    # at the end, return the components
    return params, resids, weights
Example #34
 def radius_from_volume(volume):
     return np.power(0.75 * volume / np.pi, np.reciprocal(3.0))
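A quick check of the formula r = (3V / (4*pi))^(1/3), calling the function as a standalone (the volume below is for a sphere of radius 2):

import numpy as np

volume = 4.0 / 3.0 * np.pi * 2.0 ** 3
assert np.isclose(radius_from_volume(volume), 2.0)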
Example #35
def gaussian_normalization(means, variances):
    variance = np.reciprocal(np.sum(np.reciprocal(np.exp(variances)), 0))
    mean = np.sum(means * np.reciprocal(np.exp(variances)), 0) * variance
    return mean, variance
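This is precision-weighted (inverse-variance) fusion of Gaussian estimates, with `variances` stored as log-variances; a small check with two equally reliable estimates (numbers illustrative):

import numpy as np

means = np.array([[1.0], [3.0]])              # two estimates of the same quantity
variances = np.log(np.array([[1.0], [1.0]]))  # log-variances, equal precisions

mean, variance = gaussian_normalization(means, variances)
assert np.isclose(mean[0], 2.0)       # equal weights give the plain average
assert np.isclose(variance[0], 0.5)   # combined variance is halved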
Example #36
    def generatePlaneGroundTruth(self,
                                 normalFilename,
                                 maskFilename,
                                 depthFilename,
                                 useGlobal=True):
        planeFilename = normalFilename.replace('norm_camera.png',
                                               'plane_global.npy')
        if os.path.exists(planeFilename):
            self.planeFilenames.append(planeFilename)
            return

        normals = np.array(PIL.Image.open(normalFilename)).astype(
            np.float) / 255 * 2 - 1
        norm = np.linalg.norm(normals, 2, 2)
        for c in xrange(3):
            normals[:, :, c] /= norm
            continue

        invalidMask = (np.array(PIL.Image.open(maskFilename)) < 128)

        sampleRatio = 3
        azimuthAngleImage = (-np.round(
            np.rad2deg(np.arctan2(normals[:, :, 1], normals[:, :, 0])) /
            sampleRatio).astype(np.int) * sampleRatio + 360) % 360

        altitudeAngleImage = (np.round(
            np.rad2deg(
                np.arctan2(
                    np.sign(-normals[:, :, 1]) * np.linalg.norm(
                        normals[:, :, :2], 2, 2), normals[:, :, 2])) /
            sampleRatio).astype(np.int) * sampleRatio + 360) % 360

        orthogonalThreshold = 5
        orthogonalAzimuthMask_1 = (
            (azimuthAngleImage - 0) < orthogonalThreshold) + (
                (360 - azimuthAngleImage) < orthogonalThreshold)
        orthogonalAzimuthMask_2 = np.abs(azimuthAngleImage -
                                         180) < orthogonalThreshold
        azimuthAngleImage[orthogonalAzimuthMask_1] = 0
        azimuthAngleImage[orthogonalAzimuthMask_2] = 180
        altitudeAngleImage[orthogonalAzimuthMask_1 +
                           orthogonalAzimuthMask_2] = 0

        orthogonalAltitudeMask_1 = (
            (altitudeAngleImage - 0) < orthogonalThreshold) + (
                (360 - altitudeAngleImage) < orthogonalThreshold)
        orthogonalAltitudeMask_2 = np.abs(altitudeAngleImage -
                                          180) < orthogonalThreshold
        altitudeAngleImage[orthogonalAltitudeMask_1] = 0
        altitudeAngleImage[orthogonalAltitudeMask_2] = 180
        azimuthAngleImage[orthogonalAltitudeMask_1 +
                          orthogonalAltitudeMask_2] = 0

        azimuthAngleImage[invalidMask] = 360
        altitudeAngleImage[invalidMask] = 360

        sampleRatio = 5
        depths = np.array(PIL.Image.open(depthFilename)).astype(
            np.float) / 1000
        focalLength = 517.97
        urange = np.arange(self.width).reshape(1, -1).repeat(
            self.height, 0) - self.width * 0.5
        vrange = np.arange(self.height).reshape(-1, 1).repeat(
            self.width, 1) - self.height * 0.5
        X = depths / focalLength * urange
        Y = depths
        Z = -depths / focalLength * vrange
        d = -(normals[:, :, 0] * X + normals[:, :, 1] * Y +
              normals[:, :, 2] * Z)
        dImage = np.round(d / (10. / 360) / sampleRatio).astype(
            np.int) * sampleRatio
        dImage[dImage < 0] = 0
        dImage[dImage > 360] = 360
        dImage[invalidMask] = 360

        valueMaps = [azimuthAngleImage, altitudeAngleImage, dImage]
        planes = []
        values_1, counts_1 = np.unique(valueMaps[0], return_counts=True)

        for index_1, value_1 in enumerate(values_1):
            if counts_1[index_1] < self.planeAreaThreshold or value_1 == 360:
                continue
            mask_1 = valueMaps[0] == value_1

            values_2, counts_2 = np.unique(valueMaps[1][mask_1],
                                           return_counts=True)
            for index_2, value_2 in enumerate(values_2):
                if counts_2[
                        index_2] < self.planeAreaThreshold or value_2 == 360:
                    continue
                mask_2 = mask_1 * (valueMaps[1] == value_2)
                values_3, counts_3 = np.unique(valueMaps[2][mask_2],
                                               return_counts=True)
                for index_3, value_3 in enumerate(values_3):
                    if counts_3[
                            index_3] < self.planeAreaThreshold or value_3 == 360:
                        continue
                    mask_3 = mask_2 * (valueMaps[2] == value_3)
                    mask_3 = ndimage.binary_erosion(mask_3).astype(
                        mask_3.dtype)
                    if mask_3.sum() < self.planeAreaThreshold:
                        continue

                    normal = np.array([
                        normals[:, :, 0][mask_3].mean(),
                        normals[:, :, 1][mask_3].mean(),
                        normals[:, :, 2][mask_3].mean()
                    ])
                    normal /= np.linalg.norm(normal, 2)
                    dPlane = (-(normal[0] * X + normal[1] * Y + normal[2] * Z)
                              )[mask_3].mean()
                    planes.append(((-normal[0] * dPlane, -normal[1] * dPlane,
                                    -normal[2] * dPlane), mask_3))
                    continue
                continue
            continue

        if False:
            planeImage = np.zeros((self.height, self.width, 3))
            for plane in planes:
                mask = plane[1]
                for c in xrange(3):
                    planeImage[:, :, c][mask] = random.randint(0, 255)
                    #planeImage[:, :, c][mask] = max(min(round((plane[0][c] + 1) / 2 * 255), 255), 0)
                    continue
                continue
            PIL.Image.fromarray(planeImage.astype(
                np.uint8)).save('test/plane.png')
            exit(1)

        #planes = [planes[0]]
        residualPlanes = []
        if True:
            for plane in planes:
                mask = skimage.measure.block_reduce(plane[1], (32, 32),
                                                    np.mean).reshape(-1)
                residualPlanes.append(np.append(plane[0], mask))
                continue
            pass
        elif useGlobal:
            residualPlaneMap = {}
            for plane in planes:
                planeParameters = np.array(plane[0])
                predefinedPlanes = self.predefinedPlanes
                diff = planeParameters.reshape(1, 3).repeat(
                    predefinedPlanes.shape[0], 0) - predefinedPlanes
                diffSum = np.linalg.norm(diff, 2, 1)
                #diffSum = np.abs(diff[:, 1])
                planeIndex = np.argmin(diffSum)

                planeArea = plane[1].sum()
                if planeIndex not in residualPlaneMap or planeArea > residualPlaneMap[
                        planeIndex][1]:
                    residualPlaneMap[planeIndex] = (diff[planeIndex].tolist(),
                                                    planeArea)
                    pass
                continue
            for planeIndex, residualPlane in residualPlaneMap.items():
                residualPlanes.append([
                    planeIndex,
                ] + residualPlane[0])
                continue
            pass
        else:
            for plane in planes:
                planeParameters = np.array(plane[0])
                mask = plane[1]
                for cell in xrange(self.width * self.width /
                                   (self.stride * self.stride)):
                    gridX = int(cell) % (self.width / self.stride)
                    gridY = int(cell) / (self.width / self.stride)
                    intersection = mask[gridY * self.stride:(gridY + 1) *
                                        self.stride,
                                        gridX * self.stride:(gridX + 1) *
                                        self.stride].sum()
                    if intersection > self.positivePlaneThreshold:
                        predefinedPlanes = self.predefinedPlanes
                        diff = planeParameters.reshape(1, 3).repeat(
                            predefinedPlanes.shape[0], 0) - predefinedPlanes
                        diffSum = np.linalg.norm(diff, 2, 1)
                        #diffSum = np.abs(diff[:, 1])
                        planeIndex = np.argmin(diffSum)
                        index = cell * self.numClusters + planeIndex
                        residualPlanes.append([
                            index,
                        ] + diff[planeIndex].tolist())
                        pass
                    continue
                continue
            pass

        residualPlanes = np.array(residualPlanes)
        #planeFilename = normalFilename.replace('norm_camera.png', 'plane_global.npy')
        np.save(planeFilename, residualPlanes)
        self.planeFilenames.append(planeFilename)

        if False:
            invalidMask += Y > 10
            X[invalidMask] = 0
            Y[invalidMask] = 3
            Z[invalidMask] = 0

            planeParametersArray = []
            for plane in planes:
                planeParametersArray.append(plane[0])
                continue
            planeParametersArray = np.array(planeParametersArray)
            planeNormals = copy.deepcopy(planeParametersArray)
            planeD = np.linalg.norm(planeNormals, 2, 1)
            for c in xrange(3):
                planeNormals[:, c] /= planeD
                continue

            normalXYZ = np.array([
                urange / focalLength,
                np.ones(urange.shape), -vrange / focalLength
            ])

            normalXYZ = np.dot(normalXYZ.transpose([1, 2, 0]),
                               planeNormals.transpose())
            normalXYZ = np.reciprocal(normalXYZ)

            XYZ = np.array([X, Y, Z])
            planeXYZ = np.zeros(XYZ.shape)
            for i in xrange(planeParametersArray.shape[0]):

                mask = planes[i][1]
                planeY = normalXYZ[:, :, i] * planeD[i]
                planeX = planeY * urange / focalLength
                planeZ = -planeY * vrange / focalLength

                planeXYZ[0][mask] = planeX[mask]
                planeXYZ[1][mask] = planeY[mask]
                planeXYZ[2][mask] = planeZ[mask]
                continue

            for c in xrange(3):
                inputImage = XYZ[c]
                cMin = inputImage.min()
                cMax = inputImage.max()
                PIL.Image.fromarray(
                    ((inputImage - cMin) / (cMax - cMin) * 255).astype(
                        np.uint8)).save('test/' + str(c) + '.png')
                reconstructed = planeXYZ[c]
                PIL.Image.fromarray(
                    ((reconstructed - cMin) / (cMax - cMin) * 255).astype(
                        np.uint8)).save('test/' + str(c) +
                                        '_reconstructed.png')
                continue

            planeImage = np.zeros((self.height, self.width, 3))
            for plane in planes:
                mask = plane[1]
                for c in xrange(3):
                    planeImage[:, :, c][mask] = random.randint(0, 255)
                    #planeImage[:, :, c][mask] = max(min(round((plane[0][c] + 1) / 2 * 255), 255), 0)
                    continue
                continue
            PIL.Image.fromarray(planeImage.astype(
                np.uint8)).save('test/plane.png')
            exit(1)
            pass

        return
Example #37
    def generatePlaneGroundTruthFitting(self,
                                        normalFilename,
                                        maskFilename,
                                        depthFilename,
                                        useGlobal=True):
        normals = np.array(PIL.Image.open(normalFilename)).astype(
            np.float) / 255 * 2 - 1

        height = self.height
        width = self.width

        norm = np.linalg.norm(normals, 2, 2)
        for c in xrange(3):
            normals[:, :, c] /= norm
            continue

        depths = np.array(PIL.Image.open(depthFilename)).astype(
            np.float) / 1000
        focalLength = 517.97
        urange = np.arange(width).reshape(1, -1).repeat(height,
                                                        0) - width * 0.5
        vrange = np.arange(height).reshape(-1, 1).repeat(width,
                                                         1) - height * 0.5
        X = depths / focalLength * urange
        Y = depths
        Z = -depths / focalLength * vrange

        invalidMask = (np.array(PIL.Image.open(maskFilename)) < 128)
        invalidMask += depths > 10

        #if outputFolder != None:
        #XYZ = np.array([X, Y, Z])
        diffNormals = np.ones((height, width)) * (-1)
        segmentationNormals = np.zeros((height, width))

        diffDepths = np.ones(depths.shape) * 1000000
        segmentationDepths = np.zeros((height, width))

        diffD = np.ones((height, width)) * 1000000
        segmentationD = np.zeros((height, width))

        ranges = np.array([
            urange / focalLength,
            np.ones(urange.shape), -vrange / focalLength
        ]).transpose([1, 2, 0])

        predefinedPlanes = self.predefinedPlanes
        for planeIndex, plane in enumerate(predefinedPlanes):
            planeD = np.linalg.norm(plane)
            planeNormal = -plane / planeD

            mask = np.dot(normals, planeNormal) > diffNormals
            diffNormals[mask] = np.dot(normals, planeNormal)[mask]
            segmentationNormals[mask] = planeIndex

            normalXYZ = np.dot(ranges, planeNormal)
            normalXYZ = np.reciprocal(normalXYZ)
            planeY = -normalXYZ * planeD

            mask = pow(planeY - depths, 2) < diffDepths
            diffDepths[mask] = pow(planeY - depths, 2)[mask]
            segmentationDepths[mask] = planeIndex

            D = pow(
                X * planeNormal[0] + Y * planeNormal[1] + Z * planeNormal[2] +
                planeD, 2)
            mask = D < diffD
            diffD[mask] = D[mask]
            segmentationD[mask] = planeIndex
            continue

        segmentation = segmentationD

        residualPlanes = []
        segmentationImage = np.zeros((self.height, self.width, 3))
        for clusterIndex in xrange(self.numClusters):
            mask = segmentation == clusterIndex
            mask = ndimage.binary_erosion(mask).astype(mask.dtype)
            if mask.sum() < self.planeAreaThreshold:
                continue
            normal = np.array([
                normals[:, :, 0][mask].mean(), normals[:, :, 1][mask].mean(),
                normals[:, :, 2][mask].mean()
            ])
            normal /= np.linalg.norm(normal, 2)
            dPlane = (
                -(normal[0] * X + normal[1] * Y + normal[2] * Z))[mask].mean()
            predefinedPlane = predefinedPlanes[clusterIndex]
            residualPlanes.append(
                (clusterIndex, -normal[0] * dPlane - predefinedPlane[0],
                 -normal[1] * dPlane - predefinedPlane[1],
                 -normal[2] * dPlane - predefinedPlane[2]))
            segmentationImage[mask] = np.random.randint(255, size=(3, ))
            continue
        PIL.Image.fromarray(segmentationImage.astype(
            np.uint8)).save('test/segmentation.png')
        exit(1)
        planeFilename = normalFilename.replace('norm_camera.png',
                                               'plane_global.npy')
        np.save(planeFilename, residualPlanes)

        return
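A note on the recurring np.dot / np.reciprocal pattern above (reconstructed from the geometry in the code, not stated explicitly): each pixel (u, v) corresponds to the camera ray r = (u/f, 1, -v/f) stored in ranges, and the 3D point at that pixel is p = Y * r. Substituting into the plane equation planeNormal · p = -planeD gives Y = -planeD / (planeNormal · r), which is exactly planeY = -np.reciprocal(np.dot(ranges, planeNormal)) * planeD in the loop above.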
Example #38
0
def csc(x):
    return np.reciprocal(sin(x))
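A minimal usage sketch (assuming sin here is numpy.sin imported into the module namespace): on float inputs np.reciprocal is elementwise 1/x, so the cosecant can be checked against the direct form.

import numpy as np

x = np.linspace(0.1, 3.0, 5)             # stay away from 0, where csc is undefined
csc_via_reciprocal = np.reciprocal(np.sin(x))
assert np.allclose(csc_via_reciprocal, 1.0 / np.sin(x))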
Example #39
0
    def __next__(self):
        # If in the fixed-subvolumes-per-epoch mode and completed, yield fake
        # data quickly.
        if all(self.fake_mask):
            inputs = collections.OrderedDict({
                'image_input':
                np.repeat(pad_dims(self.fake_block['image']),
                          CONFIG.training.num_gpus,
                          axis=0),
                'mask_input':
                np.repeat(pad_dims(self.fake_block['mask']),
                          CONFIG.training.num_gpus,
                          axis=0)
            })
            inputs['kludge'] = self.kludge
            outputs = np.repeat(pad_dims(self.fake_block['target']),
                                CONFIG.training.num_gpus,
                                axis=0)
            return (inputs, outputs)

        # Before clearing last batches, reuse them to predict mask outputs
        # for move training. Add mask outputs to regions.
        active_regions = [
            n for n, region in enumerate(self.regions) if region is not None
        ]
        if active_regions and self.kludge[
                'outputs'] is not None and self.kludge['inputs'] is not None:
            for n in active_regions:
                assert np.array_equal(self.kludge['inputs'][n, :],
                                      self.batch_image_input[n, 0, 0, :, 0])
                self.regions[n].add_mask(self.kludge['outputs'][n, :, :, :, 0],
                                         self.region_pos[n])

        self.batch_image_input = [None] * self.batch_size
        batch_mask_input = [None] * self.batch_size
        batch_mask_target = [None] * self.batch_size

        for r, region in enumerate(self.regions):
            block_data = region.get_next_block(
            ) if region is not None else None
            if block_data is None:
                if self.subv_per_epoch:
                    if region is not None:
                        metric = region.prediction_metric(
                            self.subv_metric_fn,
                            threshold=self.subv_metric_threshold)
                        self.epoch_subv_metrics.append(metric)
                        self.regions[r] = None
                    if self.epoch_subvolumes >= self.subv_per_epoch:
                        block_data = self.fake_block
                        self.fake_mask[r] = True
                while block_data is None:
                    subvolume = six.next(self.subvolumes)
                    self.epoch_subvolumes += 1
                    self.f_as[r] = subvolume.f_a()

                    self.regions[r] = Region.from_subvolume(subvolume)
                    region = self.regions[r]
                    self.epoch_move_counts.append(self.move_counts[r])
                    self.move_counts[r] = 0
                    block_data = region.get_next_block()
            else:
                self.move_counts[r] += 1

            self.batch_image_input[r] = pad_dims(block_data['image'])
            batch_mask_input[r] = pad_dims(block_data['mask'])
            batch_mask_target[r] = pad_dims(block_data['target'])
            self.region_pos[r] = block_data['position']

        self.batch_image_input = np.concatenate(self.batch_image_input)
        batch_mask_input = np.concatenate(batch_mask_input)
        batch_mask_target = np.concatenate(batch_mask_target)

        inputs = collections.OrderedDict({
            'image_input': self.batch_image_input,
            'mask_input': batch_mask_input
        })
        inputs['kludge'] = self.kludge
        # These inputs are only necessary to ensure the correct FOV is updated.
        self.kludge['inputs'] = self.batch_image_input[:, 0, 0, :, 0].copy()
        self.kludge['outputs'] = None

        if self.subv_per_epoch and self.fake_block is None:
            self.fake_block = copy.deepcopy(block_data)

        if self.f_a_bins is None:
            return (inputs, [batch_mask_target])
        else:
            f_a_inds = np.digitize(self.f_as, self.f_a_bins) - 1
            inds, counts = np.unique(f_a_inds, return_counts=True)
            if self.f_a_init:
                self.f_a_counts[inds] += counts.astype(np.int64)
                sample_weights = np.ones(self.f_as.size, dtype=np.float64)
            else:
                sample_weights = np.reciprocal(self.f_a_counts[f_a_inds],
                                               dtype=np.float64) * float(
                                                   self.f_as.size)
            return (inputs, [batch_mask_target], sample_weights)
Example #40
0
fd = fd.loc[:, fd.columns != 'Special']
#fd=fd.loc[:,fd.columns!='Weak Foot']
#fd=fd.loc[:,fd.columns!='Skill Moves']
#fd=fd.loc[:,fd.columns!='attack_Work_Rate']
#fd=fd.loc[:,fd.columns!='defense_workrate']
fd_id = fd.iloc[:, 0]
fd = fd.iloc[:, 1:]
fd_position = fd.iloc[:, 35]
fd = fd.iloc[:, :35]

#X=fd.iloc[:,0:30]
#Y=fd.iloc[:,30]
fd.iloc[:, 0] = pd.to_numeric(fd.iloc[:, 0], downcast='float')
# taking out age
#fd=fd.iloc[:,1:]
fd.iloc[:, 0] = np.reciprocal(fd.iloc[:, 0])

# not needed as of now
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
labelencoder_fd = LabelEncoder()
fd.iloc[:, 5] = labelencoder_fd.fit_transform(fd.iloc[:, 5])
fd.iloc[:, 4] = labelencoder_fd.fit_transform(fd.iloc[:, 4])

#X_id=X.iloc[:,0]
#X=X.iloc[:,1:]

from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
fd = sc.fit_transform(fd)

# PCA
Example #41
0
def cot(x):
    return np.reciprocal(tan(x))
Example #42
0
def sec(x):
    return np.reciprocal(cos(x))
Example #43
0
        if formulation.fields != "mechanics" and formulation.fields != "electro_mechanics":
            raise NotImplementedError("Explicit solver for {} is not available".format(formulation.fields))

        # GET BOUNDARY CONDITIONS INFORMATION
        self.GetBoundaryInfo(mesh, formulation, boundary_condition)

        # COMPUTE INVERSE OF LUMPED MASS MATRIX
        if formulation.fields == "electro_mechanics":
            if fem_solver.mass_type == "lumped":
                M = M.ravel()
                invM = np.zeros_like(M)
                invM[self.mechanical_dofs] = np.reciprocal(M[self.mechanical_dofs])
                # M_mech = M[self.mechanical_dofs]
                M_mech = M[self.mechanical_dofs]
            else:
                M_mech = M[self.mechanical_dofs,:][:,self.mechanical_dofs]
        else:
            if fem_solver.mass_type == "lumped":
                M = M.ravel()
                M_mech = M
                invM = np.reciprocal(M)
            else:
                M_mech = M

        # COMPUTE DAMPING MATRIX BASED ON MASS
        if fem_solver.include_physical_damping:
            raise NotImplementedError("Damping is not included in the explicit solver")
Example #44
0
 def get_inverse(a):
     inverse = np.reciprocal(a)
     return inverse
Example #45
0
def evaluatePlanes(planes, filename, outputFolder=None):
    normalFilename = filename.replace('mlt', 'norm_camera')
    normals = np.array(PIL.Image.open(normalFilename)).astype(
        np.float) / 255 * 2 - 1
    height = normals.shape[0]
    width = normals.shape[1]
    norm = np.linalg.norm(normals, 2, 2)
    for c in xrange(3):
        normals[:, :, c] /= norm
        continue

    depthFilename = filename.replace('mlt', 'depth')
    depths = np.array(PIL.Image.open(depthFilename)).astype(np.float) / 1000
    focalLength = 517.97
    urange = np.arange(width).reshape(1, -1).repeat(height, 0) - width * 0.5
    vrange = np.arange(height).reshape(-1, 1).repeat(width, 1) - height * 0.5
    X = depths / focalLength * urange
    Y = depths
    Z = -depths / focalLength * vrange
    d = -(normals[:, :, 0] * X + normals[:, :, 1] * Y + normals[:, :, 2] * Z)

    maskFilename = filename.replace('mlt', 'valid')
    invalidMask = (np.array(PIL.Image.open(maskFilename)) < 128)
    invalidMask += depths > 10

    #if outputFolder != None:
    #XYZ = np.array([X, Y, Z])
    reconstructedNormals = np.zeros(normals.shape)
    diffNormals = np.ones((height, width)) * (-1)
    segmentationNormals = np.zeros((height, width, 3))
    reconstructedDepths = np.zeros(depths.shape)
    diffDepths = np.ones(depths.shape) * 1000000
    segmentationDepths = np.zeros((height, width, 3))

    ranges = np.array(
        [urange / focalLength,
         np.ones(urange.shape), -vrange / focalLength]).transpose([1, 2, 0])

    for planeIndex, plane in enumerate(planes):
        planeD = np.linalg.norm(plane)
        planeNormal = -plane / planeD

        mask = np.dot(normals, planeNormal) > diffNormals
        reconstructedNormals[mask] = planeNormal
        diffNormals[mask] = np.dot(normals, planeNormal)[mask]
        segmentationNormals[mask] = np.random.randint(255, size=(3, ))

        normalXYZ = np.dot(ranges, planeNormal)
        normalXYZ = np.reciprocal(normalXYZ)

        planeY = -normalXYZ * planeD

        #if planeNormal[2] > 0.9:
        #print(planeD)
        #print(planeNormal)
        # minDepth = depths.min()
        # maxDepth = depths.max()
        # print(depths[300][300])
        # print(planeY[300][300])
        # print(depths[350][350])
        # print(planeY[350][350])
        # PIL.Image.fromarray((np.maximum(np.minimum((planeY - minDepth) / (maxDepth - minDepth), 1), 0) * 255).astype(np.uint8)).save(outputFolder + '/plane.png')
        # exit(1)
        #pass
        if planeIndex in [1, 4]:
            print(planeY[113][251])
            continue
        mask = pow(planeY - Y, 2) < diffDepths
        reconstructedDepths[mask] = planeY[mask]
        diffDepths[mask] = pow(planeY - Y, 2)[mask]
        segmentationDepths[mask] = np.random.randint(255, size=(3, ))
        continue

    if outputFolder != None:
        depths[invalidMask] = 0
        normals[invalidMask] = 0
        reconstructedDepths[invalidMask] = 0
        reconstructedNormals[invalidMask] = 0
        minDepth = depths.min()
        maxDepth = depths.max()
        print(minDepth)
        print(maxDepth)
        PIL.Image.fromarray(
            ((depths - minDepth) / (maxDepth - minDepth) * 255).astype(
                np.uint8)).save(outputFolder + '/depth.png')
        PIL.Image.fromarray((np.maximum(
            np.minimum(
                (reconstructedDepths - minDepth) /
                (maxDepth - minDepth), 1), 0) * 255).astype(
                    np.uint8)).save(outputFolder + '/depth_reconstructed.png')
        PIL.Image.fromarray(segmentationDepths.astype(
            np.uint8)).save(outputFolder + '/depth_segmentation.png')
        PIL.Image.fromarray(((normals + 1) / 2 * 255).astype(
            np.uint8)).save(outputFolder + '/normal.png')
        PIL.Image.fromarray(((reconstructedNormals + 1) / 2 * 255).astype(
            np.uint8)).save(outputFolder + '/normal_reconstructed.png')
        PIL.Image.fromarray(segmentationNormals.astype(
            np.uint8)).save(outputFolder + '/normal_segmentation.png')
        depthImage = ((depths - minDepth) / (maxDepth - minDepth) *
                      255).astype(np.uint8)
        #PIL.Image.fromarray((invalidMask * 255).astype(np.uint8)).save(outputFolder + '/mask.png')
        exit(1)
        pass
    return diffDepths.mean(), diffNormals.mean()
Example #46
0
def bisquareScaleWeights(r, k):
    #r = r/k
    tmp1 = 3 - 3 * np.power(r, 2) + np.power(r, 4)
    tmp2 = np.reciprocal(np.power(r, 2))
    return np.minimum(tmp1, tmp2)
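A small demonstration with hypothetical residuals (the commented-out r = r/k suggests the caller divides by the tuning constant k beforehand, which is an inference): for |r| <= 1 the polynomial branch applies, while for |r| > 1 the reciprocal branch 1/r^2 takes over and bounds the weight given to large residuals.

import numpy as np

r = np.array([0.1, 0.5, 1.0, 2.0, 5.0])    # assumed already scaled by k
weights = np.minimum(3 - 3 * np.power(r, 2) + np.power(r, 4),
                     np.reciprocal(np.power(r, 2)))
print(weights)                              # large residuals receive small weights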
Example #47
0
    def evaluatePlaneGroundTruth(self, normalFilename, maskFilename,
                                 depthFilename, planeFilename):

        normals = np.array(PIL.Image.open(normalFilename)).astype(
            np.float) / 255 * 2 - 1
        norm = np.linalg.norm(normals, 2, 2)
        for c in xrange(3):
            normals[:, :, c] /= norm
            continue

        invalidMask = (np.array(PIL.Image.open(maskFilename)) < 128)

        depths = np.array(PIL.Image.open(depthFilename)).astype(
            np.float) / 1000
        focalLength = 517.97
        urange = np.arange(self.width).reshape(1, -1).repeat(
            self.height, 0) - self.width * 0.5
        vrange = np.arange(self.height).reshape(-1, 1).repeat(
            self.width, 1) - self.height * 0.5
        X = depths / focalLength * urange
        Y = depths
        Z = -depths / focalLength * vrange
        #d = -(normals[:, :, 0] * X + normals[:, :, 1] * Y + normals[:, :, 2] * Z)

        residualPlanes = np.load(planeFilename)

        XYZ = np.array([X, Y, Z])
        planesXYZ = np.zeros(XYZ.shape)
        diffImage = np.ones(XYZ.shape) * 10000
        ranges = np.array([
            urange / focalLength,
            np.ones(urange.shape), -vrange / focalLength
        ]).transpose([1, 2, 0])

        for residualPlane in residualPlanes:
            #residualPlane[1:] = 0
            gridIndex = int(residualPlane[0]) / self.numClusters
            planeIndex = int(residualPlane[0]) % self.numClusters
            plane = self.predefinedPlanes[planeIndex] + residualPlane[1:]
            #print(plane)
            planeD = np.linalg.norm(plane)
            planeNormal = plane / planeD

            normalXYZ = np.dot(ranges, planeNormal)
            normalXYZ = np.reciprocal(normalXYZ)

            planeY = normalXYZ * planeD
            planeX = planeY * urange / focalLength
            planeZ = -planeY * vrange / focalLength

            planeXYZ = [planeX, planeY, planeZ]
            for c in xrange(3):
                mask = np.abs(planeXYZ[c] - XYZ[c]) < diffImage[c]
                planesXYZ[c][mask] = planeXYZ[c][mask]
                diffImage[c][mask] = np.abs(planeXYZ[c] - XYZ[c])[mask]
                continue
            continue

        for c in xrange(3):
            inputImage = XYZ[c]
            cMin = inputImage.min()
            cMax = inputImage.max()
            PIL.Image.fromarray(
                ((inputImage - cMin) / (cMax - cMin) * 255).astype(
                    np.uint8)).save('test/' + str(c) + '.png')
            reconstructed = planesXYZ[c]
            PIL.Image.fromarray(
                ((reconstructed - cMin) / (cMax - cMin) * 255).astype(
                    np.uint8)).save('test/' + str(c) + '_reconstructed.png')
            continue

        return
Example #48
0
import numpy as np

print('numpy arithmetic operations')

a = np.arange(9, dtype=np.float_).reshape(3,3)
print(a)
b = np.array([10, 10, 10])
print(b)
print(np.add(a, b))
print(np.subtract(a, b))
print(np.multiply(a, b))
print(np.divide(a, b))

a = np.array([0.25, 1.33, 1, 0, 100])
print(np.reciprocal(a))

print('the way Python handles integer division: for integer elements with absolute value greater than 1,')
print('the reciprocal is always 0')
b = np.array([100], dtype=int)
print(np.reciprocal(b))

a = np.array([10, 100, 1000])
print(np.power(a, 2))

b = np.array([1,2,3])
print(np.power(a, b))

a = np.array([10, 20, 30])
b = np.array([3, 5, 7])
print(np.mod(a, b))
print(np.remainder(a, b))
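As the prints above show, np.reciprocal on an integer array stays in integer arithmetic, so every element with absolute value greater than 1 maps to 0. A minimal sketch of the usual workaround, casting to float first:

import numpy as np

b = np.array([100], dtype=int)
print(np.reciprocal(b))                      # [0]    integer reciprocal truncates
print(np.reciprocal(b.astype(np.float64)))   # [0.01]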
Example #49
0
    def addPlaneInfo(self, normalFilename, maskFilename, depthFilename):
        normals = np.array(PIL.Image.open(normalFilename)).astype(
            np.float) / 255 * 2 - 1
        norm = np.linalg.norm(normals, 2, 2)
        for c in xrange(3):
            normals[:, :, c] /= norm
            continue

        invalidMask = (np.array(PIL.Image.open(maskFilename)) < 128)

        sampleRatio = 3
        azimuthAngleImage = (-np.round(
            np.rad2deg(np.arctan2(normals[:, :, 1], normals[:, :, 0])) /
            sampleRatio).astype(np.int) * sampleRatio + 360) % 360

        altitudeAngleImage = (np.round(
            np.rad2deg(
                np.arctan2(
                    np.sign(-normals[:, :, 1]) * np.linalg.norm(
                        normals[:, :, :2], 2, 2), normals[:, :, 2])) /
            sampleRatio).astype(np.int) * sampleRatio + 360) % 360

        orthogonalThreshold = 5
        orthogonalAzimuthMask_1 = (
            (azimuthAngleImage - 0) < orthogonalThreshold) + (
                (360 - azimuthAngleImage) < orthogonalThreshold)
        orthogonalAzimuthMask_2 = np.abs(azimuthAngleImage -
                                         180) < orthogonalThreshold
        azimuthAngleImage[orthogonalAzimuthMask_1] = 0
        azimuthAngleImage[orthogonalAzimuthMask_2] = 180
        altitudeAngleImage[orthogonalAzimuthMask_1 +
                           orthogonalAzimuthMask_2] = 0

        orthogonalAltitudeMask_1 = (
            (altitudeAngleImage - 0) < orthogonalThreshold) + (
                (360 - altitudeAngleImage) < orthogonalThreshold)
        orthogonalAltitudeMask_2 = np.abs(altitudeAngleImage -
                                          180) < orthogonalThreshold
        altitudeAngleImage[orthogonalAltitudeMask_1] = 0
        altitudeAngleImage[orthogonalAltitudeMask_2] = 180
        azimuthAngleImage[orthogonalAltitudeMask_1 +
                          orthogonalAltitudeMask_2] = 0

        azimuthAngleImage[invalidMask] = 360
        altitudeAngleImage[invalidMask] = 360

        sampleRatio = 5
        depths = np.array(PIL.Image.open(depthFilename)).astype(
            np.float) / 1000
        focalLength = 517.97
        urange = np.arange(self.width).reshape(1, -1).repeat(
            self.height, 0) - self.width * 0.5
        vrange = np.arange(self.height).reshape(-1, 1).repeat(
            self.width, 1) - self.height * 0.5
        X = depths / focalLength * urange
        Y = depths
        Z = -depths / focalLength * vrange
        d = -(normals[:, :, 0] * X + normals[:, :, 1] * Y +
              normals[:, :, 2] * Z)
        dImage = np.round(d / (10. / 360) / sampleRatio).astype(
            np.int) * sampleRatio
        dImage[dImage < 0] = 0
        dImage[dImage > 360] = 360
        dImage[invalidMask] = 360

        valueMaps = [azimuthAngleImage, altitudeAngleImage, dImage]
        planes = []
        values_1, counts_1 = np.unique(valueMaps[0], return_counts=True)

        self.mask = np.zeros((self.height, self.width)) == 1

        for index_1, value_1 in enumerate(values_1):
            if counts_1[index_1] < self.planeAreaThreshold or value_1 == 360:
                continue
            mask_1 = valueMaps[0] == value_1

            values_2, counts_2 = np.unique(valueMaps[1][mask_1],
                                           return_counts=True)
            for index_2, value_2 in enumerate(values_2):
                if counts_2[
                        index_2] < self.planeAreaThreshold or value_2 == 360:
                    continue
                mask_2 = mask_1 * (valueMaps[1] == value_2)
                values_3, counts_3 = np.unique(valueMaps[2][mask_2],
                                               return_counts=True)
                for index_3, value_3 in enumerate(values_3):
                    if counts_3[
                            index_3] < self.planeAreaThreshold or value_3 == 360:
                        continue
                    mask_3 = mask_2 * (valueMaps[2] == value_3)
                    mask_3 = ndimage.binary_erosion(mask_3).astype(
                        mask_3.dtype)
                    if mask_3.sum() < self.planeAreaThreshold:
                        continue

                    normal = np.array([
                        normals[:, :, 0][mask_3].mean(),
                        normals[:, :, 1][mask_3].mean(),
                        normals[:, :, 2][mask_3].mean()
                    ])
                    normal /= np.linalg.norm(normal, 2)
                    dPlane = (-(normal[0] * X + normal[1] * Y + normal[2] * Z)
                              )[mask_3].mean()

                    self.mask += mask_3

                    azimuth = np.arctan2(-normal[1], normal[0])
                    altitude = np.arctan2(
                        np.sign(-normal[1]) * np.linalg.norm(normal[:2]),
                        normal[2])
                    #planes.append(((azimuth, altitude, dPlane), mask_3))
                    planes.append(((-normal[0] * dPlane, -normal[1] * dPlane,
                                    -normal[2] * dPlane), mask_3))

                    # azimuthAngleImage = np.arctan2(-normals[:, :, 1], normals[:, :, 0])
                    # altitudeAngleImage = np.arctan2(np.sign(-normals[:, :, 1]) * np.linalg.norm(normals[:, :, :2], 2, 2), normals[:, :, 2])

                    # dImage = -(normals[:, :, 0] * X + normals[:, :, 1] * Y + normals[:, :, 2] * Z)
                    # dImage *= self.depthScaleFactor

                    #PIL.Image.fromarray(mask_1.astype(np.uint8) * 255).save('test/mask_1.png')
                    #PIL.Image.fromarray(mask_2.astype(np.uint8) * 255).save('test/mask_2.png')
                    #PIL.Image.fromarray(mask_3.astype(np.uint8) * 255).save('test/mask_3.png')
                    continue
                continue
            continue

        for plane in planes:
            planeParameters = plane[0]
            self.planeParametersArray.append(planeParameters)
            continue

        if False:
            self.planeParametersArray = np.array(self.planeParametersArray)
            planeNormals = copy.deepcopy(self.planeParametersArray)
            planeD = np.linalg.norm(planeNormals, 2, 1)
            for c in xrange(3):
                planeNormals[:, c] /= planeD
                continue

            normalXYZ = np.array([
                urange / focalLength,
                np.ones(urange.shape), -vrange / focalLength
            ])

            normalXYZ = np.dot(normalXYZ.transpose([1, 2, 0]),
                               planeNormals.transpose())
            normalXYZ = np.reciprocal(normalXYZ)

            XYZ = np.array([X, Y, Z])
            planeXYZ = np.zeros(XYZ.shape)
            for i in xrange(self.planeParametersArray.shape[0]):

                mask = planes[i][1]
                planeY = normalXYZ[:, :, i] * planeD[i]
                planeX = planeY * urange / focalLength
                planeZ = -planeY * vrange / focalLength

                planeXYZ[0][mask] = planeX[mask]
                planeXYZ[1][mask] = planeY[mask]
                planeXYZ[2][mask] = planeZ[mask]
                continue

            for c in xrange(3):
                inputImage = XYZ[c]
                cMin = inputImage.min()
                cMax = inputImage.max()
                PIL.Image.fromarray(
                    ((inputImage - cMin) / (cMax - cMin) * 255).astype(
                        np.uint8)).save('test/' + str(c) + '.png')
                reconstructed = planeXYZ[c]
                PIL.Image.fromarray(
                    ((reconstructed - cMin) / (cMax - cMin) * 255).astype(
                        np.uint8)).save('test/' + str(c) +
                                        '_reconstructed.png')
                continue

            planeImage = np.zeros((self.height, self.width, 3))
            for plane in planes:
                mask = plane[1]
                for c in xrange(3):
                    planeImage[:, :, c][mask] = random.randint(0, 255)
                    #planeImage[:, :, c][mask] = max(min(round((plane[0][c] + 1) / 2 * 255), 255), 0)
                    continue
                continue
            PIL.Image.fromarray(planeImage.astype(
                np.uint8)).save('test/plane.png')
            exit(1)
            pass
Example #50
0
def chatterjeeMachlerMod(A, y, **kwargs):
    # using the weights in chatterjeeMachler means that the min resids val is in median(resids)
    # instead, use M estimate weights with a modified residual which includes a measure of leverage
    # for this, use residuals / (1-p)^2
    # I wonder if this will have a divide by zero bug

    # now calculate p and n
    n = A.shape[0]
    p = A.shape[1]
    pnRatio = 1.0 * p / n

    # calculate the projection matrix
    q, r = linalg.qr(A)
    Pdiag = np.empty(shape=(n), dtype="float")
    for i in xrange(0, n):
        Pdiag[i] = np.absolute(np.sum(q[i, :] * np.conjugate(q[i, :]))).real
    del q, r
    Pdiag = Pdiag / (np.max(Pdiag) + 0.0000000001)
    locP = np.median(Pdiag)
    scaleP = sampleMAD(Pdiag)
    # bound = locP + 6*scaleP
    bound = locP + 6 * scaleP
    indices = np.where(Pdiag > bound)
    Pdiag[indices] = 0.99999
    leverageMeas = np.power(1.0 - Pdiag, 2)

    # weights for the first iteration
    # this is purely based on the leverage
    tmp = np.ones(shape=(n), dtype="float") * pnRatio
    tmp = np.maximum(Pdiag, tmp)
    weights = np.reciprocal(tmp)

    # get options
    options = parseKeywords(kwargs)
    #generalPrint("S-Estimate", "Using weight function = {}".format(weightFnc))
    if options["intercept"] == True:
        # add column of ones for constant term
        A = np.hstack((np.ones(shape=(A.shape[0], 1), dtype="complex"), A))

    # iteratively weighted least squares
    iteration = 0
    while iteration < options["maxiter"]:
        # do the weighted least-squares
        Anew, ynew = weightLS(A, y, weights)
        paramsNew, squareResidNew, rankNew, sNew = linalg.lstsq(Anew, ynew)
        residsNew = y - np.dot(A, paramsNew)
        # check residsNew to make sure it is not all zeros (i.e. this will happen in an underdetermined or exactly determined system)
        if np.sum(np.absolute(residsNew)) < eps():
            # then return everything here
            return paramsNew, residsNew, weights
        residsNew = residsNew / leverageMeas
        scale = sampleMAD0(residsNew)

        # standardise and calculate weights
        residsNew = residsNew / scale
        weightsNew = getRobustLocationWeights(residsNew, "huber")
        # increment iteration
        iteration = iteration + 1
        weights = weightsNew
        params = paramsNew

        if iteration > 1:
            # check to see whether the change is smaller than the tolerance
            changeResids = linalg.norm(residsNew -
                                       resids) / linalg.norm(residsNew)
            if changeResids < eps():
                # update resids
                resids = residsNew
                break
        # update resids
        resids = residsNew

    # now do the same again, but with a different function
    # do the least squares solution
    params, resids, squareResid, rank, s = olsModel(A, y)
    resids = resids / leverageMeas
    resids = resids / scale
    weights = getRobustLocationWeights(resids, "trimmedMean")
    # iteratively weighted least squares
    iteration = 0
    while iteration < options["maxiter"]:
        # do the weighted least-squares
        Anew, ynew = weightLS(A, y, weights)
        paramsNew, squareResidNew, rankNew, sNew = linalg.lstsq(Anew, ynew)
        residsNew = y - np.dot(A, paramsNew)
        # check residsNew to make sure it is not all zeros (i.e. this will happen in an underdetermined or exactly determined system)
        if np.sum(np.absolute(residsNew)) < eps():
            # then return everything here
            return paramsNew, residsNew, weights

        residsNew = residsNew / leverageMeas
        scale = sampleMAD0(residsNew)

        # standardise and calculate weights
        residsNew = residsNew / scale
        weightsNew = getRobustLocationWeights(residsNew, options["weights"])
        # increment iteration
        iteration = iteration + 1
        weights = weightsNew
        params = paramsNew

        # check to see whether the change is smaller than the tolerance
        changeResids = linalg.norm(residsNew - resids) / linalg.norm(residsNew)
        if changeResids < eps():
            # update resids
            resids = residsNew
            break
        # update resids
        resids = residsNew

    # at the end, return the components
    return params, resids, weights
Example #51
0
def compute_sensitivity_map(img):
    sensitivity_map = np.reciprocal(img)
    return sensitivity_map
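A guarded variant, as a sketch only (the zero-handling policy is an assumption, not part of the snippet above): np.reciprocal maps zero-valued pixels to inf, so a practical version may suppress the divide warning and substitute a fill value.

import numpy as np

def compute_sensitivity_map_safe(img, fill=0.0):
    img = np.asarray(img, dtype=np.float64)
    with np.errstate(divide='ignore'):       # reciprocal of 0.0 triggers a divide warning
        sens = np.reciprocal(img)
    return np.where(np.isfinite(sens), sens, fill)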
Example #52
0
from GrTiPy import *

Lambda, rho, P, pi, omega = symbols(' Lambda rho P pi omega')
d = 5
t, r, theta, varphi, psi = symbols('t r theta varphi psi')

x = np.array([t, r, theta, varphi, psi])
Lambda, rho, P, pi, omega = symbols(' Lambda rho P pi omega')
a = Function('a')(t)
phi = Function('phi')(t)

g00 = 1
g11 = -a**2
g22 = -a**2 * r**2
g33 = -a**2 * r**2 * sin(theta)**2
g44 = -a**2

g = np.array([[g00, 0, 0, 0, 0], [0, g11, 0, 0, 0], [0, 0, g22, 0, 0],
              [0, 0, 0, g33, 0], [0, 0, 0, 0, g44]])

ginverse = np.array([[np.reciprocal(g00), 0, 0, 0, 0],
                     [0, np.reciprocal(g11), 0, 0, 0],
                     [0, 0, np.reciprocal(g22), 0, 0],
                     [0, 0, 0, np.reciprocal(g33), 0],
                     [0, 0, 0, 0, np.reciprocal(g44)]])

T = np.array([[-rho, 0, 0, 0, 0], [0, P, 0, 0, 0], [0, 0, P, 0, 0],
              [0, 0, 0, P, 0], [0, 0, 0, 0, P]])

print(Scalar_Brans_Dicke_field_Equation(d, x, g, ginverse, phi, T))
Example #53
0
 def test_reciprocal_scalar(self):
     assert np.reciprocal(4. * u.m) == 0.25 / u.m
Example #54
0
    def _compute_acq_withGradients(self, X):
        """
        Computes the acquisition function and its gradient at X.

        :param X: point at which the acquisition function is evaluated. Should be a 2d array. WARNING: note that this
        assumes X is a single point and not multiple as in the previous functions.
        """
        X = np.atleast_2d(X)
        acqX = np.zeros((X.shape[0], 1))
        dacq_dX = np.zeros(X.shape)
        Z_samples = np.random.normal(size=5)
        for h in range(self.n_gp_hyps_samples):
            self.model.set_hyperparameters(h)
            inv_sqrt_varX = (self.model.posterior_variance(X))**(-0.5)
            inv_varX_noiseless = np.reciprocal(
                self.model.posterior_variance_noiseless(X))
            dvar_dX = self.model.posterior_variance_gradient(X)
            for n in range(X.shape[0]):
                x = np.atleast_2d(X[n, :])
                self.model.partial_precomputation_for_covariance(x)
                self.model.partial_precomputation_for_covariance_gradient(x)
                self.model.partial_precomputation_for_variance_conditioned_on_next_point(
                    x)
                for l in range(self.utility_support_cardinality):
                    for Z in Z_samples:
                        aux_sigma_tilde = inv_sqrt_varX[n, 0] * Z

                        # Inner function of the KG acquisition function.
                        def inner_func(D):
                            D = np.atleast_2d(D)
                            n_d = D.shape[0]
                            func_val = np.zeros((D.shape[0], 1))
                            cross_product_grid = np.vstack([
                                np.append(d, theta)
                                for theta in self.scenario_support for d in D
                            ])
                            mean = self.model.posterior_mean(
                                cross_product_grid)
                            var = self.model.posterior_variance_conditioned_on_next_point(
                                cross_product_grid)
                            mean += self.model.posterior_covariance_between_points_partially_precomputed(
                                cross_product_grid, x) * aux_sigma_tilde
                            for w in range(self.scenario_support_cardinality):
                                func_val[:, 0] += self.scenario_prob_dist[
                                    w] * self.expectation_utility.eval_func(
                                        mean[w * n_d:(w + 1) * n_d,
                                             0], var[w * n_d:(w + 1) * n_d, 0],
                                        self.utility_support[l])
                            return -func_val

                        # Inner function and its gradient of the KG acquisition function.
                        def inner_func_with_gradient(d):
                            d = np.atleast_2d(d)
                            func_val = np.zeros((1, 1))
                            func_gradient = np.zeros(d.shape)
                            cross_product_grid = np.vstack([
                                np.append(d, theta)
                                for theta in self.scenario_support
                            ])
                            mean = self.model.posterior_mean(
                                cross_product_grid)
                            var = self.model.posterior_variance_conditioned_on_next_point(
                                cross_product_grid)
                            mean += self.model.posterior_covariance_between_points_partially_precomputed(
                                cross_product_grid, x) * aux_sigma_tilde
                            # Gradient
                            mean_gradient = self.model.posterior_mean_gradient(
                                cross_product_grid)[:, :d.shape[1]]
                            var_gradient = self.model.posterior_variance_gradient_conditioned_on_next_point(
                                cross_product_grid)[:, :d.shape[1]]
                            mean_gradient += self.model.posterior_covariance_gradient_partially_precomputed(
                                cross_product_grid,
                                x)[:, 0, :d.shape[1]] * aux_sigma_tilde
                            for w in range(self.scenario_support_cardinality):
                                func_val[:, 0] += self.scenario_prob_dist[
                                    w] * self.expectation_utility.eval_func(
                                        mean[w, 0], var[w, 0],
                                        self.utility_support[l])
                                expectation_utility_gradient = self.expectation_utility.eval_gradient(
                                    mean[w, 0], var[w, 0],
                                    self.utility_support[l])
                                aux = np.vstack(
                                    (mean_gradient[w, :], var_gradient[w, :]))
                                func_gradient += self.scenario_prob_dist[
                                    w] * np.matmul(
                                        expectation_utility_gradient, aux)
                            return -func_val, -func_gradient

                        d_opt, opt_val = self.optimizer.optimize_inner_func(
                            f=inner_func, f_df=inner_func_with_gradient)
                        acqX[n, 0] -= self.utility_prob_dist[l] * opt_val
                        #
                        cross_product_grid = np.vstack([
                            np.append(d_opt, theta)
                            for theta in self.scenario_support
                        ])
                        cross_cov = self.model.posterior_covariance_between_points_partially_precomputed(
                            cross_product_grid, x)[:, 0]
                        dcross_cov_dx = self.model.posterior_covariance_gradient(
                            x, cross_product_grid)[0, :, :]
                        mean = self.model.posterior_mean(cross_product_grid)[:,
                                                                             0]
                        mean += cross_cov * aux_sigma_tilde
                        var = self.model.posterior_variance_conditioned_on_next_point(
                            cross_product_grid)[:, 0]
                        # mean gradient
                        tmp1 = (-0.5 * Z * inv_sqrt_varX[n, :]**3) * cross_cov
                        mean_gradient = aux_sigma_tilde * dcross_cov_dx + np.tensordot(
                            tmp1, dvar_dX[n, :], axes=0)
                        tmp2 = inv_varX_noiseless[n, 0] * cross_cov
                        var_gradient = np.multiply(
                            dcross_cov_dx.T, -2 * tmp2).T + np.tensordot(
                                np.square(tmp2), dvar_dX[n, :], axes=0)
                        marginal_acqu_grad = 0
                        for w in range(self.scenario_support_cardinality):
                            expectation_utility_gradient = self.expectation_utility.eval_gradient(
                                mean[w], var[w], self.utility_support[l])
                            aux = np.vstack(
                                (mean_gradient[w, :], var_gradient[w, :]))
                            marginal_acqu_grad += self.scenario_prob_dist[
                                w] * np.matmul(expectation_utility_gradient,
                                               aux)
                        dacq_dX[n, :] += self.utility_prob_dist[
                            l] * marginal_acqu_grad
        acqX /= (self.n_gp_hyps_samples * len(Z_samples))
        dacq_dX /= (self.n_gp_hyps_samples * len(Z_samples))
        acqX = (acqX - self.acq_mean) / self.acq_std
        dacq_dX /= self.acq_std
        return acqX, dacq_dX
Example #55
0
 def log_barrier_risk_parity_gradient(x):
     return np.multiply(2, np.dot(c_m, x)) - np.multiply(
         c, np.reciprocal(x))
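For context (an inference from the variable names, not stated in the snippet): if the objective is the log-barrier risk-parity function f(x) = x^T C x - c^T log(x), with c_m playing the role of the covariance C, then its gradient is grad f(x) = 2 C x - c / x (elementwise division), which is exactly what the combination of np.dot, np.multiply and np.reciprocal computes above.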
Example #56
0
 def M_step(self, X):
     self.mu = np.dot(np.transpose(X), self.p_mat)
     denominator = np.sum(self.p_mat, axis=0)
     self.mu = np.multiply(self.mu, np.reciprocal(denominator))
Example #57
0
#               cut_coords=(-34, -16), display_mode="yz")

# # Save the coefficients to a nifti file
# coef_img.to_filename('haxby_tv-l1_weights.nii')

##############################################################################
# sklearn unstructured classifiers
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.base import clone
from sklearn2nilearn import SklearnEstimatorWrapper

high = decoder.alpha_grids_.max()
low = high * decoder.eps
C = np.reciprocal(np.logspace(np.log10(high), np.log10(low), decoder.n_alphas))
masker = clone(decoder.masker_)
for estimator, params_grid in zip(
    [SVC(kernel='linear'),
     RidgeClassifier(),
     LogisticRegression()],
    [dict(C=C), dict(alpha=np.reciprocal(C)), dict(C=C)]):
    decoder_name = estimator.__class__.__name__.lower()
    decoder = SklearnEstimatorWrapper(GridSearchCV,
                                      masker=masker,
                                      estimator=estimator,
                                      param_grid=params_grid,
                                      n_jobs=2)
    decoder.fit(X_train, y_train)
    y_pred = decoder.predict(X_test)
Example #58
0
def chatterjeeMachler(A, y, **kwargs):
    # get options
    options = parseKeywords(kwargs)
    #generalPrint("S-Estimate", "Using weight function = {}".format(weightFnc))
    if options["intercept"] == True:
        # add column of ones for constant term
        A = np.hstack((np.ones(shape=(A.shape[0], 1), dtype="complex"), A))

    # now calculate p and n
    n = A.shape[0]
    p = A.shape[1]
    pnRatio = 1.0 * p / n

    # calculate the projection matrix
    q, r = linalg.qr(A)
    Pdiag = np.empty(shape=(n), dtype="float")
    for i in xrange(0, n):
        Pdiag[i] = np.absolute(np.sum(q[i, :] * np.conjugate(q[i, :]))).real
    del q, r
    # and save an array for later
    Pdiag = Pdiag / np.max(Pdiag)
    weightsNom = np.power(1.0 - Pdiag, 2)

    # weights for the first iteration
    tmp = np.ones(shape=(n), dtype="float") * pnRatio
    tmp = np.maximum(Pdiag, tmp)
    weights = np.reciprocal(tmp)

    # iteratively weighted least squares
    iteration = 0
    while iteration < options["maxiter"]:
        # do the weighted least-squares
        Anew, ynew = weightLS(A, y, weights)
        paramsNew, squareResidNew, rankNew, sNew = linalg.lstsq(Anew, ynew)
        residsNew = y - np.dot(A, paramsNew)
        # check residsNew to make sure it is not all zeros (i.e. this will happen in an underdetermined or exactly determined system)
        if np.sum(np.absolute(residsNew)) < eps():
            # then return everything here
            return paramsNew, residsNew, weights
        residsAbs = np.absolute(residsNew)
        residsMedian = np.median(residsAbs)
        # now compute the new weights
        weightsDenom = np.maximum(
            residsAbs,
            np.ones(shape=(n), dtype="float") * residsMedian)
        weightsNew = weightsNom / weightsDenom

        # increment iteration
        iteration = iteration + 1
        weights = weightsNew
        params = paramsNew

        if iteration > 1:
            # check to see whether the change is smaller than the tolerance
            changeResids = linalg.norm(residsNew -
                                       resids) / linalg.norm(residsNew)
            if changeResids < eps():
                # update resids
                resids = residsNew
                break
        # update resids
        resids = residsNew
    return params, resids, weights
Example #59
0
def mrr(gt_item, pred_items):
    if gt_item in pred_items:
        index = np.where(pred_items == (gt_item))[0][0]
        return np.reciprocal(float(index + 1))
    else:
        return 0
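A small usage sketch with hypothetical data (it repeats the mrr above so the snippet runs on its own): averaging the per-query reciprocal ranks gives the mean reciprocal rank of a recommender.

import numpy as np

def mrr(gt_item, pred_items):
    if gt_item in pred_items:
        index = np.where(pred_items == gt_item)[0][0]
        return np.reciprocal(float(index + 1))
    return 0.0

queries = [(3, np.array([7, 3, 9])),   # hit at rank 2 -> 1/2
           (5, np.array([5, 1, 2])),   # hit at rank 1 -> 1
           (8, np.array([1, 2, 3]))]   # miss          -> 0
print(np.mean([mrr(gt, preds) for gt, preds in queries]))   # 0.5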
Example #60
0
                psi_x[:, j] = c.reshape(psi_x.shape[0])

            utl.progress('Running matrix-based MANDy',
                         50,
                         cpu_time=_time.time() - start_time)

            # compute xi in matrix format
            with utl.timer() as time:
                [u, s, v] = splin.svd(psi_x,
                                      full_matrices=False,
                                      overwrite_a=True,
                                      check_finite=False,
                                      lapack_driver='gesdd')
                del psi_x
                xi = y @ v.transpose() @ np.diag(
                    np.reciprocal(s)) @ u.transpose()
            cpu_times[0, ind] += time.elapsed / repeats
            if r == 0:
                rel_errors[0, ind] = np.linalg.norm(
                    xi.transpose() -
                    xi_exact_mat) / np.linalg.norm(xi_exact_mat)
            del xi, u, s, v
            utl.progress('Running matrix-based MANDy',
                         100,
                         cpu_time=_time.time() - start_time)
            print('   CPU time      : ' + str("%.2f" % time.elapsed) + 's')
            print('   relative error: ' + str("%.2e" % rel_errors[0, ind]))

        else:

            # extrapolate cpu times of matrix approach