Example #1
def get_u(x, t):
	print(param.get('controller'))

	# String equality must use '==', not 'is' (identity), which only
	# works by accident for interned strings.
	if param.get('controller') == 'empty':
		u = np.zeros([param.get('m'), 1])

	elif param.get('controller') == 'fdbk':
		u = get_fdbk_controller(x, t)

	elif param.get('controller') == 'clf':
		u = get_clf_controller(x, t)

	elif param.get('controller') == 'scp':
		# Optimize over the remaining horizon, from the current time to the end.
		start_idx = np.where(param.get('T') == t)[0][0]
		end_idx = param.get('nt') - 1
		T = param.get('T')[start_idx:end_idx]
		u = get_scp_clf_controller(x, T)

	elif param.get('controller') == 'mpc':
		# Optimize over a receding horizon of at most 'mpc_horizon' steps.
		start_idx = np.where(param.get('T') == t)[0][0]
		end_idx = np.min((start_idx + param.get('mpc_horizon'), param.get('nt') - 1))
		T = param.get('T')[start_idx:end_idx]
		u = get_scp_clf_controller(x, T)

	return u
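Design note: the same dispatch can be written as a table lookup. A minimal sketch (not from the original project), assuming the param dict and controller helpers above; the 'scp'/'mpc' branches, which need the horizon slicing, are omitted and the name get_u_dispatch is hypothetical:

def get_u_dispatch(x, t):
	# Hypothetical table-based variant of get_u for the simple controllers.
	controllers = {
		'empty': lambda x, t: np.zeros([param.get('m'), 1]),
		'fdbk': get_fdbk_controller,
		'clf': get_clf_controller,
	}
	return controllers[param.get('controller')](x, t)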
Example #2
File: get.py Project: fagan2888/PyCO2SYS
def _pHfromTAVX(TA, VX, totals, k_constants, initialfunc, deltafunc):
    """Calculate pH from total alkalinity and DIC or one of its components using a
    Newton-Raphson iterative method.

    Although it is coded for H on the total pH scale, for the pH values occurring in
    seawater (pH > 6) it will be equally valid on any pH scale (H terms negligible) as
    long as the K Constants are on that scale.

    Based on the CalculatepHfromTA* functions, version 04.01, Oct 96, by Ernie Lewis.
    """
    # First guess inspired by M13/OE15, added v1.3.0:
    pH = initialfunc(TA, VX, totals["TB"], k_constants["K1"],
                     k_constants["K2"], k_constants["KB"])
    deltapH = 1.0 + pHTol
    while np.any(np.abs(deltapH) >= pHTol):
        pHdone = np.abs(deltapH) < pHTol  # check which rows don't need updating
        deltapH = deltafunc(pH, TA, VX, totals, k_constants)  # the pH jump
        # To keep the jump from being too big:
        abs_deltapH = np.abs(deltapH)
        sign_deltapH = np.sign(deltapH)  # a local variable, not an attribute on np
        # Jump by 1 instead if |deltapH| > 5
        deltapH = np.where(abs_deltapH > 5.0, sign_deltapH, deltapH)
        # Jump by 0.5 instead if 0.5 < |deltapH| <= 5
        deltapH = np.where(
            (abs_deltapH > 0.5) & (abs_deltapH <= 5.0),
            0.5 * sign_deltapH,
            deltapH,
        )  # assumes that once we're within 1 of the correct pH, we will converge
        pH = np.where(pHdone, pH, pH + deltapH)  # only update rows that need it
    return pH
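The clamping of deltapH above is a damped Newton step. A self-contained sketch of just that rule, outside the pH context (clamp_step is a hypothetical name):

import numpy as np

def clamp_step(delta):
    # Steps larger than 5 become unit steps; steps in (0.5, 5] become
    # half steps; small steps pass through. Signs are preserved.
    abs_delta = np.abs(delta)
    sign_delta = np.sign(delta)
    delta = np.where(abs_delta > 5.0, sign_delta, delta)
    return np.where((abs_delta > 0.5) & (abs_delta <= 5.0),
                    0.5 * sign_delta, delta)

print(clamp_step(np.array([-7.2, 1.3, 0.1])))  # -> [-1.   0.5  0.1]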
Example #3
def RGasConstant(WhichR):
    """Return the gas constant R in ml / (bar * K * mol)."""
    RGas = np.full(np.shape(WhichR), np.nan)
    RGas = np.where(WhichR == 1, RGasConstant_DOEv2, RGas)  # default, DOEv2
    RGas = np.where(WhichR == 2, RGasConstant_DOEv3, RGas)  # DOEv3
    RGas = np.where(WhichR == 3, RGasConstant_CODATA2018, RGas)  # 2018 CODATA
    return RGas
Example #4
File: model_full.py Project: cpempire/pWGD
    def load_data(self, fips):
        # load data
        with open("utils/states_dictionary_moving_average", 'rb') as f:
            states = pickle.load(f)

        first_confirmed = np.where(states[fips]["positive"] > 100)[0][0]
        data_confirmed = states[fips]["positive"][first_confirmed:]
        self.number_days_data = len(data_confirmed)
        self.data_confirmed = data_confirmed

        first_hospitalized = np.where(
            states[fips]["hospitalizedCurrently"] > 10)[0][0]
        self.lag_hospitalized = first_hospitalized - first_confirmed
        self.data_hospitalized = states[fips]["hospitalizedCurrently"][
            first_hospitalized:]

        print("self.data_hospitalized = ", self.data_hospitalized.shape)

        first_deceased = np.where(states[fips]["death"] > 10)[0][0]
        self.lag_deceased = first_deceased - first_confirmed
        self.data_deceased = states[fips]["death"][first_deceased:]

        # self.obs = np.append(np.log(self.data_deceased), np.log(np.diff(self.data_deceased)))
        self.obs = np.log(self.data_hospitalized)
        # self.obs = np.append(np.log(self.data_hospitalized), np.log(np.diff(self.data_deceased)))

        # self.Gamma_noise_inv = np.diag(1./np.power(0.01*self.obs, 2))
        self.Gamma_noise_inv = np.eye(len(self.obs))
Example #5
def K1fac(TempK, Pbar, RGas, WhichKs):
    """Calculate pressure correction factor for K1."""
    TempC = convert.TempK2C(TempK)
    K1fac = np.full(np.shape(TempK),
                    np.nan)  # because GEOSECS doesn't use _pcxKfac p1atm.
    F = WhichKs == 8  # freshwater
    # Pressure effects on K1 in freshwater: this is from Millero, 1983.
    deltaV = -30.54 + 0.1849 * TempC - 0.0023366 * TempC**2
    Kappa = (-6.22 + 0.1368 * TempC - 0.001233 * TempC**2) / 1000
    K1fac = np.where(F, Kfac(deltaV, Kappa, Pbar, TempK, RGas), K1fac)
    F = (WhichKs == 6) | (WhichKs == 7)
    # GEOSECS Pressure Effects On K1, K2, KB (on the NBS scale)
    # Takahashi et al, GEOSECS Pacific Expedition v. 3, 1982 quotes
    # Culberson and Pytkowicz, L and O 13:403-417, 1968:
    # but the fits are the same as those in
    # Edmond and Gieskes, GCA, 34:1261-1291, 1970
    # who in turn quote Li, personal communication
    K1fac = np.where(F, np.exp((24.2 - 0.085 * TempC) * Pbar / (RGas * TempK)),
                     K1fac)
    # This one is handled differently because the equation doesn't fit the
    # standard deltaV & Kappa form of _pcxKfac.
    F = (WhichKs != 6) & (WhichKs != 7) & (WhichKs != 8)
    # These are from Millero, 1995.
    # They are the same as Millero, 1979 and Millero, 1992.
    # They are from data of Culberson and Pytkowicz, 1968.
    deltaV = -25.5 + 0.1271 * TempC
    # deltaV = deltaV - .151*(Sali - 34.8) # Millero, 1979
    Kappa = (-3.08 + 0.0877 * TempC) / 1000
    # Kappa = Kappa - .578*(Sali - 34.8)/1000 # Millero, 1979
    # The fits given in Millero, 1983 are somewhat different.
    K1fac = np.where(F, Kfac(deltaV, Kappa, Pbar, TempK, RGas), K1fac)
    return K1fac
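The Kfac helper referenced above (and in the _pcxKfac comments) is not shown in this listing. A plausible sketch, assuming the standard deltaV/Kappa pressure-correction form ln Kfac = (-deltaV + 0.5 * Kappa * Pbar) * Pbar / (RGas * TempK); the actual PyCO2SYS implementation may differ:

import numpy as np

def Kfac(deltaV, Kappa, Pbar, TempK, RGas):
    # Assumed form: deltaV in cm3/mol, Kappa in cm3/(mol*bar), Pbar in bar.
    return np.exp((-deltaV + 0.5 * Kappa * Pbar) * Pbar / (RGas * TempK))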
Example #6
def test_on_data(change_val=1.0):
    exp_data_file_list = [
        "/media/timothysit/180C-2DDD/second_rotation_project/exp_data/data/data_IO_083.mat"
    ]
    for exp_data_file in exp_data_file_list:
        exp_data = scipy.io.loadmat(exp_data_file)

    trial_type_list = exp_data["hazard"]
    change_magnitude = np.exp(exp_data["sig"].flatten())
    noiseless_trial_type = exp_data["noiseless"].flatten()
    mouse_abort = (exp_data["outcome"].flatten() == "abort").astype(float)

    # remove aborted trials, and noiseless trials
    if change_magnitude is not None:
        trial_index = np.where((mouse_abort == 0)
                               & (noiseless_trial_type == 0)
                               & (change_magnitude == change_val))[0]
    else:
        trial_index = np.where((mouse_abort == 0)
                               & (noiseless_trial_type == 0))[0]

    signal = exp_data["ys"].flatten()[trial_index][0][0]
    tau = exp_data["change"][trial_index][0][0]

    p_z_given_x = forward_inference(signal)

    # Plot example
    plot_signal_and_inference(signal=signal, tau=tau, prob=p_z_given_x[:, 1])

    return None
Example #7
def K2fac(TempK, Pbar, RGas, WhichKs):
    """Calculate pressure correction factor for K2."""
    TempC = convert.TempK2C(TempK)
    K2fac = np.full(np.shape(TempK),
                    np.nan)  # because GEOSECS doesn't use _pcxKfac p1atm.
    F = WhichKs == 8  # freshwater
    # Pressure effects on K2 in freshwater: this is from Millero, 1983.
    deltaV = -29.81 + 0.115 * TempC - 0.001816 * TempC**2
    Kappa = (-5.74 + 0.093 * TempC - 0.001896 * TempC**2) / 1000
    K2fac = np.where(F, Kfac(deltaV, Kappa, Pbar, TempK, RGas), K2fac)
    F = (WhichKs == 6) | (WhichKs == 7)
    # GEOSECS Pressure Effects On K1, K2, KB (on the NBS scale)
    # Takahashi et al, GEOSECS Pacific Expedition v. 3, 1982 quotes
    # Culberson and Pytkowicz, L and O 13:403-417, 1968:
    # but the fits are the same as those in
    # Edmond and Gieskes, GCA, 34:1261-1291, 1970
    # who in turn quote Li, personal communication
    K2fac = np.where(F, np.exp((16.4 - 0.04 * TempC) * Pbar / (RGas * TempK)),
                     K2fac)
    # Takahashi et al had 26.4, but 16.4 is from Edmond and Gieskes
    # and matches the GEOSECS results
    # This one is handled differently because the equation doesn't fit the
    # standard deltaV & Kappa form of _pcxKfac.
    F = (WhichKs != 6) & (WhichKs != 7) & (WhichKs != 8)
    # These are from Millero, 1995.
    # They are the same as Millero, 1979 and Millero, 1992.
    # They are from data of Culberson and Pytkowicz, 1968.
    deltaV = -15.82 - 0.0219 * TempC
    # deltaV = deltaV + .321*(Sali - 34.8) # Millero, 1979
    Kappa = (1.13 - 0.1475 * TempC) / 1000
    # Kappa = Kappa - .314*(Sali - 34.8)/1000 # Millero, 1979
    # The fit given in Millero, 1983 is different.
    # Not by a lot for deltaV, but by much for Kappa.
    K2fac = np.where(F, Kfac(deltaV, Kappa, Pbar, TempK, RGas), K2fac)
    return K2fac
Example #8
def mc_link_lik(w, mu_shift, q, ln_q, ln_1_q, ln_s):
    n = numpy.shape(q)[0]

    # Unpack the three interleaved parameter columns per observation.
    w_a = w[:, numpy.arange(0, n) * 3]
    w_b = w[:, numpy.arange(0, n) * 3 + 1]

    a = -numpy.exp(w_a / mu_shift[0] + mu_shift[1])
    b = numpy.exp(w_b / mu_shift[2] + mu_shift[3])
    c = w[:, numpy.arange(0, n) * 3 + 2] / mu_shift[4] + mu_shift[5]

    tmp_sum = a * ln_q.ravel() + b * ln_1_q.ravel() + c

    # Overflow-safe evaluation of 2*log(1 + exp(tmp_sum)).
    tmp_de = numpy.where(
        tmp_sum <= 0, 2 * numpy.log(1 + numpy.exp(tmp_sum)),
        2 * (tmp_sum + numpy.log(1 + 1 / (numpy.exp(tmp_sum)))))

    ln_s_hat = (tmp_sum + numpy.log((a + b) * q.ravel() - a) - ln_q.ravel() -
                ln_1_q.ravel() - tmp_de) + ln_s.ravel()

    # Monte Carlo average over the samples on axis 0, guarded against log(0).
    mean_exp = numpy.mean(numpy.exp(ln_s_hat), axis=0)
    ln_mean_s_hat = numpy.where(mean_exp > 0, numpy.log(mean_exp),
                                numpy.log(1e-16))

    link_ll = numpy.sum(ln_mean_s_hat)
    return link_ll
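The tmp_de expression above is an overflow-safe evaluation of 2*log(1 + exp(x)): for x > 0 it uses the identity log(1 + exp(x)) = x + log(1 + exp(-x)). A minimal demonstration of the same trick (log1pexp is a hypothetical name):

import numpy as np

def log1pexp(x):
    # Stable log(1 + exp(x)): neither branch ever exponentiates a large number.
    return np.where(x <= 0,
                    np.log1p(np.exp(np.minimum(x, 0))),
                    x + np.log1p(np.exp(-np.abs(x))))

print(log1pexp(np.array([-800.0, 0.0, 800.0])))  # -> [  0.      0.693 800.   ]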
Example #9
    def get_g(self,x):
        atoms3 = self.atoms3
        atoms4 = self.atoms4
        p = self.p

        output = np.zeros(p)
        combos = np.asarray([[0, 1, 2], [1, 2, 3], [0, 2, 3], [0, 1, 3]])
        for k in range(p):
            atom4 = atoms4[k, :]
            angles4 = []
            # get identities of triangles on boundary of tetrahedron
            actived = np.zeros(4)
            for i in range(4):
                actived[i] = np.where([set(item).issubset(atom4[combos[i, :]]) for item in atoms3])[0][0]
            actived = np.asarray(actived, dtype=int)
            naive = np.reshape(x, (int(x.shape[0] / 3), 3))[actived, :]
            for i in range(4):
                a = atoms3[actived[i]]
                b = atom4[np.in1d(atom4, atoms3[actived[i]])]
                for j in range(3):
                    angles4.append(naive[i, np.where(a == b[j])[0]])
            # the jth position in the ith row contains the gradient corresponding to the jth position in the truncated atom4
            a4 = np.reshape(angles4, (4, 3))
            fitin = self.g4(a4)
            # plus the lowest index first
            output[k] = fitin
        return output
Example #10
def training_core(c, xi, yin, lambdas, tol, tau, eta):
    # Implements the gradient-descent time-marching method for Total Variation learning.
    # lambdas is the regularization parameter
    # c is the radial basis function parameter
    # tau is the step size of the gradient descent method
    # tol is the tolerance for the stopping criterion
    # eta is the ridge term that regularizes the linear solves below
    dim1, dim2 = xi.shape
    w = np.random.random((dim1, 1))
    PSI = psi(xi, c, xi, w)
    w = np.linalg.inv(PSI.T.dot(PSI) + eta * np.identity(dim1)).dot(
        PSI.T.dot(yin))
    w = np.reshape(w, (dim1, 1))

    nr = 1
    i = 0
    while nr > tol:
        if i == 50:
            break
        i = i + 1
        PSI = psi(xi, c, xi, w)
        DUDT = dudtv(c, xi, w, yin, lambdas)
        residual = np.linalg.inv(PSI.T.dot(PSI) + eta * np.identity(dim1)).dot(
            PSI.T.dot(DUDT))
        w = w + tau * residual
        nr = np.linalg.norm(residual) / len(w)
        #print('iter= %3.0i, rel.residual= %1.2e' % (i,nr))
    yout = psi(xi, c, xi, w).dot(w).T

    inds = np.where(yout > 0)
    yout[inds] = 1
    inds = np.where(yout < 0)
    yout[inds] = -1
    return yout, w
Example #11
def get_neighbors(i, distances, offsets, oneway=False):
  """Get the indices and offsets of neighbors to atom i.

  Parameters
  ----------

  i: int, index of the atom to get neighbors for.
  distances: the distances returned from `get_distances`; as used below,
  distances[i] is a 2D array (neighbor atom x unit-cell offset).
  offsets: array of unit-cell offsets, one row per offset column in distances.
  oneway: bool, currently unused.

  Returns
  -------
  indices: a list of indices for the neighbors corresponding to the index of the
  original atom list.
  offsets: a list of unit cell offsets to generate the position of the neighbor.
  """

  di = distances[i]

  within_cutoff = di > 0.0

  indices = np.arange(len(distances))

  inds = indices[np.where(within_cutoff)[0]]
  offs = offsets[np.where(within_cutoff)[1]]

  return inds, np.int_(offs)
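A hypothetical usage sketch (not from the original project): get_distances is not shown, so the per-atom distance block and the matching offsets are faked. Zeros stand for "outside the cutoff", and distances[i] must be 2-D for the two-index np.where above to work:

import numpy as np

# Two atoms, three candidate unit-cell offsets.
distances = np.array([[[0.0, 0.0, 0.0],
                       [1.1, 0.0, 2.3]],
                      [[1.1, 0.0, 2.3],
                       [0.0, 0.0, 0.0]]])
offsets = np.array([[0, 0, 0], [0, 0, 1], [0, 0, -1]])

inds, offs = get_neighbors(0, distances, offsets)
print(inds)  # -> [1 1]: atom 1 is a neighbor through two periodic images
print(offs)  # -> [[ 0  0  0] [ 0  0 -1]]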
Example #12
def log_py_zM_ord_j(lambda_ord_j, y_oh_j, zM, k, nj_ord_j): 
    ''' Compute log p(y_j | zM, s1 = k1) of each ordinal variable 
    
    lambda_ord_j ( (nj_ord_j + r - 1) 1darray): Coefficients of the ordinal distributions in the GLLVM layer
    y_oh_j (numobs 1darray): The jth ordinal variable in the dataset
    zM (M x r x k ndarray): M Monte Carlo copies of z for each component k1 of the mixture
    k (int): The number of components of the mixture
    nj_ord_j (int): The number of possible values of the jth ordinal variable
    --------------------------------------------------------------
    returns (ndarray): The p(y_j | zM, s1 = k1) for the jth ordinal variable
    '''    

    r = zM.shape[1]
    M = zM.shape[0]
    epsilon = 1E-10 # Numeric stability
    lambda0 = lambda_ord_j[:(nj_ord_j - 1)]
    Lambda = lambda_ord_j[-r:]
 
    broad_lambda0 = lambda0.reshape((nj_ord_j - 1, 1, 1, 1))
    eta = broad_lambda0 - (np.transpose(zM, (0, 2, 1)) @ Lambda.reshape((1, r, 1)))[np.newaxis]
    
    gamma = expit(eta)
    
    gamma_prev = np.concatenate([np.zeros((1,M, k, 1)), gamma])
    gamma_next = np.concatenate([gamma, np.ones((1,M, k, 1))])
    pi = gamma_next - gamma_prev
    
    pi = np.where(pi <= 0, epsilon, pi)
    pi = np.where(pi >= 1, 1 - epsilon, pi)
    
    yg = np.expand_dims(y_oh_j.T, 1)[..., np.newaxis, np.newaxis] 
    
    log_p_y_z = yg * np.log(np.expand_dims(pi, axis=2)) 
   
    return log_p_y_z.sum((0))
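The gamma_prev/gamma_next construction above is the cumulative-logit (proportional odds) link: differencing stacked sigmoids turns nj_ord_j - 1 thresholds into nj_ord_j category probabilities. A scalar-sized sketch of the same step:

import numpy as np
from scipy.special import expit

eta = np.array([-1.0, 0.5])                  # two thresholds -> three categories
gamma = expit(eta)                           # cumulative probabilities
gamma_prev = np.concatenate([[0.0], gamma])
gamma_next = np.concatenate([gamma, [1.0]])
pi = gamma_next - gamma_prev                 # per-category probabilities
print(pi, pi.sum())                          # ~[0.269 0.354 0.377], sums to 1.0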
Example #13
def kernelpdf(scale, sigma, dataset, datasetGen):

    #dataset is binned as eta1,eta2,mass,pt2,pt1

    maxR = np.full((100), 3.3)
    minR = np.full((100), 2.9)

    valsReco = np.linspace(minR[0], maxR[0], 100)
    valsGen = valsReco

    h = np.tensordot(
        scale, valsGen, axes=0
    )  #get a 5D vector with np.newaxis with all possible combos of kinematics and gen mass values
    h_ext = np.swapaxes(np.swapaxes(h, 2, 4), 3, 4)[:, :, np.newaxis, :, :, :]

    sigma_ext = sigma[:, :, np.newaxis, np.newaxis, :, :]

    xscale = np.sqrt(2.) * sigma_ext

    maxR_ext = maxR[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis,
                    np.newaxis]
    minR_ext = minR[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis,
                    np.newaxis]

    maxZ = ((maxR_ext - h_ext.astype('float64')) / xscale)
    minZ = ((minR_ext - h_ext.astype('float64')) / xscale)

    arg = np.sqrt(np.pi / 2.) * sigma_ext * (erf(maxZ) - erf(minZ))

    #take tensor product between mass and genMass dimensions and sum over gen masses
    #divide each bin by the sum of gen events in that bin
    den = np.where(
        np.sum(datasetGen, axis=2) > 1000., np.sum(datasetGen, axis=2),
        -1)[:, :, np.newaxis, :, :]

    I = np.sum(arg * datasetGen[:, :, np.newaxis, :, :, :], axis=3) / den

    #give vals the right shape -> add dimension for gen mass (axis = 3)
    vals_ext = valsReco[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis,
                        np.newaxis]

    gaus = np.exp(-np.power(vals_ext - h_ext.astype('float64'), 2.) /
                  (2 * np.power(sigma_ext, 2.)))

    #take tensor product between mass and genMass dimensions and sum over gen masses
    #divide each bin by the sum of gen events in that bin
    den2 = np.where(
        np.sum(datasetGen, axis=2) > 1000., np.sum(datasetGen, axis=2),
        1)[:, :, np.newaxis, :, :]

    pdf = np.sum(gaus * datasetGen[:, :, np.newaxis, :, :, :],
                 axis=3) / den2 / np.where(I > 0., I, -1)

    pdf = np.where(pdf > 0., pdf, 0.)

    massbinwidth = (maxR[0] - minR[0]) / 100

    pdf = pdf * massbinwidth

    return pdf
Example #14
 def deadzone(errors):
     if self.effect == "linear":
         return np.where(errors > self.threshold, errors,
                         np.zeros(errors.shape))
     if self.effect == "quadratic":
         return np.where(errors > self.threshold, errors**2,
                         np.zeros(errors.shape))
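A standalone sketch of the "linear" deadzone above, with a hypothetical threshold of 1.0 (the snippet reads self.effect and self.threshold from its enclosing object):

import numpy as np

errors = np.array([0.2, 1.5, 3.0])
threshold = 1.0
# Errors at or below the threshold are zeroed; larger errors pass through.
print(np.where(errors > threshold, errors, np.zeros(errors.shape)))
# -> [0.  1.5 3. ]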
Example #15
File: GLM.py Project: codanonymous/tslasso
    def _prox(self, beta, thresh):
        """Proximal operator."""

        #print('beginprox', beta[0:2],thresh)
        group_ids = np.unique(self.group)
        result = np.zeros(beta.shape)
        result = np.asarray(result, dtype=float)
        #print('gids',group_ids)
        for i in range(len(group_ids)):
            gid = i
            #print(self.group)
            idxs_to_update = np.where(self.group == gid)[0]
            #print('idx',idxs_to_update)
            #print('norm', np.linalg.norm(beta[idxs_to_update]))
            if np.linalg.norm(beta[idxs_to_update]) > 0.:
                #print('in here')
                potentialoutput = beta[idxs_to_update] - (
                    thresh / np.linalg.norm(
                        beta[idxs_to_update])) * beta[idxs_to_update]
                posind = np.where(beta[idxs_to_update] > 0.)[0]
                negind = np.where(beta[idxs_to_update] < 0.)[0]
                po = beta[idxs_to_update].copy()
                #print('potention', potentialoutput[0:2])
                po[posind] = np.asarray(np.clip(potentialoutput[posind],
                                                a_min=0.,
                                                a_max=1e15),
                                        dtype=float)
                po[negind] = np.asarray(np.clip(potentialoutput[negind],
                                                a_min=-1e15,
                                                a_max=0.),
                                        dtype=float)
                result[idxs_to_update] = po
        #print('end', result[0:2])
        return result
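The loop above is, up to the sign-preserving clipping, the group-lasso proximal operator (block soft-thresholding): each group is shrunk toward zero by thresh / norm of the group. The textbook form of that rule, as a compact sketch rather than the project's code:

import numpy as np

def prox_group(beta_g, thresh):
    # Block soft-thresholding: shrink the whole group toward zero by
    # `thresh`, and set it to zero outright if its norm is below `thresh`.
    norm = np.linalg.norm(beta_g)
    if norm == 0.0:
        return beta_g
    return np.maximum(1.0 - thresh / norm, 0.0) * beta_g

print(prox_group(np.array([3.0, 4.0]), 1.0))  # -> [2.4 3.2]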
Example #16
def split_data_crossvalid(data):
    """Split data using crossvalid"""
    X_trainfolder = []
    X_testfolder = []
    y_trainfolder = []
    y_testfolder = []
    data = data[data[:, 0].argsort()]
    number_one = np.count_nonzero(data[:, :1])
    data_one = data[np.where(data[:, 0] == 1)]
    data_zero = data[np.where(data[:, 0] == 0)]
    one_ratio = round(number_one / len(data), 1)
    one_zero_ratio = 1 - one_ratio
    batch_one = int(70 * one_ratio)
    batch_zero = int(70 * one_zero_ratio)
    batchs = len(data) // 70
    for i in range(batchs):
        # np.delete expects row indices, not row contents; build the index
        # ranges explicitly (clamped like the original slices).
        idx_one = np.arange(i * batch_one, min((i + 1) * batch_one, len(data_one)))
        test_one = data_one[idx_one, :]
        train_one = np.delete(data_one, idx_one, axis=0)
        idx_zero = np.arange(i * batch_zero, min((i + 1) * batch_zero, len(data_zero)))
        test_zero = data_zero[idx_zero, :]
        train_zero = np.delete(data_zero, idx_zero, axis=0)
        train_sets = np.concatenate((train_one, train_zero), axis=0)
        test_sets = np.concatenate((test_one, test_zero), axis=0)
        np.random.shuffle(train_sets)
        np.random.shuffle(test_sets)
        X_trainfolder.append(train_sets[:, 1:])
        y_trainfolder.append(train_sets[:, 0])
        X_testfolder.append(test_sets[:, 1:])
        y_testfolder.append(test_sets[:, 0])
    return X_trainfolder, y_trainfolder, X_testfolder, y_testfolder
Example #17
def get_activation_function(mode: str = "sigmoid",
                            derivate: bool = False) -> object:
    '''
    returns the corresponding activation function for the given mode
    Parameters:
        - mode: mode of the activation function. Possible values are [String]
            - Sigmoid function --> "sigmoid"
            - Hyperbolic tangent --> "tanh"
            - Rectified Linear Unit --> "relu"
            - Leaky Rectified Linear Unit --> "leaky-relu"
            - Softmax --> "softmax"
        - derivate: whether (=True) or not (=False, default) to return the derivative of the given function [Boolean]
    Returns:
        - y: desired activation function [object]
    '''
    if mode == "sigmoid":
        y = lambda x: 1 / (1 + np.exp(-x))
    elif mode == "tanh":
        y = lambda x: (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
    elif mode == "relu":
        y = lambda x: np.where(x <= 0, 0.0, 1.0) * x
    elif mode == "leaky-relu":
        y = lambda x: np.where(x <= 0, 0.1, 1.0) * x
    elif mode == "softmax":
        y = lambda x: np.exp(x - x.max()) / (
            (np.exp(x - x.max()) / np.sum(np.exp(x - x.max()))))
    else:
        print('Unknown activation function. Using linear instead.')
        y = lambda x: x
    ## when derivation of function shall be returned
    if derivate:
        return elementwise_grad(y)
    return y
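A usage sketch, assuming elementwise_grad comes from autograd as the snippet implies:

import autograd.numpy as np
from autograd import elementwise_grad

relu = get_activation_function("relu")
print(relu(np.array([-2.0, 0.0, 3.0])))        # -> [-0.  0.  3.]

relu_grad = get_activation_function("relu", derivate=True)
print(relu_grad(np.array([-2.0, 0.0, 3.0])))   # -> [0. 0. 1.]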
Example #18
 def like_t(self, t, t_flags, *params):
     # Needs to be updated to work with zi and ds models.
     # until then, can prevent it working in the `fit` method
     tr_denom = np.where(t_flags[:, 1] == 1, self.ff(t[:, 1], *params), 1.)
     tl_denom = np.where(t_flags[:, 0] == 1, self.ff(t[:, 0], *params), 0.)
     t_denom = tr_denom - tl_denom
     return t_denom
Example #19
def corr_spline_grad(D, theta):
    ss = np.zeros(D.shape)
    xi = np.abs(D) * theta
    # np.where returns a tuple of index arrays; test the arrays, not the tuple
    # (len of the tuple is the number of dimensions, which is always > 0).
    I = np.where(xi <= 0.2)
    if I[0].size > 0:
        ss[I] = 1 - xi[I]**2 * (15 - 30 * xi[I])
    I = np.where(np.logical_and(xi > 0.2, xi < 1.0))
    if I[0].size > 0:
        ss[I] = 1.25 * (1 - xi[I])**3

    dr = np.zeros(D.shape)
    m, n = D.shape
    u = np.sign(D) * theta

    # The derivative pieces are selected over the same xi ranges as ss.
    I = np.where(xi <= 0.2)
    if I[0].size > 0:
        dr[I] = u[I] * ((90 * xi[I] - 30) * xi[I])
    I = np.where(np.logical_and(xi > 0.2, xi < 1.0))
    if I[0].size > 0:
        dr[I] = -3.75 * u[I] * (1 - xi[I]**2)

    for j in range(n):
        _ss = np.copy(ss)
        _ss[:, j] = dr[:, j]
        dr[:, j] = np.prod(_ss, axis=1)

    return dr
Example #20
 def like_i(self, x, c, n, inf_c_flags, *params):
     # This makes sure that any intervals that are at the boundaries of support or
     # are infinite will not cause the autograd functions to fail.
     ir = np.where(inf_c_flags[:, 1] == 1, 1, self.ff(x[:, 1], *params))
     il = np.where(inf_c_flags[:, 0] == 1, 0, self.ff(x[:, 0], *params))
     like_i = ir - il
     return like_i
Example #21
def testing_step(feature_test, c, feature_training, w):
    # Computes one testing step; c is the radial basis function parameter.
    dim1, dim2 = feature_test.shape
    xt = feature_test[0:dim1, 1:dim2]  # features (column 0 holds the labels)

    yin = np.zeros((dim1, 1))
    yin[:, 0] = feature_test[:, 0]  # labels

    dim1, dim2 = feature_training.shape
    xi = feature_training[0:dim1, 1:dim2]

    yout = psi(xt, c, xi, w).dot(w).T

    inds = np.where(yout > 0)
    yout[inds] = 1
    inds = np.where(yout < 0)
    yout[inds] = -1

    dim1, dim2 = feature_test.shape
    # (yout != yin) broadcasts to a (dim1, dim1) matrix whose diagonal holds
    # the per-sample mismatches, so its trace counts classification errors.
    Error = np.matrix.trace(yout != yin)
    Efficiency = 100 - 100 * Error / dim1

    return yout, Error, Efficiency
Example #22
def log_py_zM_categ_j(lambda_categ_j, y_categ_j, zM, k, nj_categ_j):
    ''' Compute log p(y_j | zM, s1 = k1) of each categorical variable 
    
    lambda_categ_j (nj_categ x (r + 1) ndarray): Coefficients of the categorical distributions in the GLLVM layer
    y_categ_j (numobs 1darray): The jth categorical variable in the dataset
    zM (M x r x k ndarray): M Monte Carlo copies of z for each component k1 of the mixture
    k (int): The number of components of the mixture
    nj_categ_j (int): The number of possible values of the jth categorical variable
    --------------------------------------------------------------
    returns (ndarray): The p(y_j | zM, s1 = k1) for the jth categorical variable
    '''  
    epsilon = 1E-10

    r = zM.shape[1]
    nj = y_categ_j.shape[1]
        
    zM_broad = np.expand_dims(np.expand_dims(np.transpose(zM, (0, 2, 1)), 2), 3)
    lambda_categ_j_ = lambda_categ_j.reshape(nj, r + 1, order = 'C')

    eta = zM_broad @ lambda_categ_j_[:, 1:][n_axis, n_axis, ..., n_axis] # check that r (not k) is used here
    eta = eta + lambda_categ_j_[:,0].reshape(1, 1, nj_categ_j, 1, 1) # Add the constant
    
    pi = softmax_(eta.astype(float), axis = 2)
    # Numeric stability
    pi = np.where(pi <= 0, epsilon, pi)
    pi = np.where(pi >= 1, 1 - epsilon, pi)

    yg = np.expand_dims(np.expand_dims(y_categ_j, 1), 1)[..., np.newaxis, np.newaxis] 
    log_p_y_z = yg * np.log(pi[n_axis]) 
    
    # Reshaping output
    log_p_y_z = log_p_y_z.sum(3) # Summing over the modalities nj
    log_p_y_z = log_p_y_z[:,:,:,0,0] # Deleting useless axes
        
    return np.transpose(log_p_y_z,(1,0, 2))
Example #23
def fH(TempK, Sal, WhichKs):
    """Calculate NBS to Seawater pH scale conversion factor for the given options."""
    fH = np.where(WhichKs == 8, 1.0, np.nan)
    fH = np.where(WhichKs == 7, convert.fH_PTBO87(TempK, Sal), fH)
    # Use GEOSECS's value for all other cases
    fH = np.where((WhichKs != 7) & (WhichKs != 8),
                  convert.fH_TWB82(TempK, Sal), fH)
    return fH
Example #24
 def neg_ll(self, X, x, c, n, *params):
     params = np.array(params)
     like = np.zeros_like(x).astype(float)
     like = np.where(c == 0, self.log_df(x, X, *params), like)
     like = np.where(c == 1, self.log_sf(x, X, *params), like)
     like = np.where(c == -1, self.log_ff(x, X, *params), like)
     like = np.multiply(n, like)
     return -np.sum(like)
Example #25
 def predict(self, X, partial=False):
     if partial:
         Z = 1 / (1 + np.exp(-(X.dot(self.W[:2]) + self.b)))
         Y = np.where(Z >= 0.5, 1, 0)
     else:
         Z = 1 / (1 + np.exp(-(X.dot(self.W) + self.b)))
         Y = np.where(Z >= 0.5, 1, 0)
     return Y, Z
Example #26
 def log_like_i(self, x, c, n, inf_c_flags, p, f0, *params):
     ir = np.where(inf_c_flags[:, 1] == 1, 1,
                   (1 - f0) * p * self.ff(x[:, 1], *params))
     il = np.where(inf_c_flags[:, 0] == 1, 0,
                   (1 - f0) * p * self.ff(x[:, 0], *params))
     like_i = ir - il
     like_i = np.where(c != 2, 1., like_i)
     return np.log(like_i)
Example #27
def _co2sys_TB(salinity, WhichKs, WhoseTB):
    """Calculate total borate from salinity for the given options."""
    TB = np.where(WhichKs == 8, 0.0, np.nan)  # pure water
    TB = np.where((WhichKs == 6) | (WhichKs == 7), borate_C65(salinity), TB)
    F = (WhichKs != 6) & (WhichKs != 7) & (WhichKs != 8)
    TB = np.where(F & (WhoseTB == 1), borate_U74(salinity), TB)
    TB = np.where(F & (WhoseTB == 2), borate_LKB10(salinity), TB)
    return TB
Example #28
File: get.py Project: fagan2888/PyCO2SYS
def speciation(dic, pH, totals, k_constants):
    """Calculate the full chemical speciation of seawater given DIC and pH.
    Based on CalculateAlkParts by Ernie Lewis.
    """
    h_scale = 10.0**-pH  # on the pH scale declared by the user
    sw = {}
    # Carbonate
    sw["HCO3"] = HCO3fromTCH(dic, h_scale, totals, k_constants)
    sw["CO3"] = CarbfromTCH(dic, h_scale, totals, k_constants)
    sw["CO2"] = dic - sw["HCO3"] - sw["CO3"]
    # Borate
    sw["BOH4"] = sw["BAlk"] = (totals["TB"] * k_constants["KB"] /
                               (k_constants["KB"] + h_scale))
    sw["BOH3"] = totals["TB"] - sw["BOH4"]
    # Water
    sw["OH"] = k_constants["KW"] / h_scale
    sw["Hfree"] = h_scale * k_constants["pHfactor_to_Free"]
    # Phosphate
    sw.update(phosphate_components(h_scale, totals, k_constants))
    sw["PAlk"] = sw["HPO4"] + 2 * sw["PO4"] - sw["H3PO4"]
    # Silicate
    sw["H3SiO4"] = sw["SiAlk"] = (totals["TSi"] * k_constants["KSi"] /
                                  (k_constants["KSi"] + h_scale))
    sw["H4SiO4"] = totals["TSi"] - sw["H3SiO4"]
    # Ammonium
    sw["NH3"] = sw["NH3Alk"] = (totals["TNH3"] * k_constants["KNH3"] /
                                (k_constants["KNH3"] + h_scale))
    sw["NH4"] = totals["TNH3"] - sw["NH3"]
    # Sulfide
    sw["HS"] = sw["H2SAlk"] = (totals["TH2S"] * k_constants["KH2S"] /
                               (k_constants["KH2S"] + h_scale))
    sw["H2S"] = totals["TH2S"] - sw["HS"]
    # KSO4 and KF are always on the Free scale, so:
    # Sulfate
    sw["HSO4"] = totals["TSO4"] / (1 + k_constants["KSO4"] / sw["Hfree"])
    sw["SO4"] = totals["TSO4"] - sw["HSO4"]
    # Fluoride
    sw["HF"] = totals["TF"] / (1 + k_constants["KF"] / sw["Hfree"])
    sw["F"] = totals["TF"] - sw["HF"]
    # Extra alkalinity components (added in v1.6.0)
    sw["alpha"] = (totals["alpha"] * k_constants["alpha"] /
                   (k_constants["alpha"] + h_scale))
    sw["alphaH"] = totals["alpha"] - sw["alpha"]
    sw["beta"] = totals["beta"] * k_constants["beta"] / (k_constants["beta"] +
                                                         h_scale)
    sw["betaH"] = totals["beta"] - sw["beta"]
    zlp = 4.5  # pK of 'zero level of protons' [WZK07]
    sw["alk_alpha"] = np.where(-np.log10(k_constants["alpha"]) <= zlp,
                               -sw["alphaH"], sw["alpha"])
    sw["alk_beta"] = np.where(-np.log10(k_constants["beta"]) <= zlp,
                              -sw["betaH"], sw["beta"])
    # Total alkalinity
    sw["alk_total"] = (sw["HCO3"] + 2 * sw["CO3"] + sw["BAlk"] + sw["OH"] +
                       sw["PAlk"] + sw["SiAlk"] + sw["NH3Alk"] + sw["H2SAlk"] -
                       sw["Hfree"] - sw["HSO4"] - sw["HF"] + sw["alk_alpha"] +
                       sw["alk_beta"])
    return sw
Example #29
File: q11.py Project: yanzhizhang/CSC_412
def compute_means(train_images, train_labels):
    # Binarize the images; np.where returns a new array, so it must be assigned.
    train_images = np.where(train_images > 0.5, 1, 0)
    means = []
    for i in range(0, 10):
        i_digits = get_digits_by_label(train_images, train_labels, i)
        means.append(np.mean(i_digits, axis=0))

    save_images(np.array(means), "1_c.jpg")
    return np.array(means)
Example #30
 def initialize_ramp_choice(ys, choices, bin_size):
     choice_0 = np.where(choices == 0)[0]
     choice_1 = np.where(choices == 1)[0]
     y_end = np.array([y[-5:] for y in ys])
     C0 = np.mean(y_end[choice_0]) / bin_size
     C1 = np.mean(y_end[choice_1]) / bin_size
     C = max(C0, C1)
     y0_mean = np.mean([y[:3] for y in ys])
     x0 = y0_mean / C / bin_size
     return C, x0
Example #31
    def predict_expectation(self, X, ancillary_X=None):
        """
        Predict the expectation of lifetimes, :math:`E[T | x]`.

        Parameters
        ----------
        X: numpy array or DataFrame
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        ancillary_X: numpy array or DataFrame, optional
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.

        Returns
        -------
        expectations: DataFrame
            the expected lifetimes for the individuals. Where the expectation
            does not exist (here, when beta_ <= 1), the result is NaN.

        See Also
        --------
        predict_median
        """
        alpha_, beta_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X)
        v = (alpha_ * np.pi / beta_) / np.sin(np.pi / beta_)
        v = np.where(beta_ > 1, v, np.nan)
        return pd.DataFrame(v, index=_get_index(X))
Example #32
File: models.py Project: HIPS/DESI-MCMC
 def get_brightest(self, object_type='star', num_srcs=1, band='r', return_idx=False):
     """return brightest sources (by source type, band)"""
     fluxes      = np.array([s.params.flux_dict[band] for s in self.srcs])
     type_idx    = np.where(self.source_types == object_type)[0]
     type_fluxes = fluxes[type_idx]
     type_idx    = type_idx[np.argsort(type_fluxes)[::-1]][:num_srcs]
     blist       = [self.srcs[i] for i in type_idx]
     if return_idx:
         return blist, type_idx
     else:
         return blist
Example #33
    def evaluate(variable_values, parameters):
        ax = variable_values[parameters["ax"]]
        bx = variable_values[parameters["bx"]]
        ay = variable_values[parameters["ay"]]
        by = variable_values[parameters["by"]]

        h = variable_values[parameters["h"]]
        c = numpy.hypot(bx - ax, by - ay)

        h2 = 2 * h

        length_h0 = c + h2 * h2 / c
        length = numpy.arctan2(h2, c) * (c * c / h2 + h2)
        length = numpy.where(h < 1e-6, length_h0, length)

        return length - variable_values[parameters["length"]]
Example #34
    def _accumulate_sufficient_statistics(self, stats, X, framelogprob,
                                          posteriors, fwdlattice, bwdlattice):
        """Updates sufficient statistics from a given sample.
        Parameters
        ----------
        stats : dict
            Sufficient statistics as returned by
            :meth:`~base._BaseHMM._initialize_sufficient_statistics`.
        X : array, shape (n_samples, n_features)
            Sample sequence.
        framelogprob : array, shape (n_samples, n_components)
            Log-probabilities of each sample under each of the model states.
        posteriors : array, shape (n_samples, n_components)
            Posterior probabilities of each sample being generated by each
            of the model states.
        fwdlattice, bwdlattice : array, shape (n_samples, n_components)
            Log-forward and log-backward probabilities.
        """
        # Based on hmmlearn's _BaseHMM
        safe_transmat = self.transmat_ + np.finfo(float).eps
        stats['nobs'] += 1
        if 's' in self.params:
            stats['start'] += posteriors[0]
        if 't' in self.params:
            n_samples, n_components = framelogprob.shape
            # when the sample is of length 1, it contains no transitions
            # so there is no reason to update our trans. matrix estimate
            if n_samples <= 1:
                return

            lneta = np.zeros((n_samples - 1, n_components, n_components))
            _hmmc._compute_lneta(n_samples, n_components, fwdlattice,
                                 np.log(safe_transmat),
                                 bwdlattice, framelogprob, lneta)
            stats['trans'] += np.exp(logsumexp(lneta, axis=0))
            # stats['trans'] = np.round(stats['trans'])
            # if np.sum(stats['trans']) != X.shape[0]-1:
            #     warnings.warn("transmat counts != n_samples", RuntimeWarning)
            #     import pdb; pdb.set_trace()
            stats['trans'][np.where(stats['trans'] < 0.01)] = 0.0
Example #35
File: synth.py Project: aasensio/DNHazel
    def compute_rotated_map(self, rotation):
        """
        Compute stellar maps projected on the plane of the sky for a given rotation of the star
        Args:
            rotation (float) : rotation of the star given as [longitude, latitude] in degrees
        
        Returns:
            pixel_unique (int) : vector with the "active" healpix pixels
            pixel_map (int) : map showing the healpix pixel projected on the plane of the sky
            mu_pixel (float): map of the astrocentric angle for each pixel on the plane of the sky (zero for pixels not in the star)
            T_pixel (float): map of temperatures for each pixel on the plane of the sky
        """
        mu_pixel = np.zeros_like(self.mu_angle)
        T_pixel = np.zeros_like(self.mu_angle)

        # Get the projection of the healpix pixel indices on the plane of the sky
        pixel_map = self.projector.projmap(self.indices, self.f_vec2pix, rot=rotation)[:,0:int(self.npix/2)]

        # Get the unique elements in the vector
        pixel_unique = np.unique(pixel_map)

        # Now loop over all unique pixels, filling up the array of the projected map with the mu and temperature values
        for j in range(len(pixel_unique)):
            ind = np.where(pixel_map == pixel_unique[j])            

            if (np.all(np.isfinite(self.mu_angle[ind[0],ind[1]]))):
                if (self.mu_angle[ind[0],ind[1]].size == 0):
                    value = 0.0
                else:                    
                    value = np.nanmean(self.mu_angle[ind[0],ind[1]])
                    mu_pixel[ind[0],ind[1]] = value

                    T_pixel[ind[0],ind[1]] = self.temperature_map[int(pixel_unique[j])]
            else:
                mu_pixel[ind[0],ind[1]] = 0.0
                T_pixel[ind[0],ind[1]] = 0.0

        return pixel_unique, pixel_map, mu_pixel, T_pixel
Example #36
File: util.py Project: WuCPMark/svae
def rle(stateseq):
    pos, = np.where(np.diff(stateseq) != 0)
    pos = np.concatenate(([0],pos+1,[len(stateseq)]))
    return stateseq[pos[:-1]], np.diff(pos)
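Usage sketch: run-length encode a state sequence into (values, run lengths):

import numpy as np

values, lengths = rle(np.array([1, 1, 2, 2, 2, 3]))
print(values, lengths)  # -> [1 2 3] [2 3 1]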
Example #37
File: hmm_em.py Project: RaoJun06/autograd
 def replace_zeros(a):
     return np.where(a > 0., a, 1.)
Example #38
 def fun(x, y):
     b = np.where(C, x, y)
     return to_scalar(b)
Example #39
File: chi2.py Project: HIPS/autograd
def grad_chi2_logpdf(x, df):
    return np.where(df % 1 == 0, (df - x - 2) / (2 * x), 0)
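A quick numeric sanity check of this gradient against a central difference of the chi-square log-density, dropping the terms that do not depend on x:

import numpy as np

x, df, eps = 3.0, 4.0, 1e-6
logpdf = lambda t: (df / 2 - 1) * np.log(t) - t / 2  # up to constants in x
fd = (logpdf(x + eps) - logpdf(x - eps)) / (2 * eps)
print(fd, (df - x - 2) / (2 * x))  # both ~ -0.1667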
Example #40
File: poisson.py Project: HIPS/autograd
def grad_poisson_logpmf(k, mu):
    return np.where(k % 1 == 0, k / mu - 1, 0)
Example #41
File: tm.py Project: simonkamronn/autohmm
    def _do_mstep(self, stats, params):  # M-Step for startprob and transmat
        if 's' in params:
            startprob_ = self.startprob_prior + stats['start']
            normalize(startprob_)
            self.startprob_ = np.where(self.startprob_ <= np.finfo(float).eps,
                                       self.startprob_, startprob_)
        if 't' in params:

            if self.n_tied == 0:
                transmat_ = self.transmat_prior + stats['trans']
                normalize(transmat_, axis=1)
                self.transmat_ = np.where(self.transmat_ <= np.finfo(float).eps,
                                          self.transmat_, transmat_)
            else:
                transmat_ = np.zeros((self.n_components, self.n_components))
                transitionCnts = stats['trans'] + self.transmat_prior
                transition_index = [i * self.n_chain for i in range(self.n_unique)]

                for b in range(self.n_unique):

                    block = \
                    transitionCnts[self.n_chain * b : self.n_chain * (b + 1)][:] + 0.

                    denominator_diagonal = np.sum(block)
                    diagonal = 0.0

                    index_line = range(0, self.n_chain)
                    index_row = range(self.n_chain * b, self.n_chain * (b + 1))

                    for l, r in zip(index_line, index_row):
                        diagonal += (block[l][r])

                    for l, r in zip(index_line, index_row):
                        block[l][r] = diagonal / denominator_diagonal

                    self_transition = block[0][self.n_chain * b]
                    denominator_off_diagonal = \
                    (np.sum(block[self.n_chain-1])) - self_transition
                    template = block[self.n_chain - 1] + 0.

                    for entry in range(len(template)):
                        template[entry] = (template[entry] * (1 - self_transition)) \
                        / float(denominator_off_diagonal)

                    template[(self.n_chain * (b + 1)) - 1] = 0.
                    line_value = 1 - self_transition

                    for entry in range(len(template)):
                        line_value = line_value - template[entry]

                    for index in transition_index:
                        if index != (b * self.n_chain):
                            block[self.n_chain - 1][index] = \
                            line_value + template[index]

                    line = range(self.n_chain - 1)
                    row = [b * self.n_chain + i for i in range(1, self.n_chain)]

                    for x, y in zip(line, row):
                        block[x][y] = 1 - self_transition


                    transmat_[self.n_chain * b : self.n_chain * (b + 1)][:] = block

                self.transmat_ = np.copy(transmat_)
Example #42
File: synth.py Project: aasensio/DNHazel
    def precompute_rotation_maps(self, rotations=None):
        """
        Compute the averaged spectrum on the star for a given temperature map and for a given rotation
        Args:
            rotations (float) : [N_phases x 2] giving [longitude, latitude] in degrees for each phase
        
        Returns:
            None
        """
        if (rotations is None):
            print("Use some angles for the rotations")
            return

        self.n_phases = rotations.shape[0]

        self.avg_mu = [None] * self.n_phases
        self.avg_v = [None] * self.n_phases
        self.velocity = [None] * self.n_phases
        self.n_pixel_unique = [None] * self.n_phases
        self.n_pixels = [None] * self.n_phases
        self.pixel_unique = [None] * self.n_phases

        for loop in range(self.n_phases):
            mu_pixel = np.zeros_like(self.mu_angle)
            v_pixel = np.zeros_like(self.vel_projection)
        
            pixel_map = self.projector.projmap(self.indices, self.f_vec2pix, rot=rotations[loop,:])[:,0:int(self.npix/2)]
            pixel_unique = np.unique(pixel_map[np.isfinite(pixel_map)])

            for j in range(len(pixel_unique)):
                ind = np.where(pixel_map == pixel_unique[j])

                if (np.all(np.isfinite(self.mu_angle[ind[0],ind[1]]))):
                    if (self.mu_angle[ind[0],ind[1]].size == 0):
                        mu_pixel[ind[0],ind[1]] = 0.0
                        v_pixel[ind[0],ind[1]] = 0.0
                    else:                    
                        
                        if (self.clv):
                            value = np.nanmean(self.mu_angle[ind[0],ind[1]])
                        else:
                            value = 1.0

                        mu_pixel[ind[0],ind[1]] = value

                        value = np.nanmean(self.vel_projection[ind[0],ind[1]])
                        v_pixel[ind[0],ind[1]] = value
                else:
                    mu_pixel[ind[0],ind[1]] = 0.0
                    v_pixel[ind[0],ind[1]] = 0.0

            self.n_pixel_unique[loop] = len(pixel_unique)
            self.avg_mu[loop] = np.zeros(self.n_pixel_unique[loop])
            self.avg_v[loop] = np.zeros(self.n_pixel_unique[loop])
            self.velocity[loop] = np.zeros(self.n_pixel_unique[loop])
            self.n_pixels[loop] = np.zeros(self.n_pixel_unique[loop], dtype='int')
            self.pixel_unique[loop] = pixel_unique.astype('int')

            for i in range(len(pixel_unique)):
                ind = np.where(pixel_map == pixel_unique[i])
                self.n_pixels[loop][i] = len(ind[0])
                self.avg_mu[loop][i] = np.unique(mu_pixel[ind[0], ind[1]])
                self.avg_v[loop][i] = np.unique(v_pixel[ind[0], ind[1]])            
                self.velocity[loop][i] = self.avg_mu[loop][i] * self.avg_v[loop][i]
Example #43
 def get_inferred_patterns(self, thres=1e-3):
     ixs = np.where(self.Lambda > thres)
     return ixs  # returns three dimensional arrays of TxKxK indices