Example #1
    def _cal(self, x_new: Number, y_new: Number):

        p = np.abs(self.x - x_new).argmin() - 1
        q = np.abs(self.y - y_new).argmin() - 1

        sum_ = 0
        for pi in range(p, p + 3):
            for qi in range(q, q + 3):
                _p = np.empty(shape=(3,))
                _q = np.empty(shape=(3,))
                for i, p_ in enumerate(range(p, p + 3)):
                    if pi != p_:
                        _p[i] = (x_new - self.x[p_]) / (self.x[pi] - self.x[p_])
                    else:
                        _p[i] = np.nan
                for i, q_ in enumerate(range(q, q + 3)):
                    if qi != q_:
                        _q[i] = (y_new - self.y[q_]) / (self.y[qi] - self.y[q_])
                    else:
                        _q[i] = np.nan
                p_prod = np.nanprod(_p)
                q_prod = np.nanprod(_q)
                sum_ += p_prod * q_prod * self.z[pi, qi]

        return sum_
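The NaN entries above exist so that np.nanprod skips the i == j factor of each Lagrange basis polynomial (NaN counts as 1 in the product). A minimal standalone 1D sketch of the same trick; the names here are illustrative and not part of the original class:

import numpy as np

def lagrange_basis(x_nodes, i, x):
    # value of the i-th Lagrange basis polynomial at x; the self-term is left
    # as NaN so that np.nanprod ignores it
    factors = np.full(len(x_nodes), np.nan)
    for j, xj in enumerate(x_nodes):
        if j != i:
            factors[j] = (x - xj) / (x_nodes[i] - xj)
    return np.nanprod(factors)

x_nodes = np.array([0.0, 1.0, 2.0])
print(lagrange_basis(x_nodes, 1, 0.5))  # 0.75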
Example #2
    def testNanReduction(self):
        raw = np.random.choice(a=[0, 1, np.nan], size=(10, 10), p=[0.3, 0.4, 0.3])

        arr = tensor(raw, chunks=3)

        self.assertEqual(np.nansum(raw), self.executor.execute_tensor(nansum(arr))[0])
        self.assertEqual(np.nanprod(raw), self.executor.execute_tensor(nanprod(arr))[0])
        self.assertEqual(np.nanmax(raw), self.executor.execute_tensor(nanmax(arr))[0])
        self.assertEqual(np.nanmin(raw), self.executor.execute_tensor(nanmin(arr))[0])
        self.assertEqual(np.nanmean(raw), self.executor.execute_tensor(nanmean(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw), self.executor.execute_tensor(nanvar(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw, ddof=1), self.executor.execute_tensor(nanvar(arr, ddof=1))[0])
        self.assertAlmostEqual(np.nanstd(raw), self.executor.execute_tensor(nanstd(arr))[0])
        self.assertAlmostEqual(np.nanstd(raw, ddof=1), self.executor.execute_tensor(nanstd(arr, ddof=1))[0])

        arr = tensor(raw, chunks=10)

        self.assertEqual(np.nansum(raw), self.executor.execute_tensor(nansum(arr))[0])
        self.assertEqual(np.nanprod(raw), self.executor.execute_tensor(nanprod(arr))[0])
        self.assertEqual(np.nanmax(raw), self.executor.execute_tensor(nanmax(arr))[0])
        self.assertEqual(np.nanmin(raw), self.executor.execute_tensor(nanmin(arr))[0])
        self.assertEqual(np.nanmean(raw), self.executor.execute_tensor(nanmean(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw), self.executor.execute_tensor(nanvar(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw, ddof=1), self.executor.execute_tensor(nanvar(arr, ddof=1))[0])
        self.assertAlmostEqual(np.nanstd(raw), self.executor.execute_tensor(nanstd(arr))[0])
        self.assertAlmostEqual(np.nanstd(raw, ddof=1), self.executor.execute_tensor(nanstd(arr, ddof=1))[0])

        raw = np.random.random((10, 10))
        raw[:3, :3] = np.nan
        arr = tensor(raw, chunks=3)
        self.assertEqual(np.nanargmin(raw), self.executor.execute_tensor(nanargmin(arr))[0])
        self.assertEqual(np.nanargmax(raw), self.executor.execute_tensor(nanargmax(arr))[0])

        raw = np.full((10, 10), np.nan)
        arr = tensor(raw, chunks=3)

        self.assertEqual(0, self.executor.execute_tensor(nansum(arr))[0])
        self.assertEqual(1, self.executor.execute_tensor(nanprod(arr))[0])
        self.assertTrue(np.isnan(self.executor.execute_tensor(nanmax(arr))[0]))
        self.assertTrue(np.isnan(self.executor.execute_tensor(nanmin(arr))[0]))
        self.assertTrue(np.isnan(self.executor.execute_tensor(nanmean(arr))[0]))
        self.assertRaises(ValueError, lambda: self.executor.execute_tensor(nanargmin(arr))[0])
        self.assertRaises(ValueError, lambda: self.executor.execute_tensor(nanargmax(arr))[0])

        raw = sps.random(10, 10, density=.1, format='csr')
        raw[:3, :3] = np.nan
        arr = tensor(raw, chunks=3)

        self.assertAlmostEqual(np.nansum(raw.A), self.executor.execute_tensor(nansum(arr))[0])
        self.assertAlmostEqual(np.nanprod(raw.A), self.executor.execute_tensor(nanprod(arr))[0])
        self.assertAlmostEqual(np.nanmax(raw.A), self.executor.execute_tensor(nanmax(arr))[0])
        self.assertAlmostEqual(np.nanmin(raw.A), self.executor.execute_tensor(nanmin(arr))[0])
        self.assertAlmostEqual(np.nanmean(raw.A), self.executor.execute_tensor(nanmean(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw.A), self.executor.execute_tensor(nanvar(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw.A, ddof=1), self.executor.execute_tensor(nanvar(arr, ddof=1))[0])
        self.assertAlmostEqual(np.nanstd(raw.A), self.executor.execute_tensor(nanstd(arr))[0])
        self.assertAlmostEqual(np.nanstd(raw.A, ddof=1), self.executor.execute_tensor(nanstd(arr, ddof=1))[0])

        arr = nansum(1)
        self.assertEqual(self.executor.execute_tensor(arr)[0], 1)
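The all-NaN assertions above mirror NumPy's own reduction identities: with nothing left to reduce, nansum falls back to 0 and nanprod to 1, while nanargmin/nanargmax raise because there is no valid index. A quick standalone check:

import numpy as np

all_nan = np.full((2, 2), np.nan)
print(np.nansum(all_nan), np.nanprod(all_nan))  # 0.0 1.0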
Example #3
def patient_staging(pi0, event_centers, likeli_post, likeli_pre, type_staging):

    L_yes = np.divide(likeli_post, likeli_post + likeli_pre + 1e-10)
    L_no = 1 - L_yes

    event_centers_pad = np.insert(event_centers, 0, 0)
    event_centers_pad = np.append(event_centers_pad, 1)
    pk_s = np.diff(event_centers_pad)
    pk_s[:] = 1

    m = L_yes.shape
    prob_stage = np.zeros((m[0], m[1] + 1))
    p_no_perm = L_no[:, pi0]
    p_yes_perm = L_yes[:, pi0]
    for j in range(m[1] + 1):
        prob_stage[:, j] = pk_s[j] * np.multiply(
            np.nanprod(p_yes_perm[:, :j], axis=1),
            np.nanprod(p_no_perm[:, j:], axis=1))

    all_stages_rep2 = matlib.repmat(event_centers_pad[:-1], m[0], 1)

    if type_staging[0] == 'exp':
        subj_stages = np.zeros(prob_stage.shape[0])
        for i in range(prob_stage.shape[0]):
            idx_nan = np.isnan(p_yes_perm[i, :])
            pr = prob_stage[i, 1:]
            ev = event_centers_pad[1:-1]
            subj_stages[i] = (np.mean(np.multiply(
                np.append(prob_stage[i, 0], pr[~idx_nan]),
                np.append(event_centers_pad[0], ev[~idx_nan])))
                / np.mean(np.append(prob_stage[i, 0], pr[~idx_nan])))
    elif type_staging[0] == 'ml':
        subj_stages = np.argmax(prob_stage, axis=1)

    return subj_stages
Example #4
    def _find_configs(self, eV, T_des=None):
        """
        Find the optimal configurations for attaining
        desired transmission ``T_des`` at photon
        energy ``eV``.

        Returns the configurations whose transmissions are
        closest to ``T_des`` from above and from below, along
        with their transmission values.
        """
        if not T_des:
            T_des = self.T_des.get()
        T_set = self._all_transmissions(eV)
        T_table = np.nanprod(T_set * self.config_table, axis=1)
        T_config_table = np.asarray(
            sorted(np.transpose([T_table[:],
                                 range(len(self.config_table))]),
                   key=lambda x: x[0]))
        i = np.argmin(np.abs(T_config_table[:, 0] - T_des))
        closest = self.config_table[T_config_table[i, 1]]
        T_closest = np.nanprod(T_set * closest)
        if T_closest == T_des:
            config_bestHigh = config_bestLow = closest
            T_bestHigh = T_bestLow = T_closest
        if T_closest < T_des:
            config_bestHigh = self.config_table[T_config_table[i + 1, 1]]
            config_bestLow = closest
            T_bestHigh = np.nanprod(T_set * config_bestHigh)
            T_bestLow = T_closest
        if T_closest > T_des:
            config_bestHigh = closest
            config_bestLow = self.config_table[T_config_table[i - 1, 1]]
            T_bestHigh = T_closest
            T_bestLow = np.nanprod(T_set * config_bestLow)
        return config_bestLow, config_bestHigh, T_bestLow, T_bestHigh
Example #5
def _ts_prod(x1, window):
    result = np.nanprod(x1[:, -window:], axis=1)
    for i in range(1, x1.shape[1] - window):
        result = np.column_stack(
            [np.nanprod(x1[:, -window - i:-i], axis=1), result])
    for i in range(x1.shape[1] - window, x1.shape[1]):
        result = np.column_stack([result[:, 0], result])
    return result
def absd(eff):
    
    arr = []
    for yrs in range(11,19):
        for i in range(eff.shape[0]):
            if eff.loc[i, f'pc_{yrs}'] > 1:
                arr.append(np.nanprod(eff[f'ec_{yrs}'])**(1.0/eff[f'ec_{yrs}'].notnull().sum()))
                arr.append(np.nanprod(eff[f'tc_{yrs}'])**(1.0/eff[f'tc_{yrs}'].notnull().sum())) 
    return arr
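The nanprod(col) ** (1 / count) pattern above is a NaN-aware geometric mean. A standalone sketch of just that pattern in plain NumPy; the function name is illustrative:

import numpy as np

def nan_geomean(values):
    # product of the non-NaN entries, raised to 1 over the number of non-NaN entries
    values = np.asarray(values, dtype=float)
    n = np.count_nonzero(~np.isnan(values))
    return np.nanprod(values) ** (1.0 / n) if n else np.nan

print(nan_geomean([2.0, 8.0, np.nan]))  # 4.0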
Example #7
 def update_consensus():
     c = np.nanprod(bin_responses * competencies.reshape(N, -1) +
                    (1 - bin_responses) * (1 - competencies).reshape(N, -1),
                    axis=0)
     #         print(c[:10])
     e = np.nanprod(bin_responses * (1 - competencies).reshape(N, -1) +
                    (1 - bin_responses) * competencies.reshape(N, -1),
                    axis=0)
     #         print(e[:10])
     return (c / (c + e))
Example #8
    def test_nanprod(self):
        # Testing a 1D array
        mag_1d = np.array([5., 6., np.nan])
        speeds_1d = Quantity.from_units(mag=mag_1d, units='m/s')
        units_1d = speeds_1d.units
        expected_nanprod_1d = Quantity._from_qty(mag=np.nanprod(mag_1d),
                                              units=units_1d*len(mag_1d))
        self.assertEqual(expected_nanprod_1d, np.nanprod(speeds_1d))

        # Testing a 2D array
        mag_2d = np.array([[5., 6.], [7., np.nan]])
        speeds_2d = Quantity.from_units(mag=mag_2d, units='m/s')
        units_2d = speeds_2d.units

        # Axis not specified
        expected_nanprod_2d = Quantity._from_qty(mag=np.nanprod(mag_2d),
                                              units=units_2d*mag_2d.size)
        self.assertEqual(expected_nanprod_2d, np.nanprod(speeds_2d))

        # Axis = 0
        axis = 0
        expected_nanprod_2d = Quantity._from_qty(mag=np.nanprod(mag_2d, axis=axis),
                                              units=units_2d*mag_2d.shape[1])
        np.testing.assert_array_equal(expected_nanprod_2d,
                                      np.nanprod(speeds_2d, axis=axis))

        # Axis = 1
        axis = 1
        expected_nanprod_2d = Quantity._from_qty(mag=np.nanprod(mag_2d, axis=axis),
                                              units=units_2d*mag_2d.shape[0])
        np.testing.assert_array_equal(expected_nanprod_2d,
                                      np.nanprod(speeds_2d, axis=axis))
Example #9
def decode(H, c_Rx, globalstd):
    mx_iter = 15
    l_intrinsic = np.multiply(2 / globalstd**2, c_Rx)
    d_bits = [[] for i in range(MODEL)]
    for model in selModel:
        lin = l_intrinsic
        L = np.multiply(H, lin)
        indx = np.where(L == 0)
        L[indx] = np.nan
        if model == 0:  # 0 -> original model.
            for i in range(mx_iter):
                L = np.tanh(L / 2)
                L_ = np.nanprod(L, axis=1).reshape(Global.n - Global.k, 1)
                L = np.divide(L_, L)
                L = 2 * ATANH(L)
                lin = (lin + np.nansum(L, axis=0)).reshape(1, Global.n)
                L = lin - L
            d_bits[model] = demod(lin)
        if model == 1:  # 1 -> alpha beta model.
            for i in range(mx_iter):
                S = np.sign(L)
                S = np.nanprod(S, axis=1).reshape(Global.n - Global.k, 1)
                l = MIN(np.abs(L))
                L = np.sign(L / S) * l
                L = alpha * L + beta
                lin = (lin + np.nansum(L, axis=0)).reshape(1, Global.n)
                L = lin - L
            d_bits[model] = demod(lin)
        if model == 2:  # 2 -> min sum model
            for i in range(mx_iter):
                S = np.sign(L)
                S = np.nanprod(S, axis=1).reshape(Global.n - Global.k, 1)
                l = MIN(np.abs(L))
                L = np.sign(L / S) * l
                lin = (lin + np.nansum(L, axis=0)).reshape(1, Global.n)
                L = lin - L
            d_bits[model] = demod(lin)
        if model == 3:  # 3 -> approximation model.
            for i in range(mx_iter):
                L = tanh_mine(L / 2)
                L_ = np.nanprod(L, axis=1).reshape(Global.n - Global.k, 1)
                L = np.divide(L_, L)
                L = 2 * atanh_mine(L)
                lin = (lin + np.nansum(L, axis=0)).reshape(1, Global.n)
                L = lin - L
            d_bits[model] = demod(lin)
        if model == 4:  # 4 -> default model
            d_bits[model] = demod(l_intrinsic)
    return np.array(d_bits)
Example #10
    async def _update_active_transmission(self):
        """Re-calculate transmission_actual based on working filters."""
        config = tuple(self.active_config.value)
        offset = self.parent.first_filter

        transm = np.zeros_like(config) * np.nan
        transm3 = np.zeros_like(config) * np.nan
        for idx, filt in self.active_filters.items():
            zero_index = idx - offset
            if State(config[zero_index]).is_inserted:
                transm[zero_index] = filt.transmission.value
                transm3[zero_index] = filt.transmission_3omega.value

        await self.transmission_actual.write(np.nanprod(transm))
        await self.transmission_3omega_actual.write(np.nanprod(transm3))
Example #11
def test_sequences(net_study):
    net = net_study
    c_study = net.c.copy()

    # all possible recall sequences
    n_item = 3
    sequences = []
    for i in range(n_item + 1):
        sequences.extend(list(permutations(range(n_item), i)))

    B = 0.8
    T = 10
    X1 = 0.05
    X2 = 1
    p_stop = cmr.p_stop_op(n_item, X1, X2)
    p = np.empty((len(sequences), n_item + 1))
    p[:] = np.nan
    for i, recalls in enumerate(sequences):
        net.c = c_study.copy()
        p_recalls = net.p_recall(('task', 'item'), recalls, 'task', B, T,
                                 p_stop)
        p[i, :len(p_recalls)] = p_recalls

    # probability of any recall sequence should be 1
    p_any = np.sum(np.nanprod(p, 1))
    np.testing.assert_allclose(p_any, 1)
Example #12
def maximum_likelihood_fig(testmag, testmagerr, meanmag, posvar):
    ''' Function that calculates the maximum likelihood variance for a single 
    source and plots the corresponding likelihood curve with error bars marked.
    
    Inputs:
        testmag = array of magnitudes/fluxes (i.e. the light curve) to test
        testmagerr = array of corresponding errors
        meanmag = mean of the lightcurve/test value for light curve being flat
        posvar = array of sigmas to test for maximum likelihood
        
    Outputs:
        sig = maximum likelihood sigma for the inputted light curve
        err = the error on sig according to the likelihood curve
    '''
    # Calculate likelihood curve
    L = np.array([
        np.nanprod(np.exp(-0.5 * (testmag - meanmag)**2 /
                          (testmagerr**2 + testsig**2)) /
                   np.sqrt(2 * np.pi * (testmagerr**2 + testsig**2)))
        for testsig in posvar])
    sig = float(posvar[L==np.nanmax(L)][0]) #sigma value at max L
    err = np.sqrt(np.average((posvar-np.average(posvar, weights=L))**2, weights=L))
    plt.figure()
    plt.plot(posvar, L)
    plt.vlines(sig, np.min(L), np.max(L))
    plt.vlines(sig+err, np.min(L), np.max(L))
    return sig, err
Example #13
def maximum_likelihood(testmag, testmagerr, meanmag, posvar, n=None, printn=10):
    ''' Function that calculates the maximum likelihood variance for a single
    source. Can print a counter as it progresses, since it is slow to run
    over a full catalogue.
    
    Inputs:
        testmag = array of magnitudes/fluxes (i.e. the light curve) to test
        testmagerr = array of corresponding errors
        meanmag = mean of the lightcurve/test value for light curve being flat
        posvar = array of sigmas to test for maximum likelihood
        n = submitted counter for how many times the function has run. Default
            is None, as the counter is usually not wanted
        printn = at what multiple of n to print a counter. Default is 10
    Outputs:
        sig = maximum likelihood sigma for the inputted light curve
        err = the error on sig according to the likelihood curve
    '''
    if n is not None and n % printn == 0:
        print(n)
    # Calculate likelihood curve
    L = np.array([
        np.nanprod(np.exp(-0.5 * (testmag - meanmag)**2 /
                          (testmagerr**2 + testsig**2)) /
                   np.sqrt(2 * np.pi * (testmagerr**2 + testsig**2)))
        for testsig in posvar])
    sig = float(posvar[L==np.nanmax(L)][0]) #sigma value at max L
    if np.sum(L) == 0:
        return sig, np.nan
    else:
        err = np.sqrt(np.average((posvar-np.average(posvar, weights=L))**2, weights=L))
        return sig, err
Example #14
def cum_returns_final_1d_nb(returns: tp.Array1d,
                            start_value: float = 0.) -> float:
    """Total return."""
    out = np.nanprod(returns + 1.)
    if start_value == 0.:
        return out - 1.
    return out * start_value
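A quick numerical check of the formula: np.nanprod(returns + 1.) compounds the non-NaN period returns, and a NaN period contributes a factor of 1 (a flat period). The numbers below are made up for illustration:

import numpy as np

returns = np.array([0.10, np.nan, -0.05])
growth = np.nanprod(returns + 1.)  # 1.10 * 1 * 0.95 = 1.045
print(growth - 1.)                 # ~0.045 total return when start_value == 0
print(growth * 100.)               # ~104.5 final value for a start_value of 100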
Example #15
    def predict(self, X, a=None, b=None, k=None, c=None, d=None):
        """
        X is N x M where N is # of guides, and M is max # annotations
        """
        if a is None: a = self.maximum['a']
        if b is None: b = self.maximum['b']
        if k is None: k = self.maximum['k']
        assert c is None
        assert d is None

        warpedX = self.warp_inputs(a, b, X)

        if self.combiner == "nb":
            pass
        elif self.combiner == "nb_modulated":
            num_annot = np.sum(~np.isnan(X), axis=1)
            modulation = 1.0/num_annot**k
            modulation = np.tile(modulation, [6,1]).T
            warpedX = warpedX**modulation
        else:
            raise Exception()

        assert np.nanmin(warpedX) > 0.0
        assert np.nanmax(warpedX) < 1.0
        return np.nanprod(warpedX, axis=1)[:, None]
Example #16
    def featurize(self, X, training=False):
        f_dict = {}
        f_dict["f_prod"] = np.nanprod(X, axis=1)[:, None]
        f_dict["f_sum"] = np.nansum(X, axis=1)[:, None]
        f_dict["f_mean"] = np.nanmean(X, axis=1)[:, None]
        f_dict["f_max"] = np.nanmax(X, axis=1)[:, None]
        f_dict["f_min"] = np.nanmin(X, axis=1)[:, None]
        f_dict["f_count"] = X.shape[1] - np.sum(np.isnan(X), axis=1)[:, None]

        # now make each feature interact with the counts:
        if False:
            for key in f_dict.keys():
                if not (key=="count"):
                    f_dict[key + "_count"] = np.multiply(f_dict["f_count"], f_dict[key])
        self.ordered_keys = []
        f_concat = None
        for key in f_dict.keys():
            self.ordered_keys.append(key)
            dat = f_dict[key]
            if self.normalize_feat:
                #print "normalizing features"
                if training:
                    self.mean = np.mean(dat)
                    self.std = np.std(dat)
                dat = (dat - self.mean) / self.std
            if f_concat is None:
                f_concat = dat
            else:
                f_concat = np.concatenate((f_concat, dat), axis=1)
        return f_concat
Example #17
 def cross_covariance(self, i: int, j: int, h: np.ndarray) -> np.ndarray:
     if i > j:
         # swap indices (cross-covariance is symmetric)
         i, j = j, i
     return (self.params.rho.values[i, j] *
             np.nanprod(self.params.sigma.values) *
             self.correlation(i, j, h))
Example #18
    def __matmul__(self, other):
        if isinstance(other, Option):
            return self @ Domain(other)

        if self._is_array_option():
            that = self._to_scalar_product()
        else:
            that = self

        if other._is_array_option():
            other = other._to_scalar_product()

        if that._is_scalar_product() and other._is_scalar_product():
            if len(that.cubes) == len(other.cubes):
                cubes = [
                    cube_1 + cube_2
                    for cube_1, cube_2 in zip(that.cubes, other.cubes)
                ]
                weights = np.nanprod(np.stack([that.weights, other.weights]),
                                     axis=0)
                nan_mask = np.logical_and(np.isnan(that.weights),
                                          np.isnan(other.weights))
                weights[nan_mask] = np.nan
                return Domain(domain=cubes, weights=weights)
        raise ValueError("The numbers of domain cubes must conincide.")
Example #19
    def expectation(self, imgs):
        E_z_nc = np.ndarray((imgs.shape[2], self.n_classes))
        for n in range(imgs.shape[2]):
            for c in range(self.n_classes):
                # if (self.mu <= 0.).any():
                # 	print("BAAAAAAAA")

                # E_z_nc[n, c] = np.log(self.pi[c])
                # E_z_nc[n, c] += np.sum(np.multiply(np.log(self.mu[:,:,c]), imgs[:,:, n] ))
                # E_z_nc[n, c] += np.sum(np.multiply(np.log(1 - self.mu[:,:,c]), 1 - imgs[:,:, n] ))

                E_z_nc[n, c] = np.nanprod(
                    np.multiply(
                        np.power(self.mu[:, :, c], imgs[:, :, n]),
                        np.power((1 - self.mu[:, :, c]), (1 - imgs[:, :, n]))))
                # plt.imshow(np.multiply(
                # 			np.power( self.mu[:,:,c], imgs[:,:, n] ),
                # 			np.power( (1 - self.mu[:, :, c]), (1 - imgs[:,:, n]) )
                # 			))
                # plt.colorbar()
                # print(E_z_nc[n,c])
                # plt.show()
                E_z_nc[n, c] *= self.pi[c]

        # E_z_nc = np.log(E_z_nc)

        # normalize responsibilities row-wise; compute the denominator once so
        # earlier column updates do not change it
        row_totals = np.sum(E_z_nc, axis=1)
        for c in range(self.n_classes):
            # E_z_nc[:,c] -= np.log(np.sum(np.exp(E_z_nc), axis=1))
            E_z_nc[:, c] /= row_totals

        return np.nan_to_num(E_z_nc)
Example #20
def naive_bayes(data):
    naive_probs = np.empty((10))
    for j in range(0, 10):
        nz_values = data[j]
        c = np.nanprod(nz_values * 500, dtype=np.float64)
        naive_probs[j] = c
    return naive_probs
Example #21
def cum_returns_final(returns, starting_value=2000):
    """
    Compute total returns from simple returns.

    Parameters
    ----------
    returns : pd.DataFrame, pd.Series, or np.ndarray
       Noncumulative simple returns of one or more timeseries.
    starting_value : float, optional
       The starting returns.

    Returns
    -------
    total_returns : pd.Series, np.ndarray, or float
        If input is 1-dimensional (a Series or 1D numpy array), the result is a
        scalar.
    """
    if len(returns) == 0:
        return np.nan

    result = np.nanprod(returns + 1, axis=0)

    if starting_value == 0:
        result -= 1
    else:
        result *= starting_value

    return result    
Example #22
def join_count_tables(table_info, parsed_join_clauses, count_tables, join_how):
    g = make_join_graph(parsed_join_clauses)
    df_ret = None
    for edge in nx.dfs_edges(g, source=parsed_join_clauses[0][0]):
        t1, t2 = edge
        join_columns = g.edges[edge]["join_columns"]
        cs1 = join_columns[t1]  # columns to join
        cs2 = join_columns[t2]  # columns to join
        if df_ret is None:
            df_ret = count_tables[t1].add_prefix(t1 + ":")
        log.info("Joining {} and {} on {}".format(
            t1, t2, ", ".join([c1 + "=" + c2 for c1, c2 in zip(cs1, cs2)])))
        # NOTE: Since we are traversing the join graph in a DFS order, it is
        # guaranteed that at this point `df_ret` already contains `t1.c1`.
        df_ret = df_ret.merge(
            count_tables[t2].add_prefix(t2 + ":"),
            how=join_how,
            left_on=["{}:{}".format(t1, c) for c in cs1],
            right_on=["{}:{}".format(t2, c) for c in cs2],
        )

    # NOTE: `np.nanprod()` treats `np.nan` as 1.
    df_ret["cnt"] = np.nanprod([df_ret[f"{t}:cnt"] for t in table_info],
                               axis=0)
    return df_ret
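The final line relies on np.nanprod() treating NaN as 1, so a table with a missing count simply drops out of the row's product. A small illustration with made-up counts:

import numpy as np

cnt_a = np.array([2., 3., 4.])        # counts contributed by one table
cnt_b = np.array([5., np.nan, 2.])    # second table has no count for the middle row
print(np.nanprod([cnt_a, cnt_b], axis=0))  # [10.  3.  8.]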
Example #23
File: utils.py Project: Kipre/files
 def time_dependent_terms(k):
     current_filter = sparse_filters.copy()
     values = list(predicted_values[k]) + list(
         not_predicted_time_dependent[k])
     for i, val in enumerate(values):
         current_filter[:, i * max_degree:(i + 1) * max_degree] *= val
     return np.nanprod(current_filter, axis=1)
Example #24
 def simulate(self):
     if self.HistoryRate is not None:
         RateMean = np.nanprod(1 + self.HistoryRate)**(
             self.NoP / self.HistoryRate.shape[0]) - 1  # historical mean of returns
         RateSigma = np.nanstd(self.HistoryRate,
                               ddof=1) * self.NoP**0.5  # historical volatility of returns
     else:
         RateMean = self.RateMean
         RateSigma = self.RateSigma
     np.random.seed(self.Seed)
     Rate = np.random.normal(loc=RateMean,
                             scale=RateSigma,
                             size=(self.NPeriod, self.NSample))
     if not self.DropIllegal:
         return Rate
     Mask = (Rate <= -1)
     nIllegal = np.sum(Mask)
     while nIllegal > 0:
         self._Logger.debug("There are %d illegal samples, try again!" %
                            (nIllegal, ))
         Rate[Mask] = np.random.normal(loc=RateMean,
                                       scale=RateSigma,
                                       size=(nIllegal, ))
         Mask = (Rate <= -1)
         nIllegal = np.sum(Mask)
     np.random.seed(None)
     return Rate
Example #25
    def pwflrg_lhs_memberhip_fuzzyfied(self, flrg, sample):
        vals = []
        for ct in range(len(flrg.LHS)):  # fuzz in enumerate(sample):
            vals.append(
                [mv for fset, mv in sample[ct] if fset == flrg.LHS[ct]])

        return np.nanprod(vals)
Example #26
 def fire(self, input):
     mf_vals = np.zeros(self.weights.shape)+np.nan
     for i in range(self.num_inputs):
         x = input[i]
         # given input, evaluate membership degree in each partition
         for j in range(len(self.mfs)):
             if self.weights[i,j]:
                 mf_vals[i,j] = self.mfs[j].value(x)
     
     # a rule may not cover all dimensions, i.e.:
     # "IF x1 is low THEN ..." does not cover x0
     # coverage == 1   -> rule covers this dimension
     # coverage == nan -> rule does not cover this dimension
     coverage = 1.0 - np.isnan(mf_vals).prod(axis=1)
     coverage[coverage==0] = np.nan
     
     # this coeff reflects % of dimensions covered by the rule
     coverage_coeff = float(sum(coverage==1)) / coverage.shape[0]
     
     # by multiplying by coverage we exclude 0's
     # that result from np.nansum([nan, nan, nan, ...])
     # that are in uncovered dimensions
     # thus uncovered dimensions are excluded from product
     op_or = np.nansum(mf_vals,axis=1) * coverage
     op_and = np.nanprod(op_or)
     
     # if the rule does not cover all dimensions, its firing strength
     # is penalized proportionally to the number of uncovered dims.
     return op_and
Example #27
 def __mul__(self, other):
     if isinstance(other, float) and np.isnan(other):
         return self
     if self.cubes is None:
         result = other
     elif isinstance(other, (int, float)):
         result = self
         weights = self.weights
         weights[np.isnan(weights)] = 1
         result.weights = weights * other
     elif isinstance(other, Domain):
         if other.cubes is None:
             result = self
         else:
             res = list(product(self.cubes, other.cubes))
             res = [item[0] + item[1] for item in res]
             pairs = np.array(list(product(self.weights, other.weights)))
             weights = np.array([np.nanprod(item) for item in pairs])
             nan_mask = np.array([np.isnan(item).all() for item in pairs])
             weights[nan_mask] = np.nan
             result = Domain(res, weights=weights)
     elif isinstance(other, Option):
         result = self * Domain(other)
     else:
         raise TypeError('Arguments must be numeric, Domains or Options')
     return result
def combined_likelihood(data, likelihood_function=None, likelihood_kwargs={}):
    '''Applies likelihood function to each signal and returns their product

    If there isn't a column dimension, just returns the likelihood.

    Parameters
    ----------
    data : array_like, shape=(n_signals, ...)
    likelihood_function : function
        Likelihood function to be applied to each signal.
        The likelihood function must take data as its first argument.
        All other arguments for the likelihood should be passed
        via `likelihood_kwargs`
    likelihood_kwargs : dict
        Keyword arguments for the likelihood function

    Returns
    -------
    likelihood : array_like, shape=(n_parameters * n_states,)

    '''
    try:
        return np.nanprod(likelihood_function(data, **likelihood_kwargs),
                          axis=0).squeeze()
    except ValueError:
        return likelihood_function(data, **likelihood_kwargs).squeeze()
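A toy usage sketch for the function above; the Gaussian-shaped likelihood and the numbers are made up purely for illustration:

import numpy as np

def toy_likelihood(data, scale=1.0):
    # elementwise pseudo-likelihood; NaNs stay NaN and are skipped by nanprod
    return np.exp(-0.5 * (data / scale) ** 2)

signals = np.array([[0.1, 0.2],
                    [np.nan, 0.3]])   # two signals, two samples each
print(combined_likelihood(signals, toy_likelihood, {'scale': 2.0}))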
def evaluate_mark_space(test_marks,
                        training_marks=None,
                        mark_std_deviation=20):
    '''Evaluate the multivariate Gaussian kernel for the mark space
    given training marks.

    For each mark in the training data (`training_marks`), a univariate
    Gaussian is placed with its mean at the value of each mark with
    standard deviation `mark_std_deviation`. The product of the Gaussians
    along the mark dimension yields a multivariate Gaussian kernel
    evaluated at each training spike with a diagonal covariance matrix.

    Parameters
    ----------
    test_marks : array_like, shape=(n_marks,)
        The marks to be evaluated
    training_marks : shape=(n_training_spikes, n_marks)
        The marks for each spike when the animal is moving
    mark_std_deviation : float, optional
        The standard deviation of the Gaussian kernel in millivolts

    Returns
    -------
    mark_space_estimator : array_like, shape=(n_training_spikes,)

    '''
    n_training_spikes = training_marks.shape[0]
    test_marks = np.tile(test_marks[:, np.newaxis], (1, n_training_spikes)).T
    return np.nanprod(_normal_pdf(test_marks,
                                  mean=training_marks,
                                  std_deviation=mark_std_deviation),
                      axis=1)
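The helper _normal_pdf is not shown in this snippet; assuming it is an ordinary Gaussian density (which is what the call above expects), a compatible sketch would be:

import numpy as np

def _normal_pdf(x, mean=0.0, std_deviation=1.0):
    # elementwise Gaussian density; NaN inputs stay NaN so nanprod can skip them
    z = (x - mean) / std_deviation
    return np.exp(-0.5 * z ** 2) / (std_deviation * np.sqrt(2.0 * np.pi))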
Example #30
    def incorporate_scan(self, V, N, trunc_dist, do_plot=False):
        """
        Given a range scan, update the signed distance image and the weights to incorporate the new scan.
        Parameters
        ----------
        V: ndarray(M, 2)
            Points scanned in global coordinates that were actually seen 
            (non-infinite)
        N: ndarray(M, 2)
            Array of corresponding normals in global coordinates
        trunc_dist: float
            Threshold at which to truncate
        """
        if V.size == 0:
            return
        tree = KDTree(V)
        distances, indices = tree.query(self.XGrid, k=1)
        indices = indices.flatten()
        distances = np.reshape(distances, (self.res, self.res))

        ## Step 1: Compute the Signed distance function
        # All the points on V that are closest to the corresponding
        # points on XGrid
        P = V[indices, :]
        N2 = N[indices, :]
        sdf = np.sum((self.XGrid - P) * N2, 1)
        sdf = np.reshape(sdf, (self.res, self.res))
        w = np.zeros_like(sdf)
        w[distances < trunc_dist] = 1

        ## Step 2: Incorporate this signed distance
        ## function into the overall signed distance function
        numerator = np.nanprod(np.array([self.weights, self.SDF]), axis=0)
        numerator = numerator + w * sdf
        self.weights = w + self.weights
        idx = self.weights > 0
        self.SDF[idx] = numerator[idx] / self.weights[idx]
        self.SDF[self.weights == 0] = np.nan

        if do_plot:
            sdf[distances >= trunc_dist] = np.nan
            vmax = np.max(np.abs(sdf[~np.isnan(sdf)]))
            plt.figure(figsize=(15, 5))
            plt.subplot(131)
            plt.imshow(distances)
            plt.gca().invert_yaxis()
            plt.colorbar()
            plt.title("Euclidean Distances of Nearest Neighbor")
            plt.subplot(132)
            plt.imshow(sdf, cmap='seismic', vmin=-vmax, vmax=vmax)
            plt.gca().invert_yaxis()
            plt.colorbar()
            plt.title("Signed distance")
            plt.subplot(133)
            plt.imshow(w)
            plt.gca().invert_yaxis()
            plt.title("Weights")
            plt.colorbar()
            plt.show()
Example #31
def test_nan():
    x = np.array([[1, np.nan, 3, 4],
                  [5, 6, 7, np.nan],
                  [9, 10, 11, 12]])
    d = da.from_array(x, chunks=(2, 2))

    assert_eq(np.nansum(x), da.nansum(d))
    assert_eq(np.nansum(x, axis=0), da.nansum(d, axis=0))
    assert_eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))
    assert_eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))
    assert_eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))
    assert_eq(np.nanvar(x), da.nanvar(d))
    assert_eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))
    assert_eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))
    assert_eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))
    assert_eq(nanprod(x), da.nanprod(d))
Example #32
def test_nan():
    x = np.array([[1, np.nan, 3, 4],
                  [5, 6, 7, np.nan],
                  [9, 10, 11, 12]])
    d = da.from_array(x, blockshape=(2, 2))

    assert eq(np.nansum(x), da.nansum(d))
    assert eq(np.nansum(x, axis=0), da.nansum(d, axis=0))
    assert eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))
    assert eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))
    assert eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))
    assert eq(np.nanvar(x), da.nanvar(d))
    assert eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))
    assert eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))
    assert eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))
    with ignoring(AttributeError):
        assert eq(np.nanprod(x), da.nanprod(d))
Example #33
def array_nanprod(arr):
    return np.nanprod(arr)
Example #34
 def time_nanprod(self, array_size, percent_nans):
     np.nanprod(self.arr)
Example #35
 def test_nanprod(self):
     tgt = np.prod(self.mat)
     for mat in self.integer_arrays():
         assert_equal(np.nanprod(mat), tgt)
Example #36
def msssim(
    img0,
    img1,
    nlevels=5,
    sigma=1.2,
    L=1.0,
    K=(0.01, 0.03),
    alpha=4,
    beta_gamma=None
):
    """Multi-Scale Structural SIMilarity index (MS-SSIM).

    Parameters
    ----------
    img0 : array
    img1 : array
        Two images for comparison.
    nlevels : int
        The max number of levels to analyze
    sigma : float
        Sets the standard deviation of the gaussian filter. This setting
        determines the minimum scale at which quality is assessed.
    L : scalar
        The dynamic range of the data. This value is 1 for float
        representations and 2^bitdepth for integer representations.
    K : 2-tuple
        A list of two constants which help prevent division by zero.
    alpha : float
        The exponent which weights the contribution of the luminance term.
    beta_gamma : list
        The exponent which weights the contribution of the contrast and
        structure terms at each level.

    Returns
    -------
    scales : ndarray
        The effective filter scale used at each level.
    ms_ssim_mean : float
        The mean of the final MS-SSIM quality map. The valid range for SSIM
        is [-1, 1].
    map : ndarray
        The pointwise MS-SSIM quality map.


    References
    ----------
    Multi-scale Structural Similarity Index (MS-SSIM)
    Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multi-scale structural
    similarity for image quality assessment," Invited Paper, IEEE Asilomar
    Conference on Signals, Systems and Computers, Nov. 2003
    """
    _full_reference_input_check(img0, img1, sigma, nlevels, L)
    # The relative importance of each level as determined by human experiment
    if beta_gamma is None:
        beta_gamma = np.array([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]) * 4
    assert nlevels < 6, "Not enough beta_gamma weights for more than 5 levels"
    scales = np.zeros(nlevels)
    maps = [None] * nlevels
    scale, luminance, ssim_map = ssim(
        img0,
        img1,
        sigma=sigma,
        L=L,
        K=K,
        scale=sigma,
        alpha=alpha,
        beta_gamma=0
    )
    original_shape = np.array(img0.shape)
    for level in range(0, nlevels):
        scale, ssim_mean, ssim_map = ssim(
            img0,
            img1,
            sigma=sigma,
            L=L,
            K=K,
            scale=sigma,
            alpha=0,
            beta_gamma=beta_gamma[level]
        )
        # Always take the direct ratio between original and downsampled maps
        # to prevent resizing mismatch for odd sizes
        ratio = original_shape / np.array(ssim_map.shape)
        scales[level] = scale * ratio[0]
        maps[level] = ndimage.zoom(ssim_map, ratio, prefilter=False, order=0)

        if level == nlevels - 1:
            break
        # Downsample (using ndimage.zoom to prevent sampling bias)
        # Images become half the size
        img0 = ndimage.zoom(img0, 0.5)
        img1 = ndimage.zoom(img1, 0.5)

    map = luminance * np.nanprod(maps, axis=0)
    ms_ssim_mean = np.nanmean(map)
    return scales, ms_ssim_mean, map
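For reference, the combination computed by the last two lines corresponds to the usual MS-SSIM form from the cited Wang et al. paper, with a single luminance factor and one weighted contrast-structure term per level (here the luminance term comes from the full-resolution pass rather than the coarsest scale):

$$\mathrm{MS\text{-}SSIM}(x, y) = \left[l(x, y)\right]^{\alpha} \prod_{j=1}^{M} \left[cs_j(x, y)\right]^{w_j}$$

where $w_j$ is the ``beta_gamma`` weight applied at level $j$.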
Example #37
def stack_prod(arrs, nodata=None):
    """see stack_stats"""
    a = check_stack(arrs)
    if nodata is not None:
        a = mask_stack(a, nodata=nodata)
    return np.nanprod(a, axis=0)