Example #1
def test_memory_leak():
    import resource

    import bottleneck as bn
    import numpy as np

    arr = np.arange(1).reshape((1, 1))

    starting = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    for i in range(1000):
        for axis in [None, 0, 1]:
            bn.nansum(arr, axis=axis)
            bn.nanargmax(arr, axis=axis)
            bn.nanargmin(arr, axis=axis)
            bn.nanmedian(arr, axis=axis)
            bn.nansum(arr, axis=axis)
            bn.nanmean(arr, axis=axis)
            bn.nanmin(arr, axis=axis)
            bn.nanmax(arr, axis=axis)
            bn.nanvar(arr, axis=axis)

    ending = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    diff = ending - starting
    diff_bytes = diff * resource.getpagesize()
    print(diff_bytes)
    # For 1.3.0 release, this had value of ~100kB
    assert diff_bytes == 0
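A portability note on the measurement above: ru_maxrss is reported in kilobytes on Linux but in bytes on macOS, so multiplying by the page size is only a rough conversion. A minimal sketch of a platform-aware helper (the unit handling follows the respective getrusage man pages; the helper name is ours):

import resource
import sys

def maxrss_bytes():
    # Peak resident set size in bytes, normalizing the platform units.
    rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return rss if sys.platform == 'darwin' else rss * 1024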
Example #2
    def _gettoptrace(self, lpars, t2p, classifier, plusprob):
        """
        Prepare the contents of the top trace
        :return: None
        """

        # Only add a value if the top trace is photometry, ripple, or population activity
        if isinstance(lpars['top-trace'], str):
            if lpars['top-trace'][:4] == 'phot':
                phot = t2p.photometry(tracetype=lpars['top-tracetype'])
                if len(phot) > 0:
                    phot -= np.min(phot)
                    phot /= np.max(phot)
                    self._toptraces['photometry'] = phot
            elif lpars['top-trace'][:3] == 'rip':
                ripple = t2p.ripple()
                if len(ripple) > 0:
                    ripple -= np.min(ripple)
                    ripple /= 80
                    ripple = np.clip(ripple, 0, 1)
                    self._toptraces['ripple'] = ripple
            elif lpars['top-trace'][:3] == 'tem':
                self._toptraces['pop-activity'] = \
                    classifier['priors']['plus']/plusprob
                print(nanmax(classifier['priors']['plus'][10000:]/plusprob),
                      plusprob, nanmax(classifier['priors']['plus'][10000:]))
Example #3
def test_memory_leak() -> None:
    import resource

    import bottleneck as bn
    import numpy as np

    arr = np.arange(1).reshape((1, 1))

    n_attempts = 3
    results = []

    for _ in range(n_attempts):
        starting = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

        for _ in range(1000):
            for axis in [None, 0, 1]:
                bn.nansum(arr, axis=axis)
                bn.nanargmax(arr, axis=axis)
                bn.nanargmin(arr, axis=axis)
                bn.nanmedian(arr, axis=axis)
                bn.nansum(arr, axis=axis)
                bn.nanmean(arr, axis=axis)
                bn.nanmin(arr, axis=axis)
                bn.nanmax(arr, axis=axis)
                bn.nanvar(arr, axis=axis)

        ending = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

        diff = ending - starting
        diff_bytes = diff * resource.getpagesize()
        # For 1.3.0 release, this had value of ~100kB
        if diff_bytes:
            results.append(diff_bytes)
        else:
            break

    assert len(results) < n_attempts
Example #4
    def fit(self, X, y):
        X_y = self._check_params(X, y)
        self.X = X_y[0]
        self.y = X_y[1].reshape((-1, 1))
        n, p = X.shape

        S = []    # list of selected features
        F = range(p)    # list of unselected features

        if self.n_features != 'auto':
            feature_mi_matrix = np.zeros((self.n_features, p))
        else:
            feature_mi_matrix = np.zeros((n, p))
        feature_mi_matrix[:] = np.nan
        S_mi = []

        # Find the first feature
        k_min = 3
        range_k = 7
        xy_MI = np.empty((range_k, p))
        for i in range(range_k):
            xy_MI[i, :] = self._get_first_mi_vector(i + k_min)
        xy_MI = bn.nanmedian(xy_MI, axis=0)

        S, F = self._add_remove(S, F, bn.nanargmax(xy_MI))
        S_mi.append(bn.nanmax(xy_MI))

        if self.verbose > 0:
            self._info_print(S, S_mi)

        # Find the next features
        if self.n_features == 'auto':
            n_features = np.inf
        else:
            n_features = self.n_features

        while len(S) < n_features:
            s = len(S) - 1
            feature_mi_matrix[s, F] = self._get_mi_vector(F, S[-1])
            fmm = feature_mi_matrix[:len(S), F]
            if bn.allnan(bn.nanmean(fmm, axis=0)):
                break
            MRMR = xy_MI[F] - bn.nanmean(fmm, axis=0)
            if np.isnan(MRMR).all():
                break
            selected = F[bn.nanargmax(MRMR)]
            S_mi.append(bn.nanmax(bn.nanmin(fmm, axis=0)))
            S, F = self._add_remove(S, F, selected)
            if self.verbose > 0:
                self._info_print(S, S_mi)
            if self.n_features == 'auto' and len(S) > 10:
                MI_dd = signal.savgol_filter(S_mi[1:], 9, 2, 1)
                if np.abs(np.mean(MI_dd[-5:])) < 1e-3:
                    break
        self.n_features_ = len(S)
        self.ranking_ = S
        self.mi_ = S_mi

        return self
Example #5
def photo_gain(p, trans, photo, res, inf_gb=False):
    """
    Calculates the photosynthetic C gain of a plant, where the
    photosynthetic rate (A) is evaluated over the array of leaf water
    potentials (P) and, thus transpiration (E), and normalized by the
    instantaneous maximum A over the full range of E.

    Arguments:
    ----------
    p: recarray object or pandas series or class containing the data
        time step's met data & params

    trans: array
        transpiration [mol m-2 s-1], values depending on the possible
        leaf water potentials (P) and the Weibull parameters b, c

    photo: string
        either the Farquhar model for photosynthesis, or the Collatz
        model

    res: string
        either 'low' (default), 'med', or 'high' to run the optimising
        solver

    inf_gb: bool
        if True, gb is prescribed and very large

    Returns:
    --------
    gain: array
        unitless instantaneous photosynthetic gains for possible values
        of Ci minimized over the array of E

    A: array
        gross photosynthetic assimilation rate [umol m-2 s-1] for
        possible values of Ci minimized over the array of E (or P)

    Ci: array
        intercellular CO2 concentration [Pa] for which A(P) is minimized
        to be as close as possible to the A predicted by either the
        Collatz or the Farquhar photosynthesis model

    """

    # get all Cis(P)
    Ci, mask = Ci_sup_dem(p, trans, photo=photo, res=res, inf_gb=inf_gb)

    try:
        A_P = A_trans(p, trans[mask], Ci, inf_gb=inf_gb)  # A supply
        gain = A_P / bn.nanmax(A_P)  # photo gain, soil P excluded

        if bn.nanmax(A_P) < 0.:  # when resp >> An everywhere
            gain *= -1.

    except ValueError:  # if trans is "pre-optimised" for
        gain = 0.

    return gain, Ci, mask
Example #6
def zproject(stack):
    try:
        zproj = bottleneck.nanmax(stack, axis=0)
    except MemoryError:
        nframes = stack.shape[0]
        nseqs = 32
        nsubseqs = nframes // nseqs  # integer division keeps the slice bounds ints
        zproj = bottleneck.nanmax(np.array([
            bottleneck.nanmax(
                stack[nseq*nsubseqs:(nseq+1)*nsubseqs, :, :],
                axis=0)
            for nseq in range(nseqs)
        ]), axis=0)

    return zproj
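A quick usage sketch for zproject, assuming numpy and bottleneck are imported at module level as in the snippet; the stack here is made up:

import numpy as np
import bottleneck

stack = np.random.rand(8, 64, 64)
stack[0, 0, 0] = np.nan        # NaNs are ignored by nanmax
zproj = zproject(stack)        # max-intensity projection along the frame axis
print(zproj.shape)             # (64, 64)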
Example #7
def max_of_wave(f, x):
    data = []
    for i in range(len(Speed_Dating_df[f])):
        if Speed_Dating_df['wave'][i] == x:
            data.append(Speed_Dating_df[f][i])
    max_data = round(bn.nanmax(data), 2)
    return max_data
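max_of_wave reads the module-level Speed_Dating_df, so a usage sketch needs one in scope; this toy frame is purely hypothetical:

import numpy as np
import pandas as pd
import bottleneck as bn

Speed_Dating_df = pd.DataFrame({
    'wave': [1, 1, 2, 2],
    'age':  [21.0, np.nan, 34.5, 28.0],
})
print(max_of_wave('age', 1))   # 21.0; the NaN entry is ignored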
Example #8
def calc_aspect(xs, ys):
    import bottleneck as bn

    points = np.vstack([xs, ys])
    cov = np.cov(points)
    w, v = np.linalg.eig(cov)
    return bn.nanmin(w) / bn.nanmax(w)
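A usage sketch: the eigenvalue ratio is near 0 for an elongated point cloud and approaches 1 for an isotropic one (the data is made up):

import numpy as np

rng = np.random.default_rng(0)
xs = rng.normal(size=500) * 10.0   # long axis, variance ~100
ys = rng.normal(size=500)          # short axis, variance ~1
print(calc_aspect(xs, ys))         # roughly 0.01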
Example #9
def mul_handle_nan(R, R1, R2, domain):
    if all(np.isfinite(R1)) and all(np.isfinite(R2)):
        return R
    RR = R.resolve()[0]
    R2_is_scalar = isscalar(R2)
    ind = logical_or(np.isnan(RR[0]), np.isnan(RR[1]))
    #        ind_z1 = logical_or(lb1 == 0, ub1 == 0)
    #        ind_z2 = logical_or(lb2 == 0, ub2 == 0)
    #        ind_i1 = logical_or(np.isinf(lb1), np.isinf(ub1))
    #        ind_i2 = logical_or(np.isinf(lb2), np.isinf(ub2))
    #        ind = logical_or(logical_and(ind_z1, ind_i2), logical_and(ind_z2, ind_i1))
    if any(ind):
        lb1, ub1 = R1
        lb2, ub2 = (R2, R2) if R2_is_scalar or R2.size == 1 else R2
        lb1, lb2, ub1, ub2 = lb1[ind], lb2[ind], ub1[ind], ub2[ind]
        R1, R2 = R1[:, ind], R2[:, ind]
        t = np.vstack((lb1 * lb2, ub1 * lb2, lb1 * ub2, ub1 * ub2))
        t_min, t_max = np.atleast_1d(nanmin(t, 0)), np.atleast_1d(nanmax(t, 0))

        # !!!!!!!!!!!!!!!!1 TODO: check it
        t = np.vstack((t_min, t_max))
        update_mul_inf_zero(R1, R2, t)
        t_min, t_max = t

        definiteRange_Tmp = \
        R.definiteRange if type(R.definiteRange) == bool or R.definiteRange.size == 1\
        else R.definiteRange[ind]
        R_Tmp_nan = boundsurf(surf({}, t_min), surf({}, t_max),
                              definiteRange_Tmp, domain)
        R = R_Tmp_nan if all(ind) \
        else boundsurf_join((ind, logical_not(ind)), (R_Tmp_nan, R.extract(logical_not(ind))))
    return R
Example #10
def singleVar(content):
    return dict(min=nanmin(content),
                max=nanmax(content),
                mean=nanmean(content),
                median=nanmedian(content),
                valid=numpy.sum(numpy.isfinite(content)) * 100.0 /
                content.size)
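A usage sketch, assuming the nan-aware reducers are imported at module level as the snippet implies:

import numpy
from bottleneck import nanmax, nanmean, nanmedian, nanmin

content = numpy.array([1.0, 2.0, numpy.nan, 4.0])
print(singleVar(content))
# {'min': 1.0, 'max': 4.0, 'mean': 2.33..., 'median': 2.0, 'valid': 75.0}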
Example #11
def mul_handle_nan(R, R1, R2, domain):
    if all(np.isfinite(R1)) and all(np.isfinite(R2)):
        return R
    RR = R.resolve()[0]
    R2_is_scalar = isscalar(R2)
    ind = logical_or(np.isnan(RR[0]), np.isnan(RR[1]))
#        ind_z1 = logical_or(lb1 == 0, ub1 == 0)
#        ind_z2 = logical_or(lb2 == 0, ub2 == 0)
#        ind_i1 = logical_or(np.isinf(lb1), np.isinf(ub1))
#        ind_i2 = logical_or(np.isinf(lb2), np.isinf(ub2))
#        ind = logical_or(logical_and(ind_z1, ind_i2), logical_and(ind_z2, ind_i1))
    if any(ind):
        lb1, ub1 = R1
        lb2, ub2 = (R2, R2) if R2_is_scalar or R2.size == 1 else R2
        lb1, lb2, ub1, ub2 = lb1[ind], lb2[ind], ub1[ind], ub2[ind] 
        R1, R2 = R1[:, ind], R2[:, ind]
        t = np.vstack((lb1 * lb2, ub1 * lb2, lb1 * ub2, ub1 * ub2))
        t_min, t_max = np.atleast_1d(nanmin(t, 0)), np.atleast_1d(nanmax(t, 0))
        
        # !!!!!!!!!!!!!!!!1 TODO: check it
        t = np.vstack((t_min, t_max))
        update_mul_inf_zero(R1, R2, t)
        t_min, t_max = t
        
        definiteRange_Tmp = \
        R.definiteRange if type(R.definiteRange) == bool or R.definiteRange.size == 1\
        else R.definiteRange[ind]
        R_Tmp_nan = boundsurf(surf({}, t_min), surf({}, t_max), definiteRange_Tmp, domain)
        R = R_Tmp_nan if all(ind) \
        else boundsurf_join((ind, logical_not(ind)), (R_Tmp_nan, R.extract(logical_not(ind))))
    return R
Example #12
def normalize(img, in_place=False, max_val=1):
    if in_place:
        img -= bn.nanmin(img)
    else:
        img = img - bn.nanmin(img)
    img /= max_val * bn.nanmax(img)
    return img
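One caveat: the final line divides by max_val * nanmax(img), so max_val=2 shrinks the peak to 0.5 rather than stretching it to 2; max_val=1, the default, is the safe usage. A quick sketch:

import numpy as np
import bottleneck as bn

img = np.array([[0.0, 5.0], [np.nan, 10.0]])
out = normalize(img)                    # copy; the NaN is preserved
print(bn.nanmin(out), bn.nanmax(out))   # 0.0 1.0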
Example #13
def computeResidualViscCoeffs(RES, QM, VFLW, DX, DZ, DXD, DZD, DX2, DZ2):

    # Compute a filter length...
    #DL = 0.5 * (DX + DZ)
    #DL = DX * DZ
    DXZ = DXD * DZD
    DL = mt.sqrt(DXZ)

    # Compute absolute value of residuals
    ARES = np.abs(RES)

    # Normalize the residuals (U and W only!)
    for vv in range(2):
        if QM[vv] > 0.0:
            ARES[:, vv] *= (1.0 / QM[vv])
        else:
            ARES[:, vv] *= 0.0

    # Get the maximum in the residuals (unit = 1/s)
    QRES_MAX = DXZ * bn.nanmax(ARES, axis=1)

    # Compute flow speed plus sound speed coefficients
    QMAX = DL * VFLW
    #QMAX = bn.nanmax(DL * VWAV) # USE THE TOTAL MAX NORM

    # Limit DynSGS to upper bound
    compare = np.stack((QRES_MAX, QMAX), axis=1)
    QRES_CF = bn.nanmin(compare, axis=1)

    return (np.expand_dims(QRES_CF, 1), np.expand_dims(QMAX, 1))
Example #14
def scale(values, min=0, max=1):
    """Return values scaled to [min, max]"""
    minval = np.float_(bn.nanmin(values))
    ptp = bn.nanmax(values) - minval
    if ptp == 0:
        return np.clip(values, min, max)
    return (-minval + values) / ptp * (max - min) + min
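A usage sketch; the builtins min and max are shadowed only inside the function signature:

import numpy as np
import bottleneck as bn

values = np.array([2.0, np.nan, 4.0, 6.0])
print(scale(values))                  # [0.  nan 0.5 1. ]
print(scale(values, min=-1, max=1))   # [-1. nan 0.  1. ]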
Example #15
def spline_set_knots(x, num_knots, min_points_per_knot=3):

	knots = np.linspace(nanmin(x), nanmax(x), num_knots+2)

	hist, bin_edges = np.histogram(x, bins=knots)

	print(knots)
	print(hist)
	print(bin_edges)

	indx = hist > min_points_per_knot
	bin_edges = bin_edges[indx]

	hist, bin_edges = np.histogram(x, bins=bin_edges)

	print(knots)
	print(hist)
	print(bin_edges)

	# Remove knots if there are not at least min_points_per_knot points between them:
	newknots = array([], dtype='float64')
	for i in range(len(knots)-1):
		indx_data_between_knots = (knots[i] < x) & (x < knots[i+1])
		if sum(indx_data_between_knots) > min_points_per_knot:
			newknots = append(newknots, knots[i])

	knots = newknots
Example #16
	def get_probabilities(self, store=True):
		"""
		Get the probabilities associated with each feature. This technique
		uses the max across probabilities to form the global probabilities.
		This method should be called after fitting the SP.
		
		@param store: If True, the probabilities are stored internally. Set to
		False to reduce memory.
		
		@return: Return the probabilities.
		"""
		
		# Get the probabilities
		prob = np.zeros(self.ninputs)
		for i in xrange(self.ninputs):
			# Find all of the potential synapses for this input
			valid = self.syn_map == i
			
			# Find the max permanence across each of the potential synapses
			try:
				prob[i] = bn.nanmax(self.p[valid])
			except ValueError:
				prob[i] = 0. # Occurs for missing connections
		
		# Store the probabilities
		if store: self.prob = prob
		
		return prob
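The try/except above guards the case where no synapse maps to input i: like numpy, bottleneck raises ValueError when asked for the max of an empty selection. The pattern in isolation:

import numpy as np
import bottleneck as bn

p = np.array([0.2, 0.7, 0.4])
valid = np.array([False, False, False])   # no potential synapses for this input
try:
    prob_i = bn.nanmax(p[valid])
except ValueError:                        # empty selection
    prob_i = 0.0
print(prob_i)                             # 0.0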
Example #17
def updateNodes(nodesToUpdate, fo):
    if len(nodesToUpdate) == 0: return
    a_tmp = array([node.a for node in nodesToUpdate])
    Tmp = a_tmp
    Tmp[Tmp>fo] = fo                

    o_tmp = array([node.o for node in nodesToUpdate])
    Tmp -= o_tmp
    Tmp[Tmp<1e-300] = 1e-300
    Tmp[o_tmp>fo] = nan
    tnlh_all_new =  - log2(Tmp)
    
    del Tmp, a_tmp
    
    tnlh_all_new += vstack([node.tnlhf for node in nodesToUpdate])#tnlh_fixed[ind_update]
    
    tnlh_curr_best = nanmin(tnlh_all_new, 1)

    o_tmp[o_tmp > fo] = -inf
    M = atleast_1d(nanmax(o_tmp, 1))
    for j, node in enumerate(nodesToUpdate): 
        node.fo = fo
        node.tnlh_curr = tnlh_all_new[j]
        node.tnlh_curr_best = tnlh_curr_best[j]
        node.th_key = M[j]
Example #18
def getStatsTS(X, Y, quantile=10, window=500, minCnt=250):
    """
    X: Input factor, shape should be 40320*1082
    Y: Existing factor, price
    Calculate the return of 10, 20 ,30 by
    Standardized Return_i = (Price_t+i-Price_t)/Price_t/i
    """
    def calcFwdRet(price, window=30):
        """
        """
        fwd = np.roll(price, -window, axis=0)
        fwd[-window:, :] = np.nan

        return fwd / price - 1

    print('Now Calculating IC and IR matrix, start counting...')
    t0 = time.time()
    X = np.asarray(X)
    Y = np.asarray(Y)
    Y_ = np.zeros(Y.shape)
    for i in range(len(Y) - 30):
        for j in range(Y.shape[1]):
            Y_[i, j] = (Y[i + 30, j] - Y[i, j]) / Y[i, j] / 30

    Y = Y_
    if X.shape != Y.shape:
        print(X.shape)
        print(Y.shape)
        raise ValueError('X and Y must have the same shape')
    N = len(X)
    IC = np.zeros((N, ))

    bottom = 1.0 / quantile
    top = 1 - bottom

    # ts rank
    X = bn.move_rank(X, window=window, min_count=minCnt, axis=0)
    print(np.isnan(X).sum())
    # norm to [0, 1]
    X = 0.5 * (X + 1)

    # get common data: build one shared validity mask, then apply it to both
    common = ~np.isnan(X) & ~np.isnan(Y)
    X = np.where(common, X, np.nan)
    Y = np.where(common, Y, np.nan)
    # cross-rank Y
    Y_rk = bn.nanrankdata(Y, axis=1)
    Y_rk /= bn.nanmax(Y_rk, axis=1)[:, np.newaxis]

    # ls
    LS = np.nanmean(np.where(X > top, Y, np.nan), axis=1) \
         - np.nanmean(np.where(X < bottom, Y, np.nan), axis=1)

    # Loop
    for ii in range(N):
        IC[ii] = np.corrcoef(X[ii][~np.isnan(X[ii])],
                             Y_rk[ii][~np.isnan(Y_rk[ii])])[0, 1]

    t1 = time.time()
    print("total time used for IC and LS matrix calculation is:", (t1 - t0))
    return IC, LS
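The core transform in getStatsTS is bn.move_rank, whose output lies in [-1, 1]; mapping it through 0.5 * (x + 1) yields a rolling time-series rank in [0, 1]. A minimal sketch:

import numpy as np
import bottleneck as bn

x = np.array([1.0, 3.0, 2.0, 5.0, 4.0])
r = bn.move_rank(x, window=3, min_count=3)   # NaN until the window fills
print(0.5 * (r + 1))                         # [nan nan 0.5 1.  0.5]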
Example #19
    def maximum(self, domain, domain_ind = slice(None)):
        c = self.c
        oovars = set(self.d.keys()) | set(self.d2.keys())
        Vals = domain.values()
        n = np.asarray(Vals[0][0] if type(Vals) == list else next(iter(Vals))[0]).size
        active_domain_ind = type(domain_ind)==np.ndarray
        r = np.zeros(domain_ind.size if type(domain_ind)==np.ndarray else n) + c
        for k in oovars:
            l, u = domain[k][0][domain_ind], domain[k][1][domain_ind]
            d1, d2 = self.d.get(k, 0.0), self.d2.get(k, None)
            if active_domain_ind:
                if type(d1) == np.ndarray and d1.size != 1:
                    d1 = d1[domain_ind]
                if type(d2) == np.ndarray and d2.size != 1:
                    d2 = d2[domain_ind]
            if d2 is None:
                r += where(d1 < 0, l, u) * d1
                continue
            rr = np.vstack(((d2 * l + d1)*l, (d2*u + d1)*u))
#            rr.sort(axis=0)
#            r_min, r_max = rr
            r_max = nanmax(rr, axis=0)
            tops = -d1 / (2.0 * d2)
            ind_inside = logical_and(l < tops,  tops < u)
            if any(ind_inside):
                top_vals = (d2*tops + d1) * tops
                ind_m = logical_and(ind_inside, r_max<top_vals)
                r_max = where(ind_m, top_vals, r_max)
            r += r_max
        return r
Example #20
    def get_probabilities(self, store=True):
        """
		Get the probabilities associated with each feature. This technique
		uses the max across probabilities to form the global probabilities.
		This method should be called after fitting the SP.
		
		@param store: If True, the probabilities are stored internally. Set to
		False to reduce memory.
		
		@return: Return the probabilities.
		"""

        # Get the probabilities
        prob = np.zeros(self.ninputs)
        for i in xrange(self.ninputs):
            # Find all of the potential synapses for this input
            valid = self.syn_map == i

            # Find the max permanence across each of the potential synapses
            try:
                prob[i] = bn.nanmax(self.p[valid])
            except ValueError:
                prob[i] = 0.  # Occurs for missing connections

        # Store the probabilities
        if store: self.prob = prob

        return prob
Example #21
 def maximum(self, domain, domain_ind=slice(None)):
     c = self.c
     oovars = set(self.d.keys()) | set(self.d2.keys())
     Vals = domain.values()
     n = np.asarray(Vals[0][0] if type(Vals) ==
                    list else next(iter(Vals))[0]).size
     active_domain_ind = type(domain_ind) == np.ndarray
     r = np.zeros(domain_ind.size if type(domain_ind) ==
                  np.ndarray else n) + c
     for k in oovars:
         l, u = domain[k][0][domain_ind], domain[k][1][domain_ind]
         d1, d2 = self.d.get(k, 0.0), self.d2.get(k, None)
         if active_domain_ind:
             if type(d1) == np.ndarray and d1.size != 1:
                 d1 = d1[domain_ind]
             if type(d2) == np.ndarray and d2.size != 1:
                 d2 = d2[domain_ind]
         if d2 is None:
             r += where(d1 < 0, l, u) * d1
             continue
         rr = np.vstack(((d2 * l + d1) * l, (d2 * u + d1) * u))
         #            rr.sort(axis=0)
         #            r_min, r_max = rr
         r_max = nanmax(rr, axis=0)
         tops = -d1 / (2.0 * d2)
         ind_inside = logical_and(l < tops, tops < u)
         if any(ind_inside):
             top_vals = (d2 * tops + d1) * tops
             ind_m = logical_and(ind_inside, r_max < top_vals)
             r_max = where(ind_m, top_vals, r_max)
         r += r_max
     return r
Example #22
def scale(values, min=0, max=1):
    """Return values scaled to [min, max]"""
    minval = np.float_(bn.nanmin(values))
    ptp = bn.nanmax(values) - minval
    if ptp == 0:
        return np.clip(values, min, max)
    return (-minval + values) / ptp * (max - min) + min
Example #23
def updateNodes(nodesToUpdate, fo):
    if len(nodesToUpdate) == 0: return
    a_tmp = array([node.a for node in nodesToUpdate])
    Tmp = a_tmp
    Tmp[Tmp > fo] = fo

    o_tmp = array([node.o for node in nodesToUpdate])
    Tmp -= o_tmp
    Tmp[Tmp < 1e-300] = 1e-300
    Tmp[o_tmp > fo] = nan
    tnlh_all_new = -log2(Tmp)

    del Tmp, a_tmp

    tnlh_all_new += vstack([node.tnlhf for node in nodesToUpdate
                            ])  #tnlh_fixed[ind_update]

    tnlh_curr_best = nanmin(tnlh_all_new, 1)

    o_tmp[o_tmp > fo] = -inf
    M = atleast_1d(nanmax(o_tmp, 1))
    for j, node in enumerate(nodesToUpdate):
        node.fo = fo
        node.tnlh_curr = tnlh_all_new[j]
        node.tnlh_curr_best = tnlh_curr_best[j]
        node.th_key = M[j]
Example #24
 def quickMinMax(self, data):
     """
     Estimate the min/max values of *data* by subsampling.
     Returns [(min, max), ...] with one item per channel
     """
     while data.size > 1e6:
         ax = np.argmax(data.shape)
         sl = [slice(None)] * data.ndim
         sl[ax] = slice(None, None, 2)
         data = data[tuple(sl)]
         
     cax = self.axes['c']
     if cax is None:
         return [(float(nanmin(data)), float(nanmax(data)))]
     else:
         return [(float(nanmin(data.take(i, axis=cax))), 
                  float(nanmax(data.take(i, axis=cax)))) for i in range(data.shape[-1])]
Example #25
def _phase3(self):
    """
	Normal phase 3, but with tracking the boost changes. Double commented lines
	are new.
	"""

    # Update permanences
    self.p = np.clip(
        self.p + (self.c_pupdate * self.y[:, 0:1] * self.x[self.syn_map] -
                  self.pdec * self.y[:, 0:1]), 0, 1)

    if self.disable_boost is False:
        # Update the boosting mechanisms
        if self.global_inhibition:
            min_dc = np.zeros(self.ncolumns)
            min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
        else:
            min_dc = self.c_mdc * bn.nanmax(self.neighbors * self.active_dc, 1)

        ## Save pre-overlap boost info
        boost = list(self.boost)

        # Update boost
        self._update_active_duty_cycle()
        self._update_boost(min_dc)
        self._update_overlap_duty_cycle()

        ## Write out overlap boost changes
        with open(os.path.join(self.out_path, 'overlap_boost.csv'), 'ab') as f:
            writer = csv.writer(f)
            writer.writerow([self.iter, bn.nanmean(boost != self.boost)])

        # Boost permanences
        mask = self.overlap_dc < min_dc
        mask.resize(self.ncolumns, 1)
        self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)

        ## Write out permanence boost info
        with open(os.path.join(self.out_path, 'permanence_boost.csv'), 'ab') \
         as f:
            writer = csv.writer(f)
            writer.writerow([self.iter, bn.nanmean(mask)])

    # Trim synapses
    if self.trim is not False:
        self.p[self.p < self.trim] = 0
Example #26
 def quickMinMax(self, data):
     """
     Estimate the min/max values of *data* by subsampling.
     Returns [(min, max), ...] with one item per channel
     """
     while data.size > 1e6:
         ax = np.argmax(data.shape)
         sl = [slice(None)] * data.ndim
         sl[ax] = slice(None, None, 2)
         data = data[tuple(sl)]
         
     cax = self.axes['c']
     if cax is None:
         return [(float(nanmin(data)), float(nanmax(data)))]
     else:
         return [(float(nanmin(data.take(i, axis=cax))), 
                  float(nanmax(data.take(i, axis=cax)))) for i in range(data.shape[-1])]
Example #27
 def callback(*_, **kwargs):
     pvo = kwargs['cb_info'][1]
     # pvo._args['timestamp'] = _time.time()
     tstamps[pvo.index] = pvo.timestamp
     maxi = _bn.nanmax(tstamps)
     mini = _bn.nanmin(tstamps)
     if (maxi - mini) < max_spread:
         ready_evt.set()
Example #28
def _phase3(self):
	"""
	Normal phase 3, but with tracking the boost changes. Double commented lines
	are new.
	"""
	
	# Update permanences
	self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
		self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)
	
	if self.disable_boost is False:
		# Update the boosting mechanisms
		if self.global_inhibition:
			min_dc = np.zeros(self.ncolumns)
			min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
		else:
			min_dc = self.c_mdc * bn.nanmax(self.neighbors * self.active_dc, 1)
		
		## Save pre-overlap boost info
		boost = list(self.boost)
		
		# Update boost
		self._update_active_duty_cycle()
		self._update_boost(min_dc)
		self._update_overlap_duty_cycle()
	
		## Write out overlap boost changes
		with open(os.path.join(self.out_path, 'overlap_boost.csv'), 'ab') as f:
			writer = csv.writer(f)
			writer.writerow([self.iter, bn.nanmean(boost != self.boost)])
	
		# Boost permanences
		mask = self.overlap_dc < min_dc
		mask.resize(self.ncolumns, 1)
		self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)
	
		## Write out permanence boost info
		with open(os.path.join(self.out_path, 'permanence_boost.csv'), 'ab') \
			as f:
			writer = csv.writer(f)
			writer.writerow([self.iter, bn.nanmean(mask)])
	
	# Trim synapses
	if self.trim is not False:
		self.p[self.p < self.trim] = 0
Example #29
def get_levels(img):
    """ Compute levels. Account for NaN values. """
    while img.size > 2**16:
        img = img[::2, ::2]
    mn, mx = bottleneck.nanmin(img), bottleneck.nanmax(img)
    if mn == mx:
        mn = 0
        mx = 255
    return [mn, mx]
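A usage sketch for get_levels; the loop subsamples until at most 2**16 pixels are scanned (bottleneck is assumed imported at module level, as in the snippet):

import numpy as np
import bottleneck

img = np.random.rand(1024, 1024)
img[0, 0] = np.nan
mn, mx = get_levels(img)   # NaN-safe display levels
print(mn, mx)              # approximately 0.0 1.0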
Example #30
    def compute(self, today, assets, out, data):
        drawdowns = fmax.accumulate(data, axis=0) - data
        drawdowns[isnan(drawdowns)] = NINF
        drawdown_ends = nanargmax(drawdowns, axis=0)

        # TODO: Accelerate this loop in Cython or Numba.
        for i, end in enumerate(drawdown_ends):
            peak = nanmax(data[:end + 1, i])
            out[i] = (peak - data[end, i]) / data[end, i]
Example #31
def least_square_method(dspt):
    npol = 6
    com = np.array([bn.nanmean(dspt.lon), bn.nanmean(dspt.lat)])
    timeseries = False
    ncc = dspt.lon.size
    dlon = []
    dlat = []
    for i in range(ncc):
        # haversine(p1,p2)
        dlon.append(
            haversine([dspt.lon[i], com[1]], com) * 1000 *
            np.sign(dspt.lon[i] - com[0]))
        dlat.append(
            haversine([com[0], dspt.lat[i]], com) * 1000 *
            np.sign(dspt.lat[i] - com[1]))

    dlon = np.array(dlon)
    dlat = np.array(dlat)
    if not timeseries:
        R = np.mat(np.vstack((np.ones((ncc)), dlon, dlat)).T)
        u0 = np.mat(dspt.u.values).T
        v0 = np.mat(dspt.v.values).T

        if (np.isnan(u0).sum() == 0) & (np.isnan(v0).sum()
                                        == 0) & (np.isnan(R).sum() == 0):
            A, _, _, _ = la.lstsq(R, u0)
            B, _, _, _ = la.lstsq(R, v0)
        else:
            A = np.nan * np.ones(ncc)
            B = np.nan * np.ones(ncc)

    points = np.vstack([dlon, dlat])
    if (np.isfinite(dlon).sum() == npol) and (np.isfinite(dlat).sum() == npol):
        # careful with nans
        cov = np.cov(points)
        w, v = np.linalg.eig(cov)
        aspect = bn.nanmin(w) / bn.nanmax(w)

        if aspect < 0.99:
            ind = bn.nanargmax(w)
            angle = np.arctan(v[ind, 1] / v[ind, 0]) * 180 / np.pi
            if (angle < 0):
                angle += 360.
        else:
            angle = np.nan
    else:
        aspect = np.nan
        angle = np.nan

    dspt['ux'] = float(A[1])
    dspt['uy'] = float(A[2])
    dspt['vx'] = float(B[1])
    dspt['vy'] = float(B[2])
    dspt['aspect'] = aspect
    dspt['angle'] = angle

    return dspt
Example #32
    def compute(self, today, assets, out, data):
        drawdowns = fmax.accumulate(data, axis=0) - data
        drawdowns[isnan(drawdowns)] = NINF
        drawdown_ends = nanargmax(drawdowns, axis=0)

        # TODO: Accelerate this loop in Cython or Numba.
        for i, end in enumerate(drawdown_ends):
            peak = nanmax(data[:end + 1, i])
            out[i] = (peak - data[end, i]) / data[end, i]
Example #33
def mul_interval(self, other, isOtherOOFun, Prod, domain, dtype):

    lb1_ub1, definiteRange = self._interval(domain, dtype, ia_surf_level=2)

    if isOtherOOFun:
        lb2_ub2, definiteRange2 = other._interval(domain,
                                                  dtype,
                                                  ia_surf_level=2)
        definiteRange = logical_and(definiteRange, definiteRange2)
    else:
        lb2_ub2 = other

    if type(lb2_ub2) in (boundsurf,
                         boundsurf2) or type(lb1_ub1) in (boundsurf,
                                                          boundsurf2):
        if type(lb2_ub2) in (boundsurf,
                             boundsurf2) and type(lb1_ub1) in (boundsurf,
                                                               boundsurf2):
            resolveSchedule = domain.resolveSchedule.get(Prod, ())
            r = lb1_ub1.__mul__(lb2_ub2, resolveSchedule)
        else:
            r = lb1_ub1 * lb2_ub2
        r.definiteRange = definiteRange
        return r, r.definiteRange
    elif isscalar(other) or (type(other) == ndarray and other.size == 1):
        r = lb1_ub1 * other if other >= 0 else lb1_ub1[::-1] * other
        return r, definiteRange

    lb1, ub1 = lb1_ub1
    lb2, ub2 = lb2_ub2 if isOtherOOFun else (other, other)

    firstPositive = all(lb1 >= 0)
    firstNegative = all(ub1 <= 0)
    secondPositive = all(lb2 >= 0)
    secondNegative = all(ub2 <= 0)
    if firstPositive and secondPositive:
        t = vstack((lb1 * lb2, ub1 * ub2))
    elif firstNegative and secondNegative:
        t = vstack((ub1 * ub2, lb1 * lb2))
    elif firstPositive and secondNegative:
        t = vstack((lb2 * ub1, lb1 * ub2))
    elif firstNegative and secondPositive:
        t = vstack((lb1 * ub2, lb2 * ub1))
        #t = vstack((lb1 * other, ub1 * other) if other >= 0 else (ub1 * other, lb1 * other))
    elif isOtherOOFun:
        t = vstack(
            (lb1 * lb2, ub1 * lb2, lb1 * ub2, ub1 * ub2))  # TODO: improve it
        t = vstack((nanmin(t, 0), nanmax(t, 0)))
    else:
        t = vstack((lb1 * other, ub1 * other))  # TODO: improve it
        t.sort(axis=0)

    #assert isinstance(t_min, ndarray) and isinstance(t_max, ndarray), 'Please update numpy to more recent version'
    if isOtherOOFun:
        update_mul_inf_zero(lb1_ub1, lb2_ub2, t)

    return t, definiteRange
Example #34
def factor_fillna_to_max(factor, na_benchmark):
    x_m = factor.values.copy()
    row_max = bn.nanmax(x_m, axis=1).reshape(-1, 1)
    max_mat = np.hstack([
        row_max,
    ] * x_m.shape[1])
    loc = np.isnan(x_m) & ~np.isnan(na_benchmark.values)
    x_m[loc] = max_mat[loc]
    return pd.DataFrame(x_m, columns=factor.columns, index=factor.index)
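A usage sketch with two tiny aligned frames (both made up): NaNs in factor are filled with the row maximum, but only where na_benchmark has data:

import numpy as np
import pandas as pd
import bottleneck as bn

factor = pd.DataFrame([[1.0, np.nan], [np.nan, 4.0]], columns=['a', 'b'])
bench = pd.DataFrame([[1.0, 2.0], [np.nan, 4.0]], columns=['a', 'b'])
print(factor_fillna_to_max(factor, bench))
#      a    b
# 0  1.0  1.0   (filled with the row max)
# 1  NaN  4.0   (benchmark is NaN too, so left alone)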
Example #35
 def quickMinMax(self, data):
     """
     Estimate the min/max values of *data* by subsampling.
     """
     while data.size > 1e6:
         ax = np.argmax(data.shape)
         sl = [slice(None)] * data.ndim
         sl[ax] = slice(None, None, 2)
         data = data[tuple(sl)]
     return nanmin(data), nanmax(data)
Example #36
 def quickMinMax(self, data):
     """
     Estimate the min/max values of *data* by subsampling.
     """
     while data.size > 1e6:
         ax = np.argmax(data.shape)
         sl = [slice(None)] * data.ndim
         sl[ax] = slice(None, None, 2)
         data = data[sl]
     return nanmin(data), nanmax(data)
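The method halves the longest axis until at most about 1e6 samples remain, then takes one nan-aware pass. The same idea as a standalone function, sketched here with a tuple index (indexing with a plain list of slices is no longer accepted by numpy):

import numpy as np
from bottleneck import nanmax, nanmin

def quick_min_max(data):
    # Estimate (min, max) of *data* by stride-2 subsampling.
    while data.size > 1e6:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        data = data[tuple(sl)]
    return nanmin(data), nanmax(data)

print(quick_min_max(np.random.rand(4096, 4096)))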
Example #37
    def _phase3(self):
        """
		Execute phase 3 of the SP region. This phase is used to conduct
		learning.
		
		Note - This should only be called after phase 2 has been called.
		"""

        # Notes:
        # 1. logical_not is faster than invert
        # 2. Multiplication is faster than bitwise_and which is faster than
        #    logical_not
        # 3. Slightly different format than original definition
        #    (in the comment) to get even more speed benefits
        """
		x = self.x[self.syn_map]
		self.p = np.clip(self.p + self.y[:, 0:1] * (x * self.pinc -
			np.logical_not(x) * self.pdec), 0, 1)
		"""
        self.p = np.clip(
            self.p + (self.c_pupdate * self.y[:, 0:1] * self.x[self.syn_map] -
                      self.pdec * self.y[:, 0:1]), 0, 1)

        if self.disable_boost is False:
            # Update the boosting mechanisms
            if self.global_inhibition:
                min_dc = np.zeros(self.ncolumns)
                min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
            else:
                min_dc = self.c_mdc * bn.nanmax(
                    self.neighbors * self.active_dc, 1)
            self._update_active_duty_cycle()
            self._update_boost(min_dc)
            self._update_overlap_duty_cycle()

            # Boost permanences
            mask = self.overlap_dc < min_dc
            mask.resize(self.ncolumns, 1)
            self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)

        # Trim synapses
        if self.trim is not False:
            self.p[self.p < self.trim] = 0
Example #38
    def _phase2(self):
        """
		Execute phase 2 of the SP region. This phase is used to compute the
		active columns.
		
		Note - This should only be called after phase 1 has been called and
		after the inhibition radius and neighborhood have been updated.
		"""

        # Shift the outputs
        self.y[:, 1:] = self.y[:, :-1]
        self.y[:, 0] = 0

        # Calculate k
        #   - For a column to be active its overlap must be above the overlap
        #     value of the k-th largest column in its neighborhood.
        k = self._get_num_cols()

        if self.global_inhibition:
            # The neighborhood is all columns, thus the set of active columns
            # is simply columns that have an overlap above the k-th largest
            # in the entire region

            # Compute the winning column indexes
            if self.learn:
                # Randomly break ties
                ix = bn.argpartsort(
                    -self.overlap[:, 0] -
                    self.prng.uniform(.1, .2, self.ncolumns), k)[:k]
            else:
                # Choose the same set of columns each time
                ix = bn.argpartsort(-self.overlap[:, 0], k)[:k]

            # Set the active columns
            self.y[ix, 0] = self.overlap[ix, 0] > 0
        else:
            # The neighborhood is bounded by the inhibition radius, therefore
            # each column's neighborhood must be considered

            for i in xrange(self.ncolumns):
                # Get the neighbors
                ix = np.where(self.neighbors[i])[0]

                # Compute the minimum top overlap
                if ix.shape[0] <= k:
                    # Desired number of candidates is at or below the desired
                    # activity level, so find the overall max
                    m = max(bn.nanmax(self.overlap[ix, 0]), 1)
                else:
                    # Desired number of candidates is above the desired
                    # activity level, so find the k-th largest
                    m = max(-bn.partsort(-self.overlap[ix, 0], k + 1)[k], 1)

                # Set the column activity
                if self.overlap[i, 0] >= m: self.y[i, 0] = True
Example #39
	def _phase3(self):
		"""
		Execute phase 3 of the SP region. This phase is used to conduct
		learning.
		
		Note - This should only be called after phase 2 has been called.
		"""
		
		# Notes:
		# 1. logical_not is faster than invert
		# 2. Multiplication is faster than bitwise_and which is faster than
		#    logical_not
		# 3. Slightly different format than original definition
		#    (in the comment) to get even more speed benefits
		"""
		x = self.x[self.syn_map]
		self.p = np.clip(self.p + self.y[:, 0:1] * (x * self.pinc -
			np.logical_not(x) * self.pdec), 0, 1)
		"""
		self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
			self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)
		
		if self.disable_boost is False:
			# Update the boosting mechanisms
			if self.global_inhibition:
				min_dc = np.zeros(self.ncolumns)
				min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
			else:
				min_dc = self.c_mdc * bn.nanmax(self.neighbors *
					self.active_dc, 1)
			self._update_active_duty_cycle()
			self._update_boost(min_dc)
			self._update_overlap_duty_cycle()
			
			# Boost permanences
			mask = self.overlap_dc < min_dc
			mask.resize(self.ncolumns, 1)
			self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)
		
		# Trim synapses
		if self.trim is not False:
			self.p[self.p < self.trim] = 0
Example #40
def _normalize(vec, start, end):
    vec -= bn.nanmin(vec)

    v_max = bn.nanmax(vec)
    if v_max != 0:
        vec /= v_max

    vec *= (end - start) * 0.2
    vec += start + (end - start) * 0.05

    return vec
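A usage sketch: _normalize mutates its float argument in place, compressing it into a band that spans 20% of [start, end] with a 5% offset:

import numpy as np
import bottleneck as bn

vec = np.array([0.0, 5.0, 10.0])
print(_normalize(vec, start=0.0, end=100.0))   # [ 5. 15. 25.]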
Example #41
	def _phase2(self):
		"""
		Execute phase 2 of the SP region. This phase is used to compute the
		active columns.
		
		Note - This should only be called after phase 1 has been called and
		after the inhibition radius and neighborhood have been updated.
		"""
		
		# Shift the outputs
		self.y[:, 1:] = self.y[:, :-1]
		self.y[:, 0] = 0
		
		# Calculate k
		#   - For a column to be active its overlap must be above the overlap
		#     value of the k-th largest column in its neighborhood.
		k = self._get_num_cols()
		
		if self.global_inhibition:
			# The neighborhood is all columns, thus the set of active columns
			# is simply columns that have an overlap above the k-th largest
			# in the entire region
			
			# Compute the winning column indexes
			if self.learn:				
				# Randomly break ties
				ix = bn.argpartsort(-self.overlap[:, 0] -
					self.prng.uniform(.1, .2, self.ncolumns), k)[:k]
			else:
				# Choose the same set of columns each time
				ix = bn.argpartsort(-self.overlap[:, 0], k)[:k]
			
			# Set the active columns
			self.y[ix, 0] = self.overlap[ix, 0] > 0
		else:
			# The neighborhood is bounded by the inhibition radius, therefore
			# each column's neighborhood must be considered
			
			for i in xrange(self.ncolumns):
				# Get the neighbors
				ix = np.where(self.neighbors[i])[0]
				
				# Compute the minimum top overlap
				if ix.shape[0] <= k:
					# Desired number of candidates is at or below the desired
					# activity level, so find the overall max
					m = max(bn.nanmax(self.overlap[ix, 0]), 1)
				else:
					# Desired number of candidates is above the desired
					# activity level, so find the k-th largest
					m = max(-bn.partsort(-self.overlap[ix, 0], k + 1)[k], 1)
				
				# Set the column activity
				if self.overlap[i, 0] >= m: self.y[i, 0] = True
Example #42
def func13(o, a): 
    m, n = o.shape
    n //= 2  # integer halving keeps n usable as a slice index
#    if case == 1:
#        U1, U2 = a[:, :n].copy(), a[:, n:] 
#        #TODO: mb use nanmax(concatenate((U1,U2),3),3) instead?
#        U1 = where(logical_or(U1<U2, isnan(U1)),  U2, U1)
#        return nanmin(U1, 1)
        
    L1, L2, U1, U2 = o[:, :n], o[:, n:], a[:, :n], a[:, n:] 
#    if case == 2:
    U = where(logical_or(U1<U2, isnan(U1)),  U2, U1)
    L = where(logical_or(L2<L1, isnan(L1)), L2, L1)
    return nanmax(U-L, 1)
Example #43
    def __call__(self, data):
        """
        Remove columns with constant values from the data set and return
        the resulting data table.

        Parameters
        ----------
        data : an input data set
        """

        oks = bn.nanmin(data.X, axis=0) != bn.nanmax(data.X, axis=0)
        atts = [data.domain.attributes[i] for i, ok in enumerate(oks) if ok]
        domain = Orange.data.Domain(atts, data.domain.class_vars, data.domain.metas)
        return Orange.data.Table(domain, data)
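The detection itself is one line: a column is constant when its nan-aware min equals its nan-aware max. The mask in plain numpy, outside of Orange:

import numpy as np
import bottleneck as bn

X = np.array([[1.0, 7.0],
              [2.0, 7.0],
              [np.nan, 7.0]])
oks = bn.nanmin(X, axis=0) != bn.nanmax(X, axis=0)
print(oks)   # [ True False] -> the constant second column would be dropped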
Example #44
def mul_interval(self, other, isOtherOOFun, Prod, domain, dtype):
    
    lb1_ub1, definiteRange = self._interval(domain, dtype, ia_surf_level = 2)

    if isOtherOOFun:
        lb2_ub2, definiteRange2 = other._interval(domain, dtype, ia_surf_level = 2)
        definiteRange = logical_and(definiteRange, definiteRange2)
    else:
        lb2_ub2 = other
        
    if type(lb2_ub2) in (boundsurf, boundsurf2) or type(lb1_ub1) in (boundsurf, boundsurf2):
        if type(lb2_ub2) in (boundsurf, boundsurf2) and type(lb1_ub1) in (boundsurf, boundsurf2):
            resolveSchedule = domain.resolveSchedule.get(Prod, ())
            r = lb1_ub1.__mul__(lb2_ub2, resolveSchedule)
        else:
            r = lb1_ub1 * lb2_ub2
        r.definiteRange = definiteRange
        return r, r.definiteRange
    elif isscalar(other) or (type(other) == ndarray and other.size == 1):
        r = lb1_ub1 * other if other >= 0 else lb1_ub1[::-1] * other
        return r, definiteRange
    
    lb1, ub1 = lb1_ub1
    lb2, ub2 = lb2_ub2 if isOtherOOFun else (other, other)
    
    firstPositive = all(lb1 >= 0)
    firstNegative = all(ub1 <= 0)
    secondPositive = all(lb2 >= 0)
    secondNegative = all(ub2 <= 0)
    if firstPositive and secondPositive:
        t= vstack((lb1 * lb2, ub1 * ub2))
    elif firstNegative and secondNegative:
        t = vstack((ub1 * ub2, lb1 * lb2))
    elif firstPositive and secondNegative:
        t = vstack((lb2 * ub1, lb1 * ub2))
    elif firstNegative and secondPositive:
        t = vstack((lb1 * ub2, lb2 * ub1))
        #t = vstack((lb1 * other, ub1 * other) if other >= 0 else (ub1 * other, lb1 * other))
    elif isOtherOOFun:
        t = vstack((lb1 * lb2, ub1 * lb2, lb1 * ub2, ub1 * ub2))# TODO: improve it
        t = vstack((nanmin(t, 0), nanmax(t, 0)))
    else:
        t = vstack((lb1 * other, ub1 * other))# TODO: improve it
        t.sort(axis=0)
        
    #assert isinstance(t_min, ndarray) and isinstance(t_max, ndarray), 'Please update numpy to more recent version'
    if isOtherOOFun:
        update_mul_inf_zero(lb1_ub1, lb2_ub2, t)
    
    return t, definiteRange
Example #45
    def interval(domain, dtype):
        lb_ub, definiteRange = inp._interval(domain, dtype)
        lb, ub = lb_ub[0], lb_ub[1]
        ind1, ind2 = lb < 0.0, ub > 0.0
        ind = logical_and(ind1, ind2)
        tmp = vstack((lb, ub))
        TMP = func(tmp)
        t_min, t_max = atleast_1d(nanmin(TMP, 0)), atleast_1d(nanmax(TMP, 0))
        if any(ind):
            F0 = func(0.0)
            t_min[atleast_1d(logical_and(ind, t_min > F0))] = F0
            t_max[atleast_1d(logical_and(ind, t_max < F0))] = F0

        return vstack((t_min, t_max)), definiteRange
Example #46
def div_interval(self, other, Div, domain, dtype):
    
    lb2_ub2, definiteRange2 = other._interval(domain, dtype, ia_surf_level = 2)

    secondIsBoundsurf = isinstance(lb2_ub2, boundsurf)
    
    lb1_ub1, definiteRange1 = self._interval(domain, dtype, ia_surf_level = 2)# if type(lb2_ub2)==ndarray else 1)
    firstIsBoundsurf = type(lb1_ub1) in (boundsurf, boundsurf2)
#    if type(lb1_ub1) == boundsurf2:
#        lb1_ub1 = lb1_ub1.to_linear()
    
    # TODO: mention in doc definiteRange result for 0 / 0
    definiteRange = logical_and(definiteRange1, definiteRange2)
    
    tmp = None
    if not firstIsBoundsurf and secondIsBoundsurf:
        # TODO: check handling zeros
        if not hasattr(other, '_inv'):
            other._inv = other ** -1 #1.0/other
#            other._inv.engine_convexity = other._inv.engine_monotonity = -1
        Tmp = pow_const_interval(other, other._inv, -1, domain, dtype)[0]
        if isinstance(Tmp, boundsurf):
            tmp = lb1_ub1 * Tmp#lb2_ub2 ** -1
    elif firstIsBoundsurf and not secondIsBoundsurf:# and (t1_positive or t1_negative or t2_positive or t2_negative):
        # TODO: handle zeros
        Tmp2 = 1.0 / lb2_ub2
        Tmp2.sort(axis=0)
        tmp = lb1_ub1 * Tmp2
        #tmp = lb1_ub1 * (1.0 / tmp2[::-1]) 
    elif firstIsBoundsurf and secondIsBoundsurf:
        tmp = lb1_ub1.__div__(lb2_ub2, domain.resolveSchedule.get(Div, ()))
    if tmp is not None:
        if type(tmp) in (boundsurf, boundsurf2):
            tmp.definiteRange = definiteRange
            return tmp, tmp.definiteRange
        else:
            return tmp, definiteRange

    tmp1 = lb1_ub1.resolve()[0] if firstIsBoundsurf else lb1_ub1

    tmp2 = lb2_ub2.resolve()[0] if secondIsBoundsurf else lb2_ub2

    lb1, ub1 = tmp1[0], tmp1[1]
    lb2, ub2 = tmp2[0], tmp2[1]

    tmp = vstack((td(lb1, lb2), td(lb1, ub2), td(ub1, lb2), td(ub1, ub2)))
    r = vstack((nanmin(tmp, 0), nanmax(tmp, 0)))
    update_div_zero(lb1, ub1, lb2, ub2, r)
    return r, definiteRange
Example #47
def func11(y, e, nlhc, indTC, residual, o, a, _s, p): 
    m, n = y.shape
    if p.probType == "IP":
        w = arange(m)
        # TODO: omit recalculation from func1
        ind = nanargmin(a[:, 0:n] - o[:, 0:n] + a[:, n:] - o[:, n:], 1)
        sup_inf_diff = 0.5*(a[w, ind] - o[w, ind] + a[w, n+ind] - o[w, n+ind])
        diffao = a - o
        minres_ind = nanargmin(diffao, 1) 
        minres = diffao[w, minres_ind]
        complementary_minres = diffao[w, where(minres_ind<n, minres_ind+n, minres_ind-n)]
        volume = prod(e-y, 1)
        volumeResidual = volume * sup_inf_diff
        F = 0.25 * (a[w, ind] + o[w, ind] + a[w, n+ind] + o[w, n+ind])
        return [si(IP_fields, sup_inf_diff[i], minres[i], minres_ind[i], complementary_minres[i], y[i], e[i], o[i], a[i], _s[i], F[i], volume[i], volumeResidual[i]) for i in range(m)]
        
    else:
        
        residual = None
        tmp = asarray(a)-asarray(o)
        tmp[tmp<1e-300] = 1e-300
        nlhf = log2(tmp)#-log2(p.fTol)
#        nlhf[a==inf] = 1e300# to make it not inf and nan
#        nlhf[o==-inf] = 1e300# to make it not inf and nan
        if nlhf.ndim == 3: # in MOP
            nlhf = nlhf.sum(axis=1)
        
        if p.probType == "MOP":
            # make correct o,a wrt each target
            return [si(MOP_Fields, y[i], e[i], nlhf[i], 
                          nlhc[i] if nlhc is not None else None, 
                          indTC[i] if indTC is not None else None, 
                          residual[i] if residual is not None else None, 
                          [o[i][k] for k in range(p.nf)], [a[i][k] for k in range(p.nf)], 
                          _s[i]) for i in range(m)]
        else:
            s, q = o[:, 0:n], o[:, n:2*n]
            Tmp = nanmax(where(q<s, q, s), 1)
            
            nlhf[logical_and(isinf(a), isinf(nlhf))] = 1e300
            assert p.probType in ('GLP', 'NLP', 'NSP', 'SNLE', 'NLSP', 'MINLP')
        
#            residual = None

            return [si(Fields, Tmp[i], y[i], e[i], nlhf[i], 
                          nlhc[i] if nlhc is not None else None, 
                          indTC[i] if indTC is not None else None, 
                          residual[i] if residual is not None else None, 
                          o[i], a[i], _s[i]) for i in range(m)]
Example #48
    def __div__(self, other, resolveSchedule=()):
        isBoundSurf = isinstance(other, boundsurf)
        assert isBoundSurf
        
        r = aux_mul_div_boundsurf((self, other), operator.truediv, resolveSchedule)
        
#        return r 
#        ind_inf_z = logical_or(logical_or(R2[0]==0, R2[1]==0), logical_or(isinf(R1[0]), isinf(R1[1])))
        #(R2[0]==0) | (R2[1]==0) | (isinf(R2[0])) | (isinf(R2[1])) | (isinf(R1[0])) | isinf(R1[1])
        
        isBoundsurf = isinstance(r, boundsurf)
        rr = r.resolve()[0] if isBoundsurf else r#[0]
        
#        import pylab, numpy
#        xx = numpy.linspace(-1, 0, 1000)
#        t=r.l.d.keys()[0]
#        tmp=r
#        pylab.plot(xx, tmp.l.d2.get(t, 0.0)*xx**2+ tmp.l.d.get(t, 0.0)*xx+ tmp.l.c, 'r')
#        pylab.plot(xx, tmp.u.d2.get(t, 0.0)*xx**2+ tmp.u.d.get(t, 0.0)*xx+ tmp.u.c, 'b')
#        pylab.grid()
#        pylab.show()
        
        
        # nans may be from other computations from a level below, although
        ind_nan = logical_or(isnan(rr[0]), isnan(rr[1]))
        if not any(ind_nan) or not isBoundsurf:
            return r #if isBoundsurf else rr

        Ind_finite = where(logical_not(ind_nan))[0]
        r_finite = r.extract(Ind_finite)
        ind_nan = where(ind_nan)[0]
        R1 = self.resolve()[0]
        R2 = other.resolve()[0]
        lb1, ub1, lb2, ub2 = R1[0, ind_nan], R1[1, ind_nan], R2[0, ind_nan], R2[1, ind_nan]
        tmp = np.vstack((td(lb1, lb2), td(lb1, ub2), td(ub1, lb2), td(ub1, ub2)))
        R = np.vstack((nanmin(tmp, 0), nanmax(tmp, 0)))
        update_div_zero(lb1, ub1, lb2, ub2, R)
        b = boundsurf(surf({}, R[0]), surf({}, R[1]), False, self.domain)
        r = boundsurf_join((ind_nan, Ind_finite), (b, r_finite))
        definiteRange = logical_and(self.definiteRange, other.definiteRange)
        r.definiteRange = definiteRange
        return r 
Example #49
	def reconstruct_input(self, x=None):
		"""
		Reconstruct the original input using only the stored permanences and
		the set of active columns. The maximization of probabilities approach
		is used. This method must be called after fitting the SP.
		
		@param x: The set of active columns or None if the SP was never fitted.
		"""
		
		# Check input
		if x is None: x = self.column_activations
		if x is None: return None
		
		# Reshape x if needed
		ravel = False
		if len(x.shape) == 1:
			ravel = True
			x = x.reshape(1, x.shape[0])
		
		# Get the input mapping
		imap = [np.where(self.syn_map == i) for i in xrange(self.ninputs)]
		
		# Get the reconstruction
		x2 = np.zeros((x.shape[0], self.ninputs))
		for i, xi in enumerate(x):
			# Mask off permanences not relevant to this input
			y = self.p * xi.reshape(self.ncolumns, 1)
			
			# Remap permanences to input domain
			for j in xrange(self.ninputs):
				# Get the max probability across the current input space
				try:
					x2[i][j] = bn.nanmax(y[imap[j]])
				except ValueError:
					x2[i][j] = 0. # Occurs for missing connections
				
				# Threshold back to {0, 1}
				x2[i][j] = 1 if x2[i][j] >= self.syn_th else 0
		
		return x2 if not ravel else x2.ravel()
Example #50
def pow_oofun_interval(self, other, domain, dtype): 
    # TODO: handle discrete cases
    lb1_ub1, definiteRange1 = self._interval(domain, dtype, ia_surf_level = 2)
    lb2_ub2, definiteRange2 = other._interval(domain, dtype, ia_surf_level = 2)
    if isinstance(lb1_ub1, boundsurf) or isinstance(lb2_ub2, boundsurf):
        r = (lb2_ub2 * lb1_ub1.log()).exp()
        return r, r.definiteRange
    
    lb1, ub1 = lb1_ub1#[0], lb1_ub1[1]
    lb2, ub2 = lb2_ub2#[0], lb2_ub2[1]
    T = vstack((lb1 ** lb2, lb1** ub2, ub1**lb2, ub1**ub2))
    t_min, t_max = nanmin(T, 0), nanmax(T, 0)
    definiteRange = logical_and(definiteRange1, definiteRange2)
    
    ind1 = lb1 < 0
    if any(ind1):
        definiteRange = logical_and(definiteRange, logical_not(ind1))
        ind2 = ub1 >= 0
        t_min[atleast_1d(logical_and(logical_and(ind1, ind2), logical_and(t_min > 0.0, ub2 > 0.0)))] = 0.0
        t_max[atleast_1d(logical_and(ind1, logical_not(ind2)))] = nan
        t_min[atleast_1d(logical_and(ind1, logical_not(ind2)))] = nan
    return vstack((t_min, t_max)), definiteRange
Example #51
    def apply(self, experiment):
        """Applies the binning to an experiment.
        
        Parameters
        ----------
        experiment : Experiment
            the old_experiment to which this op is applied
            
        Returns
        -------
            a new experiment, the same as old_experiment but with a new
            column the same as the operation name.  The bool is True if the
            event's measurement in self.channel is greater than self.low and
            less than self.high; it is False otherwise.
        """
        if not experiment:
            raise util.CytoflowOpError("no experiment specified")
        
        if not self.name:
            raise util.CytoflowOpError("name is not set")
        
        if self.name in experiment.data.columns:
            raise util.CytoflowOpError("name {0} is in the experiment already"
                                  .format(self.name))
            
        if self.bin_count_name and self.bin_count_name in experiment.data.columns:
            raise util.CytoflowOpError("bin_count_name {0} is in the experiment already"
                                  .format(self.bin_count_name))
        
        if not self.channel:
            raise util.CytoflowOpError("channel is not set")
        
        if self.channel not in experiment.data.columns:
            raise util.CytoflowOpError("channel {0} isn't in the experiment"
                                  .format(self.channel))
              
        if self.num_bins is Undefined and self.bin_width is Undefined:
            raise util.CytoflowOpError("must set either bin number or width")
        
        if self.num_bins is Undefined \
           and not (self.scale == "linear" or self.scale == "log"):
            raise util.CytoflowOpError("Can only use bin_width with linear or log scale") 
        
        scale = util.scale_factory(self.scale, experiment, self.channel)
        scaled_data = scale(experiment.data[self.channel])
            
        channel_min = bn.nanmin(scaled_data)
        channel_max = bn.nanmax(scaled_data)
        
        num_bins = self.num_bins if self.num_bins is not Undefined else \
                   int((channel_max - channel_min) / self.bin_width)

        bins = np.linspace(start = channel_min, stop = channel_max,
                           num = num_bins)
            
        # bins need to be internal; drop the first and last one
        bins = bins[1:-1]
            
        new_experiment = experiment.clone()
        new_experiment.add_condition(self.name,
                                     "int",
                                     np.digitize(scaled_data, bins))
        
        # if we're log-scaled (for example), don't label data that isn't
        # showable on a log scale!
        new_experiment.data.loc[np.isnan(scaled_data), self.name] = np.NaN
        
        # keep track of the bins we used, for pretty plotting later.
        new_experiment.metadata[self.name]["bin_scale"] = self.scale
        new_experiment.metadata[self.name]["bins"] = bins
        
        if self.bin_count_name:
            # TODO - this is a HUGE memory hog?!
            agg_count = new_experiment.data.groupby(self.name).count()
            agg_count = agg_count[agg_count.columns[0]]
            
            # have to make the condition a float64, because if we're in log
            # space there may be events that have NaN as the bin number.
            
            new_experiment.add_condition(
                self.bin_count_name,
                "float64",
                new_experiment[self.name].map(agg_count))
        
        new_experiment.history.append(self.clone_traits())
        return new_experiment
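The heart of apply is the linspace/digitize pair: dropping the first and last edge leaves only internal boundaries, so digitize returns bin indices covering the full data range. A standalone sketch:

import numpy as np
import bottleneck as bn

data = np.array([0.1, 0.4, 0.5, 0.9])
lo, hi = bn.nanmin(data), bn.nanmax(data)
bins = np.linspace(lo, hi, num=5)[1:-1]   # internal edges only
print(np.digitize(data, bins))            # [0 1 2 3]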
Example #52
def r14(p, nlhc, residual, definiteRange, y, e, vv, asdf1, C, r40, g, nNodes,  \
         r41, fTol, Solutions, varTols, _in, dataType, \
         maxNodes, _s, indTC, xRecord):

    isSNLE = p.probType in ('NLSP', 'SNLE')

    maxSolutions, solutions, coords = Solutions.maxNum, Solutions.solutions, Solutions.coords
    if len(p._discreteVarsNumList):
        y, e = adjustDiscreteVarBounds(y, e, p)

    
    o, a, r41 = r45(y, e, vv, p, asdf1, dataType, r41, nlhc)
    fo_prev = float(0 if isSNLE else min((r41, r40 - (fTol if maxSolutions == 1 else 0))))
    if fo_prev > 1e300:
        fo_prev = 1e300
    y, e, o, a, _s, indTC, nlhc, residual = func7(y, e, o, a, _s, indTC, nlhc, residual)    

    if y.size == 0:
        return _in, g, fo_prev, _s, Solutions, xRecord, r41, r40
    
    nodes = func11(y, e, nlhc, indTC, residual, o, a, _s, p)
    #nodes, g = func9(nodes, fo_prev, g, p)
    #y, e = func4(y, e, o, a, fo)
    

    if p.solver.dataHandling == 'raw':
        
        tmp = o.copy()
        tmp[tmp > fo_prev] = -inf
        M = atleast_1d(nanmax(tmp, 1))
        for i, node in enumerate(nodes):
            node.th_key = M[i]
            
        if not isSNLE:
            for node in nodes:
                node.fo = fo_prev       
        if nlhc is not None:
            for i, node in enumerate(nodes): node.tnlhf = node.nlhf + node.nlhc
        else:
            for i, node in enumerate(nodes): node.tnlhf = node.nlhf # TODO: improve it
            
        an = hstack((nodes, _in))
        
        #tnlh_fixed = vstack([node.tnlhf for node in an])
        tnlh_fixed_local = vstack([node.tnlhf for node in nodes])#tnlh_fixed[:len(nodes)]

        tmp = a.copy()

        
        tmp[tmp>fo_prev] = fo_prev
        tmp2 = tmp - o
        tmp2[tmp2<1e-300] = 1e-300
        tmp2[o > fo_prev] = nan
        tnlh_curr = tnlh_fixed_local - log2(tmp2)
        tnlh_curr_best = nanmin(tnlh_curr, 1)
        for i, node in enumerate(nodes):
            node.tnlh_curr = tnlh_curr[i]
            node.tnlh_curr_best = tnlh_curr_best[i]
        
        # TODO: use it instead of code above
        #tnlh_curr = tnlh_fixed_local - log2(where() - o)
    else:
        tnlh_curr = None
    
    # TODO: don't calculate PointVals for zero-p regions
    PointVals, PointCoords = getr4Values(vv, y, e, tnlh_curr, asdf1, C, p.contol, dataType, p) 

    if PointVals.size != 0:
        xk, Min = r2(PointVals, PointCoords, dataType)
    else: # all points have been removed by func7
        xk = p.xk
        Min = nan

    if r40 > Min:
        r40 = Min
        xRecord = xk.copy()# TODO: is copy required?
    if r41 > Min:
        r41 = Min
    
    fo = float(0 if isSNLE else min((r41, r40 - (fTol if maxSolutions == 1 else 0))))
        
    if p.solver.dataHandling == 'raw':
        
        if fo != fo_prev and not isSNLE:
            fos = array([node.fo for node in an])
            
            #prev
            #ind_update = where(fos > fo + 0.01* fTol)[0]
            
            #new
            th_keys = array([node.th_key for node in an])
            delta_fos = fos - fo
            ind_update = where(10 * delta_fos > fos - th_keys)[0]
            
            nodesToUpdate = an[ind_update]
            update_nlh = ind_update.size != 0
#                  print 'o MB:', float(o_tmp.nbytes) / 1e6
#                  print 'percent:', 100*float(ind_update.size) / len(an) 
            if update_nlh:
#                    from time import time
#                    tt = time()
                updateNodes(nodesToUpdate, fo)
#                    if not hasattr(p, 'Time'):
#                        p.Time = time() - tt
#                    else:
#                        p.Time += time() - tt
                    
            tmp = asarray([node.key for node in an])
            r10 = where(tmp > fo)[0]
            if r10.size != 0:
                mino = [an[i].key for i in r10]
                mmlf = nanmin(asarray(mino))
                g = nanmin((g, mmlf))

        NN = atleast_1d([node.tnlh_curr_best for node in an])
        r10 = logical_or(isnan(NN), NN == inf)
       
        if any(r10):
            ind = where(logical_not(r10))[0]
            an = an[ind]
            #tnlh = take(tnlh, ind, axis=0, out=tnlh[:ind.size])
            #NN = take(NN, ind, axis=0, out=NN[:ind.size])
            NN = NN[ind]

        if not isSNLE or p.maxSolutions == 1:
            #pass
            astnlh = argsort(NN)
            an = an[astnlh]
            
#        print(an[0].nlhc, an[0].tnlh_curr_best)
        # Changes
#        if NN.size != 0:
#            ind = searchsorted(NN, an[0].tnlh_curr_best+1)
#            tmp1, tmp2 = an[:ind], an[ind:]
#            arr = [node.key for node in tmp1]
#            Ind = argsort(arr)
#            an = hstack((tmp1[Ind], tmp2))
        #print [node.tnlh_curr_best for node in an[:10]]
    
    else: #if p.solver.dataHandling == 'sorted':
        if isSNLE and p.maxSolutions != 1: 
            an = hstack((nodes, _in))
        else:
            nodes.sort(key = lambda obj: obj.key)

            if len(_in) == 0:
                an = nodes
            else:
                arr1 = [node.key for node in _in]
                arr2 = [node.key for node in nodes]
                r10 = searchsorted(arr1, arr2)
                an = insert(_in, r10, nodes)
#                if p.debug:
#                    arr = array([node.key for node in an])
#                    #print arr[0]
#                    assert all(arr[1:]>= arr[:-1])

    if maxSolutions != 1:
        Solutions = r46(o, a, PointCoords, PointVals, fTol, varTols, Solutions)
        
        p._nObtainedSolutions = len(solutions)
        if p._nObtainedSolutions > maxSolutions:
            solutions = solutions[:maxSolutions]
            p.istop = 0
            p.msg = 'user-defined maximal number of solutions (p.maxSolutions = %d) has been exceeded' % p.maxSolutions
            return an, g, fo, None, Solutions, xRecord, r41, r40
    
    #p.iterfcn(xk, Min)
    p.iterfcn(xRecord, r40)
    if p.istop != 0: 
        return an, g, fo, None, Solutions, xRecord, r41, r40
    if isSNLE and maxSolutions == 1 and Min <= fTol:
        # TODO: rework it for nonlinear systems with non-bound constraints
        p.istop, p.msg = 1000, 'required solution has been obtained'
        return an, g, fo, None, Solutions, xRecord, r41, r40
    
    an, g = func9(an, fo, g, p)

    nn = maxNodes  # 1 if asdf1.isUncycled and all(isfinite(o)) and p._isOnlyBoxBounded and not p.probType.startswith('MI') else maxNodes

    an, g = func5(an, nn, g, p)
    nNodes.append(len(an))

    return an, g, fo, _s, Solutions, xRecord, r41, r40
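# A self-contained sketch of the masking pattern used above for th_key and
# tnlh_curr (plain numpy, hypothetical values; o and a play the roles of the
# per-box lower and upper objective bounds):
import numpy as np

o = np.array([[0.5, 2.0], [1.5, np.nan]])    # lower bounds per box
a = np.array([[1.0, 3.0], [2.5, np.nan]])    # upper bounds per box
fo = 1.8                                     # current objective cutoff

tmp = o.copy()
tmp[tmp > fo] = -np.inf                      # ignore entries already above the cutoff
th_key = np.nanmax(tmp, axis=1)              # row-wise key, NaNs skipped

tmp = a.copy()
tmp[tmp > fo] = fo                           # clip upper bounds at the cutoff
gap = tmp - o
gap[gap < 1e-300] = 1e-300                   # avoid log2(0)
gap[o > fo] = np.nan                         # drop infeasible entries
print(th_key, -np.log2(gap))                 # the -log2(gap) term enters tnlh_curr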
Beispiel #53
0
    def plot(self, experiment, **kwargs):
        """Plot a faceted histogram view of a channel"""
        
        if not experiment:
            raise util.CytoflowViewError("No experiment specified")
        
        if not self.channel:
            raise util.CytoflowViewError("Must specify a channel")
        
        if self.channel not in experiment.data:
            raise util.CytoflowViewError("Channel {0} not in the experiment"
                                    .format(self.channel))
        
        if self.xfacet and self.xfacet not in experiment.conditions:
            raise util.CytoflowViewError("X facet {0} not in the experiment"
                                    .format(self.xfacet))
        
        if self.yfacet and self.yfacet not in experiment.conditions:
            raise util.CytoflowViewError("Y facet {0} not in the experiment"
                                    .format(self.yfacet))
        
        if self.huefacet and self.huefacet not in experiment.conditions:
            raise util.CytoflowViewError("Hue facet {0} not in the experiment"
                                    .format(self.huefacet))

        if self.subset:
            try:
                data = experiment.query(self.subset).data.reset_index()
            except Exception:
                raise util.CytoflowViewError("Subset string '{0}' isn't valid"
                                        .format(self.subset))
                
            if len(data) == 0:
                raise util.CytoflowViewError("Subset string '{0}' returned no events"
                                        .format(self.subset))
        else:
            data = experiment.data
        
        # get the scale
        scale = util.scale_factory(self.scale, experiment, self.channel)
        scaled_data = scale(data[self.channel])
        
        kwargs.setdefault('histtype', 'stepfilled')
        kwargs.setdefault('alpha', 0.5)
        kwargs.setdefault('antialiased', True)

        # estimate a "good" number of bins; see cytoflow.utility.num_hist_bins
        # for a reference.
        
        num_bins = util.num_hist_bins(scaled_data)
        
        # clip num_bins to (50, 1000)
        num_bins = max(min(num_bins, 1000), 50)
        
        xmin = bottleneck.nanmin(scaled_data)
        xmax = bottleneck.nanmax(scaled_data)
                    
        if (self.huefacet 
            and "bins" in experiment.metadata[self.huefacet]
            and experiment.metadata[self.huefacet]["bin_scale"] == self.scale):
            # if we color facet by the result of a BinningOp and we don't
            # match the BinningOp bins with the histogram bins, we get
            # gnarly aliasing.
            
            # each color gets at least one bin.  however, if the estimated
            # number of bins for the histogram is much larger than the
            # number of colors, sub-divide each color into multiple bins.
            bins = experiment.metadata[self.huefacet]["bins"]
            bins = np.append(bins, xmax)
            
            num_hues = len(data[self.huefacet].unique())
            bins_per_hue = math.ceil(num_bins / num_hues)
            
            new_bins = [xmin]
            for end in [b for b in bins if (b > xmin and b <= xmax)]:
                new_bins = np.append(new_bins,
                                     np.linspace(new_bins[-1],
                                                 end,
                                                 bins_per_hue + 1,
                                                 endpoint = True)[1:])

            bins = scale.inverse(new_bins)
        else:
            bin_width = (xmax - xmin) / num_bins
            bins = scale.inverse(np.arange(xmin, xmax, bin_width))
            bins = np.append(bins, scale.inverse(xmax))
            
        # take care of a rare rounding error, where the last observation is
        # a liiiitle bit more than the last bin, which makes plt.hist() puke
        bins[-1] += 1
                    
        kwargs.setdefault('bins', bins) 
        
        # mask out the data that's not in the scale domain
        data = data[~np.isnan(scaled_data)]

        g = sns.FacetGrid(data, 
                          height = 6,    # 'size' in seaborn < 0.9
                          aspect = 1.5,
                          col = (self.xfacet if self.xfacet else None),
                          row = (self.yfacet if self.yfacet else None),
                          hue = (self.huefacet if self.huefacet else None),
                          col_order = (np.sort(data[self.xfacet].unique()) if self.xfacet else None),
                          row_order = (np.sort(data[self.yfacet].unique()) if self.yfacet else None),
                          hue_order = (np.sort(data[self.huefacet].unique()) if self.huefacet else None),
                          legend_out = False,
                          sharex = False,
                          sharey = False)
        
        # set the scale for each set of axes; can't just call plt.xscale() 
        for ax in g.axes.flatten():
            ax.set_xscale(self.scale, **scale.mpl_params)  
                  
        g.map(plt.hist, self.channel, **kwargs)
        
        # if we have a hue facet and a lot of hues, make a color bar instead
        # of a super-long legend.
        
        if self.huefacet:
            current_palette = mpl.rcParams['axes.prop_cycle'].by_key()['color']   # 'axes.color_cycle' before matplotlib 2.0
            if len(g.hue_names) > len(current_palette):
                plot_ax = plt.gca()
                cmap = mpl.colors.ListedColormap(sns.color_palette("husl", 
                                                                   n_colors = len(g.hue_names)))
                cax, _ = mpl.colorbar.make_axes(plt.gca())
                norm = mpl.colors.Normalize(vmin = np.min(g.hue_names), 
                                            vmax = np.max(g.hue_names), 
                                            clip = False)
                mpl.colorbar.ColorbarBase(cax, 
                                          cmap = cmap, 
                                          norm = norm, 
                                          label = self.huefacet)
                plt.sca(plot_ax)
            else:
                g.add_legend(title = self.huefacet)
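# A minimal usage sketch for the view above, assuming it is cytoflow's
# HistogramView; the trait names are taken from the snippet, while "FITC-A"
# and "Dox" are hypothetical channel/condition names and 'ex' is an existing
# Experiment:
import cytoflow as flow

hist = flow.HistogramView(channel = "FITC-A",
                          scale = "log",
                          huefacet = "Dox")
hist.plot(ex, alpha = 0.7)    # extra kwargs are forwarded to plt.hist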
Beispiel #54
0
    def simulate(self, tick=1, reactions=None, parameters=[]):
        """


        parameters will be a list of params on each edge.

        """
        # pandas is very convenient but slower than numpy.
        # The DataFrame instantiation is costly as well;
        # for small models, it has a non-negligible cost.

        # inhibitors will be changed if not ON
        #self.tochange = [x for x in self.model.nodes() if x not in self.stimuli_names
        #            and x not in self.and_gates]

        # what about a species that is both inhibited and measured
        testVal = 1e-3

        values = self.values.copy()

        if self.debug:
            self.debug_values = []
        self.residuals = []
        self.penalties = []

        self.count = 0
        self.nSp = len(values)
        residual = 1.

        frac = 1.2
        # FIXME: the +1 is to get the same results as in CellNOptR.
        # Because of cycles, you may otherwise not end up with the same
        # results; this happens if you have cycles with inhibitions
        # and an odd number of edges.
        if reactions is None:
            reactions = self.model.buffer_reactions
        self.number_edges = len(reactions)

        # 10 % time here
        #predecessors = self.reactions_to_predecessors(reactions)
        predecessors = defaultdict(collections.deque)
        for r in reactions:
            k,v = self._reac2pred[r]
            predecessors[k].extend(v)

        # speed up
        keys = self.values.keys()
        length_predecessors = dict([(node, len(predecessors[node])) for node in keys])

        #self._length_predecessors = length_predecessors
        # if there is an inhibition/drug, the node is 0
        values = self.values.copy()
        for inh in self.inhibitors_names:
            if length_predecessors[inh] == 0:
                #values[inh] = np.array([np.nan for x in range(0,self.N)])
                #values[inh] = np.array([0 for x in range(0,self.N)])
                values[inh] = np.zeros(self.N)

        while (self.count < self.nSp * frac +1.) and residual > testVal: 
            self.previous = values.copy()
            #self.X0 = pd.DataFrame(self.values)
            #self.X0 = self.values.copy()
            # compute AND gates first
            for node in self.and_gates:
                # replace na by large number so that min is unchanged
                # THere are always predecessors
                if length_predecessors[node] != 0:
                    values[node] = bn.nanmin(np.array([values[x] for x in predecessors[node]]), axis=0)
                else:
                    #assert 1==0, "%s %s" % (node, predecessors[node])
                    values[node] = self.previous[node]

            for node in self.tochange:
                # easy one, just the value of predecessors
                #if len(self.predecessors[node]) == 1:
                #    self.values[node] = self.values[self.predecessors[node][0]].copy()
                if length_predecessors[node] == 0:
                    pass # nothing to change
                else:
                    # TODO: if only one input, no need for that, just propagate signal.
                    dummy = np.array([values[x] if (x,node) not in self.toflip 
                        else 1 - values[x] for x in  predecessors[node]])
                    values[node] = bn.nanmax(dummy,  axis=0)

                # take inhibitors into account
                if node in self.inhibitors_names:
                    # if inhibitors is on (1), multiply by 0
                    # if inhibitors is not active, (0), does nothing.
                    values[node] *= 1 - self.inhibitors[node].values
            # Here NAs are automatically set to zero by the int16 cast,
            # which speeds the code up a bit by removing the need to handle
            # NAs explicitly. With nansum, NAs would be ignored even when
            # 1 is compared to NA.
            self.m1 = np.array([self.previous[k] for k in keys ], dtype=np.int16)
            self.m2 = np.array([values[k] for k in keys ], dtype=np.int16)
            #residual = bn.nansum(np.square(self.m1 - self.m2))
            #residual = np.nansum(np.square(self.m1 - self.m2))
            residual = np.nansum(np.square(self.m1 - self.m2))


            # TODO stop criteria should account for the length of the species to the
            # the node itself so count < nSp should be taken into account whatever is residual.
            #
            if self.debug:
                self.debug_values.append(self.previous.copy())
           
            self.residuals.append(residual)
            self.count += 1

        if self.debug is True:
            # add the latest values simulated in the while loop
            self.debug_values.append(values.copy())

        # Need to set undefined values to NAs
        self.simulated[self.time] = np.array([values[k] 
            for k in self.data.df.columns ], dtype=float)#.transpose()

        self.prev = {}
        self.prev[self.time] = np.array([self.previous[k] 
            for k in self.data.df.columns ], dtype=float)#.transpose()

        mask = self.prev[self.time] != self.simulated[self.time]
        self.simulated[self.time][mask] = np.nan

        self.simulated[self.time] = self.simulated[self.time].transpose()
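# A self-contained sketch of the gate update rule used above: bottleneck's
# nanmin/nanmax implement AND/OR over predecessor values while skipping
# undefined (NaN) inputs (hypothetical two-input node over 3 conditions):
import numpy as np
import bottleneck as bn

inputs = np.array([[1.0, 0.0, np.nan],   # predecessor A
                   [1.0, 1.0, 1.0]])     # predecessor B
and_gate = bn.nanmin(inputs, axis=0)     # logical AND -> [1., 0., 1.]
or_gate = bn.nanmax(inputs, axis=0)      # logical OR  -> [1., 1., 1.]
inhibitors = np.array([0, 1, 0])         # inhibitor ON in condition 2
print(or_gate * (1 - inhibitors))        # inhibition zeroes the node -> [1., 0., 1.]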
Beispiel #55
0
	def update(self):
		#get merge-extract
		for n,m in enumerate(self.show_merge):
			if self.show_merge_as_density[m]:
				self.merge_extract = self.densityMatrix[m][tuple(self.basis_dim_plot_range)]
			else:
				self.merge_extract = self.mergeMatrix[m][tuple(self.basis_dim_plot_range)]
			for b in range(len(self._basis_dim)-1,-1,-1):
				#basis dim to concentrate
				if b not in self.show_basis:
					pos_corr = self.concentrate_basis_dim[:b].count("pos")
					if self.concentrate_basis_dim[b] == "sum":
						self.merge_extract = bn.nansum(self.merge_extract,b-pos_corr)
					elif self.concentrate_basis_dim[b] == "mean":
						self.merge_extract = bn.nanmean(self.merge_extract,b-pos_corr)
					elif self.concentrate_basis_dim[b] == "max":
						self.merge_extract = bn.nanmax(self.merge_extract,b-pos_corr)
					elif self.concentrate_basis_dim[b] == "min":
						self.merge_extract = bn.nanmin(self.merge_extract,b-pos_corr)

			# check from end to start whether to roll an axis;
			# the time-axis has to be the last one.
			# don't roll the last basis-dim (start with len(self._basis_dim)-2)
			basis_time_index = None    # initialized outside the loop so it always exists
			for b in range(len(self._basis_dim)-2,-1,-1):
				if b not in self.show_basis and self.concentrate_basis_dim[b] == "time":
					#reshape the matrix
					self.merge_extract = np.rollaxis(self.merge_extract,b,0)
					basis_time_index = b
					break # no need to continue; only one dim can be 'time'

			if len(self.show_basis) == 1:
				basis_extract = self.basisMatrix[self.show_basis[0]][self.basis_dim_plot_range[self.show_basis[0]]]

				if self.scale_plot == True:
					self.plot.enableAutoRange('xy', True)
				else:
					if self.enableAutoRangeX:
						self.plot.enableAutoRange('x', True)
						#self.plot.setXRange(
							#self._basis_dim[self.show_basis[0]]._include_range[0],
							#self._basis_dim[self.show_basis[0]]._include_range[1])
					if self.enableAutoRangeY:
						self.plot.enableAutoRange('y', True)

				if self.transpose_axes:
					self.curves[n].setData(self.merge_extract, basis_extract)
				else:
					self.curves[n].setData(basis_extract, self.merge_extract)

			elif len(self.show_basis) >=2:
				#calc scale and zero-position for axes-tics
				x0=self._basis_dim[self.show_basis[0]]._include_range[0]
				x1=self._basis_dim[self.show_basis[0]]._include_range[1]
				y0=self._basis_dim[self.show_basis[1]]._include_range[0]
				y1=self._basis_dim[self.show_basis[1]]._include_range[1]
				xscale = (x1-x0) / self._basis_dim[self.show_basis[0]].resolution
				yscale = (y1-y0) / self._basis_dim[self.show_basis[1]].resolution
				args = {'pos':[x0, y0], 'scale':[xscale, yscale]}
				if self.transpose_axes:
					args = {'pos':[y0, x0], 'scale':[yscale, xscale]}

				#set time-ticks
				if basis_time_index is not None:
					args["xvals"] = self.basisMatrix[basis_time_index]

				if self.enableAutoRangeX:
					self.view.enableAutoRange('x', True)
					#self.view.setXRange(**tuple(self._basis_dim[self.show_basis[0]]._include_range))#[0],
						#self._basis_dim[self.show_basis[0]]._include_range[1])
				if self.enableAutoRangeY:
					self.view.enableAutoRange('y', True)

				#by default, autoLevels (the color level of the merge-dims) is True
				#(calculated by pyqtgraph), but that only works on arrays without
				#nan-values: the calculated color level is wrong when the real
				#values are beyond the nan-replacement (zero). Therefore the
				#color level is calculated here whenever nans are in the array:
				anynan = bn.anynan(self.merge_extract)
				if anynan:
					
					mmin = bn.nanmin(self.merge_extract)
					mmax = bn.nanmax(self.merge_extract)
					if np.isnan(mmin):
						mmin,mmax=0,0
					self.plot.setLevels(mmin, mmax)
					args["autoLevels"]= False
					##the following line doesn't work with my version of pyQtGraph
					#args["levels"] = [mmin, mmax]   #np.nanmin(merge_extract), np.nanmax(merge_extract)
				self.merge_extract = _utils.nanToZeros(self.merge_extract)

				if self.transpose_axes:
					self.plot.setImage(self.merge_extract.transpose(),
						autoRange=self.scale_plot,**args)
				else:
					self.plot.setImage(self.merge_extract,
						autoRange=self.scale_plot,**args)
				if anynan: # scale the histogram to the new range
					self.plot.ui.histogram.vb.setYRange(mmin,mmax)

		self.scale_plot = False
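# A small sketch of the reduction step above: collapsing several axes with
# bottleneck, iterating from the last axis toward the first so each removal
# leaves the indices of the not-yet-processed (lower) axes intact (plain
# numpy/bottleneck, hypothetical reduction plan):
import numpy as np
import bottleneck as bn

arr = np.arange(24, dtype=float).reshape((2, 3, 4))
plan = {0: "sum", 2: "max"}              # axes to collapse and how; keep axis 1
reduce_fn = {"sum": bn.nansum, "mean": bn.nanmean,
             "max": bn.nanmax, "min": bn.nanmin}

out = arr
for axis in sorted(plan, reverse=True):  # highest axis first
    out = reduce_fn[plan[axis]](out, axis=axis)
print(out)    # shape (3,): maxed over axis 2, then summed over axis 0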
Beispiel #56
0
def to16bit(array):
    """Convert an array to 16 bit."""
    return (old_div((65535.0 * array), nanmax(array))).astype("uint16")
Beispiel #57
0
def to8bit(array):
    """Convert an array to 8 bit."""
    return (old_div((255.0 * array), nanmax(array))).astype("uint8")
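# A quick usage sketch for the two converters above; old_div comes from
# past.utils and nanmax from numpy (or bottleneck) -- the snippets assume a
# star-import. The image below is hypothetical:
import numpy as np

img = np.linspace(0.0, 1.0, 6).reshape((2, 3))
img16 = (65535.0 * img / np.nanmax(img)).astype("uint16")   # what to16bit computes
img8 = (255.0 * img / np.nanmax(img)).astype("uint8")       # what to8bit computes
print(img16.dtype, img16.max(), img8.dtype, img8.max())     # uint16 65535 uint8 255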
Beispiel #58
0
def iqg(Self, domain, dtype=float, lb=None, ub=None, UB=None):
    if type(domain) != ooPoint:
        domain = ooPoint(domain, skipArrayCast=True)
        domain.isMultiPoint=True
    domain.useSave = True
    r0 = Self.interval(domain, dtype, resetStoredIntervals = False)
    
    r0.lb, r0.ub = atleast_1d(r0.lb).copy(), atleast_1d(r0.ub).copy() # is copy required?
    
    # TODO: get rid of useSave
    domain.useSave = False
    
    # TODO: rework it with indexation of required data
    if lb is not None and ub is not None:
        ind = logical_or(logical_or(r0.ub < lb, r0.lb > ub), all(logical_and(r0.lb >= lb, r0.ub <= ub)))
    elif UB is not None:
        ind = r0.lb > UB
    else:
        ind = None
    
    useSlicing = False
    
    if ind is not None:
        if all(ind):
            return {}, r0
        j = where(~ind)[0]
        #DOESN'T WORK FOR FIXED OOVARS AND DefiniteRange != TRUE YET
        if 0 and j.size < 0.85*ind.size:  # at least 15% of values to skip
            useSlicing = True
            tmp = []
            for key, val in domain.storedIntervals.items():
                Interval, definiteRange = val
                if type(definiteRange) not in (bool, bool_):
                    definiteRange = definiteRange[j]
                tmp.append((key, (Interval[:, j], definiteRange)))
            _storedIntervals = dict(tmp)
            
            Tmp = []
            for key, val in domain.storedSums.items():
                # TODO: rework it
                R0, DefiniteRange0 = val.pop(-1)
                #R0, DefiniteRange0 = val[-1]
                R0 = R0[:, j]
                if type(DefiniteRange0) not in (bool, bool_):
                    DefiniteRange0 = DefiniteRange0[j]
                tmp = []
                for k,v in val.items():
                    # TODO: rework it
#                        if k is (-1): continue
                    v = v[:, j]
                    tmp.append((k,v))
                val = dict(tmp)
                val[-1] = (R0, DefiniteRange0)
                Tmp.append((key,val))
            _storedSums = dict(Tmp)
            #domain.storedSums = dict(tmp)
            
            Tmp = []
            for key, val in domain.items():
                lb_,ub_ = val
                # TODO: rework it when lb, ub will be implemented as 2-dimensional
                Tmp.append((key, (lb_[j],ub_[j])))
            dictOfFixedFuncs = domain.dictOfFixedFuncs
            domain2 = ooPoint(Tmp, skipArrayCast=True)
            domain2.storedSums = _storedSums
            domain2.storedIntervals = _storedIntervals
            domain2.dictOfFixedFuncs = dictOfFixedFuncs
            domain2.isMultiPoint=True
            domain = domain2
            
    domain.useAsMutable = True
    
    r = {}
    Dep = (Self._getDep() if not Self.is_oovar else set([Self])).intersection(domain.keys())
    
    for i, v in enumerate(Dep):
        domain.modificationVar = v
        r_l, r_u = _iqg(Self, domain, dtype, r0)
        if useSlicing and r_l is not r0:# r_l is r0 when array_equal(lb, ub)
            lf1, lf2, uf1, uf2 = r_l.lb, r_u.lb, r_l.ub, r_u.ub
            Lf1, Lf2, Uf1, Uf2 = Copy(r0.lb), Copy(r0.lb), Copy(r0.ub), Copy(r0.ub)
            Lf1[:, j], Lf2[:, j], Uf1[:, j], Uf2[:, j] = lf1, lf2, uf1, uf2
            r_l.lb, r_u.lb, r_l.ub, r_u.ub = Lf1, Lf2, Uf1, Uf2
            if type(r0.definiteRange) not in (bool, bool_):
                d1, d2 = r_l.definiteRange, r_u.definiteRange
                D1, D2 = atleast_1d(r0.definiteRange).copy(), atleast_1d(r0.definiteRange).copy()
                D1[j], D2[j] = d1, d2
                r_l.definiteRange, r_u.definiteRange = D1, D2
            
        r[v] = r_l, r_u
        if not Self.isUncycled:
            lf1, lf2, uf1, uf2 = r_l.lb, r_u.lb, r_l.ub, r_u.ub
            lf, uf = nanmin(vstack((lf1, lf2)), 0), nanmax(vstack((uf1, uf2)), 0)
            if i == 0:
                L, U = lf.copy(), uf.copy()
            else:
                L[L<lf] = lf[L<lf].copy()
                U[U>uf] = uf[U>uf].copy()
    if not Self.isUncycled:
        for R in r.values():
            r1, r2 = R
            if type(r1.lb) != np.ndarray:
                r1.lb, r2.lb, r1.ub, r2.ub = atleast_1d(r1.lb), atleast_1d(r2.lb), atleast_1d(r1.ub), atleast_1d(r2.ub)
            r1.lb[r1.lb < L] = L[r1.lb < L]
            r2.lb[r2.lb < L] = L[r2.lb < L]
            r1.ub[r1.ub > U] = U[r1.ub > U]
            r2.ub[r2.ub > U] = U[r2.ub > U]
        
        r0.lb[r0.lb < L] = L[r0.lb < L]
        r0.ub[r0.ub > U] = U[r0.ub > U]
        
    # for more safety
    domain.useSave = True
    domain.useAsMutable = False
    domain.modificationVar = None 
    domain.storedIntervals = {}
    
    return r, r0
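# A self-contained sketch of the interval-envelope step above: the running
# lower/upper envelopes are the elementwise nanmin/nanmax over the two child
# interval bounds (plain numpy, hypothetical values):
import numpy as np

lf1, lf2 = np.array([0.0, np.nan]), np.array([0.5, -1.0])   # child lower bounds
uf1, uf2 = np.array([2.0, 3.0]), np.array([1.5, np.nan])    # child upper bounds
lf = np.nanmin(np.vstack((lf1, lf2)), axis=0)               # [0.0, -1.0]
uf = np.nanmax(np.vstack((uf1, uf2)), axis=0)               # [2.0, 3.0]
print(lf, uf)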
Beispiel #59
0
def func1(tnlhf, tnlhf_curr, residual, y, e, o, a, _s_prev, p, indT):
    m, n = y.shape
    w = arange(m)
    
    if p.probType == 'IP':
        oc_modL, oc_modU = o[:, :n], o[:, n:]
        ac_modL, ac_modU = a[:, :n], a[:, n:]
#            # TODO: handle nans
        mino = where(oc_modL < oc_modU, oc_modL, oc_modU)
        maxa = where(ac_modL < ac_modU, ac_modU, ac_modL)
    
        # Prev
        tmp = a[:, 0:n]-o[:, 0:n]+a[:, n:]-o[:, n:]
        t = nanargmin(tmp,1)
        d = 0.5*tmp[w, t]
        
        
        #New
#        tmp = a - o
#        t_ = nanargmin(tmp,1)
#        t = t_% n
#        d = tmp[w, t_]

#        ind = 2**(-n) >= (_s_prev - d)/asarray(d, 'float64')
        ind = 2**(1.0/n) * d >= _s_prev
        #new
#        ind = 2**(1.0/n) * d >= nanmax(maxa-mino, 1)
        
        #ind = 2**(-n) >= (_s_prev - _s)/asarray(_s, 'float64')
    
        #s2 = nanmin(maxa - mino, 1)
        #print (abs(s2/_s))
        
        # Prev
        _s = nanmin(maxa - mino, 1)
        
        # New
        #_s = nanmax(maxa - mino, 1)
#        _s = nanmax(a - o, 1)
        
        #ind = _s_prev  <= _s + ((2**-n / log(2)) if n > 15 else log2(1+2**-n)) 
        indD = logical_not(ind)
        indD = ind
        indD = None
        #print len(where(indD)[0]), len(where(logical_not(indD))[0])
#    elif p.probType == 'MOP':
#
#        raise 'unimplemented'
    else:
        if p.solver.dataHandling == 'sorted':
            _s = func13(o, a)
            t = nanargmin(a, 1) % n
            d = nanmax([a[w, t] - o[w, t], 
                    a[w, n+t] - o[w, n+t]], 0)
            
            ## !!!! Don't replace this with (_s_prev/d - 1), to avoid rounding errors ###
            #ind = 2**(-n) >= (_s_prev - d)/asarray(d, 'float64')
            
            #NEW
            ind = d  >=  _s_prev / 2 ** (1.0e-12/n)
            #ind = d  >=  _s_prev / 2 ** (1.0/n)
            indD = empty(m, bool)
            indD.fill(True)
            #ind.fill(False)
            ###################################################
        elif p.solver.dataHandling == 'raw':
            if p.probType == 'MOP':
                t = p._t[:m]
                p._t = p._t[m:]
                d = _s = p.__s[:m]
                p.__s = p.__s[m:]
            else:
#                tnlh_1, tnlh_2 = tnlhf[:, 0:n], tnlhf[:, n:]
#                TNHLF_min =  where(logical_or(tnlh_1 > tnlh_2, isnan(tnlh_1)), tnlh_2, tnlh_1)
#               # Set _s
#                _s = nanmin(TNHLF_min, 1)
                T = tnlhf_curr
                tnlh_curr_1, tnlh_curr_2 = T[:, 0:n], T[:, n:]
                TNHL_curr_min =  where(logical_or(tnlh_curr_1 < tnlh_curr_2, isnan(tnlh_curr_2)), tnlh_curr_1, tnlh_curr_2)
                t = nanargmin(TNHL_curr_min, 1)
                T = tnlhf
                d = nanmin(vstack(([T[w, t], T[w, n+t]])), 0)
                _s = d

            #OLD
            #!#!#!#! Don't replace it by _s_prev - d <= ... to omit inf-inf = nan !#!#!#
            #ind = _s_prev  <= d + ((2**-n / log(2)) if n > 15 else log2(1+2**-n)) 
            #ind = _s_prev - d <= ((2**-n / log(2)) if n > 15 else log2(1+2**-n)) 
            
            #NEW
            if any(_s_prev < d):
                pass
            ind = _s_prev  <= d + 1.0/n
#            T = TNHL_curr_min
            #ind2 = nanmin(TNHL_curr_min, 0)
            
            indQ = d >= _s_prev - 1.0/n 
            #indQ = logical_and(indQ, False)
            indD = logical_or(indQ, logical_not(indT))
#            print _s_prev[:2], d[:2]
            #print len(where(indD)[0]), len(where(indQ)[0]), len(where(indT)[0])
            #print _s_prev - d
            ###################################################
            #d = ((tnlh[w, t]* tnlh[w, n+t])**0.5)
        else:
            assert 0

    if any(ind):
        r10 = where(ind)[0]
        #print('r10:', r10)
#        print _s_prev
#        print ((_s_prev -d)*n)[r10]
#        print('ind length: %d' % len(where(ind)[0]))
#        print where(ind)[0].size
        #bs = e[ind] - y[ind]
        #t[ind] = nanargmax(bs, 1) # ordinary numpy.argmax can be used as well
        bs = e[r10] - y[r10]
        t[r10] = nanargmax(bs, 1) # ordinary numpy.argmax can be used as well

    return t, _s, indD
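# A short sketch of the final branching choice above: for boxes flagged by
# 'ind', the split coordinate is switched to the widest side of the box
# (y are the box lower corners, e the upper corners; plain numpy,
# hypothetical data):
import numpy as np

y = np.array([[0.0, 0.0], [1.0, 2.0]])   # box lower corners
e = np.array([[1.0, 4.0], [2.0, 2.5]])   # box upper corners
t = np.array([0, 0])                     # previously chosen coordinates
ind = np.array([True, False])            # boxes needing re-selection

r10 = np.where(ind)[0]
bs = e[r10] - y[r10]                     # side lengths of the flagged boxes
t[r10] = np.nanargmax(bs, axis=1)        # pick the widest side -> t == [1, 0]
print(t)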