Example #1
    def testNanCumReduction(self):
        raw = np.random.randint(5, size=(8, 8, 8)).astype(np.float64)
        raw[:2, 2:4, 4:6] = np.nan

        arr = tensor(raw, chunk_size=3)

        res1 = self.executor.execute_tensor(nancumsum(arr, axis=1),
                                            concat=True)
        res2 = self.executor.execute_tensor(nancumprod(arr, axis=1),
                                            concat=True)
        expected1 = np.nancumsum(raw, axis=1)
        expected2 = np.nancumprod(raw, axis=1)
        np.testing.assert_array_equal(res1[0], expected1)
        np.testing.assert_array_equal(res2[0], expected2)

        raw = sps.random(8, 8, density=.1, format='lil')
        raw[:2, 2:4] = np.nan

        arr = tensor(raw, chunk_size=3)

        res1 = self.executor.execute_tensor(nancumsum(arr, axis=1),
                                            concat=True)[0]
        res2 = self.executor.execute_tensor(nancumprod(arr, axis=1),
                                            concat=True)[0]
        expected1 = np.nancumsum(raw.A, axis=1)
        expected2 = np.nancumprod(raw.A, axis=1)
        self.assertTrue(np.allclose(res1, expected1))
        self.assertTrue(np.allclose(res2, expected2))
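For reference, np.nancumsum treats each NaN as zero while keeping the output shape, which is exactly what the assertions above compare against. A minimal standalone check with plain NumPy (no tensor framework required):

import numpy as np

a = np.array([1.0, np.nan, 3.0])
print(np.nancumsum(a))  # [1. 1. 4.] -- the NaN contributes zero
print(np.cumsum(a))     # [ 1. nan nan] -- plain cumsum propagates the NaN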
Example #2
def Tr_DMCA(rain_array, flow_array, max_window):
    import numpy as np
    import math

    #ROUTINE
    rain_int=np.nancumsum(rain_array, axis=0) #cumulating rainfall timeseries (Eq.1)
    flow_int=np.nancumsum(flow_array, axis=0) #cumulating streamflow timeseries (Eq.2)
    T=len(rain_array) #length of the timeseries
    n=0 #counter for the windows tested
    rho=np.empty(int(max_window/2))*np.nan #initializing rho coefficient as a NaN vector
    
    for window in range(3, max_window,2):
        rain_mean= np.convolve(rain_int, np.ones(window), 'valid') / window #moving average on the integrated rainfall timeseries (Eq.5)
        flow_mean= np.convolve(flow_int, np.ones(window), 'valid') / window #moving average on the integrated streamflow timeseries (Eq.6)
        flutt_rain=rain_int[int(float(window)/2+0.5-1):int(len(rain_int)-float(window)/2+0.5)]-rain_mean
        F_rain=(1/float(T-window+1))*np.nansum((np.square(flutt_rain))) #Squared rainfall fluctuations (Eq.3)
        flutt_flow=flow_int[int(float(window)/2+0.5-1):int(len(flow_int)-float(window)/2+0.5)]-flow_mean
        F_flow=(1/float(T-window+1))*np.nansum((np.square(flutt_flow))) #Squared streamflow fluctuations (Eq.4)
        F_rain_flow=(1/float(T-window+1))*np.nansum(np.multiply(flutt_rain,flutt_flow)) #Bivariate rainfall-streamflow fluctuations (Eq.7)
        if np.logical_or(F_rain == 0, F_flow == 0):
            rho[n]=np.nan #avoiding division by 0
        else:
            rho[n]=F_rain_flow/(math.sqrt(F_rain)*math.sqrt(F_flow)) #DMCA-based correlation coefficient (Eq.8)
        n=n+1
        
    #OUTPUT
    position_minimum=np.where(rho==np.nanmin(rho))
    catchment_response_time=float(position_minimum[0][0])+1
    return catchment_response_time
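A minimal usage sketch for Tr_DMCA with synthetic data; the series below are illustrative only and assume the function defined above is in scope:

import numpy as np

rng = np.random.default_rng(0)
rain = rng.gamma(shape=0.5, scale=2.0, size=1000)         # synthetic rainfall series
flow = np.convolve(rain, np.ones(24) / 24, mode='same')   # crude smoothed/delayed response
print(Tr_DMCA(rain, flow, max_window=101))                 # estimated catchment response time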
Example #3
def intervals_cumsum(iv_daily, exp=2.):
    """
    Returns intervals computed as cumulative sum of iv.
    """
    iv = {}
    iv['Intervals'] = []
    iv['Median'] = list(np.nancumsum(iv_daily['Median']))
    iv['Mean'] = list(np.nancumsum(iv_daily['Mean']))
    for local_iv_daily in iv_daily['Intervals']:
        local_iv = {}
        p = local_iv_daily['Percentage']
        local_iv['Percentage'] = p
        local_iv['Low Interval'] = []
        local_iv['High Interval'] = []

        def cumsum_iv(iv, mean):
            iv = np.array(iv)
            mean = np.array(mean)
            return (np.nancumsum(abs(mean - iv)**exp))**(1. / exp)

        def cumsum_low(low, mean, meancum):
            meancum = np.array(meancum)
            return list(meancum - cumsum_iv(low, mean))

        def cumsum_high(high, mean, meancum):
            meancum = np.array(meancum)
            return list(meancum + cumsum_iv(high, mean))

        local_iv['Low Interval'] = cumsum_low(local_iv_daily['Low Interval'],
                                              iv_daily['Mean'], iv['Mean'])
        local_iv['High Interval'] = cumsum_high(
            local_iv_daily['High Interval'], iv_daily['Mean'], iv['Mean'])

        iv['Intervals'].append(local_iv)
    return iv
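A small usage sketch, assuming iv_daily is shaped the way intervals_cumsum expects (Median/Mean lists plus a list of interval dicts); the numbers are made up:

import numpy as np

iv_daily = {
    'Median': [1.0, 2.0, np.nan, 1.5],
    'Mean': [1.1, 2.1, np.nan, 1.4],
    'Intervals': [{'Percentage': 90,
                   'Low Interval': [0.5, 1.0, np.nan, 0.7],
                   'High Interval': [1.8, 3.0, np.nan, 2.2]}],
}
iv_cum = intervals_cumsum(iv_daily)
print(iv_cum['Mean'])
print(iv_cum['Intervals'][0]['High Interval'])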
Example #4
def getQminTimeSeries(tms, dis, startDate, endDate):
  cnd = np.logical_and(tms >= startDate, tms <= endDate)
  tms = tms[cnd]
  dis = dis[cnd, :]
  # computing the moving average
  nma = 7
  discs = np.nancumsum(dis, axis=0)
  nancountcs = np.nancumsum(np.isnan(dis), axis=0)
  dissum = discs[nma:,:] - discs[:-nma,:]
  nnan = nancountcs[nma:,:] - nancountcs[:-nma,:]
  ndis = (nma - nnan).astype(float)
  assert np.all(ndis >= 0)
  ndis[ndis == 0] = np.nan
  dis_ = dissum/ndis
  ii = int(np.floor(nma/2))
  tms_ = tms[ii:-ii-1]

  estYrs = np.array([t.year for t in tms_])
  yrs = np.unique(estYrs)
  ny = len(yrs)
  disMin = np.ones([ny, dis_.shape[1]])*np.nan

  for iy in range(ny):
    y = yrs[iy]
    cndy = estYrs == y
    disy = dis_[cndy, :]
   #nanratio = np.sum(np.isnan(disy), 0) / len(disy)
    disMiny = np.nanmin(disy, 0)
   #disMiny[nanratio > .5] = np.nan
    disMin[iy, :] = disMiny
  return yrs, disMin
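A synthetic-data sketch for getQminTimeSeries, assuming tms holds date-like objects exposing a .year attribute (here datetime.date) and dis is a time-by-station matrix:

import datetime as dt
import numpy as np

tms = np.array([dt.date(2000, 1, 1) + dt.timedelta(days=i) for i in range(730)])
dis = np.random.rand(730, 3)          # two years of daily discharge for 3 stations
dis[50:60, 0] = np.nan                # a data gap in one station
yrs, qmin = getQminTimeSeries(tms, dis, dt.date(2000, 1, 1), dt.date(2001, 12, 31))
print(yrs, qmin.shape)                # annual minima of the 7-day moving average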
Example #5
 def __QS_end__(self):
     if not self._isStarted: return 0
     super().__QS_end__()
     AR = self._Output["异常收益率"]# abnormal returns at each time point
     CAR = np.nancumsum(AR, axis=1)# forward cumulative abnormal returns
     FBCAR = np.c_[np.fliplr(np.nancumsum(np.fliplr(AR[:, :self.EventPreWindow+1]), axis=1))[:, :self.EventPreWindow], np.nancumsum(AR[:, self.EventPreWindow:], axis=1)]# forward and backward cumulative abnormal returns
     AR_Var = np.full(AR.shape, fill_value=np.nan)
     CAR_Var = np.full(AR.shape, fill_value=np.nan)
     FBCAR_Var = np.full(AR.shape, fill_value=np.nan)
     for i in range(AR.shape[1]):
         ei = np.zeros(AR.shape[1])
         ei[i] = 1
         AR_Var[:, i] = np.dot(np.dot(self._Output["异常协方差"], ei), ei)
         ei[:i] = 1
         CAR_Var[:, i] = np.dot(np.dot(self._Output["异常协方差"], ei), ei)
         ei[:] = 0
         ei[i:self.EventPreWindow+1] = 1
         ei[self.EventPreWindow:i+1] = 1
         FBCAR_Var[:, i] = np.dot(np.dot(self._Output["异常协方差"], ei), ei)
     AR_Avg, AR_Avg_Var = np.nanmean(AR, axis=0), np.nansum(AR_Var, axis=0) / np.sum(~np.isnan(AR_Var), axis=0)**2
     CAR_Avg, CAR_Avg_Var = np.nanmean(CAR, axis=0), np.nansum(CAR_Var, axis=0) / np.sum(~np.isnan(CAR_Var), axis=0)**2
     FBCAR_Avg, FBCAR_Avg_Var = np.nanmean(FBCAR, axis=0), np.nansum(FBCAR_Var, axis=0) / np.sum(~np.isnan(FBCAR_Var), axis=0)**2
     self._Output["J1统计量"] = {"异常收益率": pd.DataFrame(AR_Avg, index=np.arange(-self.EventPreWindow, 1+self.EventPostWindow), columns=["单时点"])}
     self._Output["J1统计量"]["异常收益率"]["向前累积"] = CAR_Avg
     self._Output["J1统计量"]["异常收益率"]["向前向后累积"] = FBCAR_Avg
     self._Output["J1统计量"]["J1"] = pd.DataFrame(AR_Avg / AR_Avg_Var**0.5, index=self._Output["J1统计量"]["异常收益率"].index, columns=["单时点"])
     self._Output["J1统计量"]["J1"]["向前累积"] = CAR_Avg / CAR_Avg_Var**0.5
     self._Output["J1统计量"]["J1"]["向前向后累积"] = FBCAR_Avg / FBCAR_Avg_Var**0.5
     self._Output["J1统计量"]["p值"] = pd.DataFrame(norm.sf(np.abs(self._Output["J1统计量"]["J1"].values)), index=self._Output["J1统计量"]["J1"].index, columns=self._Output["J1统计量"]["J1"].columns)
     SAR, SCAR, SFBCAR = AR / AR_Var**0.5, CAR / CAR_Var**0.5, FBCAR / FBCAR_Var**0.5
     SAR[np.isinf(SAR)] = SCAR[np.isinf(SCAR)] = SFBCAR[np.isinf(SFBCAR)] = np.nan
     SAR_Avg, SCAR_Avg, SFBCAR_Avg = np.nanmean(SAR, axis=0), np.nanmean(SCAR, axis=0), np.nanmean(SFBCAR, axis=0)
     self._Output["J2统计量"] = {"标准化异常收益率": pd.DataFrame(SAR_Avg, index=np.arange(-self.EventPreWindow, 1+self.EventPostWindow), columns=["单时点"])}
     self._Output["J2统计量"]["标准化异常收益率"]["向前累积"] = SCAR_Avg
     self._Output["J2统计量"]["标准化异常收益率"]["向前向后累积"] = SFBCAR_Avg
     self._Output["J2统计量"]["J2"] = pd.DataFrame(SAR_Avg * (np.sum(~np.isnan(SAR), axis=0) * (self.EstSampleLen - 4) / (self.EstSampleLen - 2))**0.5, index=self._Output["J2统计量"]["标准化异常收益率"].index, columns=["单时点"])
     self._Output["J2统计量"]["J2"]["向前累积"] = SCAR_Avg * (np.sum(~np.isnan(SCAR), axis=0) * (self.EstSampleLen - 4) / (self.EstSampleLen - 2))**0.5
     self._Output["J2统计量"]["J2"]["向前向后累积"] = SFBCAR_Avg * (np.sum(~np.isnan(SFBCAR), axis=0) * (self.EstSampleLen - 4) / (self.EstSampleLen - 2))**0.5
     self._Output["J2统计量"]["p值"] = pd.DataFrame(norm.sf(np.abs(self._Output["J2统计量"]["J2"].values)), index=self._Output["J2统计量"]["J2"].index, columns=self._Output["J2统计量"]["J2"].columns)
     N = np.sum(~np.isnan(AR), axis=0)
     self._Output["J3统计量"] = {"J3": pd.DataFrame((np.sum(AR>0, axis=0)/N - 0.5)*N**0.5/0.5, index=np.arange(-self.EventPreWindow, 1+self.EventPostWindow), columns=["单时点"])}
     N = np.sum(~np.isnan(CAR), axis=0)
     self._Output["J3统计量"] ["J3"]["向前累积"] = (np.sum(CAR>0, axis=0)/N - 0.5)*N**0.5/0.5
     N = np.sum(~np.isnan(FBCAR), axis=0)
     self._Output["J3统计量"] ["J3"]["向前向后累积"] = (np.sum(FBCAR>0, axis=0)/N - 0.5)*N**0.5/0.5
     self._Output["J3统计量"]["p值"] = pd.DataFrame(norm.sf(np.abs(self._Output["J3统计量"]["J3"].values)), index=self._Output["J3统计量"]["J3"].index, columns=self._Output["J3统计量"]["J3"].columns)
     AR = AR[np.sum(np.isnan(AR), axis=1)==0, :]
     N = AR.shape[0]
     L2 = self.EventPreWindow+1+self.EventPostWindow
     K = np.full_like(AR, np.nan)
     K[np.arange(N).repeat(L2), np.argsort(AR, axis=1).flatten()] = (np.arange(L2*N) % L2) + 1
     J4 = np.nansum(K-(L2+1)/2, axis=0) / N
     J4 = J4 / (np.nansum(J4**2) / L2)**0.5
     self._Output["J4统计量"] = {"J4": pd.DataFrame(J4, index=np.arange(-self.EventPreWindow, 1+self.EventPostWindow), columns=["单时点"])}
     self._Output["J4统计量"]["p值"] = pd.DataFrame(norm.sf(np.abs(self._Output["J4统计量"]["J4"].values)), index=self._Output["J4统计量"]["J4"].index, columns=self._Output["J4统计量"]["J4"].columns)
     Index = pd.MultiIndex.from_arrays(self._Output.pop("事件记录")[:,:2].T, names=["ID", "时点"])
     self._Output["正常收益率"] = pd.DataFrame(self._Output["正常收益率"], columns=np.arange(-self.EventPreWindow, 1+self.EventPostWindow), index=Index).reset_index()
     self._Output["异常收益率"] = pd.DataFrame(self._Output["异常收益率"], columns=np.arange(-self.EventPreWindow, 1+self.EventPostWindow), index=Index).reset_index()
     self._Output.pop("异常协方差")
     return 0
Example #6
    def kineticMC(self, nu, sigma0):

        if self.trigger == False:  # If stress is too large, rates may overflow, so we need to check whether the stress is too large.
            sum_till_list = np.nancumsum(self.Rate)
            #print("Sum list:",sum_till_list[:40])
            rho1 = random.random()
            rho2 = random.random()

            self.t_b = -log(rho1) / sum_till_list[-1]
            ran_tot_rate = rho2 * sum_till_list[-1]

            self.element_fail_number = binarySearch(sum_till_list,
                                                    ran_tot_rate)
        else:  # If stress is too large, we can cancel out stress to generate a new fake stress.
            sum_till_list = np.nancumsum(self.Rate)
            rho2 = random.random()
            ran_tot_rate = rho2 * sum_till_list[-1]
            self.element_fail_number = binarySearch(sum_till_list,
                                                    ran_tot_rate)
            self.t_b = 0

        if self.StressRedistribution:
            self.StressRedis(self.element_fail_number, nu, sigma0)

        return (self.element_fail_number, self.t_b)
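binarySearch is not defined in this snippet; a minimal stand-in consistent with how it is used here (pick the first element whose cumulative rate reaches the random draw) could look like the sketch below. This is an assumption about the missing helper, not the original implementation:

import numpy as np

def binarySearch(cumulative_rates, target):
    # Hypothetical stand-in: index of the first cumulative rate >= target.
    return int(np.searchsorted(cumulative_rates, target, side='left'))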
Example #7
    def _continuation(return_arr):

        climbing_up = np.nancumsum(return_arr >= 0)
        climbing_down = np.nancumsum(return_arr < 0)
        continuous_growing_counts = itemfreq(climbing_down)
        continuous_falling_counts = itemfreq(climbing_up)

        return (continuous_growing_counts[:, -1].max(),
                continuous_falling_counts[:, -1].max())
Example #8
def save_file_cumsum(ui, t_max, mb_matrix):
    project_name = str(ui.le_id001.text())
    project_dir = str(ui.le_id002.text())
    external_validation_path = os.path.join(project_dir, project_name,
                                            'validation', 'external')
    epoch_glac = (int(ui.de_id007.date().toString("yyyy")),
                  int(ui.de_id008.date().toString("yyyy")))
    epoch_geod = (int(ui.de_id005.date().toString("yyyy")),
                  int(ui.de_id006.date().toString("yyyy")))
    tlims = [
        np.min((epoch_glac[0], epoch_geod[0])),
        np.max((epoch_glac[1], epoch_geod[1]))
    ]
    if tlims[1] > t_max:
        tlims[1] = t_max
    t = range(tlims[0], tlims[1] + 1)
    # area
    area = np.copy(mb_matrix['area'])
    if ui.rb_id069.isChecked():
        area = area * 0 + ui.dsb_id001.value()
    elif ui.rb_id071.isChecked():
        area = area * 0 + ui.dsb_id016.value()
    mjd = []
    kg = []
    kg_cal = []
    for ti in t:
        ti_mjd = date_functions.ymd2mjd(ti, 10, 1)
        yi = mb_matrix['ma'][ti] * (area[ti] * 1e6) * (
            density / 1e3)  # 1 m w.e. = 1000 kg m^-2 rho^-1
        yi_cal = mb_matrix['ma_calibrated'][ti] * (area[ti] * 1e6) * (density /
                                                                      1e3)
        mjd.append(ti_mjd)
        kg.append(yi)
        kg_cal.append(yi_cal)
    kg = np.nancumsum(kg)
    kg_cal = np.nancumsum(kg_cal)
    ix = kg == 0
    kg[ix] = np.nan
    ix = kg_cal == 0
    kg_cal[ix] = np.nan
    data = {'wgms': kg, 'wgms_calibrated': kg_cal}
    for key in ('wgms', 'wgms_calibrated'):
        time_series_file = os.path.join(external_validation_path,
                                        'time_series', '%s.txt' % key)
        with open(time_series_file, 'w') as f:
            f.write('%25s %25s %25s\n' %
                    ('time (mjd)', 'mass (kg)', 'sigma (kg)'))
        for ix in range(len(kg)):
            mjdi = mjd[ix]
            with open(time_series_file, 'a+') as f:
                f.write('%+25.16e %+25.16e %+25.16e\n' %
                        (mjdi, data[key][ix], 0))
Example #9
def getScore(x_test, y_test, model, final, history):
    global metric_LSTM_accuracy
    final['Predictions'] = model.predict(x_test)
    final['Buy_Order'] = 0
    final.loc[final['Predictions'] > 0, 'Buy_Order'] = 1
    final.loc[final['Predictions'] < 0, 'Buy_Order'] = -1

    metric_LSTM_accuracy = accuracy_score(final['Signal'],final['Buy_Order'])

    final['Strategy_Return'] = final['Buy_Order']*final['Daily_Return']

    final['Cumulative_Strategy_Return'] = 0
    final['Cumulative_Market_Return'] = 0

    final.iloc[:, final.columns.get_loc('Cumulative_Strategy_Return')] = np.nancumsum(final['Strategy_Return'][:])
    final.iloc[:, final.columns.get_loc('Cumulative_Market_Return')] = np.nancumsum(final['Daily_Return'][:])

    plt.figure(3)
    plt.clf()
    plt.plot(final['Cumulative_Strategy_Return'][:], color = 'g', label = 'Strategy Returns')
    plt.plot(final['Cumulative_Market_Return'][:], color = 'r', label = 'Market Returns')
    plt.legend(loc = 'best')
    plt.title('LSTM Predicted Return vs Market Return', fontsize = 32)
    plt.xlabel('Date', fontsize = 30)
    plt.ylabel('Cumulative Return', fontsize = 30)
    plt.grid(which = 'both', axis = 'both')
    plt.show()

    plt.figure(4)
    plt.clf()
    plt.plot(history.history['acc'], color = 'g', label = 'Training Accuracy')
    plt.plot(history.history['val_acc'], color = 'r', label = 'Validation Accuracy')
    plt.legend(loc = 'best')
    plt.title('LSTM Accuracy vs Epoch', fontsize = 32)
    plt.xlabel('Epoch', fontsize = 30)
    plt.ylabel('Accuracy', fontsize = 30)
    plt.grid(which = 'both', axis = 'both')
    plt.show()

    plt.figure(5)
    plt.clf()
    plt.plot(history.history['loss'], color = 'g', label = 'Training Loss')
    plt.plot(history.history['val_loss'], color = 'r', label = 'Validation Loss')
    plt.legend(loc = 'best')
    plt.title('LSTM Loss vs Epoch', fontsize = 32)
    plt.xlabel('Epoch', fontsize = 30)
    plt.ylabel('Loss', fontsize = 30)
    plt.grid(which = 'both', axis = 'both')
    plt.show()

    return final
Example #10
def PlotHodograph(ax,U,V,deltat,legend=True,orientation='EW',type='A'):
    """
            *** Function PlotHodograph ***
    Adds hodograph plot to the hodograph background
            *** Arguments ***
    - ax is a pyplot Axes instance that should contain the proper background
    produced with analysis.Hodograph
    - U and V are 1D arrays containing the measured velocities
    - deltat is the time between each velocity sampling
    * kwargs *
        - legend: plots legend if True.
            default = True
        - orientation: direction of cruisetrack, zonal 'EW' or meridional 'SN'
            default = 'EW'
        - type: eddy type, anticyclonic 'A' or cyclonic 'C'
            default = 'A'
            *** Outputs ***
    No outputs, works on the Axes instance directly
            *** Remarks ***
    """
    ## Plot the data
    # Make hodograph from velocities
    x = (np.nancumsum(U)*deltat)/1000 # /1000 to get km
    y = (np.nancumsum(V)*deltat)/1000 # /1000 to get km
    # Plot the time integration
    hodograph = ax.plot(x,y)
    # Plot the first point
    first_point, = ax.plot(x[0],y[0],'ko',ms=10,label='First data point')
    # Plot the last point
    last_point, = ax.plot(x[-1],y[-1],'kd',ms=10,label='Last data point')
    # Find the virtual center
    if orientation == 'EW' and type == 'A':
        # Anticyclonic case and EW section
        y_max = np.nanmax(np.abs(y))
        index = np.where(np.abs(y) == y_max)[0]
        if len(index)>1:
            index = index[0]
        x_center = x[index]
        y_center = y[index]
    elif orientation == 'SN' and type == 'A':
        x_max = np.nanmax(x)
        index = np.where(x == x_max)[0]
        if len(index)>1:
            index = index[0]
        y_center = y[index]
        x_center = x_max
    # Plot the virtual center point
    center, = ax.plot(x_center,y_center,'k*',ms=10,label='Virtual center')
    # Legend
    if legend:
        ax.legend(handles = [first_point,last_point,center],bbox_to_anchor=(1.1, 1))
Example #11
    def test_nancumsum_1(self):

        a = np.nancumsum(1)
        print(a)

        b = np.nancumsum([1])
        print(b)

        c = np.nancumsum([1, np.nan])
        print(c)

        a = np.array([[1, 2], [3, np.nan]])
        d = np.nancumsum(a)
        print(d)


        e = np.nancumsum(a, axis=0)
        print(e)

        f = np.nancumsum([1, np.nan, np.inf])
        print(f)

        g = np.nancumsum([1, np.nan, np.NINF])
        print(g)

        h = np.nancumsum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
        print(h)

        return
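For reference, the values these print calls produce follow NumPy's documented nancumsum behaviour: NaN contributes zero, and combining +inf with -inf yields NaN.

import numpy as np

np.nancumsum(1)                              # array([1])
np.nancumsum([1, np.nan])                    # array([1., 1.])
np.nancumsum([[1, 2], [3, np.nan]])          # array([1., 3., 6., 6.])  (flattened)
np.nancumsum([[1, 2], [3, np.nan]], axis=0)  # array([[1., 2.], [4., 2.]])
np.nancumsum([1, np.nan, np.inf])            # array([ 1.,  1., inf])
np.nancumsum([1, np.nan, -np.inf])           # array([  1.,   1., -inf])
np.nancumsum([1, np.nan, np.inf, -np.inf])   # array([ 1.,  1., inf, nan])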
Example #12
def triangularMovingAverage(period, data):

    # --- Triangular Moving Average
    # data: array, time series data e.g. daily close prices
    # period: integer, number of periods from the time series array to include in the calculation

    # --- import libraries
    import numpy as np

    # --- get first non nan index
    for i in range(len(data)):

        if np.isnan(data[i]) == False:

            firstNonNan = i
            break

    # --- get last non nan index
    for i in reversed(range(len(data))):

        if np.isnan(data[i]) == False:

            lastNonNan = i
            break

    # --- calculate SMA
    ret = np.nancumsum(data, dtype=float)
    ret[period:] = ret[period:] - ret[:-period]
    ret = ret[period - 1:] / period

    # --- calculate SMA of SMA
    ret_ss = np.nancumsum(ret, dtype=float)
    ret_ss[period:] = ret_ss[period:] - ret_ss[:-period]
    ret_ss = ret_ss[period - 1:] / period

    # --- return array of number the same length as the input
    ret = np.append(np.zeros(2 * period - 2) + np.nan, ret_ss)

    # --- update zeros with nan
    for i in range(len(data)):

        if i < firstNonNan + (period * 2):

            np.put(ret, i, np.nan)

        elif i >= lastNonNan:

            np.put(ret, i, np.nan)

    return ret
Example #13
    def test_nancumsum(self):
        # Testing a 1D array
        mag_1d = np.array([5., np.nan])
        speeds_1d = Quantity.from_units(mag=mag_1d, units='m/s')
        units_1d = speeds_1d.units
        expected_nancumsum_1d = Quantity._from_qty(mag=np.nancumsum(mag_1d),
                                              units=units_1d)
        np.testing.assert_array_equal(expected_nancumsum_1d,
                                      np.nancumsum(speeds_1d))

        # Testing a 2D array
        mag_2d = np.array([[5., 6.], [7., np.nan]])
        speeds_2d = Quantity.from_units(mag=mag_2d, units='m/s')
        units_2d = speeds_2d.units

        # Axis not specified
        expected_nancumsum_2d = Quantity._from_qty(mag=np.nancumsum(mag_2d),
                                              units=units_2d)
        np.testing.assert_array_equal(expected_nancumsum_2d,
                                      np.nancumsum(speeds_2d))

        # Axis = 0
        axis = 0
        expected_nancumsum_2d = Quantity._from_qty(mag=np.nancumsum(mag_2d, axis=axis),
                                              units=units_2d)
        np.testing.assert_array_equal(expected_nancumsum_2d,
                                      np.nancumsum(speeds_2d, axis=axis))

        # Axis = 1
        axis = 1
        expected_nancumsum_2d = Quantity._from_qty(mag=np.nancumsum(mag_2d, axis=axis),
                                              units=units_2d)
        np.testing.assert_array_equal(expected_nancumsum_2d,
                                      np.nancumsum(speeds_2d, axis=axis))
Example #14
def gradientTesserae(polylist, tilt = 10.0, position = 0.5, drop=1.0, normalstretch=1.0, scale=0.05):
    render = renderPolygons(adjustedTesserae[0],scale,True)
    render0 = render.shape[0]
    render1 = render.shape[1]
    tileCount = len(polylist[1])
    colorMosaic = np.full((render0,render1),np.NaN)
    renderEdge0 = scipy.ndimage.sobel(render,0)
    renderEdge1 = scipy.ndimage.sobel(render,1)
    renderEdgeMask = np.where((renderEdge0 == 0)&(renderEdge1 == 0),1,0)
    normalMapFront = np.full((render0, render1, 3), np.NaN)
    normalMapBack = np.full((render0, render1, 3), np.NaN)
    v0 = np.repeat([np.arange(0,render0)],render1,0).T
    v1 = np.repeat([np.arange(0,render1)],render0,0)
    for i in range(tileCount):
        tile = renderPolygons(adjustedTesserae[0][i:i+1], scale, True)
        tilemin0 = render0-np.nanargmax(np.nansum(np.nancumsum(np.flip(tile,0), 0),1))
        tilemax0 = np.nanargmax(np.nansum(np.nancumsum(tile, 0),1))
        tilemin1 = render1-np.nanargmax(np.nansum(np.nancumsum(np.flip(tile,1), 1),0))
        tilemax1 = np.nanargmax(np.nansum(np.nancumsum(tile, 1),0))
        angle = random.random()*2*math.pi
        height = random.random()*position*max((tilemax0-tilemin0),(tilemax1-tilemin1))
        tiltAngle = random.random()*2*math.pi*tilt/360
        y = math.sin(angle)*math.sin(tiltAngle)
        x = math.cos(angle)*math.sin(tiltAngle)
        z = math.cos(tiltAngle)
        z0 = (1-height)-abs((y*(tilemax0-tilemin0) + x*(tilemax1-tilemin1))/(2*z))
        x0 = (tilemin1+tilemax1)/2
        y0 = (tilemin0+tilemax0)/2
        color = z0 - ((x*(v1-x0)+y*(v0-y0))/z)
        colorMosaic = np.where(tile>0, color,colorMosaic)
        normalMapFront[:,:,0] = np.where((tile>0)&(renderEdgeMask>0), x, normalMapFront[:,:,0])
        normalMapFront[:,:,1] = np.where((tile>0)&(renderEdgeMask>0), y, normalMapFront[:,:,1])
        normalMapFront[:,:,2] = np.where((tile>0)&(renderEdgeMask>0), z, normalMapFront[:,:,2])
    tileRange = np.nanmax(colorMosaic)-np.nanmin(colorMosaic)
    colorMosaic = np.where(colorMosaic!=colorMosaic, np.nanmin(colorMosaic)-tileRange*drop, colorMosaic)
    sobely = scipy.ndimage.sobel(colorMosaic,0)/8
    sobelx = scipy.ndimage.sobel(colorMosaic,1)/8
    sobelmagnitude = np.sqrt(sobelx**2+sobely**2+1)
    normalMapBack[:,:,0]= np.where(normalMapFront[:,:,0] != normalMapFront[:,:,0], -1*sobelx/sobelmagnitude, normalMapFront[:,:,0])
    normalMapBack[:,:,1]= np.where(normalMapFront[:,:,1] != normalMapFront[:,:,1], -1*sobely/sobelmagnitude, normalMapFront[:,:,1])
    normalMapBack[:,:,2]= np.where(normalMapFront[:,:,2] != normalMapFront[:,:,2], 1/sobelmagnitude, normalMapFront[:,:,2])/normalstretch
    normalmagnitude = np.sqrt(normalMapBack[:,:,0]**2+normalMapBack[:,:,1]**2+normalMapBack[:,:,2]**2)
    normalMapBack[:,:,0] = normalMapBack[:,:,0]/normalmagnitude/2+0.5
    normalMapBack[:,:,1] = -1*normalMapBack[:,:,1]/normalmagnitude/2+0.5
    normalMapBack[:,:,2] = normalMapBack[:,:,2]/normalmagnitude/2+0.5
    normalMap = np.dstack((normalMapBack[:,:,0],normalMapBack[:,:,1],normalMapBack[:,:,2]))
    depthMap = (colorMosaic-np.nanmin(colorMosaic))/(np.nanmax(colorMosaic)-np.nanmin(colorMosaic))
    return depthMap, normalMap
Example #15
def cumsum_nb(a):
    """Cumulative sum."""
    b = np.empty_like(a, dtype=a.dtype)
    for j in range(a.shape[1]):
        b[:, j] = np.nancumsum(a[:, j])
        b[np.isnan(a[:, j]), j] = np.nan
    return b
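A quick sanity check of the per-column NaN handling; plain NumPy is enough to run the function as written:

import numpy as np

a = np.array([[1.0, np.nan],
              [2.0, 3.0],
              [np.nan, 4.0]])
print(cumsum_nb(a))
# [[ 1. nan]
#  [ 3.  3.]
#  [nan  7.]]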
Example #16
def form_PNLF_CDF(data, PNLF, dM, obs_comp, M_5007, m_5007):
    """Take in a formed PNLF, completeness correct it and calculate the Cumulative distribution function of the incompleteness corrected PNLF.

    Parameters
    ----------
    PNLF : [list / array]
        PNLF as calculated from a given dM and c2, over a series of m_5007 and M_5007 values
    data : [list / array]
        PNe magnitudes
    dM : [float]
        distance modulus value for the PNLF
    obs_comp : [list / array]
        observed completeness profile, supplied as a list of ratios across a given m_5007.
    M_5007 : [list / array]
        Absolute magnitude, in [OIII], array (-4.53 to 0.53).
    m_5007 : [list / array]
        Apparent magnitude, in [OIII], array (26.0 to 31.0).

    Returns
    -------
    [list / array]
        Cumulative distribution function of the PNLF provided, at dM
    """    
    sorted_data = np.sort(data)
    PNLF_comp_corr = np.array(np.interp(m_5007, M_5007+dM, PNLF)*obs_comp)
    PNLF_comp_corr[PNLF_comp_corr < 0] = 0.0
    PNLF_CDF = np.array(np.interp(sorted_data, m_5007, np.nancumsum(PNLF_comp_corr)/np.nansum(PNLF_comp_corr)))

    return PNLF_CDF
Example #17
def rolling_sum(x: np.ndarray, n: int) -> np.ndarray:
    """
    Fast running sum for numpy array (matrix) along columns.

    Example:
    >>> rolling_sum(column_vector(np.array([[1,2,3,4,5,6,7,8,9], [11,22,33,44,55,66,77,88,99]]).T), n=5)
    
    array([[  nan,   nan],
       [  nan,   nan],
       [  nan,   nan],
       [  nan,   nan],
       [  15.,  165.],
       [  20.,  220.],
       [  25.,  275.],
       [  30.,  330.],
       [  35.,  385.]])

    :param x: input data
    :param n: rolling window size
    :return: rolling sum for every column preceded by nans
    """
    for i in range(0, x.shape[1]):
        ret = np.nancumsum(x[:, i])
        ret[n:] = ret[n:] - ret[:-n]
        x[:, i] = np.concatenate((nans(n - 1), ret[n - 1:]))
    return x
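The nans helper used above is not defined in this snippet; a minimal stand-in with the behaviour the code relies on (a length-n float vector of NaNs) might look like this. It is an assumption, not the original helper:

import numpy as np

def nans(n: int) -> np.ndarray:
    # Hypothetical stand-in: a length-n float array filled with NaN.
    return np.full(n, np.nan)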
Example #18
def _weighted_quantile(values, quantile, sample_weight):
    """ Very close to numpy.percentile, but supports weights.
    Always overwrite=True, works on arrays with nans.

    # NOTE: this is not exactly the percentile, but close enough.

    this was taken from a stackoverflow post:
    https://stackoverflow.com/questions/21844024/weighted-percentile-using-numpy

    NOTE: quantiles should be in [0, 1]!

    :param values: numpy.array with data
    :param quantile: array-like with many quantiles needed
    :param sample_weight: array-like of the same length as `array`
    :return: numpy.array with computed quantiles.
    """
    logging.info(f'computing weighted quantile: {quantile}')
    sorter = np.argsort(values, axis=0)
    values = numpy.take_along_axis(values, sorter, axis=0)
    sample_weight = numpy.take_along_axis(sample_weight, sorter, axis=0)
    # check for inf weights, and remove
    sample_weight[numpy.isinf(sample_weight)] = 0.0
    weighted_quantiles = np.nancumsum(sample_weight, axis=0) - 0.5 * sample_weight
    weighted_quantiles /= np.nansum(sample_weight, axis=0)
    ind = np.argmin(weighted_quantiles <= quantile, axis=0)
    return np.take_along_axis(values, np.expand_dims(ind, axis=0), axis=0)[0]
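A usage sketch, assuming numpy is imported both as numpy and as np (the function body uses both aliases) and that logging is configured:

import logging
import numpy
import numpy as np

logging.basicConfig(level=logging.INFO)
values = np.array([[1.0], [2.0], [3.0], [10.0]])
weights = np.array([[1.0], [1.0], [1.0], [0.1]])
print(_weighted_quantile(values, 0.5, weights))   # weighted "median"-like value per column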
Example #19
def moving_average_filter(before_filter_data):
    N = 100  # number of points to test on each side of point of interest, best if even
    padded_x = np.insert(
        np.insert(
            np.insert(before_filter_data, len(before_filter_data),
                      np.empty(int(N / 2)) * np.nan), 0,
            np.empty(int(N / 2)) * np.nan), 0, 0)
    # print(padded_x)
    n_nan = np.cumsum(np.isnan(padded_x))
    # print(n_nan)
    cumsum = np.nancumsum(padded_x)
    # print(cumsum)
    window_sum = cumsum[N + 1:] - cumsum[:-(
        N + 1
    )] - before_filter_data  # subtract value of interest from sum of all values within window
    # print(window_sum)
    window_n_nan = n_nan[N +
                         1:] - n_nan[:-(N + 1)] - np.isnan(before_filter_data)
    # print(window_n_nan)
    window_n_values = (N - window_n_nan)
    # print(window_n_values)
    movavg = (window_sum) / (window_n_values)
    # print(movavg)
    # print(len(movavg))

    time = np.arange(0, len(movavg), 1)

    return movavg
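A short usage sketch; the NaN gap inserted below is simply skipped by the window averaging:

import numpy as np

signal = np.sin(np.linspace(0, 6 * np.pi, 500)) + np.random.normal(0, 0.3, 500)
signal[100:110] = np.nan                  # a gap the filter should ignore
smoothed = moving_average_filter(signal)
print(smoothed.shape, np.nanmax(smoothed))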
Example #20
def createSWEProjTrace(i, jDay, lastValue, nanList):
    dailyData = list(i)
    if jDay < 151 and np.isnan(dailyData[151]):
        dailyData[151] = dailyData[150]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        peakSWEday = np.array(dailyData).argmax()
        meltOut = 0
        for index, day in enumerate(dailyData[peakSWEday:]):
            if day < 0.1:
                meltOut = index + peakSWEday
                break
        if meltOut > jDay:
            projection = list(np.nancumsum(np.diff(dailyData[jDay:meltOut])))
            projTrace = nanList + [lastValue] + [
                lastValue +
                x if abs(x) < lastValue or jDay < peakSWEday else np.nan
                for x in projection
            ]
            projTrace[:] = [x if x >= 0 else np.nan for x in projTrace]
            meltList = [projTrace[-1]]
            for t in range(0, 366 - len(projTrace)):
                meltRate = 0.00008 * (len(projTrace) + t)**2 - 0.0159 * (
                    len(projTrace) + t) + 0.7903
                if meltRate < meltList[-1]:
                    meltList.extend([meltList[-1] - meltRate])
                else:
                    meltList.extend([0])
            projTrace.extend(meltList)
        else:
            projTrace = [np.nan] * 366
    return projTrace
Example #21
def GJ_alpha183_totest(self):
    # #############################################################
    # GJ183 MAX(SUMAC(CLOSE-MEAN(CLOSE,24)))-MIN(SUMAC(CLOSE-MEAN(CLOSE,24)))/STD(CLOSE,24)
    # Meaning: the larger the swing of the close price around its mean over the past 24 days, the higher the score
    #
    # Strategy direction: long (buy) strategy
    # Mainly buys: stocks with a larger swing range
    # Mainly sells:
    #---------------------------------------------
    # Evaluation
    # --------------------------------------------
    #
    # Effectiveness trend:
    # by: XX
    # last modify:
    # #############################################################

    DELAY = self.DELAY

    d = 24
    GJ183_closeadj = self.ClosePrice[di - DELAY - (d) + 1:di - DELAY +
                                     1, :] * self.adjfactor[di - DELAY -
                                                            (d) + 1:di -
                                                            DELAY + 1, :]

    GJ183_data = np.nancumsum(
        (GJ183_closeadj - np.nanmean(GJ183_closeadj, axis=0, keepdims=True)),
        axis=0)
    GJ183_std = np.nanstd(GJ183_closeadj, axis=0, keepdims=True)

    GJ183 = (np.nanmax(GJ183_data, axis=0, keepdims=True) -
             np.nanmin(GJ183_data, axis=0, keepdims=True)) / GJ183_std

    alpha = GJ183[0, :] * self.Universe_one.iloc[i, :]
    return alpha
Example #22
def obv_np_1d(close: np.ndarray, volume: np.ndarray) -> np.ndarray:
    v = change_np_1d(close, 1)
    v = np.sign(v)
    v = v * volume
    s = np.nancumsum(v)
    s[np.isnan(v)] = np.nan
    return s
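change_np_1d is not included in this snippet; a plausible stand-in (period-over-period difference, NaN-padded at the start) plus a small usage check is sketched below. The helper is an assumption, not its original definition:

import numpy as np

def change_np_1d(x: np.ndarray, period: int = 1) -> np.ndarray:
    # Hypothetical helper: x[t] - x[t - period], with NaN for the first `period` entries.
    out = np.full(x.shape, np.nan, dtype=float)
    out[period:] = x[period:] - x[:-period]
    return out

close = np.array([10.0, 10.5, 10.2, 10.2, 11.0])
volume = np.array([100.0, 120.0, 90.0, 80.0, 150.0])
print(obv_np_1d(close, volume))   # [ nan 120.  30.  30. 180.]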
Example #23
def Get_ID_BNoutline_Mat(demMat):
    # cellID, bnMat_outline = Get_ID_BNoutline_Mat(demMat)
    Z = demMat * 0 + 1
    Z_flip = np.flipud(Z)
    D1 = np.nancumsum(Z_flip)
    Z_flip1D = np.reshape(Z_flip, np.shape(D1))
    D1[np.isnan(Z_flip1D)] = np.nan
    D1 = np.reshape(D1, np.shape(Z_flip))
    # Series number of valid cells: 0 to number of cells-1
    # from bottom left corner towards right and top
    idMat = np.flipud(D1) - 1
    del Z, Z_flip, D1, Z_flip1D
    D = idMat * 0
    D[np.isnan(idMat)] = -1
    h_hv = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
    D = scipy.signal.convolve2d(D, h_hv, mode='same')
    D[D < 0] = np.nan
    D[0, :] = np.nan
    D[-1, :] = np.nan
    D[:, 0] = np.nan
    D[:, -1] = np.nan
    # boundary cells with valid cell ID are extracted
    bnMat_outline = idMat * 0 - 2  # non-outline cells:-2
    Outline_Cell_index = np.isnan(D) & ~np.isnan(idMat)
    # outline boundary cell
    bnMat_outline[Outline_Cell_index] = 0  # outline cells:0
    return idMat, bnMat_outline
Example #24
def Area_percentile_cheap(x,fx,percentile):
    #Cheap and less precise version of Area_percentile_x without any interpolation
    cy = np.nancumsum(fx)
    cy_norm = cy/cy[-1]
    idx=np.argmin(abs(cy_norm-percentile/100.))

    return x[idx]
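A quick check against a symmetric density, where the 50th-percentile area point should land near the centre of the support:

import numpy as np

x = np.linspace(-5, 5, 1001)
fx = np.exp(-x**2 / 2)                     # unnormalised Gaussian
print(Area_percentile_cheap(x, fx, 50))    # close to 0.0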
Example #25
def test_nancumsum(array):
    a = numpy.array(array)
    ia = inp.array(a)

    result = inp.nancumsum(ia)
    expected = numpy.nancumsum(a)
    numpy.testing.assert_array_equal(expected, result)
Example #26
def test_nancumsum(array):
    np_a = numpy.array(array)
    dpnp_a = dpnp.array(np_a)

    result = dpnp.nancumsum(dpnp_a)
    expected = numpy.nancumsum(np_a)
    numpy.testing.assert_array_equal(expected, result)
Example #27
def trials_until_correct(correct, state, τ=2):
    # mark trial on which state switch occured
    state_switch = np.insert(np.abs(np.diff(state, axis=-1)), 0, 0, axis=-1)

    count = np.zeros(state_switch.shape[0])  # counter trials since the last switch
    allowed = np.ones(state_switch.shape[0])
    cum_corr = np.zeros(correct.shape[:-1])  # counter for correct responses in a sequence
    tuc = np.zeros(correct.shape)  # trials until correct
    for t in range(state_switch.shape[-1]):
        # increase counter if state did not switch, otherwise reset to 1
        count = (count + 1) * (1 - state_switch[..., t]) + state_switch[..., t]

        # if choice was correct increase by 1, otherwise reset to 0
        cum_corr = (cum_corr + 1) * correct[..., t]

        # check if the number of correct choices matches the threshold value
        at_threshold = (cum_corr == τ) * allowed
        allowed = (1 - at_threshold) * allowed * (1 - state_switch[..., t]) + state_switch[..., t]

        # update only valid dimensions for which count is larger than the threshold
        valid = (count >= τ)
        # mark count for valid dimensions (participants) which satisfy the condition
        # all the other elements are set to NaN
        tuc[..., t] = np.where(valid * at_threshold, count, np.nan)

    cum_tuc = np.nancumsum(tuc, axis=-1)
    for i, s in enumerate(state_switch):
        tau = np.diff(np.insert(np.nonzero(s), 0, 0))  # between reversal duration
        d_tuc = np.diff(np.insert(cum_tuc[i, s.astype(bool)], 0, 0))  # between reversal tuc
        # if change in tuc is zero add tau as maximal tuc
        loc = d_tuc == 0  # where tuc did not change
        trials_before_switch = np.arange(tuc.shape[-1])[s.astype(bool)] - 1
        tuc[i, trials_before_switch[loc]] = tau[loc]

    return tuc
Example #28
def nancumsummation(a, window):
    summ = np.empty_like(a)
    summ[:window - 1, :] = np.nan
    summ[window - 1:, :] = np.squeeze(np.nancumsum(rolling_window(
        a, (window, a.shape[1])),
                                                   axis=2),
                                      axis=1)
    return summ
Example #29
    def compute_boat_track(transect):
        """Computes the shiptrack coordinates, along track distance, and distance made
        good for the selected boat reference.

        Parameters
        ----------
        transect: TransectData
            Object of TransectData

        Returns
        -------
            boat_track: dict
                Dictionary containing shiptrack coordinates (track_x_m, track_y_m), along track distance (distance_m),
                and distance made good (dmg_m)
        """

        # Initialize dictionary
        boat_track = {
            'track_x_m': np.nan,
            'track_y_m': np.nan,
            'distance_m': np.nan,
            'dmg_m': np.nan
        }

        # Compute incremental track coordinates
        boat_vel_selected = getattr(transect.boat_vel,
                                    transect.boat_vel.selected)
        if boat_vel_selected is not None:
            track_x = boat_vel_selected.u_processed_mps * transect.date_time.ens_duration_sec
            track_y = boat_vel_selected.v_processed_mps * transect.date_time.ens_duration_sec
        else:
            boat_vel_selected = getattr(transect.boat_vel, 'bt_vel')
            track_x = boat_vel_selected.u_processed_mps * transect.date_time.ens_duration_sec
            track_y = boat_vel_selected.v_processed_mps * transect.date_time.ens_duration_sec

        # Check for any valid data
        idx = np.where(np.logical_not(np.isnan(track_x)))
        if len(idx[0]) > 1:
            # Compute variables
            boat_track['distance_m'] = np.nancumsum(
                np.sqrt(track_x**2 + track_y**2))
            boat_track['track_x_m'] = np.nancumsum(track_x)
            boat_track['track_y_m'] = np.nancumsum(track_y)
            boat_track['dmg_m'] = np.sqrt(boat_track['track_x_m']**2 + boat_track['track_y_m']**2)

        return boat_track
Example #30
 def test_result_values(self):
     for axis in (-2, -1, 0, 1, None):
         tgt = np.cumprod(_ndat_ones, axis=axis)
         res = np.nancumprod(_ndat, axis=axis)
         assert_almost_equal(res, tgt)
         tgt = np.cumsum(_ndat_zeros, axis=axis)
         res = np.nancumsum(_ndat, axis=axis)
         assert_almost_equal(res, tgt)
Example #31
 def test_result_values(self):
     for axis in (-2, -1, 0, 1, None):
         tgt = np.cumprod(_ndat_ones, axis=axis)
         res = np.nancumprod(_ndat, axis=axis)
         assert_almost_equal(res, tgt)
         tgt = np.cumsum(_ndat_zeros, axis=axis)
         res = np.nancumsum(_ndat, axis=axis)
         assert_almost_equal(res, tgt)
Example #32
def moving_average(a, n=3) :
    """
    Moving average of width n (entries) on a numpy array.
    source: http://stackoverflow.com/questions/14313510/moving-average-function-on-numpy-scipy
    """
    ret = np.nancumsum(a, dtype=float)
    ret[n:] = (ret[n:] - ret[:-n]) / n
    ret[:n] = ret[:n] / (np.arange(n) + 1)
    return ret
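A short check of the NaN-skipping behaviour (assuming numpy is imported as np, as in the snippets above):

import numpy as np

prices = np.array([1.0, 2.0, np.nan, 4.0, 5.0, 6.0])
print(moving_average(prices, n=3))   # -> [1., 1.5, 1., 2., 3., 5.]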
Example #33
def baro_stream_old(vel):
    ### This function calculates the barotropic streamfunction for an (nt, nz, ny, nx) array, returning an (nt, ny, nx) array
    ### The function works for the 36 km, 18 km and 9 km grids
    if vel.shape[2] == 192:
        x="/scratch/general/am8e13/results36km"
    elif vel.shape[2] == 384:
        x="/scratch/general/am8e13/results18km"
    elif vel.shape[2] == 768:
        x="/scratch/general/am8e13/results9km"

    os.chdir(x)
    file2read = netcdf.NetCDFFile("grid.nc",'r')
    hfacw = file2read.variables['HFacW']
    hfacw = hfacw[:]*1
    dyg = file2read.variables['dyG']
    dyg = dyg[:]*1
    drf = file2read.variables['drF']
    drf = drf[:]*1
    dydz = np.zeros_like(hfacw)
    psi = np.zeros_like(vel[:,1,:,:])
    # Volume calculation                                                                                                                             
    for i in range(dyg.shape[0]):
        for j in range(dyg.shape[1]):
            for k in range(drf.shape[0]):
                dydz[k,i,j] = drf[k]*dyg[i,j]*hfacw[k,i,j]

    for temp in range(vel.shape[0]):
        utemp = np.zeros_like(hfacw)
        for i in range(dyg.shape[0]):
            for j in range(dyg.shape[1]):
                for k in range(drf.shape[0]):
                    utemp[k,i,j] = dydz[k,i,j]* vel[temp,k,i,j]

        # vertical integration                                                                                                                       
        utemp = np.nansum( utemp, 0 );
        # Integration horizontally                                                                                                                   
        psi[temp,:,:] = np.nancumsum( -utemp, axis = 0 );

    return psi / 10**6
Example #34
def stack_stats(arrs, ax=0, nodata=None):
    """All statistics for arrs
    :
    :  arrs - either a list, tuple of arrays or an array with ndim=3
    :  ax - axis, either, 0 (by band) or (1,2) to get a single value for
    :       each band
    :  nodata - nodata value, numeric or np.nan (will upscale integers)
    """
    arrs = check_stack(arrs)
    a_m = mask_stack(arrs, nodata=nodata)
    nan_sum = np.nansum(a_m, axis=ax)
    nan_min = np.nanmin(a_m, axis=ax)
    nan_mean = np.nanmean(a_m, axis=ax)
    nan_median = np.nanmedian(a_m, axis=ax)
    nan_max = np.nanmax(a_m, axis=ax)
    nan_std = np.nanstd(a_m, axis=ax)
    nan_var = np.nanvar(a_m, axis=ax)
    stats = [nan_sum, nan_min, nan_mean, nan_median, nan_max, nan_std, nan_var]
    if isinstance(ax, int) or len(ax) == 1:
        nan_cumsum = np.nancumsum(a_m, axis=ax)
        stats.append(nan_cumsum)
    return stats
Example #35
def main():
    plt.rcParams["font.weight"] = "bold"
    plt.rcParams["axes.labelweight"] = "bold"
    # Download data
    i1 = '512'
    i2 = 'spmg'
    r = z.download_cc_data(i1, i2, 50, '0 days', '2 days')

    hl, x, y = b.hist_axis(r, None)
    # y = np.abs(y)
    # x = np.abs(x)
    total_kernel = gaussian_kde(y)
    xspace = np.linspace(np.nanmin(x), np.nanmax(x), 3000)
    total_kernel_array = total_kernel.evaluate(xspace)
    bin = hl[12]
    bin_kernel = gaussian_kde(bin['data'])
    bin_kernel_array = bin_kernel.evaluate(xspace)
    d = bin_kernel_array/np.sqrt(total_kernel_array)
    max_ind = np.argmax(bin_kernel_array)
    ind_valid = (d < 2*d[max_ind])
    #popt, pcov = curve_fit(gaussian, xspace[ind_valid], d[ind_valid],
    #                       p0=[np.abs(bin['sliceMed']), np.abs(bin['sliceMed']/5), bin['sliceMed']], maxfev=10000)
    ind_valid = (d < 1e10)
    popt = scipy.optimize.least_squares(gaussian_func,
                                        [np.abs(bin['sliceMed']), np.abs(bin['sliceMed'] / 5),
                                         bin['sliceMed']],
                                        args=(xspace[ind_valid], d[ind_valid]),
                                        jac='3-point', x_scale='jac', loss='soft_l1', f_scale=.1).x

    xs = xspace[ind_valid]
    y1 = total_kernel_array[ind_valid]/np.max(total_kernel_array[ind_valid])
    y2 = bin_kernel_array[ind_valid]/np.max(bin_kernel_array[ind_valid])
    y3 = d[ind_valid]/np.max(d[ind_valid])
    y4 = gaussian(xspace, *popt)[ind_valid]/np.max(gaussian(xspace, *popt)[ind_valid])

    cdf1 = np.nancumsum(bin_kernel_array)*(xspace[-1] - xspace[0])/3000
    new_arr1 = np.full(cdf1.shape, np.nanmax(cdf1) / 2)
    ind1 = np.isclose(new_arr1, cdf1, rtol=.005)
    cdf2 = np.nancumsum(d)*(xspace[-1] - xspace[0])/3000
    half_max = np.nanmax(cdf2)/2
    new_arr2 = np.full(cdf2.shape, np.nanmax(cdf2)/2)
    ind2 = np.isclose(new_arr2, cdf2, rtol=.005)
    print(ind1.sum())
    print(ind2.sum())

    bin_max = np.argmax(y2)
    d_max = np.argmax(y4)

    f1 = plt.figure(100, figsize=(17, 11))
    ax1 = f1.add_subplot(111)
    ax1.plot(xs, y1, label='Total Sun Distribution')
    ax1.plot(xs, y2, label='Bin Distribution')
    ax1.plot(xs, y3, label='bin/sqrt(total)')
    ax1.set(xlabel=r'$\mathrm{{{0}\ Magnetic\ Flux\ Density\ (Mx/cm^2)}}$'.format(i1.upper()),
            ylabel='Probability Density Normalized to Mode')
    f1.suptitle('Flux Density Distributions', y=.92, weight='bold')
    plt.legend(frameon=False, framealpha=0)
    ax1.set_xlim([0, 300])

    f2 = plt.figure(200, figsize=(17, 11))
    ax2 = f2.add_subplot(111)
    ax2.plot(xs, y1, label='Total Sun Distribution', color='blue')
    ax2.plot(xs, y2, label='Bin Distribution', color='orange')
    ax2.plot(xs, y3, label='Divided Distribution', color='green')
    ax2.axvline(x=xspace[ind1], color='orange', ls='-', zorder=1, label='Bin Median')
    ax2.axvline(x=xspace[ind2], color='green', ls='-', zorder=1, label='Divided Distribution Median')
    ax2.axvline(x=bin['sliceMed'], color='.2', ls='--', zorder=1, label='Location of Bin')
    ax2.set(xlabel=r'$\mathrm{{{0}\ Magnetic\ Flux\ Density\ (Mx/cm^2)}}$'.format(i1.upper()),
            ylabel='Probability Density')
    f2.suptitle('Flux Density Distributions With Medians', y=.92, weight='bold')
    ax2.set_xlim([0, 300])

    plt.legend(frameon=False, framealpha=0)
    f1.savefig('flux_density_distributions.pdf', bbox_inches='tight', pad_inches=.1)
    f2.savefig('flux_density_dist_with_gaussians.pdf', bbox_inches='tight', pad_inches=.1)

    plt.show()
Example #36
def cum_sum(a):
    """Cumulative sum"""
    return np.nancumsum(a)
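The wrapper simply defers to NumPy, so NaNs are treated as zero:

import numpy as np

print(cum_sum(np.array([1.0, np.nan, 2.0])))   # [1. 1. 3.]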
Example #37
def stack_cumsum(arrs, nodata=None):
    """see stack_stats"""
    a = check_stack(arrs)
    if nodata is not None:
        a = mask_stack(a, nodata=nodata)
    return np.nancumsum(a, axis=0)
Example #38
def array_nancumsum(arr):
    return np.nancumsum(arr)
Example #39
def main():
    plt.rc('text', usetex=True)
    plt.rcParams["font.weight"] = "bold"
    plt.rcParams["axes.labelweight"] = "bold"

    plt.ion()
    color = (81 / 255, 178 / 255, 76 / 255)

    r_100 = u.download_cc_data('spmg', 'spmg', 100, '23 hours', '25 hours')

    fig = plt.figure(figsize=(19, 19 / 3))
    fig.subplots_adjust(top=0.940, bottom=0.11, left=0.125, right=0.89, hspace=0.2, wspace=0.0)
    ax1 = fig.add_subplot(131)
    ax2 = fig.add_subplot(132)
    ax3 = fig.add_subplot(133)

    ccplot.violin_plot(r_100, None, ax1, clr=color, percentiles=[0, 100], axes_swap=False)
    ccplot.violin_plot(r_100, None, ax2, clr=color, percentiles=[0, 100], axes_swap=True)
    ax1.set_ylabel(r'$\mathrm{{Reference\ Flux\ Density\ (Mx/cm^2)}}$')
    ax2.set_xlabel(r'$\mathrm{{Magnetic\ Flux\ Density\ (Mx/cm^2)}}$')
    ax1.annotate(r'$\mathrm{{+24\ hr}}$', xy=(0.5, 1), xycoords='axes fraction', ha='center', color='black')
    ax2.annotate(r'$\mathrm{{-24\ hr}}$', xy=(0.5, 1), xycoords='axes fraction', ha='center', color='black')
    ax2.tick_params(axis='y', left='off', right='off', labelleft='off', labelright='off')
    ax3.yaxis.tick_right()

    for ax, letter in zip(fig.get_axes(), 'abc'):
        ax.annotate(
            '{0}'.format(letter),
            xy=(0, 1), xycoords='axes fraction',
            xytext=(7, -25), textcoords='offset points',
            ha='left', va='bottom', fontsize=19, color='black', family='serif')

    hl, x, y = ccplot.hist_axis(r_100, None)
    total_kernel = gaussian_kde(y)
    xspace = np.linspace(np.nanmin(x), np.nanmax(x), 3000)
    total_kernel_array = total_kernel.evaluate(xspace)
    bin = hl[12]
    bin_kernel = gaussian_kde(bin['data'])
    bin_kernel_array = bin_kernel.evaluate(xspace)
    d = bin_kernel_array/np.sqrt(total_kernel_array)
    max_ind = np.argmax(bin_kernel_array)
    ind_valid = (d < 2*d[max_ind])
    popt = scipy.optimize.least_squares(ccplot.gaussian_func,
                                        [np.abs(bin['sliceMed']), np.abs(bin['sliceMed'] / 5),
                                         bin['sliceMed']],
                                        args=(xspace[ind_valid], d[ind_valid]),
                                        jac='3-point', x_scale='jac', loss='soft_l1', f_scale=.1).x

    xs = xspace[ind_valid]
    y1 = total_kernel_array[ind_valid]/np.max(total_kernel_array[ind_valid])
    y2 = bin_kernel_array[ind_valid]/np.max(bin_kernel_array[ind_valid])
    y3 = d[ind_valid]/np.max(d[ind_valid])
    y4 = gaussian(xspace, *popt)[ind_valid]/np.max(gaussian(xspace, *popt)[ind_valid])

    cdf1 = np.nancumsum(bin_kernel_array)*(xspace[-1] - xspace[0])/3000
    ind1 = np.argmin(cdf1 - np.nanmax(cdf1))
    cdf2 = np.nancumsum(d)*(xspace[-1] - xspace[0])/3000
    ind2 = np.argmin(cdf2 - np.nanmax(cdf2))

    ax3.plot(xs, y1, label='Total Sun Distribution', color='blue')
    ax3.plot(xs, y2, label='Bin Distribution', color='orange')
    ax3.plot(xs, y3, label='Divided Distribution', color='green')
    ax3.axvline(x=xspace[ind1], color='orange', ls='-', zorder=1, label='Bin Median')
    ax3.axvline(x=xspace[ind2], color='green', ls='-', zorder=1, label='Divided Distribution Median')
    ax3.axvline(x=bin['sliceMed'], color='.2', ls='--', zorder=1, label='Location of Bin')
    ax3.set_ylabel(r'$\mathrm{{Probability\ Density}}$', rotation=270, labelpad=20)
    ax3.yaxis.set_label_position('right')
    ax3.set_xlim([0, 300])
Example #40
 def time_nancumsum(self, array_size, percent_nans):
     np.nancumsum(self.arr)
Example #41
 def test_nancumsum(self):
     tgt = np.cumsum(self.mat)
     for mat in self.integer_arrays():
         assert_equal(np.nancumsum(mat), tgt)
Example #42
 "expovariate": random.expovariate,
 "gammavariate": random.gammavariate,
 "betavariate": random.betavariate,
 "lognormvariate": random.lognormvariate,
 "paretovariate": random.paretovariate,
 "vonmisesvariate": random.vonmisesvariate,
 "weibullvariate": random.weibullvariate,
 "triangular": random.triangular,
 "uniform": random.uniform,
 "nanmean": lambda *args: np.nanmean(args),
 "nanmin": lambda *args: np.nanmin(args),
 "nanmax": lambda *args: np.nanmax(args),
 "nansum": lambda *args: np.nansum(args),
 "nanstd": lambda *args: np.nanstd(args),
 "nanmedian": lambda *args: np.nanmedian(args),
 "nancumsum": lambda *args: np.nancumsum(args),
 "nancumprod": lambda *args: np.nancumprod(args),
 "nanargmax": lambda *args: np.nanargmax(args),
 "nanargmin": lambda *args: np.nanargmin(args),
 "nanvar": lambda *args: np.nanvar(args),
 "mean": lambda *args: np.mean(args),
 "min": lambda *args: np.min(args),
 "max": lambda *args: np.max(args),
 "sum": lambda *args: np.sum(args),
 "std": lambda *args: np.std(args),
 "median": lambda *args: np.median(args),
 "cumsum": lambda *args: np.cumsum(args),
 "cumprod": lambda *args: np.cumprod(args),
 "argmax": lambda *args: np.argmax(args),
 "argmin": lambda *args: np.argmin(args),
 "var": lambda *args: np.var(args)})