Example #1
def main(fund_tx_file, comparision_symbol):

	fund_txn = pd.read_csv(fund_tx_file, parse_dates=[[0, 1, 2]], header=None, index_col=[0])
	fund_txn.sort_index(inplace=True)
	sorted_dates = fund_txn.index
	start_date = sorted_dates[0]
	end_date = sorted_dates[-1] + dt.timedelta(days=1)

	total_daily_rets = fund_txn.iloc[:, -1].astype('f4')
	# print total_daily_rets
	daily_ret = tsu.returnize0(total_daily_rets.copy())
	avg_daily_ret = np.mean(daily_ret)
	std_dev = np.std(daily_ret)
	sharpe = np.sqrt(252) * avg_daily_ret/std_dev
	cum_ret = total_daily_rets[-1]/total_daily_rets[0]

	comp_sym_vol, comp_sym_daily_ret, comp_sym_sharpe, comp_sym_cum_ret = optimizer.simulate(
		start_date, end_date, [comparision_symbol], [1.0])

	print("Details of the Performance of the portfolio :")
	print("Data Range : {} to {}").format(str(start_date + dt.timedelta(hours=16)), 
		str(end_date + dt.timedelta(hours=16)))

	print("Sharpe Ratio of Fund : {}").format(sharpe)
	print("Sharpe Ratio of {} : {}").format(comparision_symbol,comp_sym_sharpe)

	print("Total Return of Fund : {}").format(cum_ret)
	print("Total Return of {} : {}").format(comparision_symbol, comp_sym_cum_ret)

	print("Standard Deviation of Fund : {}").format(std_dev)
	print("Standard Deviation of {} : {}").format(comparision_symbol, comp_sym_vol)

	print("Average Daily Return of Fund : {}").format(avg_daily_ret)
	print("Average Daily Return of {} : {}").format(comparision_symbol, comp_sym_daily_ret)

	# Plot Fund vs comparing symbol
	plt.clf()
	fig = plt.figure(1)
	ax = plt.subplot(111)
	daily_ret_cummulative = np.cumprod(daily_ret + 1, axis=0)

	# Calculate daily returns for comparing symbol
	ldt_timestamps, na_price = optimizer.get_close_price_for_symbols(start_date, 
		end_date, [comparision_symbol])
	na_normalized_price = na_price / na_price[0, :]
	all_sum_daily = np.sum(na_normalized_price, 1)
	comp_sym_daily_ret = tsu.returnize0(all_sum_daily.copy())
	comp_sym_cummulative = np.cumprod(comp_sym_daily_ret + 1, axis=0)

	plt.plot(sorted_dates, daily_ret_cummulative, label='Fund', alpha=0.4)
	plt.plot(sorted_dates, comp_sym_cummulative, label=comparision_symbol)
	plt.ylabel('Cumulative Returns')
	plt.xlabel('Date')
	fig.autofmt_xdate(rotation=45)
	
	box = ax.get_position()
	ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
	# Put a legend to the right of the current axis
	ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
	plt.show()
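A quick, hedged sanity check of the Sharpe-ratio and cumulative-return formulas used above, with made-up daily returns rather than data from the transactions file:

import numpy as np

daily_ret = np.array([0.001, -0.002, 0.003])                  # illustrative daily returns
sharpe = np.sqrt(252) * np.mean(daily_ret) / np.std(daily_ret)
cum_ret = np.cumprod(1 + daily_ret)[-1]                       # total growth factor over the period
print(sharpe, cum_ret)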
Example #2
def panda_index(labels, names=None, dtype='|S10'):
    """
    Create a pandas.MultiIndex with row names contained in the nested 
    list `labels` and column names contained in the optional list 
    `names`.
    
    Args:
        labels: nested list of strings
        names: list of strings
    
    Example usage:
        >>> labels = [['wine','water','beer'], ['0.2','0.5'], ['to go','for here']]
        >>> names = ['beverage','size','order']
        >>> index = panda_index(labels, names)
        >>> index
        
    """
    if names is None:
        names = ['axis{0}'.format(i) for i in range(len(labels))]
    else:
        assert len(labels)==len(names)
    sh = list_shape(labels)
    n_axes = len(labels)
    n_total = np.prod(sh)
    ctile = np.concatenate( ([1],np.cumprod(sh)[:-1]) )
    crep = np.concatenate( (np.cumprod(sh[::-1])[:-1][::-1],[1]) )
    replabels = np.empty((n_axes,n_total), dtype=dtype)
    for i,l in enumerate(labels):
        replabels[i] = np.tile( np.repeat(l,crep[i]), ctile[i] )
    tuples = zip(*replabels)
    return pd.MultiIndex.from_tuples(tuples, names=names)
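A minimal, self-contained sketch of the repeat/tile pattern used above; it assumes only numpy and pandas (the original's list_shape helper is replaced by plain len()):

import numpy as np
import pandas as pd

labels = [['wine', 'water', 'beer'], ['0.2', '0.5'], ['to go', 'for here']]
names = ['beverage', 'size', 'order']
sh = [len(l) for l in labels]
crep = np.concatenate((np.cumprod(sh[::-1])[:-1][::-1], [1]))   # repeats within a block
ctile = np.concatenate(([1], np.cumprod(sh)[:-1]))              # block repetitions
cols = [np.tile(np.repeat(l, crep[i]), ctile[i]) for i, l in enumerate(labels)]
index = pd.MultiIndex.from_tuples(list(zip(*cols)), names=names)
print(index)   # 3 * 2 * 2 = 12 label combinations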
Example #3
def tiCarryPlot(getTIresult):
    conCarry = getTIresult.iloc[:, 0] * getTIresult.iloc[:, 1]
    disCarry = getTIresult.iloc[:, 0] * getTIresult.iloc[:, 2]
    cumBetas = np.cumprod(getTIresult.iloc[:, 0]/100 + 1) - 1
    cumConBetas = np.cumprod(conCarry/100 + 1) - 1
    cumDisBetas = np.cumprod(disCarry/100 + 1) - 1

    fig = plt.figure()

    ax1 = fig.add_subplot(311)
    ax1.set_title('Cumulative Betas')
    cumBetas.plot(style='r',label='Original Beta')
    cumConBetas.plot(style='b', label='Discrete Weights')
    cumDisBetas.plot(style='g', label='Digital Weights')
    plt.legend(loc=2)

    ax2 = fig.add_subplot(312)
    ax2.set_title('Discrete Weights')
    getTIresult.iloc[:, 1].plot(style='b')
    plt.ylim([0, 1.2])
    plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2])

    ax3 = fig.add_subplot(313)
    ax3.set_title('Digital Weights')
    getTIresult.iloc[:, 2].plot(style='g')
    plt.ylim([-0.1, 1.1])
    plt.yticks([-0.1, 0.1, 0.3, 0.5, 0.7, 0.9, 1.1])

    fig.tight_layout()
    plt.show()
Example #4

    def plot2(indx_Exposure, indx_MarketRet):
        plt.clf()

        MarketReturns = plt.subplot2grid((8,8), (0,0), colspan=6, rowspan=8)
        t = np.array(DATEord)

        if indx_Exposure == 2:
            mRet = np.cumprod(1-marketRet[indx_MarketRet + cashOffset])
        else:
            mRet = np.cumprod(1+marketRet[indx_MarketRet + cashOffset])

        MarketReturns.plot(t, mRet, 'b',linewidth=0.5)
        statistics=stats(mRet)
        MarketReturns.set_ylabel('Market Returns')

        statsStr="Sharpe Ratio = {sharpe:.4f}\nSortino Ratio = {sortino:.4f}\n\nPerformance (%/yr) = {returnYearly:.4f}\nVolatility (%/yr)       = {volaYearly:.4f}\n\nMax Drawdown = {maxDD:.4f}\nMAR Ratio         = {mar:.4f}\n\n Max Time off peak =  {maxTimeOffPeak}\n\n\n\n\n\n".format(**statistics)

        MarketReturns.autoscale(tight=True)
        MarketReturns.set_title('Market Returns of %s' %mRetMarkets[indx_MarketRet])
        MarketReturns.set_xlabel('Date')

        # Performance Numbers Textbox
        f.text(.72,.58,statsStr)

        plt.gcf().canvas.draw()
Example #5
    def _update_attributes(self):
        getitem_tuple = ()
        values = []
        self.signal_axes = ()
        self.navigation_axes = ()
        for axis in self._axes:
            # Until we find a better place, take ownership of the axes
            # here to avoid hard-to-debug issues.
            axis.axes_manager = self
            if axis.slice is None:
                getitem_tuple += (axis.index,)
                values.append(axis.value)
                self.navigation_axes += (axis,)
            else:
                getitem_tuple += (axis.slice,)
                self.signal_axes += (axis,)

        self.signal_axes = self.signal_axes[::-1]
        self.navigation_axes = self.navigation_axes[::-1]
        self._getitem_tuple = getitem_tuple
        self.signal_dimension = len(self.signal_axes)
        self.navigation_dimension = len(self.navigation_axes)
        if self.navigation_dimension != 0:
            self.navigation_shape = tuple([axis.size for axis in self.navigation_axes])
        else:
            self.navigation_shape = ()

        if self.signal_dimension != 0:
            self.signal_shape = tuple([axis.size for axis in self.signal_axes])
        else:
            self.signal_shape = ()
        self.navigation_size = np.cumprod(self.navigation_shape)[-1] if self.navigation_shape else 0
        self.signal_size = np.cumprod(self.signal_shape)[-1] if self.signal_shape else 0
        self._update_max_index()
Example #6
def _indtosub_converter(dims, order='F', onebased=True):
    """Converter for changing linear indexing to subscript indexing

    See also
    --------
    Series.indtosub
    """

    _check_order(order)

    def indtosub_inline_onebased(k, dimprod):
        return tuple(int(mod(ceil(float(k) / y) - 1, x) + 1) for x, y in dimprod)

    def indtosub_inline_zerobased(k, dimprod):
        return tuple(int(mod(ceil(float(k + 1) / y) - 1, x)) for x, y in dimprod)

    inline_fcn = indtosub_inline_onebased if onebased else indtosub_inline_zerobased

    if size(dims) > 1:
        if order == 'F':
            dimprod = list(zip(dims, append(1, cumprod(dims)[0:-1])))
        else:
            dimprod = list(zip(dims, append(1, cumprod(dims[::-1])[0:-1])[::-1]))
        converter = lambda k: inline_fcn(k, dimprod)
    else:
        converter = lambda k: (k,)

    return converter
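The same idea can be sketched with plain numpy in Python 3 (zero-based, Fortran order); this is only an illustration of the dimprod construction above, not part of the original module:

import numpy as np

dims = (2, 3)
dimprod = list(zip(dims, np.append(1, np.cumprod(dims)[:-1])))
k = 4
sub = tuple(int(np.mod(np.ceil(float(k + 1) / y) - 1, x)) for x, y in dimprod)
print(sub)   # (0, 2), matching np.unravel_index(4, dims, order='F')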
Example #7

def build_reflector(dataset, **kwargs):
        '''
        builds reflector
        '''
        
        #some shortcuts
        vp = kwargs['model']['vp']
        rho = kwargs['model']['rho']
        R = kwargs['model']['R']
        sz = kwargs['sz']
        gz = kwargs['gz']
        sx = kwargs['sx']
        gx = kwargs['gx']
        
        numpoints = 100 #used for interpolating through the model
        for g in  gx:
                cmpx = np.floor((g + sx)/2.).astype(np.int) # nearest midpoint
                h = cmpx - sx #half offset
                #the next line extracts the non-zero reflection points at this midpoint
                rp = np.nonzero(R[cmpx,:])[0]
                #and iterates over them
                for cmpz in (rp):
                        #~ print cmpx, cmpz
                        ds = np.sqrt(cmpz**2 + (h)**2)/float(numpoints) # line step distance
                        #predefine outputs
                        amp = 1.0
                        time = 0.0

                        #traveltime from source to cdp
                        vp_down = toolbox.find_points(sx, sz, cmpx, cmpz, numpoints, vp)
                        time += 0

                        #traveltime from cdp to geophone
                        vp_up = toolbox.find_points(cmpx, cmpz, g, gz, numpoints, vp)
                        time += 0

                        #loss due to spherical divergence
                        amp *= 0

                        #transmission losses from source to cdp
                        rho_down = toolbox.find_points(sx, sz, cmpx, cmpz, numpoints, rho)
                        z0s = rho_down * vp_down
                        z1s = toolbox.roll(z0s, 1)
                        correction = np.cumprod(transmission_coefficient(z0s, z1s))[-1]
                        amp *= correction
                        #amplitude loss at reflection point
                        correction = R[cmpx,cmpz]
                        amp *= correction
                        #transmission loss from cdp to geophone
                        rho_up = toolbox.find_points(cmpx, cmpz, g, gz, numpoints, rho)
                        z0s = rho_up * vp_up
                        z1s = toolbox.roll(z0s, 1)
                        correction = np.cumprod(transmission_coefficient(z0s, z1s))[-1]
                        amp *= correction

                        #calculate coordinates
                        
                        #write out data
                        
        return 
Example #8
 def update_attributes(self):
     getitem_tuple = []
     values = []
     self.signal_axes = []
     self.navigation_axes = []
     for axis in self.axes:
         if axis.slice is None:
             getitem_tuple.append(axis.index)
             values.append(axis.value)
             self.navigation_axes.append(axis)
         else:
             getitem_tuple.append(axis.slice)
             self.signal_axes.append(axis)
             
     self._getitem_tuple = getitem_tuple
     self.signal_dimension = len(self.signal_axes)
     self.navigation_dimension = len(self.navigation_axes)
     if self.navigation_dimension != 0:
         self.navigation_shape = [
             axis.size for axis in self.navigation_axes]
     else:
         self.navigation_shape = [0,]
         
     if self.signal_dimension != 0:
         self.signal_shape = [
             axis.size for axis in self.signal_axes]
     else:
         self.signal_shape = [0,]
     self.navigation_size = \
         np.cumprod(self.navigation_shape)[-1]
     self.signal_size = \
         np.cumprod(self.signal_shape)[-1]
     self._update_max_index()
Example #9
def cartesian(nodes, order='C'):
    '''Cartesian product of a list of arrays

    Parameters:
    -----------
    nodes: (list of 1d-arrays)
    order: ('C' or 'F') order in which the product is enumerated

    Returns:
    --------
    out: (2d-array) each line corresponds to one point of the product space
    '''

    nodes = [numpy.array(e) for e in nodes]
    shapes = [e.shape[0] for e in nodes]

    n = len(nodes)
    l = numpy.prod(shapes)
    out = numpy.zeros((l, n))


    if order == 'C':
        repetitions = numpy.cumprod([1] + shapes[:-1])
    else:
        shapes.reverse()
        sh = [1] + shapes[:-1]
        repetitions = numpy.cumprod(sh)
        repetitions = repetitions.tolist()
        repetitions.reverse()

    for i in range(n):
        _repeat_1d(nodes[i], repetitions[i], out[:,i])

    return out
Example #10
def p2_strat():
    data = util.load_insample()
    SO = util.extract_SO(data)
    SC = util.extract_SC(data)
    SH = util.extract_SH(data)
    SL = util.extract_SL(data)
    TVL = util.extract_TVL(data)

    RCC, avrRCC = p1_strat()
    # 1...T
    RCO = SO[1:] / SC[:-1] - 1
    cumRCO = np.cumprod(1 + np.mean(RCO, axis=1))
    powers = 1. / np.asarray(1 + np.arange(RCO.shape[0]))
    avrRCO = np.power(cumRCO, powers) - 1
    # 0...T
    ROC = SC / SO - 1
    cumROC = np.cumprod(1 + np.mean(ROC, axis=1))
    powers = 1. / np.asarray(1 + np.arange(ROC.shape[0]))
    avrROC = np.power(cumROC, powers) - 1
    # 1...T
    ROO = SO[1:] / SO[:-1] - 1
    cumROO = np.cumprod(1 + np.mean(ROO, axis=1))
    powers = 1. / np.asarray(1 + np.arange(ROO.shape[0]))
    avrROO = np.power(cumROO, powers) - 1
    # 0...T
    RVP = (1 / (4 * np.log(2))) * (np.log(SH[1:]) - np.log(SL[1:]))**2
    
    avrTVL = np.zeros(TVL.shape)
    avrRVP = np.zeros(RVP.shape)
    powers = 1. / np.asarray(1 + np.arange(TVL.shape[0]))
    avrTVL[:200, :] = np.cumsum(TVL[:200, :], axis=0)
    avrRVP[:200, :] = np.cumsum(RVP[:200, :], axis=0)
    #avrRVP[:200, :] = np.cumprod(RVP[:200, :], axis=0)
   
    print(TVL.shape, RVP.shape)
    for i in np.arange(200, TVL.shape[0]):
        avrTVL[i, :] = (avrTVL[i-1, :] - TVL[i-200, :] + TVL[i, :])
        if i < RVP.shape[0]:
            avrRVP[i, :] = (avrRVP[i-1, :] - RVP[i-200, :] + RVP[i, :])
        #avrRVP[i, :] = np.multiply(np.divide(avrRVP[i-1, :], RVP[i-200, :]), RVP[i, :])
    powers[200:] = 1. / 200
    #avrTVL = np.power(avrTVL, powers[:, None])
    avrTVL = np.multiply(avrTVL, powers[:, None])
    avrRVP = np.multiply(avrRVP, powers[:-1, None])
    #avrRVP = np.power(avrRVP, powers[:, None])


    #print 'RCO ', RCO[:10, :10]
    #print 'avrRCO ', avrRCO[:10]
    #print 'ROC ', ROC[:10, :10]
    #print 'avrROC ', avrROC[:10]
    #print 'ROO ', ROO[:10, :10]
    #print 'avrROO ', avrROO[:10]

    print('RVP ', RVP[:10, :10])
    #print('avrTVL ', avrTVL[:10, :10])
    print('avrRVP ', avrRVP[:10, :10])

    return RCO, avrRCO, ROC, avrROC, ROO, avrROO, TVL, avrTVL, RVP, avrRVP
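The cumprod/np.power pattern used repeatedly above is a running geometric average of (1 + r); a small hedged check with three made-up returns:

import numpy as np

r = np.array([0.10, -0.05, 0.02])
cum = np.cumprod(1 + r)                            # cumulative growth factors
geo = np.power(cum, 1. / (1 + np.arange(3))) - 1   # geometric mean return up to each step
print(geo)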
Example #11
 def f(self,X):
     X = reshape(X,self.input_dim)
     n = X.shape[0]
     fval = np.cumprod(np.sqrt(X),axis=1)[:,self.input_dim-1]*np.cumprod(np.sin(X),axis=1)[:,self.input_dim-1]  
     if self.sd ==0:
         noise = np.zeros(n).reshape(n,1)
     else:
         noise = np.random.normal(0,self.sd,n).reshape(n,1)
     return -fval.reshape(n,1) + noise
Example #12
def plot_backtest(config, algos, labels=None):
    """
    @:param config: config dictionary
    @:param algos: list of strings representing the name of algorithms or index of pgportfolio result
    @:param labels: optional list of legend labels for the algorithms
    """
    results = []
    for i, algo in enumerate(algos):
        if algo.isdigit():
            results.append(np.cumprod(_load_from_summary(algo, config)))
            logging.info("load index "+algo+" from csv file")
        else:
            logging.info("start executing "+algo)
            results.append(np.cumprod(execute_backtest(algo, config)))
            logging.info("finish executing "+algo)

    start, end = _extract_test(config)
    timestamps = np.linspace(start, end, len(results[0]))
    dates = [datetime.datetime.fromtimestamp(int(ts)-int(ts)%config["input"]["global_period"])
             for ts in timestamps]

    weeks = mdates.WeekdayLocator()
    days = mdates.DayLocator()

    rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"],
                  "size": 8})

    """
    styles = [("-", None), ("--", None), ("", "+"), (":", None),
              ("", "o"), ("", "v"), ("", "*")]
    """
    fig, ax = plt.subplots()
    fig.set_size_inches(9, 5)
    for i, pvs in enumerate(results):
        if labels is not None and len(labels) > i:
            label = labels[i]
        else:
            label = NAMES[algos[i]]
        ax.semilogy(dates, pvs, linewidth=1, label=label)
        #ax.plot(dates, pvs, linewidth=1, label=label)

    plt.ylabel("portfolio value $p_t/p_0$", fontsize=12)
    plt.xlabel("time", fontsize=12)
    xfmt = mdates.DateFormatter("%m-%d %H:%M")
    ax.xaxis.set_major_locator(weeks)
    ax.xaxis.set_minor_locator(days)
    datemin = dates[0]
    datemax = dates[-1]
    ax.set_xlim(datemin, datemax)

    ax.xaxis.set_major_formatter(xfmt)
    plt.grid(True)
    plt.tight_layout()
    ax.legend(loc="upper left", prop={"size":10})
    fig.autofmt_xdate()
    plt.savefig("result.eps", bbox_inches='tight',
                pad_inches=0)
    plt.show()
Example #13
 def eqn8(N, B):
     n = np.arange(N + 1, dtype=np.float64)
     # Create an array containing the factorials. scipy.special.factorial
     # requires SciPy 0.14 (#5064), therefore this is calculated using
     # numpy.cumprod. This could be replaced by factorial again once older
     # SciPy versions are no longer supported, but the cumprod alternative
     # might also be a bit faster.
     factorial_n = np.ones(n.shape, dtype=np.float64)
     np.cumprod(n[1:], out=factorial_n[1:])
     return 1. / (exp(-B) * np.sum(np.power(B, n) / factorial_n))
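A standalone check of the factorial-via-cumprod trick used in eqn8 (not part of the original test code):

import numpy as np

n = np.arange(6, dtype=np.float64)          # 0, 1, ..., 5
factorial_n = np.ones_like(n)
np.cumprod(n[1:], out=factorial_n[1:])      # 1!, 2!, ..., 5!; 0! stays 1
print(factorial_n)                          # [  1.   1.   2.   6.  24. 120.]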
Example #14
    def test_CumprodOp(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        f = theano.function([x], cumprod(x))
        assert np.allclose(np.cumprod(a), f(a))  # Test axis=None

        for axis in range(len(a.shape)):
            f = theano.function([x], cumprod(x, axis=axis))
            assert np.allclose(np.cumprod(a, axis=axis), f(a))
Example #15

def _generate_mult_process(X, mat, inits):
    """
    Return the array `M` given by `M[t+1]/M[t] = mat[X[t], X[t+1]]`
    with `M[0] = inits[X[0]]`.

    """
    M = np.empty_like(X, dtype=float)
    M[..., 0] = inits[X[..., 0]]
    M[..., 1:] = mat[X[..., :-1], X[..., 1:]]
    np.cumprod(M, axis=-1, out=M)
    return M
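A hedged usage sketch for _generate_mult_process; mat, inits and X below are made-up values for a two-state chain, not from the original source:

import numpy as np

mat = np.array([[1.0, 2.0], [0.5, 1.5]])
inits = np.array([1.0, 3.0])
X = np.array([0, 1, 1, 0])
print(_generate_mult_process(X, mat, inits))   # [1.  2.  3.  1.5]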
Example #16
    def test_cumprod(self):

        q1 = np.array([1, 2, 6]) * u.m
        with pytest.raises(u.UnitsError) as exc:
            q1.cumprod()
        with pytest.raises(u.UnitsError) as exc:
            np.cumprod(q1)

        q2 = np.array([3, 4, 5]) * u.Unit(1)
        assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1))
        assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1))
Example #17
def get_n_params(module):
    n_param_learnable = 0
    n_param_frozen = 0

    for param in module.parameters():
        if param.requires_grad:
            n_param_learnable += np.cumprod(param.data.size())[-1]
        else:
            n_param_frozen += np.cumprod(param.data.size())[-1]

    n_param_all = n_param_learnable + n_param_frozen
    return readable_size(n_param_all), readable_size(n_param_learnable)
Example #18
 def __init__(self, arg, shapein=None, shapeout=None, dtype=None,
              **keywords):
     if sp.issparse(arg):
         self.__class__ = pyoperators.linear.SparseOperator
         self.__init__(arg, dtype=None, **keywords)
         return
     if not isinstance(arg, (FSCMatrix, FSRMatrix,
                             FSCBlockMatrix, FSRBlockMatrix,
                             FSCRotation2dMatrix, FSRRotation2dMatrix,
                             FSCRotation3dMatrix, FSRRotation3dMatrix)):
         raise TypeError('The input sparse matrix type is not recognised.')
     if isinstance(arg, (FSCMatrix, FSRMatrix)):
         if shapein is None:
             bshapein = keywords.pop('broadcastable_shapein',
                                     (arg.shape[1],))
         else:
             shapein = tointtuple(shapein)
             test = np.cumprod(shapein) == arg.shape[1]
             try:
                 bshapein = shapein[:ilast(test, lambda x: x) + 1]
             except ValueError:
                 bshapein = (arg.shape[1],)
         self.broadcastable_shapein = bshapein
         if shapeout is None:
             bshapeout = keywords.pop('broadcastable_shapeout',
                                      (arg.shape[0],))
         else:
             shapeout = tointtuple(shapeout)
             test = np.cumprod(shapeout) == arg.shape[0]
             try:
                 bshapeout = shapeout[:ilast(test, lambda x: x) + 1]
             except ValueError:
                 bshapeout = (arg.shape[0],)
         self.broadcastable_shapeout = bshapeout
     else:
         bs = arg.block_shape
         if shapein is None:
             if bs[1] == 1:
                 shapein = arg.shape[1]
             else:
                 shapein = arg.shape[1] // bs[1], bs[1]
         if shapeout is None:
             if bs[0] == 1:
                 shapeout = arg.shape[0]
             else:
                 shapeout = arg.shape[0] // bs[0], bs[0]
     pyoperators.linear.SparseBase.__init__(
         self, arg, dtype=dtype, shapein=shapein, shapeout=shapeout,
         **keywords)
     self.set_rule('T', self._rule_transpose)
     self.set_rule(('T', '.'), self._rule_pTp, CompositionOperator)
Example #19
    def test_cumprod(self):

        q1 = np.array([1, 2, 6]) * u.m
        with pytest.raises(ValueError) as exc:
            q1.cumprod()
        assert 'cannot use cumprod' in exc.value.args[0]

        with pytest.raises(ValueError) as exc:
            np.cumprod(q1)
        assert 'cannot use cumprod' in exc.value.args[0]

        q2 = np.array([3, 4, 5]) * u.Unit(1)
        assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1))
        assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1))
Example #20
    def test_CumprodOp(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis out of bounds
        self.assertRaises(ValueError, cumprod, x, axis=3)
        self.assertRaises(ValueError, cumprod, x, axis=-4)

        f = theano.function([x], cumprod(x))
        assert np.allclose(np.cumprod(a), f(a))  # Test axis=None

        for axis in range(-len(a.shape), len(a.shape)):
            f = theano.function([x], cumprod(x, axis=axis))
            assert np.allclose(np.cumprod(a, axis=axis), f(a))
Example #21
def get_gauss_means_vec(u1, v1, I):
    n = len(I)
    aux = np.repeat(u1[:,np.newaxis],n, axis=1)

    #calculate x*\exp{-\int_s^t g(v)dv}
    aux[np.tri(n, n, 0)==0] = 1 
    mu1 = np.cumprod(aux, 0)

    aux[np.tri(n, n, -1)==0] = 1 
    y = np.cumprod(aux,0)
    y[np.tri(n, n, 0)==0] = 0
    y = v1*I*y
    mu2 = np.cumsum(y[:,::-1], 1)[:, ::-1]
    return mu1, mu2
Example #22
def simulate(na_rets,portfolio):    
    na_portrets = np.sum(na_rets * portfolio, axis=1)
    na_port_total = np.cumprod(na_portrets + 1)
    na_component_total = np.cumprod(na_rets + 1, axis=0)
    
    days_in_year = len(na_portrets)
    # days_in_year = 250
    
    pf_adr = sum(na_portrets)/days_in_year
    pf_vol = np.std(na_portrets)
    pf_sr = pf_adr/pf_vol * math.sqrt(days_in_year)
    pf_cr = na_port_total[-1]
    
    return pf_adr,pf_vol,pf_sr,pf_cr
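A hypothetical call showing the expected shapes (na_rets is days x assets of simple returns, portfolio is the weight vector); the numbers are invented, and math is assumed to be imported as in the original module:

import numpy as np

na_rets = np.array([[0.01, 0.00], [-0.02, 0.01], [0.03, -0.01]])
portfolio = np.array([0.6, 0.4])
pf_adr, pf_vol, pf_sr, pf_cr = simulate(na_rets, portfolio)
print(pf_adr, pf_vol, pf_sr, pf_cr)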
Example #23
def assignment_to_indices(A, card):
    """
    :param - A: an assignment
    :param list card: a list of the cardinalities of the variables in the assignment
    """
    A = np.array(A, copy=False)
    card = np.array(card, copy=False)
    C = card.flatten()
    if np.any(np.shape(A) == 1):
        I = np.cumprod(np.concatenate(([1.0], C[:0:-1]))) * (A.T).flatten()
    else:
        B = A[:,::-1]
        I = np.sum(np.tile(np.cumprod(np.concatenate(([1.0], C[:0:-1]))), \
                (B.shape[0], 1)) * B, axis=1)
    return np.array(I, dtype='int32')
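A hedged example: with cardinalities [2, 3], the assignment [1, 2] maps to linear index 1*3 + 2 = 5 and [0, 0] maps to 0:

print(assignment_to_indices([[0, 0], [1, 2]], [2, 3]))   # [0 5]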
Example #24
    def _call_impl(self, t):
        x = (t - self.t_old) / self.h
        if t.ndim == 0:
            p = np.tile(x, self.order + 1)
            p = np.cumprod(p)
        else:
            p = np.tile(x, (self.order + 1, 1))
            p = np.cumprod(p, axis=0)
        y = self.h * np.dot(self.Q, p)
        if y.ndim == 2:
            y += self.y_old[:, None]
        else:
            y += self.y_old

        return y
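The tile/cumprod step above simply builds the powers x, x**2, ... used to evaluate the interpolating polynomial; a tiny standalone illustration:

import numpy as np

x = 0.5
print(np.cumprod(np.tile(x, 4)))   # [0.5    0.25   0.125  0.0625]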
Example #25
def _comp_sum_eeg(beta, ctheta, lut_fun, n_fact):
    """Lead field dot products using Legendre polynomial (P_n) series."""
    # Compute the sum occurring in the evaluation.
    # The result is
    #   sums[:]    (2n+1)^2/n beta^n P_n
    n_chunk = 50000000 // (8 * max(n_fact.shape) * 2)
    lims = np.concatenate([np.arange(0, beta.size, n_chunk), [beta.size]])
    s0 = np.empty(beta.shape)
    for start, stop in zip(lims[:-1], lims[1:]):
        coeffs = lut_fun(ctheta[start:stop])
        betans = np.tile(beta[start:stop][:, np.newaxis], (1, n_fact.shape[0]))
        np.cumprod(betans, axis=1, out=betans)  # run inplace
        coeffs *= betans
        s0[start:stop] = np.dot(coeffs, n_fact)  # == weighted sum across cols
    return s0
Example #26
def conv_to_price(x):
    if ~np.isnan(x[0]):
        x_idx = 0
    else:
        x_idx = None
        for i_ in range(1, x.shape[0]):
            if ~np.isnan(x[i_]) and np.isnan(x[i_-1]):
                x_idx = i_
                break
    if x_idx == 0:
        return np.cumprod(1+x)
    else:
        y = np.empty(x.shape[0])*np.nan
        y[x_idx: x.shape[0]] = np.cumprod(1+x[x_idx: x.shape[0]])
        return y
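A hedged example for conv_to_price: a return series with two leading NaNs becomes a price path that starts at the first valid return:

import numpy as np

print(conv_to_price(np.array([np.nan, np.nan, 0.1, -0.05])))
# [  nan    nan  1.1    1.045]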
Example #27
    def _call_impl(self, t):
        if t.ndim == 0:
            x = (t - self.t_shift) / self.denom
            p = np.cumprod(x)
        else:
            x = (t - self.t_shift[:, None]) / self.denom[:, None]
            p = np.cumprod(x, axis=0)

        y = np.dot(self.D[1:].T, p)
        if y.ndim == 1:
            y += self.D[0]
        else:
            y += self.D[0, :, None]

        return y
Example #28
def ipjfact(n, k=0):
    """
    ipjfact   A Hankel matrix with factorial elements.
          a = ipjfact(n, k) is the matrix with
                    a(i,j) = (i+j)!    (k = 0, default)
                    a(i,j) = 1/(i+j)!  (k = 1)
          both are hankel matrices.
          The determinant and inverse are known explicitly.
          d = det(a) is always returned, as in
          a, d = ipjfact(n, k)

          Suggested by P. R. Graves-Morris.

          Reference:
          M.J.C. Gover, The explicit inverse of factorial Hankel matrices,
          Dept. of Mathematics, University of Bradford, 1993.
    """
    c = np.cumprod(np.arange(2, n + 2))
    d = np.cumprod(np.arange(n + 1, 2 * n + 1)) * c[n - 2]

    a = hankel(c, d)

    if k == 1:
        a = 1 / a

    d = 1

    #
    # There appears to be a bug in the implementation of integer
    # multiply in numpy (note _not_ in Python).  Therefore we use
    # the explicit "cast" to float64 below.
    #
    if k == 0:
        for i in range(1, n):
            d = d * np.prod(np.arange(1, i + 2, dtype='float64')) *  \
                np.prod(np.arange(1, n - i + 1, dtype='float64'))

        d = d * np.prod(np.arange(1, n + 2, dtype='float64'))
    else:
        for i in range(0, n):
            d = d * np.prod(np.arange(1, i, dtype='float64')) /    \
                np.prod(np.arange(1, n + 1 + i, dtype='float64'))

        if  (n * (n - 1) / 2) % 2:
            d = -d
    det_a = d

    return a, det_a
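A short usage sketch; the values follow directly from the definition a(i, j) = (i + j)! with i, j starting at 1:

a, d = ipjfact(3)
print(a)   # rows: [2, 6, 24], [6, 24, 120], [24, 120, 720]
print(d)   # the determinant, from the closed-form product above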
Example #29
 def apply(self):
     self.signal._plot.auto_update_plot = False
     pbar = progressbar(
         maxval=np.cumprod(self.signal.axes_manager.navigation_shape)[-1])
     up_to = None
     if self.differential_order == 0:
         f = self.model2plot
     else:
         f = self.diff_model2plot
         if self.crop_diff_axis is True:
             up_to = -self.differential_order
     i = 0
     for index in np.ndindex(
     tuple(self.signal.axes_manager.navigation_shape)):
         self.signal.axes_manager.set_not_slicing_indexes(index)
         self.signal.data[
         self.signal.axes_manager._getitem_tuple][:up_to]\
              = f()
         i += 1
         pbar.update(i)
     pbar.finish()
     if self.differential_order > 0:
         self.signal.axes_manager._slicing_axes[0].offset = \
             self.smooth_diff_line.axis[0]
         self.signal.crop_in_pixels(-1,0,-self.differential_order)
     self.signal._replot()
     self.signal._plot.auto_update_plot = True
Example #30
def nancumprod(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product of array elements over a given axis treating Not a
    Numbers (NaNs) as one.  The cumulative product does not change when NaNs are
    encountered and leading NaNs are replaced by ones.

    Ones are returned for slices that are all-NaN or empty.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative product is computed.  By default
        the input is flattened.
    dtype : dtype, optional
        Type of the returned array, as well as of the accumulator in which
        the elements are multiplied.  If *dtype* is not specified, it
        defaults to the dtype of `a`, unless `a` has an integer dtype with
        a precision less than that of the default platform integer.  In
        that case, the default platform integer is used instead.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output
        but the type of the resulting values will be cast if necessary.

    Returns
    -------
    nancumprod : ndarray
        A new array holding the result is returned unless `out` is
        specified, in which case it is returned.

    See Also
    --------
    numpy.cumprod : Cumulative product across array propagating NaNs.
    isnan : Show which elements are NaN.

    Examples
    --------
    >>> np.nancumprod(1)
    array([1])
    >>> np.nancumprod([1])
    array([1])
    >>> np.nancumprod([1, np.nan])
    array([ 1.,  1.])
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nancumprod(a)
    array([ 1.,  2.,  6.,  6.])
    >>> np.nancumprod(a, axis=0)
    array([[ 1.,  2.],
           [ 3.,  2.]])
    >>> np.nancumprod(a, axis=1)
    array([[ 1.,  2.],
           [ 3.,  3.]])

    """
    a, mask = _replace_nan(a, 1)
    return np.cumprod(a, axis=axis, dtype=dtype, out=out)
Example #31
def generate_graph(ret):

    colors = px.colors.sequential.Rainbow
    simple_R = ret["exog_signal"][0, :, :]
    prices = np.cumprod(simple_R + 1, 0)

    num_sim = prices.shape[1]

    x = [j for j in range(prices.shape[0])]

    fig_dist = ff.create_distplot(
        [simple_R[:, i] for i in range(simple_R.shape[1])],
        group_labels=['ret_' + str(i + 1) for i in range(simple_R.shape[1])],
        bin_size=.001)

    fig = make_subplots(rows=5,
                        cols=2,
                        specs=[[{
                            "rowspan": 2
                        }, {
                            "rowspan": 2
                        }], [None, None], [{
                            "rowspan": 2,
                            "colspan": 2
                        }, None], [None, None], [{
                            "colspan": 2
                        }, None]],
                        horizontal_spacing=0.05,
                        vertical_spacing=0.1,
                        subplot_titles=("Returns", "Prices",
                                        "Distribution of Returns"))

    yaxis = [0.1 * i + 1 for i in range(simple_R.shape[1])]

    for i in range(prices.shape[1]):
        # top left
        fig.add_trace(go.Scatter(x=x,
                                 y=simple_R[:, i],
                                 mode='lines',
                                 name='ret_' + str(i + 1),
                                 legendgroup='Sim' + str(i + 1),
                                 marker=dict(color='rgba(0,0,0,0.1)')),
                      row=1,
                      col=1)
        # top right
        fig.add_trace(go.Scatter(x=x,
                                 y=prices[:, i],
                                 mode='lines',
                                 name='price_path' + str(i + 1),
                                 legendgroup='Sim' + str(i + 1),
                                 marker=dict(color=colors[i])),
                      row=1,
                      col=2)
        # middle
        fig.add_trace(go.Histogram(fig_dist['data'][i],
                                   xbins=dict(size=0.002),
                                   legendgroup='Sim' + str(i + 1),
                                   marker=dict(color=colors[i])),
                      row=3,
                      col=1)
        fig.add_trace(go.Scatter(fig_dist['data'][i + num_sim],
                                 line=dict(width=2.5),
                                 legendgroup='Sim' + str(i + 1),
                                 marker=dict(color=colors[i])),
                      row=3,
                      col=1)
        # bottom
        fig.add_trace(go.Scatter(
            x=simple_R[:, i],
            y=[1 - 0.01 * i for j in range(simple_R[:, i].shape[0])],
            mode='markers',
            name='ret_' + str(i + 1),
            legendgroup='Sim' + str(i + 1),
            marker=dict(color=colors[i], symbol='line-ns-open')),
                      row=5,
                      col=1)

    fig.update_layout(height=800,
                      legend=dict(bordercolor="Black", borderwidth=0.5),
                      title_text="specs examples")

    return fig
Example #32
def eventprofiler(df_events_arg,
                  d_data,
                  i_lookback=20,
                  i_lookforward=20,
                  s_filename='study',
                  b_market_neutral=True,
                  b_errorbars=True,
                  s_market_sym='SPY'):
    ''' Event Profiler for an event matrix'''
    df_close = d_data['close'].copy()
    df_rets = df_close.copy()

    # Do not modify the original event dataframe.
    df_events = df_events_arg.copy()
    tsu.returnize0(df_rets.values)

    if b_market_neutral == True:
        df_rets = df_rets - df_rets[s_market_sym]
        del df_rets[s_market_sym]
        del df_events[s_market_sym]

    df_close = df_close.reindex(columns=df_events.columns)

    # Removing the starting and the end events
    df_events.values[0:i_lookback, :] = np.NaN
    df_events.values[-i_lookforward:, :] = np.NaN

    # Number of events
    i_no_events = int(np.logical_not(np.isnan(df_events.values)).sum())
    assert i_no_events > 0, "Zero events in the event matrix"
    na_event_rets = "False"

    # Looking for the events and pushing them to a matrix
    for i, s_sym in enumerate(df_events.columns):
        for j, dt_date in enumerate(df_events.index):
            if df_events[s_sym][dt_date] == 1:
                na_ret = df_rets[s_sym][j - i_lookback:j + 1 + i_lookforward]
                if type(na_event_rets) == type(""):
                    na_event_rets = na_ret
                else:
                    na_event_rets = np.vstack((na_event_rets, na_ret))

    if len(na_event_rets.shape) == 1:
        na_event_rets = np.expand_dims(na_event_rets, axis=0)

    # Computing daily rets and returns
    na_event_rets = np.cumprod(na_event_rets + 1, axis=1)
    na_event_rets = (na_event_rets.T / na_event_rets[:, i_lookback]).T

    # Study Params
    na_mean = np.mean(na_event_rets, axis=0)
    na_std = np.std(na_event_rets, axis=0)
    li_time = range(-i_lookback, i_lookforward + 1)

    # Plotting the chart
    plt.clf()
    plt.axhline(y=1.0, xmin=-i_lookback, xmax=i_lookforward, color='k')
    if b_errorbars == True:
        plt.errorbar(li_time[i_lookback:],
                     na_mean[i_lookback:],
                     yerr=na_std[i_lookback:],
                     ecolor='#AAAAFF',
                     alpha=0.7)
    plt.plot(li_time, na_mean, linewidth=3, label='mean', color='b')
    plt.xlim(-i_lookback - 1, i_lookforward + 1)
    if b_market_neutral == True:
        plt.title('Market Relative mean return of ' +\
                str(i_no_events) + ' events')
        print('Number of events counted in report: ', str(i_no_events))
    else:
        plt.title('Mean return of ' + str(i_no_events) + ' events')
    plt.xlabel('Days')
    plt.ylabel('Cumulative Returns')
    plt.savefig(s_filename, format='pdf')
Example #33
    def generate_netvalues(self):
        # Recompute the net value table from scratch, covering all dates (including non-trading days)
        digits = self.net_digits
        confirmdays = self.confirmdays
        w.start()
        with db_connect(self.netvaldir) as conn_net:
            data = pd.read_sql('SELECT * FROM Net_Values_Base', conn_net)
            sorteddata = data.sort_values(['date'], ascending=[1])
            dates = sorteddata['date']
            # Check for missing trading dates; if any are missing, only generate up to the
            # trading day before the first missing one. A trading day implies a valuation record, but not vice versa.
            firstdate = dates.values[0]
            lastdate = dates.values[-1]
            tdays = w.tdays(firstdate, lastdate).Times
            trddays = pd.DataFrame(
                [dt.datetime.strftime(t, '%Y%m%d') for t in tdays])
            misstrd = ~trddays.isin(dates.values)
            trdmiss = trddays[misstrd.values]
            if not trdmiss.empty:
                # Cut off at the trading day immediately before the first missing trading day
                cutdate = dt.datetime.strftime(
                    w.tdaysoffset(-1, trdmiss.values[0][0]).Times[0], '%Y%m%d')
                cutpos = dates <= cutdate
                sorteddata = sorteddata[cutpos]
            dates = sorteddata['date']
            fees = -(sorteddata.
                     loc[:, ['servfee', 'keepfee', 'mangfee', 'earn']].diff())
            fees[fees <= 0] = 0
            fees.loc[0, :] = 0
            paid = fees.sum(axis=1)
            comptot = sorteddata['assettot'] - sorteddata[
                'sell']  # total assets net of redemptions payable
            netreal = sorteddata['assetnet'] / sorteddata[
                'sharenum']  # actual (per-unit) net asset value

            bookearn = sorteddata['earn'].diff()
            bookearn.iloc[0] = 0
            bookearn[bookearn < 0] = 0

            cumret = (
                (sorteddata['assetnet'].values[1:] + bookearn.values[1:]) /
                sorteddata['assetnet'].values[:-1]) * (
                    sorteddata['sharenum'].values[:-1] /
                    sorteddata['sharenum'].values[1:]) - 1
            cumret = np.row_stack([
                np.zeros([1, 1]),
                np.reshape(cumret, [dates.__len__() - 1, 1])
            ])
            netcum = np.cumprod(1 + cumret) * netreal[0]

            sharechg = sorteddata['sharenum'].diff()
            sharechg[0] = 0
            # Compare with the previous value to locate the confirmation date (T+C)
            idxchg_TC = sharechg != 0  # type: pd.DataFrame
            idxchg_TC[0] = False
            chgpos_TC = idxchg_TC[idxchg_TC.values].index
            chgdate = dates[chgpos_TC].values
            opendt = [
                w.tdaysoffset(-confirmdays, c).Times[0] for c in chgdate
            ]  # open (subscription/redemption) dates, in Wind datetime format
            opendate = [dt.datetime.strftime(t, '%Y%m%d')
                        for t in opendt]  # open dates, T
            openidx = dates.isin(opendate)  # positions of the open dates
            inout = np.zeros_like(netreal)
            inout2 = np.zeros_like(netreal)
            inout[chgpos_TC] = np.round(netreal[openidx.values].values,
                                        digits) * sharechg[chgpos_TC].values
            idxchg_TCm1 = np.zeros_like(netreal)
            opennum = 0
            opentot = opendate.__len__()
            for dumi in range(dates.__len__()):
                if opennum >= opentot:
                    break
                mydt = dates.values[dumi]
                if mydt > opendate[opennum] and mydt < chgdate[opennum]:
                    inout2[dumi] = inout[chgpos_TC[opennum]]
                    idxchg_TCm1[dumi] = 1
                elif mydt >= chgdate[opennum]:
                    opennum += 1
            # numerator, aligned with the confirmation dates
            rets = np.zeros_like(netreal)
            amtchg = np.zeros_like(netreal)
            numerator = (comptot.values + paid.values +
                         idxchg_TCm1 * inout2)[1:]
            denominator = comptot.values[:-1] + (idxchg_TCm1 * inout2 +
                                                 idxchg_TC.values * inout)[1:]
            rets[1:] = numerator / denominator - 1
            amtchg[1:] = comptot.values[
                1:] - comptot.values[:-1] + paid.values[1:] - inout[1:]
            netvals = pd.DataFrame(np.column_stack([
                dates.values, netreal, netcum,
                np.cumprod(1 + rets) * netreal[0], rets, amtchg,
                np.cumsum(amtchg)
            ]),
                                   columns=[
                                       'Date', 'NetSingle', 'NetCumulated',
                                       'NetCompensated', 'Returns', 'AmtChg',
                                       'AmtCumChg'
                                   ])
            sql.to_sql(netvals,
                       name='Net_Values',
                       con=conn_net,
                       if_exists='replace')
            print('Netvalues updated from ' + firstdate + ' to ' +
                  dates.values[-1])
        w.close()
Example #34
    def update_overdispersion_natural(self):
        node = self.nodes['overdispersion_natural']
        uu = self.Nframe['unit']
        bl = self.nodes['baseline'].expected_x()[uu]
        F = self.F_prod()
        G = self.G_prod()
        time_nat = self.time_natural

        # update of post_shape
        cnt_dframe = self.Nframe.sort_values(['unit', 'trial',
                                              'time'])['count']
        # index matrix of 2 columns: Column 1 original, Column 2 sorted
        ind_mat = np.c_[np.array(cnt_dframe.index),
                        np.array(range(cnt_dframe.index.shape[0]))]

        # cumulative sum from data
        cnt_cumsum = np.cumsum(np.array(cnt_dframe).reshape(-1,
                                                            time_nat)[:, ::-1],
                               axis=1)[:, ::-1]
        expected_phi = node.expected_x()
        # sort expected values of phi
        exphi_sorted = self._sort_values(ind_mat, expected_phi)
        # sort baseline
        bl_sorted = self._sort_values(ind_mat, bl)

        # sort F values
        if not hasattr(F, '__iter__'):
            F_sorted = F * np.ones(self.M)
        else:
            F_sorted = self._sort_values(ind_mat, F)

        # sort G values
        if not hasattr(G, '__iter__'):
            G_sorted = G * np.ones(self.M)
        else:
            G_sorted = self._sort_values(ind_mat, G)

        cumprod_phi = np.cumprod(exphi_sorted.reshape(-1, time_nat), axis=1)
        prod_phi_F = cumprod_phi * bl_sorted.reshape(-1, time_nat) * F_sorted.reshape(-1, time_nat) * \
                     G_sorted.reshape(-1, time_nat)
        cumsum_phi_F = np.cumsum(prod_phi_F[:, ::-1],
                                 axis=1)[:, ::-1] / exphi_sorted.reshape(
                                     -1, time_nat)

        # create an array for update rule
        reg_prod = np.ones(node.prior_rate.reshape(-1, time_nat).shape[0])

        # create sorted copies of post_rate and post_shape
        postrate_sorted = self._sort_values(ind_mat, node.post_rate).reshape(
            -1, time_nat)
        postshape_sorted = self._sort_values(ind_mat, node.post_shape).reshape(
            -1, time_nat)

        # print "zeta priors: {}, {}, {}".format(node.prior_rate.min(),
        #                                        node.prior_rate.mean(),
        #                                        node.prior_rate.max())
        # print "omega priors: {}, {}, {}".format(node.prior_shape.min(),
        #                                         node.prior_shape.mean(),
        #                                         node.prior_shape.max())

        # print "Old zeta: {}, {}, {}".format(postrate_sorted.min(),
        #                                     postrate_sorted.mean(),
        #                                     postrate_sorted.max())
        # print "Old omega: {}, {}, {}".format(postshape_sorted.min(),
        #                                      postshape_sorted.mean(),
        #                                      postshape_sorted.max())

        # print "Min, Mean, Max of cumsum_phi_F:  {}\t{}\t{} @@@@@@".format(np.min(cumsum_phi_F),
        #                                                                   np.mean(cumsum_phi_F),
        #                                                                   np.max(cumsum_phi_F))

        if node.has_parents:
            print "Overdispersion_natural has parents!"
            # prior_rate = node.prior_rate.expected_x()[uu]
            priorate_sorted = self._sort_values(
                ind_mat,
                node.prior_rate.expected_x()[uu]).reshape(-1, time_nat)

            # prior_shape = node.prior_shape.expected_x()[uu]
            priorshape_sorted = self._sort_values(
                ind_mat,
                node.prior_shape.expected_x()[uu]).reshape(-1, time_nat)

            # # update post shape and post rate
            # node.post_shape, node.post_rate = _reg_omega_zeta(time_nat,
            #     node.post_shape.reshape(-1, time_nat), node.post_rate.reshape(-1, time_nat),
            #     new_omega, new_zeta, prior_shape, prior_rate, cnt_cumsum, cumsum_phi_F, reg_prod)

            for i in range(time_nat):
                # create a temp vector to keep last ratio of rate / shape
                temp_ratio = postrate_sorted[:, i] / postshape_sorted[:, i]
                # update post rate and post shape
                postrate_sorted[:, i] = cumsum_phi_F[:, i] * reg_prod + priorate_sorted[:, i]
                postshape_sorted[:, i] = cnt_cumsum[:, i] + priorshape_sorted[:, i]
                # update regressive product
                reg_prod *= temp_ratio * (postshape_sorted[:, i] /
                                          postrate_sorted[:, i])

        else:
            # create sorted copies of prior_rate and prior_shape
            priorate_sorted = self._sort_values(ind_mat,
                                                node.prior_rate).reshape(
                                                    -1, time_nat)
            priorshape_sorted = self._sort_values(ind_mat,
                                                  node.prior_shape).reshape(
                                                      -1, time_nat)

            for i in range(time_nat):
                # create a temp vector to keep last ratio of rate / shape
                temp_ratio = postrate_sorted[:, i] / postshape_sorted[:, i]
                # update post rate and post shape
                postrate_sorted[:, i] = cumsum_phi_F[:, i] * reg_prod + priorate_sorted[:, i]
                postshape_sorted[:, i] = cnt_cumsum[:, i] + priorshape_sorted[:, i]
                # update regressive product
                reg_prod *= temp_ratio * (postshape_sorted[:, i] /
                                          postrate_sorted[:, i])

        node.post_shape = self._unsort_values(ind_mat,
                                              postshape_sorted.ravel())
        node.post_rate = self._unsort_values(ind_mat, postrate_sorted.ravel())
Example #35
    def test_cumprod(self):
        self.assertRaises(ValueError, np.cumprod, self.q)

        q = [10, .1, 5, 50] * pq.dimensionless
        self.assertQuantityEqual(np.cumprod(q), [10, 1, 5, 250])
Example #36
def cumprod(x, axis=0):
    return np.cumprod(x, axis=axis)
Example #37
def compute_equity(timestamps, starting_equity, returns):
    ''' Given starting equity, timestamps and returns, create a numpy array of equity at each date'''
    return starting_equity * np.cumprod(1. + returns)
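An illustrative call with made-up numbers (timestamps are unused inside the function body, so None is passed here):

import numpy as np

returns = np.array([0.01, -0.02, 0.03])
print(compute_equity(None, 100000.0, returns))   # [101000.   98980.  101949.4]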
Example #38
    "nansum": lambda *args: np.nansum(args),
    "nanstd": lambda *args: np.nanstd(args),
    "nanmedian": lambda *args: np.nanmedian(args),
    "nancumsum": lambda *args: np.nancumsum(args),
    "nancumprod": lambda *args: np.nancumprod(args),
    "nanargmax": lambda *args: np.nanargmax(args),
    "nanargmin": lambda *args: np.nanargmin(args),
    "nanvar": lambda *args: np.nanvar(args),
    "mean": lambda *args: np.mean(args),
    "min": lambda *args: np.min(args),
    "max": lambda *args: np.max(args),
    "sum": lambda *args: np.sum(args),
    "std": lambda *args: np.std(args),
    "median": lambda *args: np.median(args),
    "cumsum": lambda *args: np.cumsum(args),
    "cumprod": lambda *args: np.cumprod(args),
    "argmax": lambda *args: np.argmax(args),
    "argmin": lambda *args: np.argmin(args),
    "var": lambda *args: np.var(args)
})


class FeatureFunc:
    """
    Parameters
    ----------
    expression : str
        An expression string
    args : List[Tuple[str, Orange.data.Variable]]
        A list of (`name`, `variable`) tuples where `name` is the name of
        a variable as used in `expression`, and `variable` is the variable
Example #39
 def perform(self, node, inputs, output_storage):
     x = inputs[0]
     z = output_storage[0]
     z[0] = np.cumprod(x, axis=self.axis)
Example #40
    def __init__(self,
                 x_dim,
                 z_dim,
                 dataset_size,
                 batch_size=64,
                 gf_dim=64,
                 df_dim=64,
                 prior_std=1.0,
                 J=1,
                 M=1,
                 num_classes=1,
                 eta=2e-4,
                 num_layers=4,
                 alpha=0.01,
                 lr=0.0002,
                 optimizer='adam',
                 wasserstein=False,
                 ml=False,
                 J_d=None):

        assert len(x_dim) == 3, "invalid image dims"
        c_dim = x_dim[2]
        self.is_grayscale = (c_dim == 1)
        self.optimizer = optimizer.lower()
        self.dataset_size = dataset_size
        self.batch_size = batch_size

        self.K = num_classes
        self.x_dim = x_dim
        self.z_dim = z_dim

        self.gf_dim = gf_dim
        self.df_dim = df_dim
        self.c_dim = c_dim
        self.lr = lr

        # Bayes
        self.prior_std = prior_std
        self.num_gen = J
        self.num_disc = J_d if J_d is not None else 1
        self.num_mcmc = M
        self.eta = eta
        self.alpha = alpha
        # ML
        self.ml = ml
        if self.ml:
            assert self.num_gen == 1 and self.num_disc == 1 and self.num_mcmc == 1, "invalid settings for ML training"

        self.noise_std = np.sqrt(2 * self.alpha * self.eta)

        def get_strides(num_layers, num_pool):
            interval = int(math.floor(num_layers / float(num_pool)))
            strides = np.array([1] * num_layers)
            strides[0:interval * num_pool:interval] = 2
            return strides

        self.num_pool = 4
        self.max_num_dfs = 512
        self.gen_strides = get_strides(num_layers, self.num_pool)
        self.disc_strides = self.gen_strides
        num_dfs = np.cumprod(np.array([self.df_dim] +
                                      list(self.disc_strides)))[:-1]
        num_dfs[num_dfs >= self.max_num_dfs] = self.max_num_dfs  # memory
        self.num_dfs = list(num_dfs)
        self.num_gfs = self.num_dfs[::-1]

        self.construct_from_hypers(gen_strides=self.gen_strides,
                                   disc_strides=self.disc_strides,
                                   num_gfs=self.num_gfs,
                                   num_dfs=self.num_dfs)

        self.build_bgan_graph()
        self.build_test_graph()
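A sketch of the filter-count computation above: with df_dim = 64 and the default num_layers = 4, get_strides(4, 4) returns [2, 2, 2, 2], so the discriminator gets 64, 128, 256 and 512 filters (then capped at max_num_dfs):

import numpy as np

df_dim, strides = 64, [2, 2, 2, 2]
num_dfs = np.cumprod(np.array([df_dim] + strides))[:-1]
print(num_dfs)   # [ 64 128 256 512]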
Example #41
def show_portfolio_future_plot(gbm_sim, init_cap, days_sim, hist_data):
    """
    Returns the plot of possible future projections from gbm_sim, at the 5%, 50%, 95% confidence

    :param gbm_sim: pd.DataFrame - simulation results from utils.simulate_gbm() 
    :param init_cap: np.float64 - initial capital 
    :param days_sim: int - number of days to simulate 
    :param hist_data: pd.DataFrame - the historical prices (to get the last day of data)

    :returns: Bokeh.line - 3 Lines outlining the % chance of getting above certain values
    """
    if gbm_sim is not None:

        last_day = hist_data.index.max()
        dates = [last_day + timedelta(days=i) for i in range(days_sim)]

        sim_res = get_sim_results_stats(gbm_sim)

        bottom = sim_res.filter(['net_asset_change']).quantile(.05) / len(
            gbm_sim)  #arithmetic average daily returns
        middle = sim_res.filter(['net_asset_change'
                                 ]).quantile(.5) / len(gbm_sim)
        top = sim_res.filter(['net_asset_change']).quantile(.95) / len(gbm_sim)

        # print(bottom)

        bottom_ind_value = init_cap * np.cumprod([1 + bottom] * days_sim)
        middle_ind_value = init_cap * np.cumprod([1 + middle] * days_sim)
        top_ind_value = init_cap * np.cumprod([1 + top] * days_sim)

        ind_value = pd.DataFrame(
            data={
                "bottom_ind_value": bottom_ind_value,
                "middle_ind_value": middle_ind_value,
                "top_ind_value": top_ind_value
            })
        ind_value['dates'] = dates
        source = ColumnDataSource(ind_value)

        plot_proj = figure(x_axis_type='datetime',
                           height=250,
                           tools="reset, save, wheel_zoom, pan")
        plot_proj.sizing_mode = "scale_width"
        plot_proj.grid.grid_line_alpha = 0
        plot_proj.xaxis.axis_label = 'Date'
        plot_proj.yaxis.axis_label = 'Indicative Value'
        plot_proj.ygrid.band_fill_color = None
        plot_proj.ygrid.band_fill_alpha = 0
        plot_proj.yaxis.formatter = NumeralTickFormatter(format="$0,0")
        plot_proj.xaxis.minor_tick_line_color = None

        plot_proj.line(x="dates",
                       y="bottom_ind_value",
                       color='#006565',
                       source=source,
                       legend_label='5th Percentile',
                       line_width=1.5)
        r1 = plot_proj.line(x="dates",
                            y="middle_ind_value",
                            color='#008c8c',
                            source=source,
                            legend_label='50th Percentile',
                            line_width=1.5)
        plot_proj.line(x="dates",
                       y="top_ind_value",
                       color='#00eeee',
                       source=source,
                       legend_label='95th Percentile',
                       line_width=1.5)

        hover = HoverTool(tooltips=[
            ('Date', '@dates{%F}'),
            ("Projected Value, 5% chance of having more than",
             '$@top_ind_value{0,0.00}'),
            ("Projected Value, 50% chance of having more than",
             '$@middle_ind_value{0,0.00}'),
            ("Projected Value, 95% chance of having more than",
             '$@bottom_ind_value{0,0.00}')
        ],
                          formatters={"@dates": "datetime"})
        hover.renderers = [r1]
        hover.mode = 'vline'

        plot_proj.add_tools(hover)

        plot_proj.legend.location = "top_left"
        return plot_proj
Example #42

import numpy as np

x = np.arange(5)            # assumed: x is used below but was not defined in this snippet
y = np.zeros(10)
np.power(2, x, out=y[::2])  # write 2**x into every other element of y
print(y)

# reduce(), accumulate()
x = np.arange(1, 6)
np.add.reduce(x)
np.multiply.reduce(x)
np.add.accumulate(x)
np.multiply.accumulate(x)

np.sum(x)
np.prod(x)
np.cumsum(x)
np.cumprod(x)

# outer products
x = np.arange(1, 6)
np.multiply.outer(x, x) # product table

L = np.random.random(100)
sum(L)
np.sum(L)

big_array = np.random.rand(10000000)
min(big_array)
max(big_array)
np.min(big_array)
np.max(big_array)
print(big_array.min(), big_array.max(), big_array.sum())
Example #43
    def setup_DA_params(self):
        """
        - we increase roation angle from [-15, 15] to [-30, 30]
        - scale range is now (0.7, 1.4), was (0.85, 1.25)
        - we don't do elastic deformation anymore

        :return:
        """

        self.deep_supervision_scales = [[1, 1, 1]] + list(
            list(i) for i in 1 / np.cumprod(
                np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]

        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi,
                                                  30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi,
                                                  30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi,
                                                  30. / 360 * 2. * np.pi)
            if self.do_dummy_2D_aug:
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params[
                    "rotation_x"] = default_2D_augmentation_params[
                        "rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = (-15. / 360 *
                                                                2. * np.pi,
                                                                15. / 360 *
                                                                2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params[
            "mask_was_used_for_normalization"] = self.use_mask_for_norm

        if self.do_dummy_2D_aug:
            self.basic_generator_patch_size = get_patch_size(
                self.patch_size[1:], self.data_aug_params['rotation_x'],
                self.data_aug_params['rotation_y'],
                self.data_aug_params['rotation_z'],
                self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array(
                [self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(
                self.patch_size, self.data_aug_params['rotation_x'],
                self.data_aug_params['rotation_y'],
                self.data_aug_params['rotation_z'],
                self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size

        self.data_aug_params["scale_range"] = (0.7, 1.4)
        self.data_aug_params["do_elastic"] = False
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params[
            'patch_size_for_spatialtransform'] = patch_size_for_spatialtransform

        self.data_aug_params["num_cached_per_thread"] = 2
Example #44
0
def conv_kernel(days):
    a = 2.0 / (days + 1)
    kernel = np.ones(days, dtype=float)
    kernel[1:] = 1 - a
    return a * np.cumprod(kernel)
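
This helper builds the weight sequence of an exponential moving average, a * (1 - a)**k, with the most recent weight first. A small self-contained check (the function is repeated so the snippet runs on its own):

import numpy as np

def conv_kernel(days):
    a = 2.0 / (days + 1)
    kernel = np.ones(days, dtype=float)
    kernel[1:] = 1 - a
    return a * np.cumprod(kernel)

w = conv_kernel(3)                          # a = 0.5
assert np.allclose(w, [0.5, 0.25, 0.125])   # a * (1 - a)**k for k = 0, 1, 2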
Example #45
0
    return out_df


if __name__ == "__main__":
    period1_start = "2007-04-01"
    period1_end = "2020-06-20"
    target_beta = 1
    test_strategy = Strategy(tickers, period1_start, period1_end)
    test_strategy.find_para_lookback(preset_cov_period, preset_rho_period)

    SPY = pdr.get_data_yahoo('SPY',
                             start=period1_start,
                             end=period1_end,
                             interval='1d').dropna()['Adj Close']
    SPY_return = SPY.pct_change(1).dropna()
    SPY_return_cum = (np.cumprod(np.array(SPY_return) + 1)) - 1

    aa_omega, aaa, for_check = test_strategy.test(30, target_beta)
    cum = (np.cumprod(np.array(aaa) + 1)) - 1
    plt.plot(test_strategy.df.index[1:-2],
             SPY_return_cum[1:-1],
             color="indianred",
             label='Benchmark')
    plt.plot(test_strategy.df.index,
             cum,
             color="darkseagreen",
             label='Strategy Performance')
    plt.legend()
    plt.title("Target beta " + str(target_beta) + ', covariance period ' +
              str(preset_cov_period) + ', rho period ' +
              str(preset_rho_period))
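
Both curves in this plot follow the same compounding identity: a simple-return series r is turned into a cumulative return with np.cumprod(1 + r) - 1. A minimal sketch with made-up daily returns:

import numpy as np

daily_returns = np.array([0.01, -0.02, 0.03])   # hypothetical simple returns
cum_returns = np.cumprod(daily_returns + 1) - 1
print(cum_returns)                              # approximately [0.01, -0.0102, 0.019494]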
Example #46
0
from hurst import compute_Hc, random_walk
from AR import ar

# Execute Download
import binance_download

# Read and select Close price (Header: Timestamp, OHLCV, ...)
p = pd.read_json('Binance_BTCUSDT_1m_1483228800000-1580342400000.json')
df = p[[4]]
df['changes'] = df.pct_change()
changevec = df['changes'].dropna() + 1
changevec = changevec.dropna()

random_changes = np.array(changevec)
series = np.cumprod(random_changes)

H_l = []
c_l = []
data_l = []
mse_l = []
series_splits = np.array_split(series, 1000)

for currseries in series_splits:
    H, c, data = compute_Hc(currseries, kind='price', simplified=True)
    H_l.append(H)
    c_l.append(c)
    data_l.append(data)
    mse_l.append(ar(currseries - 1))  # Get back to simple returns around 0

# Evaluate Hurst equation for complete data set
Example #47
0
def train(run_id: str, syn_dir: Path, voc_dir: Path, models_dir: Path,
          ground_truth: bool, save_every: int, backup_every: int,
          force_restart: bool):
    # Check to make sure the hop length is correctly factorised
    assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length

    # Instantiate the model
    print("Initializing the model...")
    model = WaveRNN(rnn_dims=hp.voc_rnn_dims,
                    fc_dims=hp.voc_fc_dims,
                    bits=hp.bits,
                    pad=hp.voc_pad,
                    upsample_factors=hp.voc_upsample_factors,
                    feat_dims=hp.num_mels,
                    compute_dims=hp.voc_compute_dims,
                    res_out_dims=hp.voc_res_out_dims,
                    res_blocks=hp.voc_res_blocks,
                    hop_length=hp.hop_length,
                    sample_rate=hp.sample_rate,
                    mode=hp.voc_mode)

    if torch.cuda.is_available():
        model = model.cuda()
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # Initialize the optimizer
    optimizer = optim.Adam(model.parameters())
    for p in optimizer.param_groups:
        p["lr"] = hp.voc_lr
    loss_func = F.cross_entropy if model.mode == "RAW" else discretized_mix_logistic_loss

    # Load the weights
    model_dir = models_dir.joinpath(run_id)
    model_dir.mkdir(exist_ok=True)
    weights_fpath = model_dir.joinpath(run_id + ".pt")
    if force_restart or not weights_fpath.exists():
        print("\nStarting the training of WaveRNN from scratch\n")
        model.save(weights_fpath, optimizer)
    else:
        print("\nLoading weights at %s" % weights_fpath)
        model.load(weights_fpath, optimizer)
        print("WaveRNN weights loaded from step %d" % model.step)

    # Initialize the dataset
    metadata_fpath = syn_dir.joinpath("train.txt") if ground_truth else \
        voc_dir.joinpath("synthesized.txt")
    mel_dir = syn_dir.joinpath("mels") if ground_truth else voc_dir.joinpath(
        "mels_gta")
    wav_dir = syn_dir.joinpath("audio")
    dataset = VocoderDataset(metadata_fpath, mel_dir, wav_dir)
    test_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=True,
                             pin_memory=True)

    # Begin the training
    simple_table([('Batch size', hp.voc_batch_size), ('LR', hp.voc_lr),
                  ('Sequence Len', hp.voc_seq_len)])

    for epoch in range(1, 350):
        data_loader = DataLoader(
            dataset,
            collate_fn=collate_vocoder,
            batch_size=hp.voc_batch_size,
            num_workers=2 if platform.system() != "Windows" else 0,
            shuffle=True,
            pin_memory=True)
        start = time.time()
        running_loss = 0.

        for i, (x, y, m) in enumerate(data_loader, 1):
            if torch.cuda.is_available():
                x, m, y = x.cuda(), m.cuda(), y.cuda()

            # Forward pass
            y_hat = model(x, m)
            if model.mode == 'RAW':
                y_hat = y_hat.transpose(1, 2).unsqueeze(-1)
            elif model.mode == 'MOL':
                y = y.float()
            y = y.unsqueeze(-1)

            # Backward pass
            loss = loss_func(y_hat, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            speed = i / (time.time() - start)
            avg_loss = running_loss / i

            step = model.get_step()
            k = step // 1000

            if backup_every != 0 and step % backup_every == 0:
                model.checkpoint(model_dir, optimizer)

            if save_every != 0 and step % save_every == 0:
                model.save(weights_fpath, optimizer)

            msg = f"| Epoch: {epoch} ({i}/{len(data_loader)}) | " \
                f"Loss: {avg_loss:.4f} | {speed:.1f} " \
                f"steps/s | Step: {k}k | "
            stream(msg)

        gen_testset(model, test_loader, hp.voc_gen_at_checkpoint,
                    hp.voc_gen_batched, hp.voc_target, hp.voc_overlap,
                    model_dir)
        print("")
Example #48
0
def create_bayesian_tear_sheet(returns,
                               benchmark_rets=None,
                               live_start_date=None,
                               samples=2000,
                               return_fig=False):
    """
    Generate a number of Bayesian distributions and a Bayesian
    cone plot of returns.

    Plots: Sharpe distribution, annual volatility distribution,
    annual alpha distribution, beta distribution, predicted 1 and 5
    day returns distributions, and a cumulative returns cone plot.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    benchmark_rets : pd.Series, optional
        Daily noncumulative returns of the benchmark.
         - This is in the same style as returns.
    live_start_date : datetime, optional
        The point in time when the strategy began live
        trading, after its backtest period.
    samples : int, optional
        Number of posterior samples to draw.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    set_context : boolean, optional
        If True, set default plotting style context.
    """

    if live_start_date is None:
        raise NotImplementedError(
            'Bayesian tear sheet requires setting of live_start_date')

    if benchmark_rets is None:
        benchmark_rets = utils.get_symbol_rets('SPY',
                                               start=returns.index[0],
                                               end=returns.index[-1])

    live_start_date = utils.get_utc_timestamp(live_start_date)
    df_train = returns.loc[returns.index < live_start_date]
    df_test = returns.loc[returns.index >= live_start_date]

    # Run T model with missing data
    trace_t = bayesian.run_model('t',
                                 df_train,
                                 returns_test=df_test,
                                 samples=samples)

    # Compute BEST model
    trace_best = bayesian.run_model('best',
                                    df_train,
                                    returns_test=df_test,
                                    samples=samples)

    # Plot results

    fig = plt.figure(figsize=(14, 10 * 2))
    gs = gridspec.GridSpec(9, 2, wspace=0.3, hspace=0.3)

    axs = []
    row = 0

    # Plot Bayesian cone
    ax_cone = plt.subplot(gs[row, :])
    bayesian.plot_bayes_cone(df_train, df_test, trace=trace_t, ax=ax_cone)

    # Plot BEST results
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    # Effect size across two
    axs.append(plt.subplot(gs[row, :]))

    bayesian.plot_best(trace=trace_best, axs=axs)

    # Compute Bayesian predictions
    row += 1
    ax_ret_pred_day = plt.subplot(gs[row, 0])
    ax_ret_pred_week = plt.subplot(gs[row, 1])
    day_pred = trace_t['returns_missing'][:, 0]
    p5 = scipy.stats.scoreatpercentile(day_pred, 5)
    sns.distplot(day_pred, ax=ax_ret_pred_day)
    ax_ret_pred_day.axvline(p5, linestyle='--', linewidth=3.)
    ax_ret_pred_day.set_xlabel('Predicted returns 1 day')
    ax_ret_pred_day.set_ylabel('Frequency')
    ax_ret_pred_day.text(0.4,
                         0.9,
                         'Bayesian VaR = %.2f' % p5,
                         verticalalignment='bottom',
                         horizontalalignment='right',
                         transform=ax_ret_pred_day.transAxes)
    # Plot Bayesian VaRs
    week_pred = (np.cumprod(trace_t['returns_missing'][:, :5] + 1, 1) - 1)[:,
                                                                           -1]
    p5 = scipy.stats.scoreatpercentile(week_pred, 5)
    sns.distplot(week_pred, ax=ax_ret_pred_week)
    ax_ret_pred_week.axvline(p5, linestyle='--', linewidth=3.)
    ax_ret_pred_week.set_xlabel('Predicted cum returns 5 days')
    ax_ret_pred_week.set_ylabel('Frequency')
    ax_ret_pred_week.text(0.4,
                          0.9,
                          'Bayesian VaR = %.2f' % p5,
                          verticalalignment='bottom',
                          horizontalalignment='right',
                          transform=ax_ret_pred_week.transAxes)

    # Run alpha beta model
    benchmark_rets = benchmark_rets.loc[df_train.index]
    trace_alpha_beta = bayesian.run_model('alpha_beta',
                                          df_train,
                                          bmark=benchmark_rets,
                                          samples=samples)

    # Plot alpha and beta
    row += 1
    ax_alpha = plt.subplot(gs[row, 0])
    ax_beta = plt.subplot(gs[row, 1])
    sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1, ax=ax_alpha)
    ax_alpha.set_xlabel('Annual Alpha')
    ax_alpha.set_ylabel('Belief')
    sns.distplot(trace_alpha_beta['beta'][100:], ax=ax_beta)
    ax_beta.set_xlabel('Beta')
    ax_beta.set_ylabel('Belief')

    gs.tight_layout(fig)

    plt.show()
    if return_fig:
        return fig
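
The week_pred line compounds the first five predicted daily returns of each posterior sample and keeps the 5-day total. A small sketch with placeholder random draws standing in for trace_t['returns_missing']:

import numpy as np

rng = np.random.default_rng(0)
returns_missing = rng.normal(0.001, 0.01, size=(2000, 5))   # placeholder posterior samples

week_pred = (np.cumprod(returns_missing + 1, axis=1) - 1)[:, -1]
print(week_pred.shape)   # (2000,)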
Example #49
0
    def expected_log_evidence(self):
        """
        Calculate E[log p(N, z|rest).
        """
        uu = self.Nframe['unit']
        nn = self.Nframe['count']
        tt = self.Nframe['time']

        Elogp = 0
        eff_rate = 1

        if self.baseline:
            node = self.nodes['baseline']
            bar_log_lam = node.expected_log_x()
            bar_lam = node.expected_x()

            Elogp += np.sum(nn * bar_log_lam[uu])
            eff_rate *= bar_lam[uu]

        if self.latents:
            node = self.nodes['fr_latents']
            bar_log_lam = node.expected_log_x()
            xi = self.nodes['HMM'].nodes['z'].z[1]

            Elogp += np.sum(nn[:, np.newaxis] * xi[tt] * bar_log_lam[uu])
            eff_rate *= self.F_prod()

            # pieces for A and pi
            Elogp += self.nodes['HMM'].expected_log_state_sequence()

        if self.regressors:
            node = self.nodes['fr_regressors']
            bar_log_lam = node.expected_log_x()
            xx = self.Xframe.values

            Elogp += np.sum(nn[:, np.newaxis] * xx * bar_log_lam[uu])
            eff_rate *= self.G_prod()

        if self.overdispersion:
            node = self.nodes['overdispersion']
            bar_log_lam = node.expected_log_x()
            bar_lam = node.expected_x()

            Elogp += np.sum(nn * bar_log_lam)
            eff_rate *= bar_lam
            # print "Min, Mean, Max of effective rate: {}\t{}\t{} ~~~~~~".format(
            #     np.min(eff_rate), np.mean(eff_rate), np.max(eff_rate))

        if self.overdispersion_natural:
            #print "compute overdispersion natural!"
            node = self.nodes['overdispersion_natural']
            bar_log_phi = node.expected_log_x()
            bar_phi = node.expected_x()
            time_nat = self.time_natural

            # get the dataframe for count values
            cnt_dframe = self.Nframe.sort_values(['unit', 'trial',
                                                  'time'])['count']
            cnt_cumsum = np.cumsum(np.array(cnt_dframe).reshape(
                -1, time_nat)[:, ::-1],
                                   axis=1)[:, ::-1].ravel()

            # index matrix of 2 columns: Column 1 original, Column 2 sorted
            ind_mat = np.c_[np.array(cnt_dframe.index),
                            np.array(range(cnt_dframe.index.shape[0]))]
            # unsort cnt_cumsum to be in the original order
            cumsum_unsort = self._unsort_values(ind_mat, cnt_cumsum)

            Elogp += np.sum(cumsum_unsort * bar_log_phi)

            # sort bar_phi to compute cumulative product
            exphi_sorted = self._sort_values(ind_mat, bar_phi)
            # compute the cumultive product
            prod_array = np.cumprod(exphi_sorted.reshape(-1, time_nat),
                                    axis=1).ravel()
            # print "Min, Mean, Max of product array:  {}\t{}\t{}".format(
            #     np.min(prod_array), np.mean(prod_array), np.max(prod_array))

            # sort prod_array to be in the original order
            prod_unsort = self._unsort_values(ind_mat, prod_array)

            eff_rate *= prod_unsort
            # print "Min, Mean, Max of effective rate: {}\t{}\t{} ~~~~~~".format(
            #     np.min(eff_rate), np.mean(eff_rate), np.max(eff_rate))

        Elogp += -np.sum(eff_rate)

        return Elogp
Example #50
0
def extrema(input, labels=None, index=None):
    """
    Calculate the minimums and maximums of the values of an array
    at labels, along with their positions.

    Parameters
    ----------
    input : ndarray
        Nd-image data to process.
    labels : ndarray, optional
        Labels of features in input.
        If not None, must be same shape as `input`.
    index : int or sequence of ints, optional
        Labels to include in output.  If None (default), all values where
        non-zero `labels` are used.

    Returns
    -------
    minimums, maximums : int or ndarray
        Values of minimums and maximums in each feature.
    min_positions, max_positions : tuple or list of tuples
        Each tuple gives the n-D coordinates of the corresponding minimum
        or maximum.

    See Also
    --------
    maximum, minimum, maximum_position, minimum_position, center_of_mass

    Examples
    --------
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> from scipy import ndimage
    >>> ndimage.extrema(a)
    (0, 9, (0, 2), (3, 0))

    Features to process can be specified using `labels` and `index`:

    >>> lbl, nlbl = ndimage.label(a)
    >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))
    (array([1, 4, 3]),
     array([5, 7, 9]),
     [(0, 0), (1, 3), (3, 1)],
     [(1, 0), (2, 3), (3, 0)])

    If no index is given, non-zero `labels` are processed:

    >>> ndimage.extrema(a, lbl)
    (1, 9, (0, 0), (3, 0))

    """
    dims = numpy.array(numpy.asarray(input).shape)
    # see numpy.unravel_index to understand this line.
    dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]

    minimums, min_positions, maximums, max_positions = _select(
        input,
        labels,
        index,
        find_min=True,
        find_max=True,
        find_min_positions=True,
        find_max_positions=True)

    if numpy.isscalar(minimums):
        return (minimums, maximums, tuple((min_positions // dim_prod) % dims),
                tuple((max_positions // dim_prod) % dims))

    min_positions = [
        tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims
    ]
    max_positions = [
        tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims
    ]

    return minimums, maximums, min_positions, max_positions
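
The dim_prod array built from the cumulative product is a hand-rolled version of np.unravel_index: dividing a flat index by the per-dimension strides and taking the remainder recovers the N-D coordinates. A quick check on a small shape:

import numpy as np

dims = np.array([3, 4, 5])
dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1]   # element strides [20, 5, 1]

flat = 37
coords = tuple((flat // dim_prod) % dims)
assert coords == np.unravel_index(flat, tuple(dims))   # both give (1, 3, 2)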
Example #51
0
def array_cumprod_global(arr):
    return np.cumprod(arr)
Example #52
0
        # Exact formula should be pi + tao* np.dot(current_cov.I,Q - pi)
        posterior_sigma = current_cov + ((tao * current_cov).I + omega.I).I

        optimal_weight = posterior_sigma.I * posterior_mean / risk_aversion
        optimal_weight = optimal_weight / abs(optimal_weight).sum()

        weight.iloc[i, :] = optimal_weight.getA()[:, 0].T

        total_ret[class_type][i] = np.dot(ret.iloc[i, :],
                                          optimal_weight).getA()[0][0]

    # If view = 0
    capital_ret[i] = np.dot(ret.iloc[i, :], capital_weight).getA()[0][0]

total_ret.dropna(inplace=True)
net_value = np.cumprod(1 + total_ret)
net_value.plot()

## Backtest and benchmark
spy_ret = price['SPYClose'].pct_change().dropna()
market_ret = pd.Series(capital_ret, index=ret.index, name='market')
df = pd.concat([total_ret, market_ret, spy_ret], axis=1, join='inner')

## Get rid of all rows that ret = 0
#df = all_ret[~all_ret['BL'].isin([0])]
net_value = np.cumprod(1 + df)
net_value.iloc[:, 0:4].plot()
#
backtest = Backtest(df, 'SPYClose', 0.05, 52)
#print(backtest.summary())
#evaluation = pd.DataFrame(final_result/t,index=['sign_predict','range_predict','total_predict'],\
Example #53
0
    def diffuse_reflection_matrix(self,
                                  frequency,
                                  eps_1,
                                  eps_2,
                                  mu_s,
                                  mu_i,
                                  dphi,
                                  npol,
                                  debug=False):
        """compute the reflection coefficients for an array of incident, scattered and azimuth angles
           in medium 1. Medium 2 is where the beam is transmitted.

        :param frequency: frequency of the incident wave
        :param eps_1: permittivity of the medium where the incident beam is propagating.
        :param eps_2: permittivity of the other medium
        :param mu_s: array of cosine of scattered angles
        :param mu_i: array of cosine of incident angles
        :param dphi: azimuth angle between the incident and scattered directions
        :param npol: number of polarization

        :return: the reflection matrix
        """
        mu_s = np.atleast_1d(mu_s)
        mu_i = np.atleast_1d(mu_i)

        if not np.allclose(mu_s, mu_i) or not np.allclose(dphi, np.pi):
            raise NotImplementedError(
                "Only the backscattering coefficient is implemented at this stage."
                "This is a very preliminary implementation")

        if len(np.atleast_1d(dphi)) != 1:
            raise NotImplementedError(
                "Only the backscattering coefficient is implemented at this stage. "
            )

        mu = mu_i[None, :]
        k = vector3.from_angles(
            2 * np.pi * frequency / C_SPEED * np.sqrt(eps_1).real, mu, 0)
        eps_r = eps_2 / eps_1

        ks = np.abs(k.norm * self.roughness_rms)
        kl = np.abs(k.norm * self.corr_length)

        try:
            self.check_validity(ks, kl, eps_r)
        except SMRTError as e:
            if self.warning_handling == "print":
                print(e)
            elif self.warning_handling == "nan":
                return smrt_matrix.full((npol, len(mu_i)), np.nan)

        Rv, Rh = self.fresnel_coefficients(eps_1, eps_2, mu_i, ks, kl)

        fvv = 2 * Rv / mu  # Eq 44 in Fung et al. 1992
        fhh = -2 * Rh / mu  # Eq 45 in Fung et al. 1992

        # prepare the series
        N = self.series_truncation
        n = np.arange(1, N + 1, dtype=np.float64)[:, None]

        rms2 = self.roughness_rms**2

        # Kirchoff term
        Iscalar_n = (2 * k.z)**n * np.exp(-rms2 * k.z**2)
        Ivv_n = Iscalar_n * fvv  # Eq 82 in Fung et al. 1992
        Ihh_n = Iscalar_n * fhh

        # Complementary term
        mu2 = mu**2
        sin2 = 1 - mu2
        tan2 = sin2 / mu2
        # part of Eq 91. We don't use all the simplification because we want validity for n>1, especially not np.exp(-rms2 * k.z**2)=1
        Ivv_n += k.z**n * (sin2 / mu * (1 + Rv)**2 * (1 - 1 / eps_r) *
                           (1 + tan2 / eps_r))
        Ihh_n += -k.z**n * (sin2 / mu * (1 + Rh)**2 *
                            (eps_r - 1) / mu2)  # part of Eq 95.

        # compute the series
        rms2_over_fractorial = np.cumprod(rms2 / n)[:, None]

        # Eq 82 in Fung et al. 1992
        coef = k.norm2 / 2 * np.exp(-2 * rms2 * k.z**2)
        coef_n = rms2_over_fractorial * self.W_n(n, -2 * k.x)

        sigma_vv = coef * np.sum(coef_n * abs2(Ivv_n), axis=0)
        sigma_hh = coef * np.sum(coef_n * abs2(Ihh_n), axis=0)

        # if debug:
        #    self.sigma_vv_1 = ( 8*k.norm2**2*rms2*abs2(Rv*mu2 + (1-mu2)*(1+Rv)**2 / 2 * (1 - 1 / eps_r)) * self.W_n(1, -2 * k.x) ).flat
        #    self.sigma_hh_1 = ( 8*k.norm2**2*rms2*abs2(Rh*mu2) * self.W_n(1, -2 * k.x) ).flat

        reflection_coefficients = smrt_matrix.zeros((npol, len(mu_i)))
        reflection_coefficients[0] = sigma_vv / (4 * np.pi * mu_i)
        reflection_coefficients[1] = sigma_hh / (4 * np.pi * mu_i)

        return reflection_coefficients
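
The rms2_over_fractorial term uses a cumulative product of rms2 / n as a numerically convenient way of generating the series coefficients rms2**n / n!. A small numerical check:

import numpy as np
from math import factorial

rms2 = 0.3                                    # placeholder roughness variance
n = np.arange(1, 6, dtype=np.float64)

series_coeffs = np.cumprod(rms2 / n)          # rms2**n / n! for n = 1..5
reference = np.array([rms2**k / factorial(k) for k in range(1, 6)])
assert np.allclose(series_coeffs, reference)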
Example #54
0
def spherical_transform(samples):
    """Map samples from the ``[0, 1]``--cube onto the hypersphere.

    Applies the `inverse transform method` to the distribution
    :class:`.SphericalCoords` to map uniform samples from the ``[0, 1]``--cube
    onto the surface of the hypersphere. [#]_

    Parameters
    ----------
    samples : ``(n, d) array_like``
        ``n`` uniform samples from the d-dimensional ``[0, 1]``--cube.

    Returns
    -------
    mapped_samples : ``(n, d+1) np.array``
        ``n`` uniform samples from the ``d``--dimensional sphere
        (Euclidean dimension of ``d+1``).

    See Also
    --------
    :class:`.Rd`
    :class:`.Sobol`
    :class:`.ScatteredHypersphere`
    :class:`.SphericalCoords`

    References
    ----------
    .. [#] K.-T. Fang and Y. Wang, Number-Theoretic Methods in Statistics.
       Chapman & Hall, 1994.

    Examples
    --------
    >>> from nengolib.stats import spherical_transform

    In the simplest case, we can map a one-dimensional uniform distribution
    onto a circle:

    >>> line = np.linspace(0, 1, 20)
    >>> mapped = spherical_transform(line)

    >>> import matplotlib.pyplot as plt
    >>> plt.figure(figsize=(6, 3))
    >>> plt.subplot(121)
    >>> plt.title("Original")
    >>> plt.scatter(line, np.zeros_like(line), s=30)
    >>> plt.subplot(122)
    >>> plt.title("Mapped")
    >>> plt.scatter(*mapped.T, s=25)
    >>> plt.show()

    This technique also generalizes to less trivial situations, for instance
    mapping a square onto a sphere:

    >>> square = np.asarray([[x, y] for x in np.linspace(0, 1, 50)
    >>>                             for y in np.linspace(0, 1, 10)])
    >>> mapped = spherical_transform(square)

    >>> from mpl_toolkits.mplot3d import Axes3D
    >>> plt.figure(figsize=(6, 3))
    >>> plt.subplot(121)
    >>> plt.title("Original")
    >>> plt.scatter(*square.T, s=15)
    >>> ax = plt.subplot(122, projection='3d')
    >>> ax.set_title("Mapped").set_y(1.)
    >>> ax.patch.set_facecolor('white')
    >>> ax.set_xlim3d(-1, 1)
    >>> ax.set_ylim3d(-1, 1)
    >>> ax.set_zlim3d(-1, 1)
    >>> ax.scatter(*mapped.T, s=15)
    >>> plt.show()
    """

    samples = np.asarray(samples)
    samples = samples[:, None] if samples.ndim == 1 else samples
    coords = np.empty_like(samples)
    n, d = coords.shape

    # inverse transform method (section 1.5.2)
    for j in range(d):
        coords[:, j] = SphericalCoords(d - j).ppf(samples[:, j])

    # spherical coordinate transform
    mapped = np.ones((n, d + 1))
    i = np.ones(d)
    i[-1] = 2.0
    s = np.sin(i[None, :] * np.pi * coords)
    c = np.cos(i[None, :] * np.pi * coords)
    mapped[:, 1:] = np.cumprod(s, axis=1)
    mapped[:, :-1] *= c
    return mapped
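
The last block is the standard spherical-coordinate product: every output component is a running product of sines times one cosine, so each row automatically has unit norm. A standalone sketch of the same pattern (the angles below are arbitrary placeholders, not samples from SphericalCoords):

import numpy as np

angles = np.array([[0.3, 1.1, 2.0]])           # placeholder angles, shape (n, d)
n, d = angles.shape

s = np.sin(angles)
c = np.cos(angles)
mapped = np.ones((n, d + 1))
mapped[:, 1:] = np.cumprod(s, axis=1)          # sin(a1), sin(a1)sin(a2), ...
mapped[:, :-1] *= c                            # multiply all but the last entry by a cosine

assert np.allclose(np.linalg.norm(mapped, axis=1), 1.0)   # points lie on the unit sphere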
Example #55
0
    #Do calculations to generate the output
    cases = N.zeros(3, dtype=float)

    #press enter right away
    cases[0] = B + 2  #new password + enter twice

    #keep typing
    prob_correct = probs.prod()
    cases[1] = prob_correct * (B - A + 1) + (1 - prob_correct) * (B - A + 1 +
                                                                  B + 1)

    #back up
    keycounts_correct = N.arange(1, A + 1) * 2 + 1 + (B - A)
    keycounts_incorrect = N.arange(1, A + 1) * 2 + 1 + B + 1 + (B - A)

    cum_correct = N.hstack((N.cumprod(probs)[-2::-1], N.array(1.)))

    exp_keycounts = keycounts_correct * cum_correct + keycounts_incorrect * (
        1 - cum_correct)
    cases[2] = exp_keycounts.min()

    print(cases)

    output = '%.6f' % cases.min()

    ##################NEW CODE GOES HERE###########################################
    #Write out the results for this case
    outfile.write('Case #%i: %s\n' % (i + 1, output))

#Close files
infile.close()
Example #56
0
from Portefeuille import Portfeuille
from PM_strategy import CRP, BHP
import seaborn as sns
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt

SYMBOLS = [
    'ETHBTC', 'XRPBTC', 'EOSBTC', 'LTCBTC', 'ZECBTC', 'ETCBTC', 'XMRBTC'
]
START = datetime.datetime(2017, 8, 1)
END = datetime.datetime(2020, 4, 1)
Port = Portfeuille(SYMBOLS, START, END)
y = CRP(Port.df_normalized, START, END)
CRP_cum_return = np.cumprod(y) - 1
y2 = BHP(Port.df_normalized, START, END)
HBP_cum_return = np.cumprod(y2) - 1

x = pd.date_range(start=START, end=END, freq='30min')
plt.figure(figsize=(12, 6))
sns.lineplot(x=x, y=CRP_cum_return, color='red')
sns.lineplot(x=x, y=HBP_cum_return, color='blue')
plt.legend(('UCRP', 'UHBP'), loc='upper right')
plt.title(
    'Cumulative returns for different portfolio management strategies'
)
plt.show()
Example #57
0
def stencil_grid(S, grid, dtype=None, format=None):
    """Construct a sparse matrix form a local matrix stencil 
    
    Parameters
    ----------
    S : ndarray
        matrix stencil stored in rank N array
    grid : tuple
        tuple containing the N grid dimensions
    dtype :
        data type of the result
    format : string
        sparse matrix format to return, e.g. "csr", "coo", etc.

    Returns
    -------
    A : sparse matrix
        Sparse matrix which represents the operator given by applying
        stencil S at each vertex of a regular grid with given dimensions.

    Notes
    -----
    The grid vertices are enumerated as arange(prod(grid)).reshape(grid).
    This implies that the last grid dimension cycles fastest, while the 
    first dimension cycles slowest.  For example, if grid=(2,3) then the
    grid vertices are ordered as (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).

    This coincides with the ordering used by the NumPy functions 
    ndenumerate() and mgrid().

    Examples
    --------
    >>> stencil = [-1,2,-1]  # 1D Poisson stencil
    >>> grid = (5,)          # 1D grid with 5 vertices
    >>> A = stencil_grid(stencil, grid, dtype=float, format='csr')   
    >>> A.todense()
    matrix([[ 2., -1.,  0.,  0.,  0.],
            [-1.,  2., -1.,  0.,  0.],
            [ 0., -1.,  2., -1.,  0.],
            [ 0.,  0., -1.,  2., -1.],
            [ 0.,  0.,  0., -1.,  2.]])
    
    >>> stencil = [[0,-1,0],[-1,4,-1],[0,-1,0]] # 2D Poisson stencil
    >>> grid = (3,3)                            # 2D grid with shape 3x3
    >>> A = stencil_grid(stencil, grid, dtype=float, format='csr')   
    >>> A.todense()
    matrix([[ 4., -1.,  0., -1.,  0.,  0.,  0.,  0.,  0.],
            [-1.,  4., -1.,  0., -1.,  0.,  0.,  0.,  0.],
            [ 0., -1.,  4.,  0.,  0., -1.,  0.,  0.,  0.],
            [-1.,  0.,  0.,  4., -1.,  0., -1.,  0.,  0.],
            [ 0., -1.,  0., -1.,  4., -1.,  0., -1.,  0.],
            [ 0.,  0., -1.,  0., -1.,  4.,  0.,  0., -1.],
            [ 0.,  0.,  0., -1.,  0.,  0.,  4., -1.,  0.],
            [ 0.,  0.,  0.,  0., -1.,  0., -1.,  4., -1.],
            [ 0.,  0.,  0.,  0.,  0., -1.,  0., -1.,  4.]])
    
    """

    S = numpy.asarray(S, dtype=dtype)
    grid = tuple(grid)

    if not (numpy.asarray(S.shape) % 2 == 1).all():
        raise ValueError('all stencil dimensions must be odd')

    if len(grid) != S.ndim:
        raise ValueError('stencil rank must equal number of grid dimensions')

    if min(grid) < 1:
        raise ValueError('grid dimensions must be positive')

    N_v = numpy.prod(grid)  # number of vertices in the mesh
    N_s = (S != 0).sum()  # number of nonzero stencil entries

    # diagonal offsets
    diags = numpy.zeros(N_s, dtype=int)

    # compute index offset of each dof within the stencil
    strides = numpy.cumprod([1] + list(reversed(grid)))[:-1]
    indices = tuple(i.copy() for i in S.nonzero())
    for i, s in zip(indices, S.shape):
        i -= s // 2
    for stride, coords in zip(strides, reversed(indices)):
        diags += stride * coords

    data = S[S != 0].repeat(N_v).reshape(N_s, N_v)

    indices = numpy.vstack(indices).T

    # zero boundary connections
    for index, diag in zip(indices, data):
        diag = diag.reshape(grid)
        for n, i in enumerate(index):
            if i > 0:
                s = [slice(None)] * len(grid)
                s[n] = slice(0, i)
                diag[tuple(s)] = 0
            elif i < 0:
                s = [slice(None)] * len(grid)
                s[n] = slice(i, None)
                diag[tuple(s)] = 0

    # remove diagonals that lie outside matrix
    mask = abs(diags) < N_v
    if not mask.all():
        diags = diags[mask]
        data = data[mask]

    # sum duplicate diagonals
    if len(numpy.unique(diags)) != len(diags):
        new_diags = numpy.unique(diags)
        new_data = numpy.zeros((len(new_diags), data.shape[1]),
                               dtype=data.dtype)

        for dia, dat in zip(diags, data):
            n = numpy.searchsorted(new_diags, dia)
            new_data[n, :] += dat

        diags = new_diags
        data = new_data

    return scipy.sparse.dia_matrix((data, diags),
                                   shape=(N_v, N_v)).asformat(format)
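
The strides derived from the cumulative product are the element strides of a C-ordered (row-major) grid, listed from the fastest-cycling dimension outward. A quick check against the strides NumPy itself reports:

import numpy as np

grid = (2, 3, 4)
strides = np.cumprod([1] + list(reversed(grid)))[:-1]    # [1, 4, 12]: last dimension cycles fastest

a = np.empty(grid)                                       # NumPy strides are in bytes, slowest dim first
assert list(strides[::-1]) == [s // a.itemsize for s in a.strides]   # [12, 4, 1]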
Example #58
0
import time

from numpy import cumprod, linspace, random

from bokeh.sampledata.stocks import AAPL, FB, GOOG, IBM, MSFT
from bokeh.plotting import *
from bokeh.objects import GridPlot

output_file("correlation.html", title="correlation.py example")

hold()

num_points = 300

now = time.time()
dt = 24 * 3600  # days
dates = linspace(now, now + num_points * dt, num_points)
acme = cumprod(random.lognormal(0.0, 0.04, size=num_points))
choam = cumprod(random.lognormal(0.0, 0.04, size=num_points))

scatter(
    acme,
    choam,
    color='#A6CEE3',
    radius=3,
    tools="pan,zoom,resize",
    legend='close',
)

curplot().title = "ACME / CHOAM Correlations"
xgrid()[0].grid_line_dash = ""
xgrid()[0].grid_line_alpha = 0.3
ygrid()[0].grid_line_dash = ""
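
acme and choam above are synthetic price paths: the cumulative product of lognormal one-step returns, i.e. a multiplicative random walk. The same construction with the NumPy namespace spelled out (the seed is only there to make the sketch reproducible):

import numpy as np

rng = np.random.default_rng(42)
num_points = 300

steps = rng.lognormal(mean=0.0, sigma=0.04, size=num_points)   # one-step gross returns
prices = np.cumprod(steps)                                     # price path starting near 1.0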
Example #59
0
  def reshape(
      self, shape: Union[np.ndarray, List[Index], Tuple[Index, ...], List[int],
                         Tuple[int, ...]]
  ) -> "ChargeArray":
    """
    Reshape `tensor` into `shape`.
    `ChargeArray.reshape` works the same as the dense 
    version, with the notable exception that the tensor can only be 
    reshaped into a form compatible with its elementary shape. 
    The elementary shape is the shape determined by ChargeArray._charges.
    For example, while the following reshaping is possible for regular 
    dense numpy tensor,
    ```
    A = np.random.rand(6,6,6)
    np.reshape(A, (2,3,6,6))
    ```
    the same code for ChargeArray
    ```
    q1 = U1Charge(np.random.randint(0,10,6))
    q2 = U1Charge(np.random.randint(0,10,6))
    q3 = U1Charge(np.random.randint(0,10,6))
    i1 = Index(charges=q1,flow=False)
    i2 = Index(charges=q2,flow=True)
    i3 = Index(charges=q3,flow=False)
    A=ChargeArray.randn(indices=[i1,i2,i3])
    print(A.shape) #prints (6,6,6)
    A.reshape((2,3,6,6)) #raises ValueError
    ```
    raises a `ValueError` since (2,3,6,6)
    is incompatible with the elementary shape (6,6,6) of the tensor.
    
    Args:
      tensor: A symmetric tensor.
      shape: The new shape. Can either be a list of `Index` 
        or a list of `int`.
    Returns:
      ChargeArray: A new tensor reshaped into `shape`
    """
    new_shape = []
    for s in shape:
      if isinstance(s, Index):
        new_shape.append(s.dim)
      else:
        new_shape.append(s)

    if np.array_equal(new_shape, self.shape):
      result = self.__new__(type(self))
      result.__init__(
          data=self.data,
          charges=self._charges,
          flows=self._flows,
          order=self._order,
          check_consistency=False)
      return result

    # a few simple checks
    if np.prod(new_shape) != np.prod(self.shape):
      raise ValueError("A tensor with {} elements cannot be "
                       "reshaped into a tensor with {} elements".format(
                           np.prod(self.shape), np.prod(new_shape)))
    flat_dims = np.asarray(
        [self._charges[n].dim for o in self._order for n in o])

    if len(new_shape) > len(self._charges):
      raise ValueError("The shape {} is incompatible with the "
                       "elementary shape {} of the tensor.".format(
                           tuple(new_shape), tuple(flat_dims)))

    if np.any(new_shape == 0) or np.any(flat_dims == 0):
      raise ValueError("reshaping empty arrays is ambiguous, and is currently "
                       "not supported.")

    partitions = [0]
    for n, ns in enumerate(new_shape):
      tmp = np.nonzero(np.cumprod(flat_dims) == ns)[0]
      if len(tmp) == 0:
        raise ValueError(
            "The shape {} is incompatible with the "
            "elementary shape {} of the tensor.".format(
                tuple(new_shape),
                tuple([self._charges[n].dim for o in self._order for n in o])))

      partitions.append(tmp[0] + 1)
      flat_dims = flat_dims[partitions[-1]:]
    for d in flat_dims:
      if d != 1:
        raise ValueError(
            "The shape {} is incompatible with the "
            "elementary shape {} of the tensor.".format(
                tuple(new_shape),
                tuple([self._charges[n].dim for o in self._order for n in o])))
      partitions[-1] += 1

    partitions = np.cumsum(partitions)

    flat_order = self.flat_order
    new_order = []
    for n in range(1, len(partitions)):
      new_order.append(list(flat_order[partitions[n - 1]:partitions[n]]))
    result = self.__new__(type(self))
    result.__init__(
        data=self.data,
        charges=self._charges,
        flows=self._flows,
        order=new_order,
        check_consistency=False)
    return result
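
The partition search above relies on np.cumprod(flat_dims) to find how many elementary dimensions fuse into each requested dimension. A tiny sketch of that test with hypothetical elementary dims:

import numpy as np

flat_dims = np.asarray([2, 3, 6])          # hypothetical elementary dimensions
target = 6
k = np.nonzero(np.cumprod(flat_dims) == target)[0][0] + 1
print(k)   # 2: the first two elementary dims (2 * 3) fuse into the requested dimension 6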
Example #60
0
stoploss = 0.5
maxcheckdate = 20
maxdecrease = 0.09
pre_spclose = np.zeros(zhangkai.CLOSE.shape)
spclose = pre_spclose.copy()
pre_spbenchclose = spclose.copy()
spbenchclose = spclose.copy()
finalclose = spclose.copy()
for i in range(len(zhangkai.CLOSE[:, 0])):
    loc = np.where(zhangkai.CLOSE[i, :] == 0)
    benchclose = zhangkai.BENCHMARK['close']
    rbenchclose = benchclose.values.T
    if len(loc[0]) == 0:  #已经上市
        pre_spclose[i, 1:] = np.diff(
            zhangkai.CLOSE[i, 0:]) / zhangkai.CLOSE[i, 0:-1]
        spclose[i, 0:] = np.cumprod(1 + pre_spclose[i, 0:])
        pre_spbenchclose[i, 1:] = np.diff(rbenchclose[0:]) / rbenchclose[0:-1]
        spbenchclose[i, 0:] = np.cumprod(1 + pre_spbenchclose[i, 0:])
        finalclose[i, 0:] = spclose[i, 0:] - spbenchclose[i, 0:]
    else:
        b = loc[0][-1]
        pre_spclose[i, b + 2:] = np.diff(
            zhangkai.CLOSE[i, b + 1:]) / zhangkai.CLOSE[i, b + 1:-1]
        spclose[i, b + 1:] = np.cumprod(1 + pre_spclose[i, b + 1:])
        pre_spbenchclose[i, b + 2:] = np.diff(
            rbenchclose[b + 1:]) / rbenchclose[b + 1:-1]
        spbenchclose[i, b + 1:] = np.cumprod(1 + pre_spbenchclose[i, b + 1:])
        finalclose[i, b + 1:] = spclose[i, b + 1:] - spbenchclose[i, b + 1:]
cishu = 0
for date in range(zhangkai.observe, len(zhangkai.tradedate) - 1):
    cishu = cishu + 1