Example #1
    def update_price(self, date, sid):
        try:
            p = pd.Series(self.collection.find_one({'sid': sid, 'date': date, 'dname': 'price'})['dvalue'])
        except:
            self.logger.warning('No price found for %s on %s', sid, date)
            return
        p.index = [datetime.strptime(date, '%Y%m%d')+timedelta(milliseconds=int(s)) for s in p.index]

        df5 = p.resample('5min', 'ohlc', label='right', closed='right')
        df5.columns = ['into', 'inth', 'intl', 'intc']
        df5['lstl'] = pd.expanding_min(p).resample('5min', 'last', label='right', closed='right')
        df5['lsth'] = pd.expanding_max(p).resample('5min', 'last', label='right', closed='right')
        df5['vlty'] = p.resample('5min', lambda x: x.std(), label='right', closed='right')
        df5['lstp'] = p.resample('5min', 'last', label='right', closed='right')
        df5['tvwp'] = p.resample('5min', 'mean', label='right', closed='right')
        df5.index = [dt.strftime('%H%M%S') for dt in df5.index]
        df5 = df5.ix[times_5min]
        for dname, ser in df5.iteritems():
            self.db.IF_5min.update({'sid': sid, 'date': date, 'dname': dname}, {'$set': {'dvalue': ser.to_dict()}}, upsert=True)

        df1 = p.resample('1min', 'ohlc', label='right', closed='right')
        df1.columns = ['into', 'inth', 'intl', 'intc']
        df1['lstl'] = pd.expanding_min(p).resample('1min', 'last', label='right', closed='right')
        df1['lsth'] = pd.expanding_max(p).resample('1min', 'last', label='right', closed='right')
        df1['vlty'] = p.resample('1min', lambda x: x.std(), label='right', closed='right')
        df1['lstp'] = p.resample('1min', 'last', label='right', closed='right')
        df1.index = [dt.strftime('%H%M%S') for dt in df1.index]
        df1 = df1.ix[times_1min]
        for dname, ser in df1.iteritems():
            self.db.IF_1min.update({'sid': sid, 'date': date, 'dname': dname}, {'$set': {'dvalue': ser.to_dict()}}, upsert=True)
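Note: pd.expanding_min/expanding_max and the string-aggregation form of resample used above were removed in later pandas releases. A minimal sketch of the same 5-minute bar construction against the current API (make_5min_bars is a hypothetical helper; p is assumed to be the intraday price Series built in update_price) might look like this:

import pandas as pd

def make_5min_bars(p):
    # p: intraday price Series indexed by Timestamps (an assumption, mirroring update_price)
    r = p.resample('5min', label='right', closed='right')
    df5 = r.ohlc()
    df5.columns = ['into', 'inth', 'intl', 'intc']
    df5['lstl'] = p.cummin().resample('5min', label='right', closed='right').last()  # running low so far
    df5['lsth'] = p.cummax().resample('5min', label='right', closed='right').last()  # running high so far
    df5['vlty'] = r.std()   # per-bar volatility
    df5['lstp'] = r.last()  # last price in the bar
    df5['tvwp'] = r.mean()  # time-weighted average price of the bar
    return df5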
Example #2
def computeRunningMinMaxSignals(df, ema_feats_df, spike_5s, norm):
    max_to_here = pd.expanding_max(df['microprice_ema_200ms_ticks'])
    min_to_here = pd.expanding_min(df['microprice_ema_200ms_ticks'])

    if spike_5s >= 0:
        max_disl = max_to_here
    else:
        max_disl = min_to_here

    ema_feats_df['from_max_disl_ema_200ms_ticks'] = computeRelativeDislocation(df['microprice_ema_200ms_ticks'], max_disl, max_disl)
    ema_feats_df['from_max_disl_ema_2000ms_ticks'] = computeRelativeDislocation(df['microprice_ema_2000ms_ticks'], max_disl, max_disl)
    ema_feats_df['from_max_disl_ema_10000ms_ticks'] = computeRelativeDislocation(df['microprice_ema_10000ms_ticks'], max_disl, max_disl)

    rev_from_max_disl_to_here = df['microprice_ema_200ms_ticks'] - max_disl
    max_rev_to_here = pd.expanding_max(rev_from_max_disl_to_here)
    min_rev_to_here = pd.expanding_min(rev_from_max_disl_to_here)

    if spike_5s >= 0:
        max_rev = max_disl + min_rev_to_here
    else:
        max_rev = max_disl + max_rev_to_here

    ema_feats_df['from_max_rev_ema_200ms_ticks'] = computeRelativeDislocation(df['microprice_ema_200ms_ticks'], max_rev, max_rev)
    ema_feats_df['from_max_rev_ema_2000ms_ticks'] = computeRelativeDislocation(df['microprice_ema_2000ms_ticks'], max_rev, max_rev)
    ema_feats_df['from_max_rev_ema_10000ms_ticks'] = computeRelativeDislocation(df['microprice_ema_10000ms_ticks'], max_rev, max_rev)
        

    return (ema_feats_df, df)
Example #3
def plot_result(result):
    '''
    Plot backtest results: S&P 500 cumulative return and pair-trading return, with Sharpe ratio and maximum drawdown
    '''
    cash_value_list = np.array(result[0])
    sp500_index = np.array(result[1])
    sp500_ret = sp500_index / sp500_index[0] - 1.
    rf_rate = result[2]
    ret = cash_value_list / cash_value_list[0] - 1.

    #date_list = date_in_data[3000:4100]

    sharp_ratio = (np.mean(ret) - rf_rate) / sqrt(np.var(ret))

    max_here = pd.expanding_max(ret)
    dd2here = ret - max_here
    max_drawdown = dd2here.min()

    plt.figure(figsize=(16, 5), dpi=120)
    plt.plot(sp500_ret, color="blue", label="sp500")
    plt.plot(ret, color="red", label="pair trading")
    plt.annotate('Sharpe Ratio ' + str(round(sharp_ratio, 2)) +
                 ', Max Drawdown ' + str(round(max_drawdown, 2)),
                 xy=(0.01, 0.8),
                 xycoords='axes fraction')
    plt.legend()
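For reference, a hedged sketch of the same two statistics with current pandas, mirroring the calculation above (sharpe_and_max_drawdown is a hypothetical helper; ret and rf_rate are assumed to be the cumulative-return array and risk-free rate from plot_result):

import numpy as np
import pandas as pd

def sharpe_and_max_drawdown(ret, rf_rate):
    ret = pd.Series(np.asarray(ret))
    sharpe_ratio = (ret.mean() - rf_rate) / ret.std(ddof=0)  # ddof=0 mirrors sqrt(np.var(ret)) above
    drawdown = ret - ret.cummax()                            # distance below the running peak
    return sharpe_ratio, drawdown.min()                      # most negative value = max drawdown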
Example #4
def indicator_KDJ(stock_data):  # KDJ indicator calculation
    # Compute the KDJ indicator
    low_list = pd.rolling_min(stock_data['low'], 9)  # 9-day window; the first 8 values are NaN
    low_list.fillna(value=pd.expanding_min(stock_data['low']),
                    inplace=True)  # replace those NaN values with the expanding-window minimum
    high_list = pd.rolling_max(stock_data['high'], 9)
    high_list.fillna(value=pd.expanding_max(stock_data['high']), inplace=True)
    rsv = (stock_data['close'] - low_list) / (high_list - low_list) * 100
    stock_data['KDJ_K'] = pd.ewma(rsv, com=2, adjust=False)
    stock_data['KDJ_D'] = pd.ewma(stock_data['KDJ_K'], com=2, adjust=False)
    stock_data['KDJ_J'] = 3 * stock_data['KDJ_K'] - 2 * stock_data['KDJ_D']
    # Identify KDJ golden crosses and death crosses
    stock_data['KDJ_金叉死叉'] = ''
    kdj_position = stock_data['KDJ_K'] > stock_data['KDJ_D']
    stock_data.loc[kdj_position[(kdj_position == True)
                                & (kdj_position.shift() == False)].index,
                   'KDJ_金叉死叉'] = '金叉'  # K<D yesterday, K>D today
    stock_data.loc[kdj_position[(kdj_position == False)
                                & (kdj_position.shift() == True)].index,
                   'KDJ_金叉死叉'] = '死叉'
    # Use adjusted prices to compute returns over the next few trading days
    for n in [1, 2, 3, 5, 10, 20]:
        stock_data['接下来' + str(n) + '个交易日涨跌幅'] = stock_data['close'].shift(
            -1 * n) / stock_data['close'] - 1.0
    stock_data.dropna(how='any', inplace=True)  # drop all rows containing NaN
    # Keep only the KDJ golden-cross rows (these are later merged into all_stock)
    stock_data = stock_data[(stock_data['KDJ_金叉死叉'] == '金叉')]
    if not stock_data.empty:
        return stock_data
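pd.rolling_min/rolling_max, pd.expanding_min/expanding_max and pd.ewma are gone in current pandas; a minimal sketch of the same K/D/J computation with the current API (kdj_modern is a hypothetical name; columns as above) could be:

import pandas as pd

def kdj_modern(stock_data, n=9, com=2):
    # rolling(..., min_periods=1) already yields the expanding extreme for the first n-1 rows,
    # so the separate fillna(expanding_min/max) step is no longer needed
    low_list = stock_data['low'].rolling(n, min_periods=1).min()
    high_list = stock_data['high'].rolling(n, min_periods=1).max()
    rsv = (stock_data['close'] - low_list) / (high_list - low_list) * 100
    stock_data['KDJ_K'] = rsv.ewm(com=com, adjust=False).mean()
    stock_data['KDJ_D'] = stock_data['KDJ_K'].ewm(com=com, adjust=False).mean()
    stock_data['KDJ_J'] = 3 * stock_data['KDJ_K'] - 2 * stock_data['KDJ_D']
    return stock_data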
Example #5
def max_drawdown(date_line, capital_line):
    """
    :param date_line: 日期序列
    :param capital_line: 账户价值序列
    :return: 输出最大回撤及开始日期和结束日期
    """
    # 将数据序列合并为一个dataframe并按日期排序
    df = pd.DataFrame({'date': date_line, 'capital': capital_line})
    df.sort_values(by='date', inplace=True)
    df.reset_index(drop=True, inplace=True)

    # 直接在df中创建两个新的series
    df['max2here'] = pd.expanding_max(df['capital'])  # 计算当日之前的账户最大价值
    df['dd2here'] = df['capital'] / df['max2here'] - 1  # 计算当日的回撤
    # 这之后的df中含有4个columns,分别是date,capital,max2here,dd2here

    # 计算最大回撤和结束时间
    temp = df.sort_values(by='dd2here').iloc[0][['date', 'dd2here']]
    '''
    这里按照升序排列,也就是第一个是最小的,因为考虑的是回撤,所以亏损最多的是第一个
    这里的iloc[0]是先讲0行取出,然后对取出来的数据取date和dd2Zhere,当然这里更好的方式就是直接用ix
    '''

    max_dd = temp['dd2here']
    # 最大回撤
    end_date = temp['date']
    # 最大回撤时对应的日期

    # 计算开始时间
    df = df[df['date'] <= end_date]  # 布尔型索引
    start_date = df.sort_values(by='capital', ascending=False).iloc[0]['date']
    # 得到的数据应该是最大回撤日前前账户资本最大的日期,即为start_date
    print('最大回撤为:%f, 开始日期:%s, 结束日期:%s' % (max_dd, start_date, end_date))
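A hedged modern equivalent of the drawdown bookkeeping above: Series.cummax replaces pd.expanding_max, and idxmin/idxmax locate the trough and the preceding peak without sorting (max_drawdown_modern is a hypothetical name):

import pandas as pd

def max_drawdown_modern(date_line, capital_line):
    df = pd.DataFrame({'date': date_line, 'capital': capital_line})
    df = df.sort_values('date').reset_index(drop=True)
    df['max2here'] = df['capital'].cummax()              # running maximum of the account value
    df['dd2here'] = df['capital'] / df['max2here'] - 1   # drawdown on each day
    end_row = df.loc[df['dd2here'].idxmin()]             # deepest drawdown row
    before_end = df[df['date'] <= end_row['date']]
    start_date = before_end.loc[before_end['capital'].idxmax(), 'date']  # preceding peak
    return end_row['dd2here'], start_date, end_row['date']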
Example #6
def drawdown(ser):
    """``ser`` is a daily returns Series instead of a cumulative returns Series."""
    ser = ser.cumsum()
    end = (pd.expanding_max(ser)-ser).argmax()
    start = ser.ix[:end].argmax()

    return start, end, (ser[start]-ser[end]) * 100
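Since .ix and label-returning argmax are gone in current pandas, a hedged sketch of the same logic with idxmax and .loc slicing (drawdown_modern is a hypothetical name):

import pandas as pd

def drawdown_modern(ser):
    cum = ser.cumsum()
    end = (cum.cummax() - cum).idxmax()   # date of the deepest drawdown
    start = cum.loc[:end].idxmax()        # preceding peak
    return start, end, (cum[start] - cum[end]) * 100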
Example #7
def dd_stats(returns, N=252):
 #initialize
 eSeries = (returns +1).cumprod()
 eHighSeries = pd.expanding_max(eSeries)
 ddSeriesPer = (eSeries - eHighSeries)/eHighSeries
 maxDD = ddSeriesPer.min()
 ddDays = []
 DDs = []
 count = 0
 ddStart= 0

 # calc dd days
 for i,k in enumerate(eHighSeries):
  if i > 0:
   if eHighSeries[i-1]==eHighSeries[i]: # in DD
    if count ==0: #DD Started
     ddStart = i-1
    count +=1
    if i == len(eHighSeries)-1: #still in drawdown at the end of series
     ddDays.append(count)
     DDs.append(min(ddSeriesPer[ddStart:i]))
   else: #new hwm, DDEnded
    if count != 0:
     ddDays.append(count)
     DDs.append(min(ddSeriesPer[ddStart:i]))
     #ddStart =0
    count =0


 # tick[tick.index == datetime.datetime(2008,11,24)]
 # ddStartDate = datetime.datetime.utcfromtimestamp(returns[i-1:i].index.astype(int)*1e-9)
 return np.sqrt(N) * returns.mean()/abs(maxDD), maxDD, max(ddDays), np.mean(ddDays), len(DDs), np.mean(DDs), eSeries, ddSeriesPer, np.log(eSeries[len(eSeries)-1])-np.log(eSeries[1])
Example #8
def max_drawdown(date_line, close_line):
    """
    date_line: 日期序列
    capital_line: 账户价值序列
    输出最大回撤及开始日期和结束日期
    """
    # 将数据序列合并为一个dataframe并按日期排序
    df = pd.DataFrame({'date': date_line, 'close': close_line})
    df.sort_values(by='date', inplace=True)
    df.reset_index(drop=True, inplace=True)

    # 计算当日之前的账户最大价值
    df['max2here'] = pd.expanding_max(df['close'])
    # 计算当日的回撤
    df['dd2here'] = df['close'] / df['max2here'] - 1

    # 计算最大回撤和结束时间
    temp = df.sort_values(by='dd2here').iloc[0][['date', 'dd2here']]
    max_dd = temp['dd2here']
    end_date = temp['date']

    # 计算开始时间
    df = df[df['date'] <= end_date]
    text2 = '最大回撤为:{}'.format(max_dd)
    print('最大回撤为:{}'.format(max_dd))
    return text2
Example #9
    def calculate_ret_stats(self, returns_df, ann_factor):
        """
        calculate_ret_stats - Calculates return statistics for an asset's returns including IR, vol, ret and drawdowns

        Parameters
        ----------
        returns_df : DataFrame
            asset returns
        ann_factor : int
            annualisation factor to use on return statistics

        Returns
        -------
        DataFrame
        """
        tsc = TimeSeriesCalcs()

        self._rets = returns_df.mean(axis=0) * ann_factor
        self._vol = returns_df.std(axis=0) * math.sqrt(ann_factor)
        self._inforatio = self._rets / self._vol
        self._kurtosis = returns_df.kurtosis(axis=0)

        index_df = tsc.create_mult_index(returns_df)
        max2here = pandas.expanding_max(index_df)
        dd2here = index_df / max2here - 1

        self._dd = dd2here.min()
Example #10
def get_kdj(code):
    stock_data = ts.get_k_data(code)
    # kdj
    low_list = pd.rolling_min(stock_data['low'], 9)
    low_list.fillna(value=pd.expanding_min(stock_data['low']), inplace=True)
    high_list = pd.rolling_max(stock_data['high'], 9)
    high_list.fillna(value=pd.expanding_max(stock_data['high']), inplace=True)
    rsv = (stock_data['close'] - low_list) / (high_list - low_list) * 100
    stock_data['kdj_k'] = pd.ewma(rsv, com=2)
    stock_data['kdj_d'] = pd.ewma(stock_data['kdj_k'], com=2)
    stock_data['kdj_j'] = 3 * stock_data['kdj_k'] - 2 * stock_data['kdj_d']
    # Compare today's J value with yesterday's
    kdj_j = stock_data['kdj_j']
    yesterdayJ = kdj_j[kdj_j.size - 2]
    todayJ = kdj_j[kdj_j.size - 1]
    kdj_k = stock_data['kdj_k']
    todayK = kdj_k[kdj_k.size - 1]
    # Only continue if today's J value is greater than yesterday's
    if (todayJ > yesterdayJ and todayK < float(20)):
        # Compute the 5-day price percentage
        stock_data = stock_data[stock_data.date > str(dc.get_the_day_before_today(1))]
        stock_data['kdj_ok'] = 1
    else:
        stock_data = stock_data[stock_data.date > str(dc.get_the_day_before_today(1))]
        stock_data['kdj_ok'] = 0
    return stock_data
Example #11
def MaxDrawdown(data, prt=True):
    """
    计算最大回撤,最大回撤期,起始日期,结束日期
    :param date_line: 日期收益率序列,包含两列--日期,净值
    :return: 输出最大回撤及开始日期和结束日期
    """
    f_data = GetData(data)  #规整数据
    f_data['max2here'] = pd.expanding_max(f_data['capital'])  #计算当日之前的账户最大价值
    f_data['dd2here'] = f_data['capital'] / f_data['max2here'] - 1  #计算当日的回撤

    # 计算最大回撤和结束时间
    temp = f_data.sort_values(by='dd2here').iloc[0][['date', 'dd2here']]
    max_dd = temp['dd2here']
    end_date = temp['date']

    # Find the start date
    f_data = f_data[f_data['date'] <= end_date]
    start_date = f_data.sort_values(by='capital',
                                    ascending=False).iloc[0]['date']
    if end_date == 'start':
        end_date = start_date
    time = str(pd.Timestamp(end_date) - pd.Timestamp(start_date))

    if prt:
        print u'Maximum drawdown: %.2f%%, duration: %s, start date: %s, end date: %s' % (
            max_dd * 100, time, start_date, end_date)

    return max_dd, time, start_date, end_date
Example #12
def drawdown(ser):
    """``ser`` is a daily returns Series instead of a cumulative returns Series."""
    ser = ser.cumsum()
    end = (pd.expanding_max(ser) - ser).argmax()
    start = ser.ix[:end].argmax()

    return start, end, (ser[start] - ser[end]) * 100
Example #13
 def getMaxdd(self):
     ser = self.dy['dayvalue'][self.firstTradeIdx:]
     # only compare each point to the previous running peak
     # O(N)
     running_max = pandas.expanding_max(ser)
     ddpct = (ser - running_max) / running_max
     return round(abs(ddpct.min()), 3)
Example #14
def test_max_dd():
	df = normalize.parse_file(argv[1])
	ser = df[u'净值']
	df['max2here'] = pd.expanding_max(ser)
	df['dd2here'] = ser - df['max2here']
	df['ddpct'] = df['dd2here'] / df['max2here']
	df.to_excel('max_dd.xls', encoding='gbk')
Example #15
def maximum_drawdown(series):
    """
    https://en.wikipedia.org/wiki/Drawdown_(economics)
    the peak may be zero
    e.g.
    s= [0, -0.4, -0.2, 0.2]
    peak = [0, 0, 0, 0.2]
    therefore we don't provide relative percentage of mdd

    Parameters:
    ---------------
    series : array-like
        return of investment (ROI) series

    Returns:
    ---------
    float
        maximum drawdown in the series
    """
    s = np.asarray(series)
    peak = pd.expanding_max(s)

    # absolute drawdown
    ad = np.maximum(peak - s, 0)
    mad = np.max(ad)

    return mad
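The same statistic can be computed without the removed pd.expanding_max; a hedged NumPy-only sketch (maximum_drawdown_np is a hypothetical name):

import numpy as np

def maximum_drawdown_np(series):
    s = np.asarray(series, dtype=float)
    peak = np.maximum.accumulate(s)           # running peak of the ROI series
    return np.max(np.maximum(peak - s, 0.0))  # absolute (not relative) drawdown, as above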
Example #16
    def calculate_ret_stats(self, returns_df, ann_factor):
        """
        calculate_ret_stats - Calculates return statistics for an asset's returns including IR, vol, ret and drawdowns

        Parameters
        ----------
        returns_df : DataFrame
            asset returns
        ann_factor : int
            annualisation factor to use on return statistics

        Returns
        -------
        DataFrame
        """
        tsc = TimeSeriesCalcs()

        self._rets = returns_df.mean(axis=0) * ann_factor
        self._vol = returns_df.std(axis=0) * math.sqrt(ann_factor)
        self._inforatio = self._rets / self._vol
        self._kurtosis = returns_df.kurtosis(axis=0)

        index_df = tsc.create_mult_index(returns_df)
        max2here = pandas.expanding_max(index_df)
        dd2here = index_df / max2here - 1

        self._dd = dd2here.min()
Example #17
def calculatePortValue(dfPort):
    seriesPortReturn = dfPort.reset_index().groupby(['TradingDay'])['Return'].mean()
    seriesPortValue = (seriesPortReturn+1).cumprod()
    seriesPortValue.name = u'累计净值'
    dfPortValue = pandas.DataFrame(seriesPortValue)
    seriesMax = pandas.expanding_max(dfPortValue[u'累计净值'])
    dfPortValue['MaxDD'] = (seriesMax - dfPortValue[u'累计净值']) / seriesMax
    return dfPortValue
Example #18
    def analyse(self):

        # Logger.log(logging.INFO, "Analyse Strategy", {"scope":__name__, "Rule 1":self._rule1, "Rule 2":self._rule2, "Rule 3":self._rule3, "Type":self._type})

        connection = sqlite3.connect(pyswing.database.pySwingDatabase)
        query = self.analyseStrategySql % (self._rule1, self._rule2, self._rule3, self._exit, self._type)
        self._strategyData = read_sql_query(query, connection, 'Date')
        self._strategyData['ExitValueAfterCosts'] = self._strategyData['ExitValue'] - 0.2
        connection.close()

        exitValueDataFrame = self._strategyData.ix[:,'ExitValueAfterCosts']

        mean = exitValueDataFrame.mean()
        median = exitValueDataFrame.median()
        sum = exitValueDataFrame.sum()
        count = exitValueDataFrame.count()

        tradesPerYear = count / 10
        sharpeRatio = sqrt(tradesPerYear) * exitValueDataFrame.mean() / exitValueDataFrame.std()

        self._strategyData["Sum"] = expanding_sum(exitValueDataFrame)
        self._strategyData["Max"] = expanding_max(self._strategyData["Sum"])
        self._strategyData["Min"] = expanding_min(self._strategyData["Sum"])
        self._strategyData["DD"] = self._strategyData["Max"] - self._strategyData["Min"]

        runningSum = expanding_sum(exitValueDataFrame)
        max2here = expanding_max(runningSum)
        dd2here = runningSum - max2here
        drawDown = dd2here.min()

        Logger.log(logging.INFO, "Analysing Strategy", {"scope":__name__, "Rule 1":self._rule1, "Rule 2":self._rule2, "Rule 3":self._rule3, "Exit":self._exit, "Type":self._type, "Mean":str(mean), "Median":str(median), "Sum":str(sum), "Count":str(count), "SharpeRatio":str(sharpeRatio), "DrawDown":str(drawDown)})

        connection = sqlite3.connect(pyswing.database.pySwingDatabase)
        c = connection.cursor()

        deleteSql = self.deleteStrategySql % (pyswing.globals.pySwingStrategy, self._rule1, self._rule2, self._rule3, self._exit, self._type)
        c.executescript(deleteSql)
        connection.commit()

        insertSql = self.insertStrategySql % (pyswing.globals.pySwingStrategy, self._rule1, self._rule2, self._rule3, self._exit, self._type, str(mean), str(median), str(sum), str(count), str(sharpeRatio), str(drawDown))
        c.executescript(insertSql)
        connection.commit()

        c.close()
        connection.close()
Example #19
def max_drawdown(df):
    df['max2here']=pd.expanding_max(df['close'])
    df['dd2here']=df['close']/df['max2here']-1
    temp=df.sort_values(by='dd2here').iloc[0][['date','dd2here']]
    max_dd=temp['dd2here']
    end_date=temp['date']
    df=df[df['date']<=end_date]
    start_date=df.sort_values(by='close',ascending=False).iloc[0]['date']
    print('Maximum drawdown: %f, start date: %s, end date: %s' % (max_dd, start_date, end_date))
Example #20
def KDJ(date, N=9, M1=3, M2=3):
    low_list = pd.rolling_min(date['low'], N)
    low_list.fillna(value=pd.expanding_min(date['low']), inplace=True)
    high_list = pd.rolling_max(date['high'], N)
    high_list.fillna(value=pd.expanding_max(date['high']), inplace=True)
    rsv = (date['close'] - low_list) / (high_list - low_list) * 100
    date['KDJ_K'] = pd.ewma(rsv, com=2)
    date['KDJ_D'] = pd.ewma(date['KDJ_K'], com=2)
    date['KDJ_J'] = 3 * date['KDJ_K'] - 2 * date['KDJ_D']
Example #21
def drawdowns(ror, rorStyle=0):
    #use with apply
    vami = return_index(ror, rorStyle)
    peak = pd.expanding_max(vami)
    t = pd.concat([ror, vami, peak], axis=1); t.columns = ['ror','vami','peak']
    t['indd'] = t.apply(lambda x: 1 if (x['peak'] > x['vami']) else np.nan, axis=1)
    #this is for compounding - needed?
    #t['indd'] = t.apply(lambda x: ((x['vami'] / x['peak'])-1) if (x['peak'] > x['vami']) else np.nan, axis=1) 
    return t
Example #22
def kdj(data,date,m1,m2):
	data_use=data[['high','low','open','close']]
	data['lown'] = pd.rolling_min(data_use['low'], date)
	data.lown.fillna(value=pd.expanding_min(data_use['low']), inplace=True)
	data['highn'] = pd.rolling_max(data_use['high'], date)
	data.highn.fillna(value=pd.expanding_max(data_use['high']), inplace=True)
	data['rsv']=(data['close'] - data['lown']) / (data['highn'] - data['lown']) * 100
	data['kdj_k'] = pd.ewma(data['rsv'], m1)
	data['kdj_d'] = pd.ewma(data['kdj_k'], m2)
	data['kdj_j'] = 3 * data['kdj_k'] - 2 * data['kdj_d']
Example #23
def kdj(data,date,m1,m2):
	data_use=data[['high','low','open','close']]
	data['lown'] = pd.rolling_min(data_use['low'], date)
	data.lown.fillna(value=pd.expanding_min(data_use['low']), inplace=True)
	data['highn'] = pd.rolling_max(data_use['high'], date)
	data.highn.fillna(value=pd.expanding_max(data_use['high']), inplace=True)
	data['rsv']=(data['close'] - data['lown']) / (data['highn'] - data['lown']) * 100
	data['kdj_k'] = pd.ewma(data['rsv'], m1)
	data['kdj_d'] = pd.ewma(data['kdj_k'], m2)
	data['kdj_j'] = 3 * data['kdj_k'] - 2 * data['kdj_d']
Example #24
def analysis_kdjv2(code):
    filename='./stockdata/data/last3year/'+code+'.csv'
    stock_dataT = pd.read_csv(filename, parse_dates=['date'])
    #stock_dataT.sort('date', inplace=True)
    stock_data=stock_dataT.loc[:,('date', 'high', 'low', 'close', 'p_change')]
    # Compute the KDJ indicator
    stock_data['low_list'] = pd.rolling_min(stock_data['low'], 9)
    stock_data['low_list'].fillna(value=pd.expanding_min(stock_data['low']), inplace=True)
    stock_data['high_list'] = pd.rolling_max(stock_data['high'], 9)
    stock_data['high_list'].fillna(value=pd.expanding_max(stock_data['high']), inplace=True)
    stock_data['rsv'] = (stock_data['close'] - stock_data['low_list']) / (stock_data['high_list'] - stock_data['low_list']) * 100
    #stock_data['rsv']=(stock_data['close'] - low_list) / (high_list - low_list) * 100
    stock_data['KDJ_K'] = pd.ewma(stock_data['rsv'], com=3)
    stock_data['KDJ_D'] = pd.ewma(stock_data['KDJ_K'], com=3)
    stock_data['KDJ_J'] = 3 * stock_data['KDJ_K'] - 2 * stock_data['KDJ_D']
    # Identify KDJ golden crosses and death crosses
    ### In terms of sensitivity J is the strongest, K next, and D the slowest; in terms of reliability J is the weakest, K next, and D the most stable
    ## A golden cross is marked 1, a death cross 0
    buyi=stock_data[(stock_data['KDJ_K'] > stock_data['KDJ_D'])&(stock_data['KDJ_K'].shift(1) < stock_data['KDJ_D'].shift(1))].index
    stock_data.loc[buyi,'Signal'] = 1
    selli=stock_data[(stock_data['KDJ_K'] < stock_data['KDJ_D'])&(stock_data['KDJ_K'].shift(1) > stock_data['KDJ_D'].shift(1))].index
    stock_data.loc[selli,'Signal'] = 0
    #kdj_position = stock_data['KDJ_K'] > stock_data['KDJ_D']
    #stock_data.loc[kdj_position[(kdj_position == True) & (kdj_position.shift() == False)].index, 'KDJ_BS'] = 1
    #stock_data.loc[kdj_position[(kdj_position == False) & (kdj_position.shift() == True)].index, 'KDJ_BS'] = 0
    stock_data['position']=stock_data['Signal'].shift(1)
    stock_data['position'].fillna(method='ffill', inplace=True)
    # When the position is 1, buy at that day's open; when it is 0, sell at the close. Compute the return over the data period
    stock_data['Cash_index'] = ((stock_data['p_change']/100) * stock_data['position'] + 1.0).cumprod()
    initial_idx = 1
    #initial_idx = stock_data.iloc[0]['close'] / (1 + (stock_data.iloc[0]['p_change']/100))
    stock_data['Cash_index'] *= initial_idx
    print 'The KDJ backtest method signal:'
    Make_decision(stock_data)
    # Use adjusted prices to compute returns over the next few trading days
    for n in [1, 2, 3, 5, 10, 20]:
        stock_data['CP_next_'+str(n)+'_days'] =(stock_data['close'].shift(-1*n) / stock_data['close'] - 1.0)*100
        #stock_data.dropna(how='any', inplace=True)  # drop all rows containing NaN
        # ========== Write the computed data to a csv file - note: fill in the output path on your own machine
        ## Collect the rows where a buy signal appears
        dd=stock_data[stock_data['Signal']==1]
    print_return_next_n_day(dd)
    codedir='./output/A/'+code+os.sep
    if not os.path.exists(codedir):
        os.mkdir(codedir)
    # ========== Compute the yearly return of the index and of the strategy
    stock_data['p_change_KDJV2'] = (stock_data['p_change']) * stock_data['position']
    year_rtn = stock_data.set_index('date')[['p_change', 'p_change_KDJV2']].\
               resample('A', how=lambda x: (x/100+1.0).prod() - 1.0) * 100
    year_rtn.to_csv(codedir+'kdjv2_year.csv', encoding='gbk')
    stock_data.to_csv(codedir+'kdjv2.csv',encoding='gbk',index=False)
    stock_data.tail(20).to_csv(codedir+'kdjv2_Signal.csv',encoding='gbk',index=False)
    print 'KDJV2 trading signal for share %s:' % code
    print stock_data.tail(5)
    return
Example #25
def kdj(stock):
    low_list = pd.rolling_min(stock.low, 9)
    low_list.fillna(value = pd.expanding_min(stock.low), inplace = True)
    high_list = pd.rolling_max(stock.high, 9)
    high_list.fillna(value = pd.expanding_max(stock.high), inplace = True)
    rsv = (stock.close - low_list) / (high_list - low_list) * 100
    k = pd.ewma(rsv, com =2)
    d = pd.ewma(k, com =2)
    j = 3 * k[2:] - 2 *d

    return k, d, j
Example #26
def kdj(stock):
    low_list = pd.rolling_min(stock.low, 9)
    low_list.fillna(value=pd.expanding_min(stock.low), inplace=True)
    high_list = pd.rolling_max(stock.high, 9)
    high_list.fillna(value=pd.expanding_max(stock.high), inplace=True)
    rsv = (stock.close - low_list) / (high_list - low_list) * 100
    k = pd.ewma(rsv, com=2)
    d = pd.ewma(k, com=2)
    j = 3 * k[2:] - 2 * d

    return k, d, j
Example #27
def max_drawdown(date_line, capital_line):
    df = pd.DataFrame({'date': date_line, 'capital': capital_line})
    df.sort('date', inplace=True)
    df.reset_index(drop=True, inplace=True)
    df['max2here'] = pd.expanding_max(df['capital'])
    df['dd2here'] = df['max2here'] - df['capital']
    temp = df.sort('dd2here', ascending=False).iloc[0][['date', 'dd2here']]
    max_dd = temp['dd2here']
    end_date = temp['date']
    df = df[df['date'] <= end_date]
    start_date = df.sort('capital', ascending=False).iloc[0]['date']
    return max_dd  # 'Maximum drawdown: %f, start date: %s, end date: %s' % (max_dd, start_date, end_date)
Example #28
    def calculate_ret_stats(self, returns_df, ann_factor):
        tsc = TimeSeriesCalcs()

        self._rets = returns_df.mean(axis=0) * ann_factor
        self._vol = returns_df.std(axis=0) * math.sqrt(ann_factor)
        self._inforatio = self._rets / self._vol

        index_df = tsc.create_mult_index(returns_df)
        max2here = pandas.expanding_max(index_df)
        dd2here = index_df / max2here - 1

        self._dd = dd2here.min()
Example #29
    def calculate_ret_stats(self, returns_df, ann_factor):
        tsc = TimeSeriesCalcs()

        self._rets = returns_df.mean(axis=0) * ann_factor
        self._vol = returns_df.std(axis=0) * math.sqrt(ann_factor)
        self._inforatio = self._rets / self._vol

        index_df = tsc.create_mult_index(returns_df)
        max2here = pandas.expanding_max(index_df)
        dd2here = index_df / max2here - 1

        self._dd = dd2here.min()
Example #30
def maxdd(df):
    df['max2here'] = pd.expanding_max(df['worth'])
    df['dd2here'] = df['worth'] / df['max2here'] - 1.0
    max_dd = df.sort_values(by='dd2here').ix[0, 'dd2here']
    end_date = df.sort_values(by='dd2here').index[0]
    end_date_str = end_date.strftime('%Y-%m-%d')
    df = df[df.index < df.sort_values(by='dd2here').index[0]]
    start_date = df.sort_values(by='worth', ascending=False).index[0]
    start_date_str = start_date.strftime('%Y-%m-%d')
    lasts_days = len(df.loc[start_date:end_date])
    print max_dd
    print 'max drawdown is %f, from %s to %s, lasting %d days' % (
        max_dd, start_date_str, end_date_str, lasts_days)
Example #31
File: perf.py Project: ychaim/tia
def drawdowns(returns, geometric=True):
    """
    compute the drawdown series for the period return series
    return: periodic return Series or DataFrame
    """
    wealth = 1. + returns_cumulative(returns, geometric=geometric, expanding=True)
    values = wealth.values
    if values.ndim == 2:
        ncols = values.shape[-1]
        values = np.vstack(([1.] * ncols, values))
        maxwealth = pd.expanding_max(values)[1:]
        dds = wealth / maxwealth - 1.
        dds[dds > 0] = 0  # Can happen if first returns are positive
        return dds
    elif values.ndim == 1:
        values = np.hstack(([1.], values))
        maxwealth = pd.expanding_max(values)[1:]
        dds = wealth / maxwealth - 1.
        dds[dds > 0] = 0  # Can happen if first returns are positive
        return dds
    else:
        raise ValueError('unable to process array with %s dimensions' % values.ndim)
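A hedged sketch of the same drawdown series with the current API; cummax works on both Series and DataFrame, so the ndim branching is unnecessary. Geometric compounding is assumed here in place of the project's returns_cumulative helper:

import pandas as pd

def drawdowns_modern(returns):
    wealth = (1.0 + returns).cumprod()            # assumed geometric wealth curve
    peak = wealth.cummax().clip(lower=1.0)        # running peak, floored at the initial wealth of 1
    return (wealth / peak - 1.0).clip(upper=0.0)  # clip stray positives from early positive returns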
Example #32
def get_kdj(stock_data):
    # KDJ calculation
    low_list = pd.rolling_min(stock_data['low'], 9)
    low_list.fillna(value=pd.expanding_min(stock_data['low']), inplace=True)
    high_list = pd.rolling_max(stock_data['high'], 9)
    high_list.fillna(value=pd.expanding_max(stock_data['high']), inplace=True)
    rsv = (stock_data['close'] - low_list) / (high_list - low_list) * 100
    # Add the KDJ data to stock_data
    stock_data['kdj_k'] = round(pd.ewma(rsv, com=2), 2)
    stock_data['kdj_d'] = round(pd.ewma(stock_data['kdj_k'], com=2), 2)
    stock_data['kdj_j'] = round(
        3 * stock_data['kdj_k'] - 2 * stock_data['kdj_d'], 2)
    return stock_data
Example #33
def max_dd(ser):
    """max2here is the 'maximum to date' for each date in the series
    dd2here is the difference between the value at that date and the maximum up
    to that date
    max_indexer picks out the relevant maximum date
    """
    max2here = pd.expanding_max(ser)
    dd2here = ser - max2here
    mindate = dd2here[dd2here==dd2here.min()].index[0]
    maxindexer = max2here[:mindate].max()
    maxvalue = max2here[max2here==maxindexer].iloc[0,]
    maxdate = max2here[max2here==maxindexer].index[0]
    dd = dd2here.min()/maxvalue
    return dd
Example #34
def Max_drawdown(trade_date, sum_profit):

    data = {'trade_date': trade_date, 'sum_profit': sum_profit}
    dataframe = pd.DataFrame(data)
    dataframe['max2here'] = pd.expanding_max(dataframe['sum_profit'])
    dataframe['drawdown'] = dataframe['sum_profit'] - dataframe['max2here']
    temp = dataframe.sort_values(by='drawdown').iloc[0]
    max_drawdown = temp.drawdown
    max_drawdown_enddate = temp.trade_date.strftime('%Y-%m-%d')
    sub_dataframe = dataframe[dataframe.trade_date <= max_drawdown_enddate]
    max_drawdown_startdate = sub_dataframe.sort_values(
        by='sum_profit',
        ascending=False).iloc[0]['trade_date'].strftime('%Y-%m-%d')

    return max_drawdown, max_drawdown_startdate, max_drawdown_enddate
Example #35
def test_max_dd():
	df = normalize.parse_file(argv[1])
	ser = df[u'收盘']
	df['max2here'] = pd.expanding_max(ser)
	df['dd2here'] = ser - df['max2here']
	df['ddpct'] = df['dd2here'] / df['max2here']
	#df.to_excel('399300maxdd.xls',encoding='gbk')
	print '({data:['
	for i,row in df.iterrows():
		date=row[u'日期'].lstrip()
		date = date.replace('/', '-')
		print '{symbol:"399300",d:"'+date+'",h:'+'{:.2f}'.format(-row['ddpct']*100)+'},'
	
	(row_count, col_count) = df.shape
	print '],total:"'+str(row_count)+'"})'
Example #36
def drawdowns(returns, geometric=True):
    """
    compute the drawdown series for the period return series
    return: periodic return Series or DataFrame
    """
    wealth = 1.0 + returns_cumulative(
        returns, geometric=geometric, expanding=True)
    values = wealth.values
    if values.ndim == 2:
        ncols = values.shape[-1]
        values = np.vstack(([1.0] * ncols, values))
        maxwealth = pd.expanding_max(values)[1:]
        dds = wealth / maxwealth - 1.0
        dds[dds > 0] = 0  # Can happen if first returns are positive
        return dds
    elif values.ndim == 1:
        values = np.hstack(([1.0], values))
        maxwealth = pd.expanding_max(values)[1:]
        dds = wealth / maxwealth - 1.0
        dds[dds > 0] = 0  # Can happen if first returns are positive
        return dds
    else:
        raise ValueError("unable to process array with %s dimensions" %
                         values.ndim)
Example #37
def calKDJ(data, N=0, M=0):
    if N == 0:
        N = 9
    if M == 0:
        M = 2
    low_list = pd.rolling_min(data['low'], N)
    low_list.fillna(value=pd.expanding_min(data['low']), inplace=True)
    high_list = pd.rolling_max(data['high'], N)
    high_list.fillna(value=pd.expanding_max(data['high']), inplace=True)
    rsv = (data['close'] - low_list) / (high_list - low_list) * 100
    KDJ_K = pd.ewma(rsv, com=M)
    KDJ_D = pd.ewma(KDJ_K, com=M)
    KDJ_J = 3 * KDJ_K - 2 * KDJ_D
    #kdjdata.fillna(0, inplace=True)
    return low_list, high_list, rsv, KDJ_K, KDJ_D, KDJ_J
Example #38
def max_drawdown(date_line, capital_line):
    """
    :param date_line: date series
    :param capital_line: account value series
    :return: the maximum drawdown
    """
    # Combine the two series into one DataFrame
    df = pd.DataFrame({'date': date_line, 'capital': capital_line})

    df['max2here'] = pd.expanding_max(df['capital'])  # running maximum of the account value so far
    df['dd2here'] = df['capital'] / df['max2here'] - 1  # drawdown on each day
    #  Find the maximum drawdown and its end date
    temp = df.sort_values(by='dd2here').iloc[0][['date', 'dd2here']]
    max_dd = temp['dd2here']

    return max_dd
Example #39
def max_drawdown(date_line, capital_line):

    df = pd.DataFrame({'date': date_line, 'capital': capital_line})
    df['max2here'] = pd.expanding_max(df['capital'])  # running maximum of capital
    df['dd2here'] = df['capital'] / df['max2here'] - 1.0  # daily drawdown

    # find the maximum drawdown and its end date
    temp = df.sort_values(by='dd2here').iloc[0][['date', 'dd2here']]
    max_dd = temp['dd2here']
    end_date = temp['date'].strftime('%Y-%m-%d')

    # find the start date
    df = df[df['date'] < end_date]
    start_date = df.sort_values(
        by='capital', ascending=False).iloc[0]['date'].strftime('%Y-%m-%d')
    print 'max draw down: %f, start date: %s, end date: %s' % (
        max_dd, start_date, end_date)
Example #40
def trimmed_frame(tseries_grp,
                  video_meta_rec,
                  trim_meta=True,
                  *args,
                  **kwargs):
    """
    Trim out nonsense steps from the data,
    """
    tseries_grp.sort('run_time', inplace=True)
    view_count = pd.expanding_max(
        tseries_grp['view_count'].astype('float')).astype('int')
    mask = np.ediff1d(view_count.values, to_begin=1) > 0
    # watch out for duplicate metadata
    if trim_meta:
        if len(video_meta_rec.shape) > 1:
            video_meta_rec = video_meta_rec.iloc[0, :]

    return tseries_grp.iloc[mask].dropna(), video_meta_rec.dropna()
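A hedged sketch of the same monotone view-count mask with current pandas (sort_values replaces the removed DataFrame.sort, cummax replaces pd.expanding_max; monotone_view_mask is a hypothetical name):

import numpy as np

def monotone_view_mask(tseries_grp):
    tseries_grp = tseries_grp.sort_values('run_time')
    view_count = tseries_grp['view_count'].astype('float').cummax().astype('int')  # running max view count
    mask = np.ediff1d(view_count.values, to_begin=1) > 0                           # keep rows where it increased
    return tseries_grp, mask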
Example #41
def get_kdj_history(code):
    stock_data = ts.get_k_data(code)
    # kdj
    low_list = pd.rolling_min(stock_data['low'], 9)
    low_list.fillna(value=pd.expanding_min(stock_data['low']), inplace=True)
    high_list = pd.rolling_max(stock_data['high'], 9)
    high_list.fillna(value=pd.expanding_max(stock_data['high']), inplace=True)
    rsv = (stock_data['close'] - low_list) / (high_list - low_list) * 100
    stock_data['kdj_k'] = round(pd.ewma(rsv, com=2), 2)
    stock_data['kdj_d'] = round(pd.ewma(stock_data['kdj_k'], com=2), 2)
    stock_data['kdj_j'] = round(3 * stock_data['kdj_k'] - 2 * stock_data['kdj_d'], 2)
    # Compare today's J value with yesterday's
    kdj_j = stock_data['kdj_j']
    if (kdj_j.size < 6):
        stock_data = stock_data.tail(1)
        stock_data['kdj_k'] = 0
        stock_data['kdj_ok'] = 0
        return stock_data
    yesterdayJ = kdj_j[kdj_j.size - 6]
    todayJ = kdj_j[kdj_j.size - 5]
    kdj_k = stock_data['kdj_k']
    todayK = kdj_k[kdj_k.size - 5]
    # Only continue if today's J value is greater than yesterday's
    if (todayJ > yesterdayJ and todayK < float(20)):
        # Compute the 5-day price percentage
        stock_data_copy = stock_data[:]
        stock_data_copy = stock_data_copy.tail(5)
        stock_data_copy['indexNum'] = [1, 2, 3, 4, 5]
        stock_data_copy = stock_data_copy.sort(columns='high')
        stock_data_copy = stock_data_copy.tail(1)
        maxValue = stock_data_copy.high.values
        maxDate = stock_data_copy.date.values
        stock_data = stock_data.tail(5)
        stock_data = stock_data.head(1)
        stock_data['kdj_ok'] = 1
        highPercent = maxValue / stock_data.close.values[0]
        stock_data['highPercent'] = (round(highPercent, 3) * 100) - 100
        stock_data['highDate'] = maxDate
        stock_data['highDays'] = stock_data_copy.indexNum.values
    else:
        stock_data = stock_data.tail(1)
        stock_data['kdj_ok'] = 0
    return stock_data
Example #42
 def expanding_smoother(self, data, stype='rolling_mean', min_periods=None, freq=None):
     """
     
     Perform an expanding smoothing on the data; for complete help refer to http://pandas.pydata.org/pandas-docs/dev/computation.html
     
     :param data: pandas dataframe input data
     :param stype: smoothing type
     :param min_periods: periods
     :param freq: frequency
     smoothing types:
     expanding_count	Number of non-null observations
     expanding_sum	Sum of values
     expanding_mean	Mean of values
     expanding_median	Arithmetic median of values
     expanding_min	Minimum
     expanding_max	Maximum
     expanding_std	Unbiased standard deviation
     expanding_var	Unbiased variance
     expanding_skew	Unbiased skewness (3rd moment)
     expanding_kurt	Unbiased kurtosis (4th moment)
     
     """
     if stype == 'count':
         newy = pd.expanding_count(data, min_periods=min_periods, freq=freq)
     if stype == 'sum':
         newy = pd.expanding_sum(data, min_periods=min_periods, freq=freq)
     if stype == 'mean':
         newy = pd.expanding_mean(data, min_periods=min_periods, freq=freq)
     if stype == 'median':
         newy = pd.expanding_median(data, min_periods=min_periods, freq=freq)
     if stype == 'min':
         newy = pd.expanding_min(data, min_periods=min_periods, freq=freq)
     if stype == 'max':
         newy = pd.expanding_max(data, min_periods=min_periods, freq=freq)
     if stype == 'std':
         newy = pd.expanding_std(data, min_periods=min_periods, freq=freq)
     if stype == 'var':
         newy = pd.expanding_var(data, min_periods=min_periods, freq=freq)
     if stype == 'skew':
         newy = pd.expanding_skew(data, min_periods=min_periods, freq=freq)
     if stype == 'kurt':
         newy = pd.expanding_kurt(data, min_periods=min_periods, freq=freq)
     return newy
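In current pandas the expanding_* module functions no longer exist; a hedged sketch of the same dispatch written once against the Expanding object (expanding_smoother_modern is a hypothetical name; the old freq argument has no direct equivalent and is dropped here):

import pandas as pd

def expanding_smoother_modern(data, stype='mean', min_periods=None):
    exp = data.expanding(min_periods=min_periods or 1)
    # valid stype values: 'count', 'sum', 'mean', 'median', 'min', 'max', 'std', 'var', 'skew', 'kurt'
    return getattr(exp, stype)()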
Example #43
def maximum_drawdown(series):
    """
    https://en.wikipedia.org/wiki/Drawdown_(economics)
    the peak may be zero
    e.g.
    s= [0, -0.4, -0.2, 0.2]
    peak = [0, 0, 0, 0.2]
    therefore we don't provide relative percentage of mdd

    Parameters:
    ---------------
    series: list or numpy.array, ROI series
    """
    s = np.asarray(series)
    peak = pd.expanding_max(s)

    # absolute drawdown
    ad = np.maximum(peak - s, 0)
    mad = np.max(ad)

    return mad
Example #44
def max_drawdown(date_line, capital_line):
    """
    :param date_line: date series
    :param capital_line: account value series
    :return: prints the maximum drawdown together with its start and end dates
    """
    # Combine the two series into one DataFrame
    df = pd.DataFrame({'date': date_line, 'capital': capital_line})

    df['max2here'] = pd.expanding_max(df['capital'])  # running maximum of the account value so far
    df['dd2here'] = df['capital'] / df['max2here'] - 1  # drawdown on each day

    # Find the maximum drawdown and its end date
    temp = df.sort_values(by='dd2here').iloc[0][['date', 'dd2here']]
    max_dd = temp['dd2here']
    end_date = temp['date'].strftime('%Y-%m-%d')

    # Find the start date
    df = df[df['date'] <= end_date]
    start_date = df.sort_values(by='capital', ascending=False).iloc[0]['date'].strftime('%Y-%m-%d')

    print 'Maximum drawdown: %f, start date: %s, end date: %s' % (max_dd, start_date, end_date)
Example #45
def indicator_KDJ(stock_data):  # KDJ indicator calculation
    # Compute the KDJ indicator
    low_list = pd.rolling_min(stock_data['low'], 9)  # 9-day window; the first 8 values are NaN
    low_list.fillna(value=pd.expanding_min(stock_data['low']), inplace=True)  # replace those NaN values with the expanding-window minimum
    high_list = pd.rolling_max(stock_data['high'], 9)
    high_list.fillna(value=pd.expanding_max(stock_data['high']), inplace=True)
    rsv = (stock_data['close'] - low_list) / (high_list - low_list) * 100
    stock_data['KDJ_K'] = pd.ewma(rsv, com=2, adjust=False)
    stock_data['KDJ_D'] = pd.ewma(stock_data['KDJ_K'], com=2, adjust=False)
    stock_data['KDJ_J'] = 3 * stock_data['KDJ_K'] - 2 * stock_data['KDJ_D']
    # Identify KDJ golden crosses and death crosses
    stock_data['KDJ_金叉死叉'] = ''
    kdj_position = stock_data['KDJ_K'] > stock_data['KDJ_D']
    stock_data.loc[kdj_position[(kdj_position == True) & (kdj_position.shift() == False)].index, 'KDJ_金叉死叉'] = '金叉'  # K<D yesterday, K>D today
    stock_data.loc[kdj_position[(kdj_position == False) & (kdj_position.shift() == True)].index, 'KDJ_金叉死叉'] = '死叉'
    # Use adjusted prices to compute returns over the next few trading days
    for n in [1, 2, 3, 5, 10, 20]:
        stock_data['接下来'+str(n)+'个交易日涨跌幅'] = stock_data['close'].shift(-1*n) / stock_data['close'] - 1.0
    stock_data.dropna(how='any', inplace=True)  # drop all rows containing NaN
    # Keep only the KDJ golden-cross rows (these are later merged into all_stock)
    stock_data = stock_data[(stock_data['KDJ_金叉死叉'] == '金叉')]
    if not stock_data.empty:
        return stock_data
Example #46
    def computeRunningMinMaxSignals(self, df, start_dt, spike_ticks, spike_5s_pred):
        # print start_dt
        pre_event_snapshot_dt = start_dt - timedelta(seconds=2.5)
        pre_event_snapshot_loc = df.index.get_loc(pre_event_snapshot_dt)

        # print df.ix[self.cur_loc, 'time'], len(df.ix[pre_event_snapshot_loc : self.cur_loc+1, 'microprice_ema_200ms'])

        max_to_here = pd.expanding_max(df.ix[pre_event_snapshot_loc : self.cur_loc+1, 'microprice_ema_200ms'])[-1]
        min_to_here = pd.expanding_min(df.ix[pre_event_snapshot_loc : self.cur_loc+1, 'microprice_ema_200ms'])[-1]

        max_to_here_ticks = self.priceTicks(max_to_here)
        min_to_here_ticks = self.priceTicks(min_to_here)

        if (spike_ticks + spike_5s_pred)/2.0 >= 0:
            max_disl = max_to_here_ticks
        else:
            max_disl = min_to_here_ticks

        #df.ix[self.cur_loc, 'from_max_disl_ema_200ms_ticks'] = computeRelativeDislocation(df.ix[self.cur_loc, 'microprice_ema_200ms_ticks'], max_disl)
        df.ix[self.cur_loc, 'max_disl_ema_200ms_ticks'] = max_disl
        #df[self.cur_loc]['max_disl_ema_200ms_ticks'] = max_disl

        return df
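A hedged sketch of the window extremes with positional .iloc slicing, since .ix was removed; the trailing [-1] of an expanding max over a fixed window is just the window maximum (window_extremes is a hypothetical helper):

def window_extremes(prices, start_loc, end_loc):
    # prices: e.g. df['microprice_ema_200ms']; the slice is positional and includes end_loc,
    # matching the integer .ix slice in the method above
    window = prices.iloc[start_loc:end_loc + 1]
    return window.max(), window.min()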
Example #47
def dd_stats(returns, N=252):
 #initialize
 equitySeries = (returns +1).cumprod()
 equityHighWaterMarkSeries = pd.expanding_max(equitySeries)
 ddPercentSeries = (equitySeries - equityHighWaterMarkSeries)/equityHighWaterMarkSeries
 maxDD = ddPercentSeries.min()
 ddDays = []
 DDs = []
 count = 0
 ddStart= 0

 # calc dd days
 for i,k in enumerate(equityHighWaterMarkSeries):
  if i > 0:
   if equityHighWaterMarkSeries[i-1]==equityHighWaterMarkSeries[i]: # in DD
    if count ==0: #DD Started
     ddStart = i-1
    count +=1
    if i == len(equityHighWaterMarkSeries)-1: #still in drawdown at the end of series
     ddDays.append(count)
     DDs.append(min(ddPercentSeries[ddStart:i]))
   else: #new hwm, DDEnded
    if count != 0:
     ddDays.append(count)
     DDs.append(min(ddPercentSeries[ddStart:i]))
     #ddStart =0
    count =0
    
 total_return = equitySeries[-1] - 1.0
 ccr_total_return = np.log(equitySeries[len(equitySeries)-1])-np.log(equitySeries[1])
 annualised_mar = np.sqrt(N) * returns.mean()/abs(maxDD)
 total_return_mar = ccr_total_return / maxDD
 cagr = equitySeries[-1]**(1.0/((returns.index[-1]-returns.index[0]).days/365.25))-1.0
 
 return annualised_mar,  total_return_mar, maxDD,  max(ddDays), \
  np.mean(ddDays),  len(DDs),  np.mean(DDs),  equitySeries,  ddPercentSeries, \
  ccr_total_return, total_return, cagr #12 stats
Example #48
def maximum_drawdown_raw(x):
    return (x - pd.expanding_max(x)).min()
Example #49
# Keep only the required fields: 'date', 'high', 'low', 'close', 'change'
#index_data = index_data[['date', 'high', 'low', 'close', 'change']]
# Sort the data by trading date in ascending order
#index_data.sort('date', inplace=True)


# ========== Compute the buy and sell points of the turtle trading rule
# Two parameters of the turtle rule: buy when the close exceeds the highest high of the last N1 days, sell when the close falls below the lowest low of the last N2 days
# Both parameters can be tuned, but usually N1 > N2
N1 = 20
N2 = 10

# Use rolling_max to compute the highest high of the last N1 trading days
index_data['最近N1个交易日的最高点'] =  pd.rolling_max(index_data['high'], N1)
# For rows within the first N1 days after listing, use the highest high since listing
index_data['最近N1个交易日的最高点'].fillna(value=pd.expanding_max(index_data['high']), inplace=True)

# Compute the lowest low of the last N2 trading days in the same way
index_data['最近N2个交易日的最低点'] =  pd.rolling_min(index_data['low'], N2)
index_data['最近N2个交易日的最低点'].fillna(value=pd.expanding_min(index_data['low']), inplace=True)

# When today's close is greater than yesterday's N1-day high, set the close signal to 1
buy_index = index_data[index_data['close'] > index_data['最近N1个交易日的最高点'].shift(1)].index
index_data.loc[buy_index, '收盘发出的信号'] = 1
# When today's close is less than yesterday's N2-day low, set the close signal to 0
sell_index = index_data[index_data['close'] < index_data['最近N2个交易日的最低点'].shift(1)].index
index_data.loc[sell_index, '收盘发出的信号'] = 0

# Daily position: 1 when holding the index that day, 0 when not holding it
index_data['当天的仓位'] = index_data['收盘发出的信号'].shift(1)
index_data['当天的仓位'].fillna(method='ffill', inplace=True)
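A hedged sketch of the same N1/N2 channel with the current API and hypothetical English column names; rolling(..., min_periods=1) already falls back to the expanding high/low on the first rows, so the separate fillna step is unnecessary:

import pandas as pd

def turtle_signals(index_data, n1=20, n2=10):
    out = index_data.copy()
    out['n1_high'] = out['high'].rolling(n1, min_periods=1).max()
    out['n2_low'] = out['low'].rolling(n2, min_periods=1).min()
    signal = pd.Series(float('nan'), index=out.index)
    signal[out['close'] > out['n1_high'].shift(1)] = 1.0  # close above yesterday's N1-day high -> long
    signal[out['close'] < out['n2_low'].shift(1)] = 0.0   # close below yesterday's N2-day low -> flat
    out['position'] = signal.shift(1).ffill()             # yesterday's signal becomes today's position
    return out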
Example #50
index_data = index_data[['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']]
index_data.sort('Date', inplace=True)

'''Compute the buy and sell points of the turtle trading rule
Two parameters: buy when the close exceeds the highest high of the last N1 days, sell when the close falls below the lowest low of the last N2 days
Both parameters can be tuned, but usually N1 > N2
'''
N1 = 20
N2 = 10

'''
Use rolling_max to compute the highest high of the last N1 trading days (column MaxIn_N1);
for rows within the first N1 days after listing, use the highest high since listing
'''
index_data['MaxIn_N1'] =  pandas.rolling_max(index_data['High'], N1)
index_data['MaxIn_N1'].fillna(value=pandas.expanding_max(index_data['High']), inplace=True)
'''
Use rolling_min to compute the lowest low of the last N2 trading days (column MinIn_N2)
'''
index_data['MinIn_N2'] =  pandas.rolling_min(index_data['Low'], N2)
index_data['MinIn_N2'].fillna(value=pandas.expanding_min(index_data['Low']), inplace=True)

# When today's Close is greater than yesterday's MaxIn_N1, set the close signal to 1
buy_index = index_data[index_data['Close'] > index_data['MaxIn_N1'].shift(1)].index
index_data.loc[buy_index, '收盘发出的信号'] = 1

# When today's Close is less than yesterday's MinIn_N2, set the close signal to 0
sell_index = index_data[index_data['Close'] < index_data['MinIn_N2'].shift(1)].index
index_data.loc[sell_index, '收盘发出的信号'] = 0

Example #51
def maximum_drawdown_rate(x):
    x_min = x.min() + 0.0000000001
    y = (x.values + x_min).cumprod() - x_min
    return ((y / pd.expanding_max(y)) - 1).min()
Example #52
def drawdown(returns):
    equity = cumret(returns)
    emax = pd.expanding_max(equity)
    return (equity - emax) * -1
Example #53
def ulcer(eqd):
    eq = eqd.cumsum()
    return (((eq - pandas.expanding_max(eq)) ** 2).sum() / len(eq)) ** 0.5
Example #54

start = lambda eqd: eqd.index[0]
end = lambda eqd: eqd.index[-1]
days = lambda eqd: (eqd.index[-1] - eqd.index[0]).days
trades_per_month = lambda eqd: eqd.groupby(
    lambda x: (x.year, x.month)
).apply(lambda x: x[x != 0].count()).mean()
profit = lambda eqd: eqd.sum()
average = lambda eqd: eqd[eqd != 0].mean()
average_gain = lambda eqd: eqd[eqd > 0].mean()
average_loss = lambda eqd: eqd[eqd < 0].mean()
winrate = lambda eqd: float(sum(eqd > 0)) / len(eqd)
payoff = lambda eqd: eqd[eqd > 0].mean() / -eqd[eqd < 0].mean()
pf = PF = lambda eqd: abs(eqd[eqd > 0].sum() / eqd[eqd < 0].sum())
maxdd = lambda eqd: (eqd.cumsum() - pandas.expanding_max(eqd.cumsum())).abs().max()
rf = RF = lambda eqd: eqd.sum() / maxdd(eqd)
trades = lambda eqd: len(eqd[eqd != 0])
_days = lambda eqd: eqd.resample('D', how='sum').dropna()


def sharpe(eqd):
    ''' daily sharpe ratio '''
    d = _days(eqd)
    return (d.mean() / d.std()) * (252 ** 0.5)


def sortino(eqd):
    ''' daily sortino ratio '''
    d = _days(eqd)
    return d.mean() / d[d < 0].std()
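Hedged modern forms of the two deprecated calls used in the lambdas above, pandas.expanding_max on the cumulative equity and resample('D', how='sum') (maxdd_modern and daily_sums are hypothetical names):

import pandas as pd

def maxdd_modern(eqd):
    equity = eqd.cumsum()
    return (equity - equity.cummax()).abs().max()  # deepest drop below the running equity peak

def daily_sums(eqd):
    return eqd.resample('D').sum().dropna()        # daily P&L, as _days above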
Example #55
def time_in_drawdown(returns):
    equity = cumret(returns)
    emax = pd.expanding_max(equity)
    block = (emax.shift(1) != emax).astype(int).cumsum()
    block_by_values = block.groupby(by=block.__getitem__)
    return block_by_values.transform(lambda x: range(1, len(x) + 1)) - 1
Example #56
File: pl.py Project: ychaim/tia
 def drawdowns(self):
     ltd = self.pl.cumsum()
     maxpl = pd.expanding_max(ltd)
     maxpl[maxpl < 0] = 0
     dd = ltd - maxpl
     return dd