    def get_economic_event_ret_over_custom_event_day(self, data_frame_in, event_dates, name, event, start, end, lagged = False,
                                                     NYC_cutoff = 10):

        filter = Filter()
        event_dates = filter.filter_time_series_by_date(start, end, event_dates)

        data_frame = data_frame_in.copy(deep=True) # because we change the dates!

        timezone = Timezone()
        calendar = Calendar()

        bday = CustomBusinessDay(weekmask='Mon Tue Wed Thu Fri')

        event_dates_nyc = timezone.convert_index_from_UTC_to_new_york_time(event_dates)
        average_hour_nyc = numpy.average(event_dates_nyc.index.hour)

        event_dates = calendar.floor_date(event_dates)

        # realised is traditionally on later day eg. 3rd Jan realised ON is 2nd-3rd Jan realised
        # so if Fed meeting is on 2nd Jan later, then we need realised labelled on 3rd (so minus a day)
        # implied expires on next day eg. 3rd Jan implied ON is 3rd-4th Jan implied

        # TODO smarter way of adjusting dates, as sometimes events can be before/after 10am NY cut
        if (lagged and average_hour_nyc >= NYC_cutoff):
            data_frame.index = data_frame.index - bday
        elif (not lagged and average_hour_nyc < NYC_cutoff): # ie. implied
            data_frame.index = data_frame.index + bday

        # set as New York time and select only those ON vols at the 10am NY cut just before the event
        data_frame_events = data_frame.reindex(event_dates.index)
        data_frame_events.columns = data_frame.columns.values + '-' + name + ' ' + event

        return data_frame_events
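
    # Example (a sketch with hypothetical names; es is an EventStudy-style instance, vol_df
    # an overnight vol DataFrame indexed in UTC, fomc_dates a DataFrame of event timestamps):
    #
    #   on_vols = es.get_economic_event_ret_over_custom_event_day(
    #       vol_df, fomc_dates, 'USD', 'FOMC Rate Decision', start, end, lagged=False)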
    def compare_strategy_vs_benchmark(self, br, strategy_df, benchmark_df):
        """Compares the trading strategy we are backtesting against a benchmark

        Parameters
        ----------
        br : BacktestRequest
            Parameters for backtest such as start and finish dates

        strategy_df : pandas.DataFrame
            Strategy time series

        benchmark_df : pandas.DataFrame
            Benchmark time series
        """

        include_benchmark = False
        calc_stats = False

        if hasattr(br, 'include_benchmark'): include_benchmark = br.include_benchmark
        if hasattr(br, 'calc_stats'): calc_stats = br.calc_stats

        if include_benchmark:
            ret_stats = RetStats()
            risk_engine = RiskEngine()
            filter = Filter()
            calculations = Calculations()

            # align strategy time series with that of benchmark
            strategy_df, benchmark_df = strategy_df.align(benchmark_df, join='left', axis = 0)

            # if necessary apply vol target to benchmark (to make it comparable with strategy)
            if hasattr(br, 'portfolio_vol_adjust'):
                if br.portfolio_vol_adjust is True:
                    benchmark_df = risk_engine.calculate_vol_adjusted_index_from_prices(benchmark_df, br = br)

            # calculate return statistics for the benchmark (note: when strategy and benchmark
            # are sampled at different frequencies, the benchmark vol may be underrepresented)
            benchmark_df = benchmark_df.ffill()
            ret_stats.calculate_ret_stats_from_prices(benchmark_df, br.ann_factor)

            if calc_stats:
                benchmark_df.columns = ret_stats.summary()

            # realign strategy & benchmark
            strategy_benchmark_df = strategy_df.join(benchmark_df, how='inner')
            strategy_benchmark_df = strategy_benchmark_df.ffill()

            strategy_benchmark_df = filter.filter_time_series_by_date(br.plot_start, br.finish_date, strategy_benchmark_df)
            strategy_benchmark_df = calculations.create_mult_index_from_prices(strategy_benchmark_df)

            self._benchmark_pnl = benchmark_df
            self._benchmark_ret_stats = ret_stats

            return strategy_benchmark_df

        return strategy_df
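
    # Example (a sketch; tm is a hypothetical instance of the enclosing class, br a
    # BacktestRequest with include_benchmark/calc_stats set, both inputs price series):
    #
    #   strategy_vs_bench_df = tm.compare_strategy_vs_benchmark(br, strategy_df, benchmark_df)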
    def get_intraday_moves_over_custom_event(self, data_frame_rets, ef_time_frame, vol=False,
                                             minute_start = 5, mins = 3 * 60, min_offset = 0 , create_index = False,
                                             resample = False, freq = 'minutes'):

        filter = Filter()

        ef_time_frame = filter.filter_time_series_by_date(data_frame_rets.index[0], data_frame_rets.index[-1], ef_time_frame)
        ef_time = ef_time_frame.index

        if freq == 'minutes':
            ef_time_start = ef_time - timedelta(minutes = minute_start)
            ef_time_end = ef_time + timedelta(minutes = mins)
            ann_factor = 252 * 1440
        elif freq == 'days':
            ef_time = ef_time_frame.index.normalize()
            ef_time_start = ef_time - timedelta(days = minute_start)
            ef_time_end = ef_time + timedelta(days = mins)
            ann_factor = 252

        ords = list(range(-minute_start + min_offset, mins + min_offset))

        # all data needs to be equally spaced
        if resample:

            # make sure time series is properly sampled at 1 min intervals
            data_frame_rets = data_frame_rets.resample('1min').last()
            data_frame_rets = data_frame_rets.fillna(value = 0)
            data_frame_rets = filter.remove_out_FX_out_of_hours(data_frame_rets)

        data_frame_rets['Ind'] = numpy.nan

        start_index = data_frame_rets.index.searchsorted(ef_time_start)
        finish_index = data_frame_rets.index.searchsorted(ef_time_end)

        # not all observation windows will be same length (eg. last one?)

        # fill the indices which represent minutes
        # TODO vectorise this!
        ind_col = data_frame_rets.columns.get_loc('Ind')

        for i in range(0, len(ef_time_frame.index)):
            try:
                data_frame_rets.iloc[start_index[i]:finish_index[i], ind_col] = ords
            except Exception:
                data_frame_rets.iloc[start_index[i]:finish_index[i], ind_col] = ords[0:(finish_index[i] - start_index[i])]

        # set the release dates
        data_frame_rets.loc[data_frame_rets.index[start_index], 'Rel'] = ef_time               # set entry points
        data_frame_rets.loc[data_frame_rets.index[finish_index + 1], 'Rel'] = numpy.zeros(len(start_index))  # set exit points
        data_frame_rets['Rel'] = data_frame_rets['Rel'].ffill()                               # fill down signals

        data_frame_rets = data_frame_rets[pandas.notnull(data_frame_rets['Ind'])]               # get rid of other

        data_frame = data_frame_rets.pivot(index='Ind',
                                           columns='Rel', values=data_frame_rets.columns[0])

        data_frame.index.names = [None]

        if create_index:
            calculations = Calculations()
            data_frame.loc[-minute_start + min_offset, :] = numpy.nan
            data_frame = calculations.create_mult_index(data_frame)
        else:
            if vol is True:
                # annualise (if vol)
                data_frame = data_frame.rolling(center=False,window=5).std() * math.sqrt(ann_factor)
            else:
                data_frame = data_frame.cumsum()

        return data_frame
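
    # Example (a sketch; es is an EventStudy-style instance, minute_rets_df a 1-minute
    # returns DataFrame, events_df a DataFrame indexed by event timestamps):
    #
    #   paths = es.get_intraday_moves_over_custom_event(
    #       minute_rets_df, events_df, vol=False, minute_start=5, mins=3 * 60, resample=True)
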
class EventsFactory(EventStudy):
    """Provides methods to fetch data on economic data events and to perform basic event studies for market data around
    these events. Note, requires a file of input of the following (transposed as columns!) - we give an example for
    NFP released on 7 Feb 2003 (note, that release-date-time-full, need not be fully aligned by row).

    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.Date	                31/01/2003 00:00
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.close	                xyz
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.actual-release	        143
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.survey-median	        xyz
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.survey-average	        xyz
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.survey-high	        xyz
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.survey-low	            xyz
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.survey-high.1	        xyz
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.number-observations	xyz
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.first-revision	        185
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.first-revision-date	20030307
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.release-dt	            20030207
    USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.release-date-time-full	08/01/1999 13:30

    """

    # _econ_data_frame = None

    # where your HDF5 file is stored with economic data
    # TODO integrate with on the fly downloading!
    _hdf5_file_econ_file = MarketConstants().hdf5_file_econ_file
    _db_database_econ_file = MarketConstants().db_database_econ_file

    ### manual offset for certain events where Bloomberg/data vendor displays the wrong date (usually because of time differences)
    _offset_events = {'AUD-Australia Labor Force Employment Change SA.release-dt' : 1}

    def __init__(self, df = None):
        super(EventsFactory, self).__init__()

        self.config = ConfigManager()
        self.logger = LoggerManager().getLogger(__name__)
        self.filter = Filter()
        self.io_engine = IOEngine()
        self.speed_cache = SpeedCache()

        if df is not None:
            self._econ_data_frame = df
        else:
            self.load_economic_events()

        return

    def load_economic_events(self):
        self._econ_data_frame = self.speed_cache.get_dataframe(self._db_database_econ_file)

        if self._econ_data_frame is None:
            # self._econ_data_frame = self.io_engine.read_time_series_cache_from_disk(self._hdf5_file_econ_file)
            market_constants = MarketConstants()

            self._econ_data_frame = self.io_engine.read_time_series_cache_from_disk(
                self._db_database_econ_file, engine=market_constants.write_engine,
                db_server=market_constants.db_server,
                db_port=market_constants.db_port,
                username=market_constants.db_username,
                password=market_constants.db_password)

            self.speed_cache.put_dataframe(self._db_database_econ_file, self._econ_data_frame)

    def harvest_category(self, category_name):
        cat = self.config.get_categories_from_tickers_selective_filter(category_name)

        for k in cat:
            md_request = self.market_data_generator.populate_md_request(k)
            data_frame = self.market_data_generator.fetch_market_data(md_request)

            # TODO allow merge of multiple sources

        return data_frame

    def get_economic_events(self):
        return self._econ_data_frame

    def dump_economic_events_csv(self, path):
        self._econ_data_frame.to_csv(path)

    def get_economic_event_date_time(self, name, event = None, csv = None):
        ticker = self.create_event_desciptor_field(name, event, "release-date-time-full")

        if csv is None:
            data_frame = self._econ_data_frame[ticker]
            data_frame.index = self._econ_data_frame[ticker]
        else:
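            # CSV dates are expected day-first, e.g. "07/02/2003 13:30"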
            data_frame = pandas.read_csv(csv, index_col=0, parse_dates=True,
                                         date_format='%d/%m/%Y %H:%M')

        data_frame = data_frame[pandas.notnull(data_frame.index)]

        start_date = datetime.datetime.strptime("01-Jan-1971", "%d-%b-%Y")
        data_frame = self.filter.filter_time_series_by_date(start_date, None, data_frame)

        return data_frame

    def get_economic_event_date_time_dataframe(self, name, event = None, csv = None):
        series = self.get_economic_event_date_time(name, event, csv)

        data_frame = pandas.DataFrame(series.values, index=series.index)
        data_frame.columns.name = self.create_event_desciptor_field(name, event, "release-date-time-full")

        return data_frame

    def get_economic_event_date_time_fields(self, fields, name, event = None):
        ### acceptable fields
        # observation-date <- observation time for the index
        # actual-release
        # survey-median
        # survey-average
        # survey-high
        # survey-low
        # survey-high
        # number-observations
        # release-dt
        # release-date-time-full
        # first-revision
        # first-revision-date

        ticker = []

        # construct tickers of the form USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA.actual-release
        for i in range(0, len(fields)):
            ticker.append(self.create_event_desciptor_field(name, event, fields[i]))

        # index on the release-dt field eg. 20101230 (we shall convert this later)
        ticker_index = self.create_event_desciptor_field(name, event, "release-dt")

        ######## grab event date/times
        event_date_time = self.get_economic_event_date_time(name, event)
        date_time_fore = event_date_time.index

        # create dates for join later
        date_time_dt = [datetime.datetime(
                                date_time_fore[x].year,
                                date_time_fore[x].month,
                                date_time_fore[x].day)
                                for x in range(len(date_time_fore))]

        event_date_time_frame = pandas.DataFrame(event_date_time.index, date_time_dt)
        event_date_time_frame.index = date_time_dt

        ######## grab event date/fields
        self._econ_data_frame[name + ".observation-date"] = self._econ_data_frame.index
        data_frame = self._econ_data_frame[ticker]

        data_frame.index = self._econ_data_frame[ticker_index]

        data_frame = data_frame[data_frame.index != 0]              # eliminate any 0 dates (artifact of Excel)
        data_frame = data_frame[pandas.notnull(data_frame.index)]   # eliminate any NaN dates (artifact of Excel)
        ind_dt = data_frame.index

        # convert yyyymmdd format to datetime
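        # e.g. 20030207 -> datetime.datetime(2003, 2, 7)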
        data_frame.index = [datetime.datetime(
                               int((ind_dt[x] - (ind_dt[x] % 10000))/10000),
                               int(((ind_dt[x] % 10000) - (ind_dt[x] % 100))/100),
                               int(ind_dt[x] % 100)) for x in range(len(ind_dt))]

        # HACK! certain events need an offset because BBG have invalid dates
        if ticker_index in self._offset_events:
             data_frame.index = data_frame.index + timedelta(days=self._offset_events[ticker_index])

        ######## join together event dates/date-time/fields in one data frame
        data_frame = event_date_time_frame.join(data_frame, how='inner')
        data_frame.index = pandas.to_datetime(data_frame.index)
        data_frame.index.name = ticker_index

        return data_frame

    def create_event_desciptor_field(self, name, event, field):
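        # e.g. ("USD", "FOMC Rate Decision", "release-dt") -> "USD-FOMC Rate Decision.release-dt"
        # (hypothetical name/event, for illustration)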
        if event is None:
            return name + "." + field
        else:
            return name + "-" + event + "." + field

    def get_all_economic_events_date_time(self):
        event_names = self.get_all_economic_events()
        columns = ['event-name', 'release-date-time-full']

        rows = []

        for event in event_names:
            event_times = self.get_economic_event_date_time(event)

            for time in event_times:
                rows.append({'event-name': event, 'release-date-time-full': time})

        return pandas.DataFrame(rows, columns=columns)

    def get_all_economic_events(self):
        field_names = self._econ_data_frame.columns.values

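        # keep the stem before ".Date", e.g.
        # "AUD-Australia Labor Force Employment Change SA.Date" -> "AUD-Australia Labor Force Employment Change SA"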
        event_names = [x.split('.')[0] for x in field_names if '.Date' in x]

        event_names_filtered = [x for x in event_names if len(x) > 4]

        # sort list alphabetically (and remove any duplicates)
        return sorted(set(event_names_filtered))

    def get_economic_event_date(self, name, event = None):
        return self._econ_data_frame[
            self.create_event_desciptor_field(name, event, ".release-dt")]

    def get_economic_event_ret_over_custom_event_day(self, data_frame_in, name, event, start, end, lagged = False,
                                              NYC_cutoff = 10):

        # get the times of events
        event_dates = self.get_economic_event_date_time(name, event)

        return super(EventsFactory, self).get_economic_event_ret_over_custom_event_day(data_frame_in, event_dates, name, event, start, end,
                                                                                       lagged = lagged, NYC_cutoff = NYC_cutoff)

    def get_economic_event_vol_over_event_day(self, vol_in, name, event, start, end, realised = False):

        return self.get_economic_event_ret_over_custom_event_day(vol_in, name, event, start, end,
            lagged = realised)

        # return super(EventsFactory, self).get_economic_event_ret_over_event_day(vol_in, name, event, start, end, lagged = realised)

    def get_daily_moves_over_event(self):
        # TODO
        pass

    # return only US events etc. by dates
    def get_intraday_moves_over_event(self, data_frame_rets, cross, event_fx, event_name, start, end, vol, mins = 3 * 60,
                                      min_offset = 0, create_index = False, resample = False, freq = 'minutes'):

        ef_time_frame = self.get_economic_event_date_time_dataframe(event_fx, event_name)
        ef_time_frame = self.filter.filter_time_series_by_date(start, end, ef_time_frame)

        return self.get_intraday_moves_over_custom_event(data_frame_rets, ef_time_frame,
                                                         vol, mins = mins, min_offset = min_offset,
                                                         create_index = create_index, resample = resample, freq = freq)
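
    # Example (a sketch with a hypothetical cross; ret_df holds 1-minute returns for EURUSD):
    #
    #   moves = ev.get_intraday_moves_over_event(
    #       ret_df, 'EURUSD', 'USD', 'US Employees on Nonfarm Payrolls Total MoM Net Change SA',
    #       start, end, vol=False)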

    def get_surprise_against_intraday_moves_over_event(self, data_frame_cross_orig, cross, event_fx, event_name, start, end,
                                                       offset_list = [1, 5, 30, 60], add_surprise = False,
                                                       surprise_field = 'survey-average'):

        fields = ['actual-release', 'survey-median', 'survey-average', 'survey-high', 'survey-low']

        ef_time_frame = self.get_economic_event_date_time_fields(fields, event_fx, event_name)
        ef_time_frame = self.filter.filter_time_series_by_date(start, end, ef_time_frame)

        return self.get_surprise_against_intraday_moves_over_custom_event(data_frame_cross_orig, ef_time_frame, cross, event_fx, event_name, start, end,
                                                       offset_list = offset_list, add_surprise = add_surprise,
                                                       surprise_field = surprise_field)

    def get_intraday_moves_over_custom_event(self,
                                             data_frame_rets,
                                             ef_time_frame,
                                             vol=False,
                                             minute_start=5,
                                             mins=3 * 60,
                                             min_offset=0,
                                             create_index=False,
                                             resample=False,
                                             freq='minutes',
                                             cumsum=True,
                                             adj_cumsum_zero_point=False,
                                             adj_zero_point=2):
        filter = Filter()

        ef_time_frame = filter.filter_time_series_by_date(
            data_frame_rets.index[0], data_frame_rets.index[-1], ef_time_frame)
        ef_time = ef_time_frame.index

        if freq == 'minutes':
            ef_time_start = ef_time - timedelta(minutes=minute_start)
            ef_time_end = ef_time + timedelta(minutes=mins)
            ann_factor = 252 * 1440
        elif freq == 'days':
            ef_time = ef_time_frame.index.normalize()
            ef_time_start = ef_time - pandas.tseries.offsets.BusinessDay(
            ) * minute_start
            ef_time_end = ef_time + pandas.tseries.offsets.BusinessDay() * mins
            ann_factor = 252
        elif freq == 'weeks':
            ef_time = ef_time_frame.index.normalize()
            ef_time_start = ef_time - pandas.tseries.offsets.Week(
            ) * minute_start
            ef_time_end = ef_time + pandas.tseries.offsets.Week() * mins
            ann_factor = 52

        ords = list(range(-minute_start + min_offset, mins + min_offset))
        lst_ords = list(ords)

        # All data needs to be equally spaced
        if resample:
            # Make sure time series is properly sampled at 1 min intervals
            if freq == 'minutes':
                data_frame_rets = data_frame_rets.resample('1min').last()
                data_frame_rets = data_frame_rets.fillna(value=0)
                data_frame_rets = filter.remove_out_FX_out_of_hours(
                    data_frame_rets)
            elif freq == 'days':
                data_frame_rets = data_frame_rets.resample('B').last()
                data_frame_rets = data_frame_rets.fillna(value=0)
            elif freq == 'weeks':
                data_frame_rets = data_frame_rets.resample('W').last()
                data_frame_rets = data_frame_rets.fillna(value=0)

        start_index = data_frame_rets.index.searchsorted(ef_time_start)
        finish_index = data_frame_rets.index.searchsorted(ef_time_end)

        data_frame = pandas.DataFrame(index=ords, columns=ef_time_frame.index)

        for i in range(0, len(ef_time_frame.index)):

            vals = data_frame_rets.iloc[start_index[i]:finish_index[i]].values

            st = ef_time_start[i]
            en = ef_time_end[i]

            # Add extra "future" history in case we are doing an event study which goes outside our data window
            # (will just be filled with NaN)
            if len(vals) < len(lst_ords):
                extend = numpy.zeros((len(lst_ords) - len(vals), 1)) * numpy.nan

                # If start window date is before we have data
                if st < data_frame_rets.index[0]:
                    vals = numpy.append(extend, vals)

                # If end date window is after we have data
                else:
                    vals = numpy.append(vals, extend)

            data_frame[ef_time_frame.index[i]] = vals

        data_frame.index.names = [None]

        if create_index:
            calculations = Calculations()
            data_frame.loc[-minute_start + min_offset] = numpy.nan
            data_frame = calculations.create_mult_index(data_frame)
        else:
            if vol is True:
                # Annualise (if vol)
                data_frame = data_frame.rolling(
                    center=False, window=5).std() * math.sqrt(ann_factor)
            elif cumsum:

                data_frame = data_frame.cumsum()

                # Adjust DataFrame so zero point shows zero returns
                if adj_cumsum_zero_point:
                    ind = abs(minute_start) - adj_zero_point

                    for i, c in enumerate(data_frame.columns):
                        data_frame[
                            c] = data_frame[c] - data_frame[c].values[ind]

        return data_frame
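
    # Example (a sketch; ret_df and events_df are hypothetical minute returns and event
    # timestamps, with each event path re-based so the cumulative return reads zero just
    # before the release):
    #
    #   paths = ev.get_intraday_moves_over_custom_event(
    #       ret_df, events_df, minute_start=5, mins=3 * 60, resample=True,
    #       cumsum=True, adj_cumsum_zero_point=True)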