Example No. 1
    def fetch_group_time_series(self, market_data_request_list):

        logger = LoggerManager().getLogger(__name__)

        data_frame_agg = None

        thread_no = constants.market_thread_no['other']

        if market_data_request_list[
                0].data_source in constants.market_thread_no:
            thread_no = constants.market_thread_no[
                market_data_request_list[0].data_source]

        if thread_no > 0:
            pool = SwimPool().create_pool(
                thread_technique=constants.market_thread_technique,
                thread_no=thread_no)

            # Open the market data downloads in their own threads and return the results
            result = pool.map_async(self.fetch_single_time_series,
                                    market_data_request_list)
            data_frame_group = result.get()

            pool.close()
            pool.join()
        else:
            data_frame_group = []

            for md_request in market_data_request_list:
                data_frame_group.append(
                    self.fetch_single_time_series(md_request))

        # Collect together all the time series
        if data_frame_group is not None:
            data_frame_group = [i for i in data_frame_group if i is not None]

            # import itertools
            # columns = list(itertools.chain.from_iterable([i.columns for i in data_frame_group if i is not None]))

            # For debugging!
            # import pickle
            # import datetime
            # pickle.dump(data_frame_group, open(str(datetime.datetime.now()).replace(':', '-').replace(' ', '-').replace(".", "-") + ".p", "wb"))

            if data_frame_group is not None:
                try:
                    data_frame_agg = self.calculations.join(data_frame_group,
                                                            how='outer')

                    # Force ordering to be the same!
                    # data_frame_agg = data_frame_agg[columns]
                except Exception as e:
                    logger.warning(
                        'Possible overlap of columns? Have you specified the same ticker several times: '
                        + str(e))

        return data_frame_agg
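
    # Usage sketch (an assumption, not part of the original source): the
    # caller builds one MarketDataRequest per download and passes the whole
    # list in, e.g.
    #
    #   md_request_list = [md_request_spot, md_request_vol]  # hypothetical requests
    #   df = data_generator.fetch_group_time_series(md_request_list)
    #
    # Each request is fetched on its own thread/process (when thread_no > 0)
    # and the individual DataFrames are outer-joined into a single DataFrame.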
Example No. 2
    def fetch_group_time_series(self, market_data_request_list):

        logger = LoggerManager().getLogger(__name__)

        df_agg = None

        thread_no = constants.market_thread_no["other"]

        if market_data_request_list[
                0].data_source in constants.market_thread_no:
            thread_no = constants.market_thread_no[
                market_data_request_list[0].data_source]

        if thread_no > 0:
            pool = SwimPool().create_pool(
                thread_technique=constants.market_thread_technique,
                thread_no=thread_no)

            # Open the market data downloads in their own threads and return 
            # the results
            result = pool.map_async(self.fetch_single_time_series,
                                    market_data_request_list)
            df_group = result.get()

            pool.close()
            pool.join()
        else:
            df_group = []

            for md_request in market_data_request_list:
                df_group.append(
                    self.fetch_single_time_series(md_request))

        # Collect together all the time series
        if df_group is not None:
            df_group = [i for i in df_group if i is not None]

            if df_group is not None:
                try:
                    df_agg = self._calculations.join(df_group,
                                                     how="outer")

                    # Force ordering to be the same!
                    # df_agg = df_agg[columns]
                except Exception as e:
                    logger.warning(
                        "Possible overlap of columns? Have you specifed same "
                        "ticker several times: " + str(e))

        return df_agg
Example No. 3
    def fetch_group_time_series(self, market_data_request_list):

        data_frame_agg = None

        thread_no = DataConstants().market_thread_no['other']

        if market_data_request_list[0].data_source in \
                DataConstants().market_thread_no:
            thread_no = DataConstants().market_thread_no[
                market_data_request_list[0].data_source]

        if thread_no > 0:
            pool = SwimPool().create_pool(
                thread_technique=DataConstants().market_thread_technique,
                thread_no=thread_no)

            # open the market data downloads in their own threads and return the results
            result = pool.map_async(self.fetch_single_time_series,
                                    market_data_request_list)
            data_frame_group = result.get()

            pool.close()
            pool.join()
        else:
            data_frame_group = []

            for md_request in market_data_request_list:
                data_frame_group.append(
                    self.fetch_single_time_series(md_request))

        # collect together all the time series
        if data_frame_group is not None:
            data_frame_group = [i for i in data_frame_group if i is not None]

            # for debugging!
            # import pickle
            # import datetime
            # pickle.dump(data_frame_group, open(str(datetime.datetime.now()).replace(':', '-').replace(' ', '-').replace(".", "-") + ".p", "wb"))

            if data_frame_group is not None:
                try:
                    data_frame_agg = self.calculations.pandas_outer_join(
                        data_frame_group)
                except Exception as e:
                    self.logger.warning(
                        'Possible overlap of columns? Have you specified the same ticker several times: '
                        + str(e))

        return data_frame_agg
Example No. 4
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#

# TODO: Needs fixing, given change in APIs

if __name__ == '__main__':
    ###### The line below is CRUCIAL when running on Windows, otherwise multiprocessing doesn't work! (not necessary on Linux)
    from findatapy.util import SwimPool

    SwimPool()

    from findatapy.market import Market, MarketDataRequest, MarketDataGenerator

    market = Market(market_data_generator=MarketDataGenerator())

    # Choose run_example   (0 = all examples)
    # Example 1: bitcoincharts
    # Example 2: poloniex (needs to be fixed)
    # Example 3: binance
    # Example 4: bitfinex
    # Example 5: gdax
    # Example 6: kraken
    #
    # Check findatapy/conf/time_series_tickers_list.csv for all possible tickers
    # Note we use XBT instead of BTC.  Same for XET (ETH) and XLC (LTC).
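
    # A hedged sketch (not part of the original script, which is flagged above
    # as needing fixes for API changes) of how one of the examples might be
    # wired up. Parameter names mirror Example No. 5 below; the ticker and
    # field names here are assumptions, so check
    # findatapy/conf/time_series_tickers_list.csv for what is actually defined
    # (remember XBT is used instead of BTC).
    run_example = 0

    if run_example == 1 or run_example == 0:
        # Example 1: daily close prices from bitcoincharts
        md_request = MarketDataRequest(
            start_date="01 Jan 2018",         # start date (assumption)
            data_source="bitcoincharts",      # use bitcoincharts as data source
            tickers=["XBTUSD"],               # hypothetical ticker
            fields=["close"])                 # hypothetical field

        df = market.fetch_market(md_request)
        print(df)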
Example No. 5
#
# Copyright 2016-2020 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#


if __name__ == '__main__':
    ###### The line below is CRUCIAL when running on Windows, otherwise multiprocessing doesn't work! (not necessary on Linux)
    from findatapy.util import SwimPool; SwimPool()

    from findatapy.market import Market, MarketDataRequest, MarketDataGenerator

    market = Market(market_data_generator=MarketDataGenerator())

    # Get the first release of US GDP and also print its release date
    md_request = MarketDataRequest(
        start_date="01 Jun 2000",                                                      # start date (download data over past decade)
        data_source='alfred',                                                          # use ALFRED/FRED as data source
        tickers=['US GDP'],                                                            # ticker
        fields=['actual-release', 'release-date-time-full'],                           # which fields to download
        vendor_tickers=['GDP'],                                                        # vendor ticker (FRED)
        vendor_fields=['actual-release', 'release-date-time-full'])                    # which FRED fields to download

    df = market.fetch_market(md_request)
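
    # A minimal sketch of the printing step described in the comment above;
    # the release-date-time-full field requested above carries the release
    # date of each GDP observation.
    print(df)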