Code Example #1
    def test_forward_happypath_esi(self):
        """test a forward-split: ESI"""
        split_obj = split_utils.SplitInfo(DEMO_SPLIT)
        raw_esi_data1 = crest_utils.fetch_market_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            config=ROOT_CONFIG
        )
        raw_esi_data2 = crest_utils.fetch_market_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_original_id,
            config=ROOT_CONFIG
        )
        split_data = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            fetch_source=api_utils.SwitchCCPSource.ESI,
            config=ROOT_CONFIG
        )
        #split_data.to_csv('split_data_esi.csv', index=False)

        ## Doctor data for testing ##
        min_split_date = split_data.date.min()
        raw_esi_data1 = prep_raw_data(
            raw_esi_data1.copy(),
            min_split_date
        )
        raw_esi_data2 = prep_raw_data(
            raw_esi_data2.copy(),
            min_split_date
        )

        pre_split_data = split_data[split_data.date <= split_obj.date_str].reset_index()
        pre_raw_data = raw_esi_data2[raw_esi_data2.date <= split_obj.date_str].reset_index()
        post_split_data = split_data[split_data.date > split_obj.date_str].reset_index()
        post_raw_data = raw_esi_data1[raw_esi_data1.date > split_obj.date_str].reset_index()

        ## Validate pre/post Split values ##
        validate_plain_data(
            post_raw_data,
            post_split_data
        )

        validate_split_data(
            pre_raw_data,
            pre_split_data,
            split_obj
        )
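
The `prep_raw_data` helper these tests lean on is not reproduced on this page. A minimal sketch of what it plausibly does, given that the tests pass a copy of the raw frame plus the earliest date in the split frame (a hypothetical reconstruction, not the project's actual helper):

import pandas as pd

def prep_raw_data(data, min_split_date):
    """Trim a raw history frame to the split frame's date window (hypothetical sketch)."""
    # drop rows older than the first date covered by the split data
    data = data[data.date >= min_split_date]
    # sort and reindex so row-for-row comparisons line up downstream
    return data.sort_values('date').reset_index(drop=True)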
Code Example #2
def test_fetch_market_history(config=CONFIG):
    """test `fetch_market_history` utility"""

    data = crest_utils.fetch_market_history(config.get('TEST', 'region_id'),
                                            config.get('TEST', 'type_id'),
                                            config=ROOT_CONFIG)

    assert isinstance(data, pd.DataFrame)
    expected_cols = [
        'date', 'avgPrice', 'highPrice', 'lowPrice', 'volume', 'orders'
        #extra keys:
        #'volume_str',
        #'orderCountStr'
    ]
    for key in expected_cols:
        assert key in data.columns.values

    ohlc = crest_utils.data_to_ohlc(data)

    assert ohlc['date'].equals(data['date'])
    assert ohlc['open'].equals(data['avgPrice'])
    assert ohlc['high'].equals(data['highPrice'])
    assert ohlc['low'].equals(data['lowPrice'])
    assert ohlc['volume'].equals(data['volume'])

    assert data['avgPrice'].shift(1).equals(ohlc['close'])
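
Taken together, the assertions above fully pin down the column mapping `data_to_ohlc` must produce: `open`/`high`/`low`/`volume` are straight renames and `close` is the previous row's `avgPrice`. A minimal sketch that would satisfy every assert (reconstructed from the test, not the library's actual implementation):

import pandas as pd

def data_to_ohlc(data):
    """Rename market-history columns to OHLC names (sketch derived from the asserts)."""
    return pd.DataFrame({
        'date': data['date'],
        'open': data['avgPrice'],
        'high': data['highPrice'],
        'low': data['lowPrice'],
        'close': data['avgPrice'].shift(1),  # close == previous row's avgPrice
        'volume': data['volume'],
    })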
Code Example #3
def fetch_esi(
        type_id,
        region_id,
        data_range=400,
        logger=logging.getLogger(PROGNAME)
):
    """fetch data from ESI endpoint

    Args:
        type_id (int): EVE Online type_id
        region_id (int): EVE Online region_id
        data_range (int, optional): days of back-propagation
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        pandas.DataFrame: data from endpoint

    """
    logger.info('--Fetching price history: ESI')
    if data_range > CREST_MAX:
        warning_msg = 'ESI only returns %d days' % CREST_MAX
        warnings.warn(warning_msg, UserWarning)
        logger.warning(warning_msg)

    data = crest_utils.fetch_market_history(
        region_id,
        type_id,
        config=CONFIG,
        logger=logger
    )

    return data.tail(n=data_range)
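
Since `fetch_esi` warns rather than raises when `data_range` exceeds `CREST_MAX`, a caller can request an oversized range and still use the capped result. A hedged usage sketch; the IDs are illustrative EVE Online values (34 is Tritanium, 10000002 is The Forge) and `CREST_MAX` lives in the surrounding module:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    data = fetch_esi(type_id=34, region_id=10000002, data_range=9999)

# the UserWarning fired, but `data` still holds up to CREST_MAX trailing rows
assert any(issubclass(w.category, UserWarning) for w in caught)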
Code Example #4
def fetch_esi(type_id, region_id, data_range=400, logger=LOGGER):
    """fetch data from ESI endpoint

    Args:
        type_id (int): EVE Online type_id
        region_id (int): EVE Online region_id
        data_range (int, optional): days of back-propagation
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        (:obj:`pandas.DataFrame`): data from endpoint

    """
    logger.info('--Fetching price history: ESI')
    if data_range > CREST_MAX:
        warning_msg = 'ESI only returns %d days' % CREST_MAX
        warnings.warn(warning_msg, UserWarning)
        logger.warning(warning_msg)

    data = crest_utils.fetch_market_history(region_id,
                                            type_id,
                                            mode=api_utils.SwitchCCPSource.ESI,
                                            config=CONFIG,
                                            logger=logger)

    return data.tail(n=data_range)
Code Example #5
def test_fetch_market_history_esi(config=CONFIG):
    """test `fetch_market_history` utility"""

    data = crest_utils.fetch_market_history(
        config.get('TEST', 'region_id'),
        config.get('TEST', 'type_id'),
        config=ROOT_CONFIG,
    )

    assert isinstance(data, pd.DataFrame)
    expected_cols = [
        'date',
        'avgPrice',
        'highPrice',
        'lowPrice',
        'volume',
        'orders'
        #extra keys:
        #'volume_str',
        #'orderCountStr'
    ]
    for key in expected_cols:
        assert key in data.columns.values

    ohlc = crest_utils.data_to_ohlc(data)

    assert ohlc['date'].equals(data['date'])
    assert ohlc['open'].equals(data['avgPrice'])
    assert ohlc['high'].equals(data['highPrice'])
    assert ohlc['low'].equals(data['lowPrice'])
    assert ohlc['volume'].equals(data['volume'])

    assert data['avgPrice'].shift(1).equals(ohlc['close'])
Code Example #6
    def test_forward_happypath_crest(self):
        """test a forward-split: crest"""
        split_obj = split_utils.SplitInfo(DEMO_SPLIT)
        raw_crest_data1 = crest_utils.fetch_market_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            mode=api_utils.SwitchCCPSource.CREST,
            config=ROOT_CONFIG)
        raw_crest_data2 = crest_utils.fetch_market_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_original_id,
            mode=api_utils.SwitchCCPSource.CREST,
            config=ROOT_CONFIG)
        split_data = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            api_utils.SwitchCCPSource.CREST,
            config=ROOT_CONFIG)
        #split_data.to_csv('split_data_crest.csv', index=False)

        ## Doctor data for testing ##
        min_split_date = split_data.date.min()
        raw_crest_data1 = prep_raw_data(raw_crest_data1.copy(), min_split_date)
        raw_crest_data2 = prep_raw_data(raw_crest_data2.copy(), min_split_date)

        split_date_str = datetime.strftime(split_obj.split_date,
                                           '%Y-%m-%dT%H:%M:%S')
        pre_split_data = split_data[
            split_data.date <= split_date_str].reset_index()
        pre_raw_data = raw_crest_data2[
            raw_crest_data2.date <= split_date_str].reset_index()
        post_split_data = split_data[
            split_data.date > split_date_str].reset_index()
        post_raw_data = raw_crest_data1[
            raw_crest_data1.date > split_date_str].reset_index()

        ## Validate pre/post Split values ##
        validate_plain_data(post_raw_data, post_split_data)

        validate_split_data(pre_raw_data, pre_split_data, split_obj)
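
Neither `validate_plain_data` nor `validate_split_data` appears on this page. A hedged sketch of what the pair plausibly checks, judging from how the tests slice the frames; the `split_rate` attribute on `SplitInfo` is an assumption, not something shown here:

def validate_plain_data(raw_data, split_data):
    """Post-split, the stitched frame should simply match the raw pull (hypothetical sketch)."""
    for col in ('date', 'avgPrice', 'highPrice', 'lowPrice', 'volume'):
        assert raw_data[col].equals(split_data[col])

def validate_split_data(raw_data, split_data, split_obj):
    """Pre-split rows should differ from raw by the split ratio (hypothetical sketch)."""
    for col in ('avgPrice', 'highPrice', 'lowPrice'):
        adjusted = raw_data[col] / split_obj.split_rate  # assumed numeric ratio
        assert adjusted.round(2).equals(split_data[col].round(2))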
Code Example #7
    def test_future_split_crest(self):
        """validate with CREST source"""
        test_data_crest = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            api_utils.SwitchCCPSource.CREST,
            config=ROOT_CONFIG)
        assert test_data_crest.equals(
            crest_utils.fetch_market_history(
                TEST_CONFIG.get('TEST', 'region_id'),
                self.test_type_id,
                mode=api_utils.SwitchCCPSource.CREST,
                config=ROOT_CONFIG))
Code Example #8
    def test_future_split_esi(self):
        """validate on ESI"""
        test_data_esi = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            api_utils.SwitchCCPSource.ESI,
            config=ROOT_CONFIG)
        assert test_data_esi.equals(
            crest_utils.fetch_market_history(
                TEST_CONFIG.get('TEST', 'region_id'),
                self.test_type_id,
                mode=api_utils.SwitchCCPSource.ESI,
                config=ROOT_CONFIG))
Code Example #9
    def test_future_split_esi(self):
        """validate on ESI"""
        test_data_esi = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            api_utils.SwitchCCPSource.ESI,
            config=ROOT_CONFIG
        )
        assert test_data_esi.equals(
            crest_utils.fetch_market_history(
                TEST_CONFIG.get('TEST', 'region_id'),
                self.test_type_id,
                config=ROOT_CONFIG
            )
        )
Code Example #10
def fetch_split_history(
        region_id,
        type_id,
        fetch_source,
        data_range=400,
        #split_cache_file=SPLIT_CACHE_FILE,
        config=api_config.CONFIG,
        logger=api_config.LOGGER
):
    """for split items, fetch and stitch the data together

    Args:
        region_id (int): EVE Online region_id
        type_id (int): EVE Online type_id
        fetch_source (:enum:`api_config.SwitchCCPSource`): which endpoint to fetch
        data_range (int, optional): how much total data to fetch
        config (:obj:`configparser.ConfigParser`, optional): config overrides
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        (:obj:`pandas.DataFrame`): data from endpoint

    """
    ## Figure out if there's work to do ##
    if type_id not in api_config.SPLIT_INFO:
        raise exceptions.NoSplitConfigFound(
            'No config set for {0}'.format(type_id)
        )

    split_obj = api_config.SPLIT_INFO[type_id]
    fetch_id = split_obj.current_typeid()

    logger.debug(split_obj.__dict__)
    logger.info(
        'fetching data from remote {0} (was {1})'.\
        format(type_id, fetch_id)
    )
    ## Get current market data ##
    if fetch_source == api_config.SwitchCCPSource.EMD:
        logger.info('--EMD fetch')
        current_data = forecast_utils.fetch_market_history_emd(
            region_id,
            fetch_id,
            data_range=data_range,
            config=config,
            #logger=logger
        )
        current_data = forecast_utils.parse_emd_data(current_data['result'])
    else:
        logger.info('--CCP fetch')
        current_data = crest_utils.fetch_market_history(
            region_id,
            fetch_id,
            mode=fetch_source,
            config=config,
            logger=logger
        )

    ## Early exit: split too old or hasn't happened yet ##
    min_date = datetime_helper(current_data['date'].min())
    if min_date > split_obj.split_date or not bool(split_obj):
        #split is too old OR split hasn't happened yet
        logger.info('No split work -- Returning current pull')
        return current_data

    ## Fetch split data ##
    logger.info(
        '--fetching data from cache {0}@{1}'.format(
            split_obj.original_id, region_id
        )
    )
    split_data = fetch_split_cache_data(
        region_id,
        split_obj.original_id,
        split_date=split_obj.date_str
    )

    if type_id == split_obj.new_id: #adjust the back history
        logger.info('--splitting old-data')
        split_data = execute_split(
            split_data,
            split_obj
        )
    # vv FIX ME vv: Testable? #
    elif type_id == split_obj.original_id: #adjust the current data
        logger.info('--splitting new-data')
        current_data = execute_split(
            current_data,
            split_obj
        )
    # ^^ FIX ME ^^ #
    else:   #pragma: no cover
        logger.error(
            'Unable to map new/old type_ids correctly' +
            '\n\ttype_id={0}'.format(type_id) +
            '\n\toriginal_id={0} new_id={1}'.format(split_obj.original_id, split_obj.new_id),
            exc_info=True
        )
        raise exceptions.MissmatchedTypeIDs(
            status=500,
            message='unable to map types to splitcache function'
        )

    logger.info('--combining data')
    current_data.to_csv('current_data.csv', index=False)
    split_data.to_csv('split_data.csv', index=False)
    combined_data = combine_split_history(
        current_data.copy(),    #pass by value, not by reference
        split_data.copy()
    )

    if fetch_source == api_config.SwitchCCPSource.CREST:
        logger.info('--Setting CREST datetime')
        combined_data['date'] = pd.to_datetime(combined_data['date']).\
            dt.strftime('%Y-%m-%dT%H:%M:%S')
    return combined_data
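
`execute_split` and `SplitInfo` are defined elsewhere in split_utils.py. A hedged sketch of what `execute_split` plausibly does for a forward split, assuming `SplitInfo` carries a numeric ratio (the attribute name `split_rate` is hypothetical):

def execute_split(data, split_obj):
    """Scale one side of the history by the split ratio (hypothetical sketch)."""
    data = data.copy()
    for col in ('avgPrice', 'highPrice', 'lowPrice'):
        data[col] = data[col] / split_obj.split_rate   # prices shrink in a forward split
    data['volume'] = data['volume'] * split_obj.split_rate  # volume scales up to match
    return data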
Code Example #11
def fetch_extended_history(region_id,
                           type_id,
                           min_data=MIN_DATA,
                           crest_range=CREST_RANGE,
                           config=api_config.CONFIG,
                           data_range=DEFAULT_RANGE,
                           logger=LOGGER):
    """fetch data from database

    Args:
        region_id (int): EVE Online regionID: https://crest-tq.eveonline.com/regions/
        type_id (int): EVE Online typeID: https://crest-tq.eveonline.com/types/
        min_data (int, optional): minimum rows needed to seed a prediction
        crest_range (int, optional): row count below which CREST is used as a fallback
        config (:obj:`configparser.ConfigParser`, optional): config overrides
        data_range (int, optional): how far back to fetch data
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        (:obj:`pandas.DataFrame`): collection of data from database
            ['date', 'avgPrice', 'highPrice', 'lowPrice', 'volume', 'orders']
    """
    logger.info('--fetching history data')
    try:
        raw_data = fetch_market_history_emd(region_id,
                                            type_id,
                                            data_range,
                                            config=config)
        logger.debug(raw_data['result'][:5])
        data = parse_emd_data(raw_data['result'])
    except Exception as err_msg:  #pragma: no cover
        logger.warning('ERROR: trouble getting data from EMD' +
                       '\n\tregion_id={0}'.format(region_id) +
                       '\n\ttype_id={0}'.format(type_id) +
                       '\n\tdata_range={0}'.format(data_range),
                       exc_info=True)
        data = []

    if len(data) < crest_range:  #pragma: no cover
        logger.info('--Not enough data found, fetching CREST data')

        try:
            data = crest_utils.fetch_market_history(region_id,
                                                    type_id,
                                                    config=config,
                                                    logger=logger)
        except Exception as err_msg:  #pragma: no cover
            logger.error('ERROR: trouble getting data from CREST' +
                         '\n\tregion_id={0}'.format(region_id) +
                         '\n\ttype_id={0}'.format(type_id),
                         exc_info=True)
            raise exceptions.EMDBadMarketData(
                status=500, message='Unable to fetch historical data')

    if len(data) < min_data:  #pragma: no cover
        logger.warning('Not enough data to seed prediction' +
                       '\n\tregion_id={0}'.format(region_id) +
                       '\n\ttype_id={0}'.format(type_id) +
                       '\n\tlen(data)={0}'.format(len(data)))
        raise exceptions.ProphetNotEnoughData(
            status=500, message='Not enough data to build a prediction')

    return data
Code Example #12
def fetch_extended_history(
        region_id,
        type_id,
        mode=api_config.SwitchCCPSource.ESI,
        min_data=MIN_DATA,
        crest_range=CREST_RANGE,
        config=api_config.CONFIG,
        data_range=DEFAULT_RANGE,
        logger=logging.getLogger('publicAPI')
):
    """fetch data from database

    Args:
        region_id (int): EVE Online regionID: https://crest-tq.eveonline.com/regions/
        type_id (int): EVE Online typeID: https://crest-tq.eveonline.com/types/
        mode (:enum:`api_config.SwitchCCPSource`, optional): which endpoint to fetch
        min_data (int, optional): minimum rows needed to seed a prediction
        crest_range (int, optional): row count below which CREST is used as a fallback
        config (:obj:`configparser.ConfigParser`, optional): config overrides
        data_range (int, optional): how far back to fetch data
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        pandas.DataFrame: collection of data from database
            ['date', 'avgPrice', 'highPrice', 'lowPrice', 'volume', 'orders']
    """
    logger.info('--fetching history data')
    try:
        raw_data = fetch_market_history_emd(
            region_id,
            type_id,
            data_range,
            config=config
        )
        logger.debug(raw_data['result'][:5])
        data = parse_emd_data(raw_data['result'])
    except Exception as err_msg:    #pragma: no cover
        logger.warning(
            'ERROR: trouble getting data from EMD' +
            '\n\tregion_id={0}'.format(region_id) +
            '\n\ttype_id={0}'.format(type_id) +
            '\n\tdata_range={0}'.format(data_range),
            exc_info=True
        )
        data = []

    if len(data) < crest_range: #pragma: no cover
        logger.info('--Not enough data found, fetching CREST data')

        try:
            data = crest_utils.fetch_market_history(
                region_id,
                type_id,
                mode=mode,  # pass the requested source through; otherwise the kwarg is dead
                config=config,
                logger=logger
            )
        except Exception as err_msg:    #pragma: no cover
            logger.error(
                'ERROR: trouble getting data from CREST' +
                '\n\tregion_id={0}'.format(region_id) +
                '\n\ttype_id={0}'.format(type_id),
                exc_info=True
            )
            raise exceptions.EMDBadMarketData(
                status=500,
                message='Unable to fetch historical data'
            )

    if len(data) < min_data:    #pragma: no cover
        logger.warning(
            'Not enough data to seed prediction' +
            '\n\tregion_id={0}'.format(region_id) +
            '\n\ttype_id={0}'.format(type_id) +
            '\n\tlen(data)={0}'.format(len(data))
        )
        raise exceptions.ProphetNotEnoughData(
            status=500,
            message='Not enough data to build a prediction'
        )

    return data
Code Example #13
File: split_utils.py  Project: EVEprosper/ProsperAPI
def fetch_split_history(
        region_id,
        type_id,
        fetch_source=api_config.SwitchCCPSource.EMD,
        data_range=400,
        config=api_config.CONFIG,
        logger=logging.getLogger('publicAPI')
):
    """for split items, fetch and stitch the data together

    Args:
        region_id (int): EVE Online region_id
        type_id (int): EVE Online type_id
        fetch_source (:enum:`api_config.SwitchCCPSource`, optional): which endpoint to fetch
        data_range (int, optional): how much total data to fetch
        config (:obj:`configparser.ConfigParser`, optional): config overrides
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        pandas.DataFrame: data from endpoint

    """
    ## Figure out if there's work to do ##
    if type_id not in api_config.SPLIT_INFO:
        raise exceptions.NoSplitConfigFound(
            'No config set for {0}'.format(type_id)
        )

    split_obj = api_config.SPLIT_INFO[type_id]
    fetch_id = split_obj.current_typeid()

    logger.debug(split_obj.__dict__)
    logger.info(
        'fetching data from remote %s (was %s)',
        type_id, fetch_id
    )
    ## Get current market data ##
    if fetch_source == api_config.SwitchCCPSource.EMD:
        logger.info('--EMD fetch')
        current_data = forecast_utils.fetch_market_history_emd(
            region_id,
            fetch_id,
            data_range=data_range,
            config=config,
        )
        current_data = forecast_utils.parse_emd_data(current_data['result'])
    else:
        logger.info('--CCP fetch')
        current_data = crest_utils.fetch_market_history(
            region_id,
            fetch_id,
            config=config,
            logger=logger
        )

    ## Early exit: split too old or hasn't happened yet ##
    min_date = datetime_helper(current_data['date'].min())
    if min_date > split_obj.split_date or not bool(split_obj):
        #split is too old OR split hasn't happened yet
        logger.info('No split work -- Returning current pull')
        return current_data

    ## Fetch split data ##
    logger.info(
        '--fetching data from cache %s@%s',
        split_obj.original_id, region_id
    )
    split_data = fetch_split_cache_data(
        region_id,
        split_obj.original_id,
        split_date=split_obj.date_str
    )

    if type_id == split_obj.new_id:  # adjust the back history
        logger.info('--splitting old-data')
        split_data = execute_split(
            split_data,
            split_obj
        )
    # vv FIX ME vv: Testable? #
    elif type_id == split_obj.original_id:  # adjust the current data
        logger.info('--splitting new-data')
        current_data = execute_split(
            current_data,
            split_obj
        )
    # ^^ FIX ME ^^ #
    else:  # pragma: no cover
        logger.error(
            'Unable to map new/old type_ids correctly' +
            '\n\ttype_id={0}'.format(type_id) +
            '\n\toriginal_id={0} new_id={1}'.format(split_obj.original_id, split_obj.new_id),
            exc_info=True
        )
        raise exceptions.MissmatchedTypeIDs(
            status=500,
            message='unable to map types to splitcache function'
        )

    logger.info('--combining data')
    current_data.to_csv('current_data.csv', index=False)
    split_data.to_csv('split_data.csv', index=False)
    combined_data = combine_split_history(
        current_data.copy(),  # pass by value, not by reference
        split_data.copy()
    )

    return combined_data
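
`combine_split_history` is also not shown. A minimal sketch of the stitching step, assuming both frames share the same columns and overlap is resolved in favor of the fresh pull (an assumption; the real function may align the halves differently):

import pandas as pd

def combine_split_history(current_data, split_data):
    """Concatenate the adjusted halves into one continuous frame (hypothetical sketch)."""
    combined = pd.concat([split_data, current_data], ignore_index=True)
    # keep one row per date, preferring the current pull where the halves overlap
    combined = combined.drop_duplicates(subset='date', keep='last')
    return combined.sort_values('date').reset_index(drop=True)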
Code Example #14
    def get(self, return_type):
        """GET data from CREST and send out OHLC info"""
        args = self.reqparse.parse_args()
        #TODO: info archive
        LOGGER.info('OHLC {0} Request: {1}'.format(return_type, args))

        if return_type not in return_supported_types():
            return 'INVALID RETURN FORMAT', 405

        mode = api_config.SwitchCCPSource(
            api_config.CONFIG.get('GLOBAL', 'crest_or_esi'))
        ## Validate inputs ##
        try:
            crest_utils.validate_id('map_regions',
                                    args.get('regionID'),
                                    mode=mode,
                                    config=api_config.CONFIG,
                                    logger=LOGGER)
            crest_utils.validate_id('inventory_types',
                                    args.get('typeID'),
                                    mode=mode,
                                    config=api_config.CONFIG,
                                    logger=LOGGER)
        except exceptions.ValidatorException as err:
            LOGGER.warning('ERROR: unable to validate type/region ids' +
                           '\n\targs={0}'.format(args),
                           exc_info=True)
            return err.message, err.status
        except Exception:  #pragma: no cover
            LOGGER.error('ERROR: unable to validate type/region ids' +
                         'args={0}'.format(args),
                         exc_info=True)
            return 'UNHANDLED EXCEPTION', 500

        ## Fetch CREST ##
        try:
            #LOGGER.info(api_config.SPLIT_INFO)
            if args.get('typeID') in api_config.SPLIT_INFO:
                LOGGER.info('FORK: using split utility')
                data = split_utils.fetch_split_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    mode,
                    config=api_config.CONFIG,
                    logger=LOGGER)
            else:
                data = crest_utils.fetch_market_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    config=api_config.CONFIG,
                    mode=mode,
                    logger=LOGGER)
            data = crest_utils.data_to_ohlc(data)
        except exceptions.ValidatorException as err:  #pragma: no cover
            LOGGER.error('ERROR: unable to parse CREST data' +
                         '\n\targs={0}'.format(args),
                         exc_info=True)
            return err.message, err.status
        except Exception:  #pragma: no cover
            #except Exception as err:    #pragma: no cover
            LOGGER.error('ERROR: unhandled issue in parsing CREST data' +
                         'args={0}'.format(args),
                         exc_info=True)
            return 'UNHANDLED EXCEPTION', 500

        ## Format output ##
        if return_type == AcceptedDataFormat.JSON.value:
            LOGGER.info('rolling json response')
            data_str = data.to_json(path_or_buf=None, orient='records')
            message = json.loads(data_str)
        elif return_type == AcceptedDataFormat.CSV.value:
            LOGGER.info('rolling csv response')
            data_str = data.to_csv(
                path_or_buf=None,
                header=True,
                index=False,
                columns=['date', 'open', 'high', 'low', 'close', 'volume'])
            message = output_csv(data_str, 200)
        else:  #pragma: no cover
            #TODO: CUT?
            LOGGER.error('invalid format requested' +
                         '\n\targs={0}'.format(args) +
                         '\n\treturn_type={0}'.format(return_type),
                         exc_info=True)
            return 'UNSUPPORTED FORMAT', 500

        return message
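
`output_csv` is a project helper not included here. A minimal sketch of the usual Flask pattern it likely wraps (an assumption; the real helper may attach extra headers such as a download filename):

from flask import make_response

def output_csv(data_str, status_code):
    """Wrap a CSV string in a text/csv HTTP response (hypothetical sketch)."""
    response = make_response(data_str, status_code)
    response.headers['Content-Type'] = 'text/csv'
    return response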
Code Example #15
    def get(self, return_type):
        """GET data from CREST and send out OHLC info"""
        args = self.reqparse.parse_args()
        #TODO: info archive
        self.logger.info('OHLC %s Request: %s', return_type, args)

        if return_type not in return_supported_types():
            return 'INVALID RETURN FORMAT', 405
        ## Validate inputs ##
        try:
            crest_utils.validate_id(
                'map_regions',
                args.get('regionID'),
                config=api_config.CONFIG,
                logger=self.logger,
            )
            crest_utils.validate_id(
                'inventory_types',
                args.get('typeID'),
                config=api_config.CONFIG,
                logger=self.logger,
            )
        except exceptions.ValidatorException as err:
            self.logger.warning(
                'ERROR: unable to validate type/region ids' +
                '\n\targs={0}'.format(args),
                exc_info=True
            )
            return err.message, err.status
        except Exception: #pragma: no cover
            self.logger.error(
                'ERROR: unable to validate type/region ids' +
                'args={0}'.format(args),
                exc_info=True
            )
            return 'UNHANDLED EXCEPTION', 500

        ## Fetch CREST ##
        try:
            #LOGGER.info(api_config.SPLIT_INFO)
            if args.get('typeID') in api_config.SPLIT_INFO:
                self.logger.info('FORK: using split utility')
                data = split_utils.fetch_split_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    config=api_config.CONFIG,
                    logger=self.logger,
                )
            else:
                data = crest_utils.fetch_market_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    config=api_config.CONFIG,
                    logger=self.logger
                )
            data = crest_utils.data_to_ohlc(data)
        except exceptions.ValidatorException as err: #pragma: no cover
            self.logger.error(
                'ERROR: unable to parse CREST data\n\targs=%s',
                args,
                exc_info=True
            )
            return err.message, err.status
        except Exception: #pragma: no cover
            self.logger.error(
                'ERROR: unhandled issue in parsing CREST data\n\targs=%s',
                args,
                exc_info=True
            )
            return 'UNHANDLED EXCEPTION', 500

        ## Format output ##
        if return_type == AcceptedDataFormat.JSON.value:
            self.logger.info('rolling json response')
            data_str = data.to_json(
                path_or_buf=None,
                orient='records'
            )
            message = json.loads(data_str)
        elif return_type == AcceptedDataFormat.CSV.value:
            self.logger.info('rolling csv response')
            data_str = data.to_csv(
                path_or_buf=None,
                header=True,
                index=False,
                columns=[
                    'date',
                    'open',
                    'high',
                    'low',
                    'close',
                    'volume'
                ]
            )
            message = output_csv(data_str, 200)
        else:   #pragma: no cover
            #TODO: CUT?
            self.logger.error(
                'invalid format requested' +
                '\n\targs=%s' +
                '\n\treturn_type=%s',
                args, return_type,
                exc_info=True
            )
            return 'UNSUPPORTED FORMAT', 500

        return message
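
A hedged client-side sketch of exercising this endpoint; the mount point `/ohlc/<format>` is an assumption (only the `regionID`/`typeID` argument names appear above), and the IDs are the usual illustrative EVE Online values:

import requests

resp = requests.get(
    'http://localhost:8000/ohlc/json',            # hypothetical route
    params={'regionID': 10000002, 'typeID': 34},  # The Forge / Tritanium
)
resp.raise_for_status()
for row in resp.json():
    print(row['date'], row['open'], row['close'])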