Example #1
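Asserts that fetch_split_history raises exceptions.NoSplitConfigFound when called with a type ID (alt_id + 1) that has no split configuration.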
def test_split_history_throws():
    """make sure fetch_split_history throws expected errors"""
    with pytest.raises(exceptions.NoSplitConfigFound):
        split_obj = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            int(TEST_CONFIG.get('TEST', 'alt_id')) + 1,
            api_utils.SwitchCCPSource.EMD)
Example #2
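A variant of Example #1 making the same NoSplitConfigFound assertion, with slightly different call formatting.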
def test_split_history_throws():
    """make sure fetch_split_history throws expected errors"""
    with pytest.raises(exceptions.NoSplitConfigFound):
        split_obj = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            int(TEST_CONFIG.get('TEST', 'alt_id')) + 1,
            api_utils.SwitchCCPSource.EMD
        )
Example #3
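Forward-split happy path against the EMD source: fetches raw EMD history for both the new and original type IDs, trims both to the split window, then validates post-split rows as plain data and pre-split rows against the split parameters.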
    def test_forward_happypath_emd(self):
        """test a forward-split: emd"""
        split_obj = split_utils.SplitInfo(DEMO_SPLIT)
        raw_emd_data = forecast_utils.fetch_market_history_emd(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            data_range=TEST_CONFIG.get('TEST', 'history_count'),
            config=ROOT_CONFIG
        )
        raw_emd_data1 = forecast_utils.parse_emd_data(raw_emd_data['result'])
        raw_emd_data = forecast_utils.fetch_market_history_emd(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_original_id,
            data_range=TEST_CONFIG.get('TEST', 'history_count'),
            config=ROOT_CONFIG
        )
        raw_emd_data2 = forecast_utils.parse_emd_data(raw_emd_data['result'])

        split_data = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            api_utils.SwitchCCPSource.EMD,
            config=ROOT_CONFIG
        )

        ## Doctor data for testing ##
        min_split_date = split_data.date.min()
        raw_emd_data1 = prep_raw_data(
            raw_emd_data1.copy(),
            min_split_date
        )
        raw_emd_data2 = prep_raw_data(
            raw_emd_data2.copy(),
            min_split_date
        )

        pre_split_data = split_data[split_data.date <= split_obj.date_str].reset_index()
        pre_raw_data = raw_emd_data2[raw_emd_data2.date <= split_obj.date_str].reset_index()
        post_split_data = split_data[split_data.date > split_obj.date_str].reset_index()
        post_raw_data = raw_emd_data1[raw_emd_data1.date > split_obj.date_str].reset_index()

        ## Validate pre/post Split values ##
        validate_plain_data(
            post_raw_data,
            post_split_data
        )

        validate_split_data(
            pre_raw_data,
            pre_split_data,
            split_obj
        )
Example #4
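Verifies that a future-dated split is a pass-through for the CREST source: the fetch_split_history result must equal a direct crest_utils.fetch_market_history call.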
    def test_future_split_crest(self):
        """validate with CREST source"""
        test_data_crest = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            api_utils.SwitchCCPSource.CREST,
            config=ROOT_CONFIG)
        assert test_data_crest.equals(
            crest_utils.fetch_market_history(
                TEST_CONFIG.get('TEST', 'region_id'),
                self.test_type_id,
                mode=api_utils.SwitchCCPSource.CREST,
                config=ROOT_CONFIG))
Example #5
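The same future-split pass-through check as Example #4, run against the ESI source.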
    def test_future_split_esi(self):
        """validate on ESI"""
        test_data_esi = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            api_utils.SwitchCCPSource.ESI,
            config=ROOT_CONFIG)
        assert test_data_esi.equals(
            crest_utils.fetch_market_history(
                TEST_CONFIG.get('TEST', 'region_id'),
                self.test_type_id,
                mode=api_utils.SwitchCCPSource.ESI,
                config=ROOT_CONFIG))
Example #6
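A variant of Example #5 in which the reference fetch_market_history call no longer passes an explicit mode.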
    def test_future_split_esi(self):
        """validate on ESI"""
        test_data_esi = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            api_utils.SwitchCCPSource.ESI,
            config=ROOT_CONFIG
        )
        assert test_data_esi.equals(
            crest_utils.fetch_market_history(
                TEST_CONFIG.get('TEST', 'region_id'),
                self.test_type_id,
                config=ROOT_CONFIG
            )
        )
Example #7
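Future-split pass-through check for the EMD source: the fetch_split_history result must equal the parsed output of fetch_market_history_emd.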
    def test_future_split_emd(self):
        """validate with EMD source"""
        test_data_emd = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            api_utils.SwitchCCPSource.EMD,
            data_range=TEST_CONFIG.get('TEST', 'history_count'),
            config=ROOT_CONFIG)
        emd_data_raw = forecast_utils.fetch_market_history_emd(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            data_range=TEST_CONFIG.get('TEST', 'history_count'),
            config=ROOT_CONFIG)
        assert test_data_emd.equals(
            forecast_utils.parse_emd_data(emd_data_raw['result']))
Example #8
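Forward-split happy path against ESI, mirroring Example #3: raw history for both type IDs is doctored to the split window, then the pre-split and post-split slices are validated separately.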
    def test_forward_happypath_esi(self):
        """test a forward-split: ESI"""
        split_obj = split_utils.SplitInfo(DEMO_SPLIT)
        raw_esi_data1 = crest_utils.fetch_market_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            config=ROOT_CONFIG
        )
        raw_esi_data2 = crest_utils.fetch_market_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_original_id,
            config=ROOT_CONFIG
        )
        split_data = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            fetch_source=api_utils.SwitchCCPSource.ESI,
            config=ROOT_CONFIG
        )
        #split_data.to_csv('split_data_esi.csv', index=False)

        ## Doctor data for testing ##
        min_split_date = split_data.date.min()
        raw_esi_data1 = prep_raw_data(
            raw_esi_data1.copy(),
            min_split_date
        )
        raw_esi_data2 = prep_raw_data(
            raw_esi_data2.copy(),
            min_split_date
        )

        pre_split_data = split_data[split_data.date <= split_obj.date_str].reset_index()
        pre_raw_data = raw_esi_data2[raw_esi_data2.date <= split_obj.date_str].reset_index()
        post_split_data = split_data[split_data.date > split_obj.date_str].reset_index()
        post_raw_data = raw_esi_data1[raw_esi_data1.date > split_obj.date_str].reset_index()

        ## Validate pre/post Split values ##
        validate_plain_data(
            post_raw_data,
            post_split_data
        )

        validate_split_data(
            pre_raw_data,
            pre_split_data,
            split_obj
        )
Example #9
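A variant of Example #7 that passes the EMD source through the fetch_source keyword argument.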
    def test_future_split_emd(self):
        """validate with EMD source"""
        test_data_emd = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            fetch_source=api_utils.SwitchCCPSource.EMD,
            data_range=TEST_CONFIG.get('TEST', 'history_count'),
            config=ROOT_CONFIG
        )
        emd_data_raw = forecast_utils.fetch_market_history_emd(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            data_range=TEST_CONFIG.get('TEST', 'history_count'),
            config=ROOT_CONFIG
        )
        assert test_data_emd.equals(forecast_utils.parse_emd_data(emd_data_raw['result']))
Example #10
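Checks the short-range escape: when data_range is shorter than the time since the split, fetch_split_history should return plain parsed EMD history.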
    def test_short_split(self):
        """make sure split logic is skipped if the split was too far back"""
        short_days = floor(DAYS_SINCE_SPLIT / 2)
        test_data_emd = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            api_utils.SwitchCCPSource.EMD,
            data_range=short_days,
            config=ROOT_CONFIG)
        emd_data_raw = forecast_utils.fetch_market_history_emd(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            data_range=short_days,
            config=ROOT_CONFIG)
        assert test_data_emd.equals(
            forecast_utils.parse_emd_data(emd_data_raw['result']))
Example #11
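A variant of Example #10 that relies on the default fetch source instead of passing SwitchCCPSource.EMD explicitly.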
    def test_short_split(self):
        """make sure escaped if split was too far back"""
        short_days = floor(DAYS_SINCE_SPLIT/2)
        test_data_emd = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            data_range=short_days,
            config=ROOT_CONFIG
        )
        emd_data_raw = forecast_utils.fetch_market_history_emd(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            data_range=short_days,
            config=ROOT_CONFIG
        )

        assert test_data_emd.equals(
            forecast_utils.parse_emd_data(emd_data_raw['result']))
Example #12
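A more compact formulation of the EMD forward-split test from Example #3.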
    def test_forward_happypath_emd(self):
        """test a forward-split: crest"""
        split_obj = split_utils.SplitInfo(DEMO_SPLIT)
        raw_emd_data = forecast_utils.fetch_market_history_emd(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            data_range=TEST_CONFIG.get('TEST', 'history_count'),
            config=ROOT_CONFIG)
        raw_emd_data1 = forecast_utils.parse_emd_data(raw_emd_data['result'])
        raw_emd_data = forecast_utils.fetch_market_history_emd(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_original_id,
            data_range=TEST_CONFIG.get('TEST', 'history_count'),
            config=ROOT_CONFIG)
        raw_emd_data2 = forecast_utils.parse_emd_data(raw_emd_data['result'])

        split_data = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            api_utils.SwitchCCPSource.EMD,
            config=ROOT_CONFIG)
        #split_data.to_csv('split_data_emd.csv', index=False)

        ## Doctor data for testing ##
        min_split_date = split_data.date.min()
        raw_emd_data1 = prep_raw_data(raw_emd_data1.copy(), min_split_date)
        raw_emd_data2 = prep_raw_data(raw_emd_data2.copy(), min_split_date)

        pre_split_data = split_data[
            split_data.date <= split_obj.date_str].reset_index()
        pre_raw_data = raw_emd_data2[
            raw_emd_data2.date <= split_obj.date_str].reset_index()
        post_split_data = split_data[
            split_data.date > split_obj.date_str].reset_index()
        post_raw_data = raw_emd_data1[
            raw_emd_data1.date > split_obj.date_str].reset_index()

        ## Validate pre/post Split values ##
        validate_plain_data(post_raw_data, post_split_data)

        validate_split_data(pre_raw_data, pre_split_data, split_obj)
Example #13
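Forward-split happy path against CREST; here the split date string is built locally with datetime.strftime rather than read from split_obj.date_str.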
    def test_forward_happypath_crest(self):
        """test a forward-split: crest"""
        split_obj = split_utils.SplitInfo(DEMO_SPLIT)
        raw_crest_data1 = crest_utils.fetch_market_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_type_id,
            mode=api_utils.SwitchCCPSource.CREST,
            config=ROOT_CONFIG)
        raw_crest_data2 = crest_utils.fetch_market_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            self.test_original_id,
            mode=api_utils.SwitchCCPSource.CREST,
            config=ROOT_CONFIG)
        split_data = split_utils.fetch_split_history(
            TEST_CONFIG.get('TEST', 'region_id'),
            DEMO_SPLIT['type_id'],
            api_utils.SwitchCCPSource.CREST,
            config=ROOT_CONFIG)
        #split_data.to_csv('split_data_crest.csv', index=False)

        ## Doctor data for testing ##
        min_split_date = split_data.date.min()
        raw_crest_data1 = prep_raw_data(raw_crest_data1.copy(), min_split_date)
        raw_crest_data2 = prep_raw_data(raw_crest_data2.copy(), min_split_date)

        split_date_str = datetime.strftime(split_obj.split_date,
                                           '%Y-%m-%dT%H:%M:%S')
        pre_split_data = split_data[
            split_data.date <= split_date_str].reset_index()
        pre_raw_data = raw_crest_data2[
            raw_crest_data2.date <= split_date_str].reset_index()
        post_split_data = split_data[
            split_data.date > split_date_str].reset_index()
        post_raw_data = raw_crest_data1[
            raw_crest_data1.date > split_date_str].reset_index()

        ## Validate pre/post Split values ##
        validate_plain_data(post_raw_data, post_split_data)

        validate_split_data(pre_raw_data, pre_split_data, split_obj)
Example #14
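The GET handler for the forecast endpoint: it validates the API key, region ID, type ID, and requested range, serves a cached prediction when one exists, otherwise fetches history (forking to the split utility for split-configured types), builds a forecast, caches it, and returns a report in the requested format.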
    def get(self, return_type):
        """GET forecast data and return a report in the requested format"""
        args = self.reqparse.parse_args()
        self.logger.info('Prophet %s Request: %s', return_type, args)

        if return_type not in return_supported_types():
            return 'INVALID RETURN FORMAT', 405

        forecast_range = api_config.DEFAULT_RANGE
        if 'range' in args:
            forecast_range = args.get('range')
        ## Validate inputs ##
        try:
            api_utils.check_key(
                args.get('api'),
                throw_on_fail=True,
                logger=self.logger,
            )
            crest_utils.validate_id(
                'map_regions',
                args.get('regionID'),
                config=api_config.CONFIG,
                logger=self.logger,
            )
            crest_utils.validate_id(
                'inventory_types',
                args.get('typeID'),
                config=api_config.CONFIG,
                logger=self.logger,
            )
            forecast_range = forecast_utils.check_requested_range(
                forecast_range,
                max_range=api_config.MAX_RANGE,
                raise_for_status=True
            )
        except exceptions.ValidatorException as err:
            self.logger.warning(
                'ERROR: unable to validate type/region ids\n\targs=%s',
                args,
                exc_info=True
            )
            return err.message, err.status
        except Exception: #pragma: no cover
            self.logger.error(
                'ERROR: unable to validate type/region ids\n\targs=%s',
                args,
                exc_info=True
            )
            return 'UNHANDLED EXCEPTION', 500

        ## check cache ##
        cache_data = forecast_utils.check_prediction_cache(
            args.get('regionID'),
            args.get('typeID')
        )
        self.logger.debug(cache_data)
        if cache_data is not None:
            self.logger.info('returning cached forecast')
            message = forecast_reporter(
                cache_data,
                forecast_range,
                return_type,
                self.logger,
            )

            return message

        ## No cache, get data ##
        try:
            if args.get('typeID') in api_config.SPLIT_INFO:
                self.logger.info('FORK: using split utility')
                data = split_utils.fetch_split_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    data_range=api_config.MAX_RANGE,
                    config=api_config.CONFIG,
                    logger=self.logger,
                )
                data.sort_values(
                    by='date',
                    ascending=True,
                    inplace=True
                )
            else:
                data = forecast_utils.fetch_extended_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    data_range=api_config.MAX_RANGE,
                    config=api_config.CONFIG,
                    logger=self.logger,
                )
            data = forecast_utils.build_forecast(
                data,
                api_config.MAX_RANGE
            )
        except exceptions.ValidatorException as err:
            #FIX ME: testing?
            self.logger.warning(
                'ERROR: unable to generate forecast\n\targs=%s',
                args,
                exc_info=True
            )
            return err.message, err.status
        except Exception: #pragma: no cover
            self.logger.error(
                'ERROR: unable to generate forecast\n\targs=%s',
                args,
                exc_info=True
            )
            return 'UNHANDLED EXCEPTION', 500

        ## Update cache ##
        forecast_utils.write_prediction_cache(
            args.get('regionID'),
            args.get('typeID'),
            data,
            logger=self.logger,
        )
        try:
            message = forecast_reporter(
                data,
                forecast_range,
                return_type,
                self.logger,
            )
        except Exception:    #pragma: no cover
            self.logger.error(
                'invalid format requested'
                '\n\targs=%s'
                '\n\treturn_type=%s',
                args, return_type,
                exc_info=True
            )
            return 'UNABLE TO GENERATE REPORT', 500
        return message
Example #15
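The GET handler for the OHLC endpoint: validates region and type IDs against the configured CREST/ESI mode, fetches market history (forking to the split utility for split-configured types), converts it to OHLC, and serializes the result as JSON or CSV.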
    def get(self, return_type):
        """GET data from CREST and send out OHLC info"""
        args = self.reqparse.parse_args()
        #TODO: info archive
        LOGGER.info('OHLC {0} Request: {1}'.format(return_type, args))

        if return_type not in return_supported_types():
            return 'INVALID RETURN FORMAT', 405

        mode = api_config.SwitchCCPSource(
            api_config.CONFIG.get('GLOBAL', 'crest_or_esi'))
        ## Validate inputs ##
        try:
            crest_utils.validate_id('map_regions',
                                    args.get('regionID'),
                                    mode=mode,
                                    config=api_config.CONFIG,
                                    logger=LOGGER)
            crest_utils.validate_id('inventory_types',
                                    args.get('typeID'),
                                    mode=mode,
                                    config=api_config.CONFIG,
                                    logger=LOGGER)
        except exceptions.ValidatorException as err:
            LOGGER.warning('ERROR: unable to validate type/region ids' +
                           '\n\targs={0}'.format(args),
                           exc_info=True)
            return err.message, err.status
        except Exception:  #pragma: no cover
            LOGGER.error('ERROR: unable to validate type/region ids' +
                         '\n\targs={0}'.format(args),
                         exc_info=True)
            return 'UNHANDLED EXCEPTION', 500

        ## Fetch CREST ##
        try:
            #LOGGER.info(api_config.SPLIT_INFO)
            if args.get('typeID') in api_config.SPLIT_INFO:
                LOGGER.info('FORK: using split utility')
                data = split_utils.fetch_split_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    mode,
                    config=api_config.CONFIG,
                    logger=LOGGER)
            else:
                data = crest_utils.fetch_market_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    config=api_config.CONFIG,
                    mode=mode,
                    logger=LOGGER)
            data = crest_utils.data_to_ohlc(data)
        except exceptions.ValidatorException as err:  #pragma: no cover
            LOGGER.error('ERROR: unable to parse CREST data' +
                         '\n\targs={0}'.format(args),
                         exc_info=True)
            return err.message, err.status
        except Exception:  #pragma: no cover
            #except Exception as err:    #pragma: no cover
            LOGGER.error('ERROR: unhandled issue in parsing CREST data' +
                         '\n\targs={0}'.format(args),
                         exc_info=True)
            return 'UNHANDLED EXCEPTION', 500

        ## Format output ##
        if return_type == AcceptedDataFormat.JSON.value:
            LOGGER.info('rolling json response')
            data_str = data.to_json(path_or_buf=None, orient='records')
            message = json.loads(data_str)
        elif return_type == AcceptedDataFormat.CSV.value:
            LOGGER.info('rolling csv response')
            data_str = data.to_csv(
                path_or_buf=None,
                header=True,
                index=False,
                columns=['date', 'open', 'high', 'low', 'close', 'volume'])
            message = output_csv(data_str, 200)
        else:  #pragma: no cover
            #TODO: CUT?
            LOGGER.error('invalid format requested' +
                         '\n\targs={0}'.format(args) +
                         '\n\treturn_type={0}'.format(return_type),
                         exc_info=True)
            return 'UNSUPPORTED FORMAT', 500

        return message
Example #16
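A CREST/ESI-mode-aware version of the forecast handler from Example #14, logging through the module-level LOGGER.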
    def get(self, return_type):
        """GET forecast data and return a report in the requested format"""
        args = self.reqparse.parse_args()
        LOGGER.info('Prophet {0} Request: {1}'.format(return_type, args))

        if return_type not in return_supported_types():
            return 'INVALID RETURN FORMAT', 405

        mode = api_config.SwitchCCPSource(
            api_config.CONFIG.get('GLOBAL', 'crest_or_esi'))
        forecast_range = api_config.DEFAULT_RANGE
        if 'range' in args:
            forecast_range = args.get('range')
        ## Validate inputs ##
        try:
            api_utils.check_key(args.get('api'),
                                throw_on_fail=True,
                                logger=LOGGER)
            crest_utils.validate_id('map_regions',
                                    args.get('regionID'),
                                    config=api_config.CONFIG,
                                    mode=mode,
                                    logger=LOGGER)
            crest_utils.validate_id('inventory_types',
                                    args.get('typeID'),
                                    config=api_config.CONFIG,
                                    mode=mode,
                                    logger=LOGGER)
            forecast_range = forecast_utils.check_requested_range(
                forecast_range,
                max_range=api_config.MAX_RANGE,
                raise_for_status=True)
        except exceptions.ValidatorException as err:
            LOGGER.warning('ERROR: unable to validate type/region ids' +
                           '\n\targs={0}'.format(args),
                           exc_info=True)
            return err.message, err.status
        except Exception:  #pragma: no cover
            LOGGER.error('ERROR: unable to validate type/region ids' +
                         '\n\targs={0}'.format(args),
                         exc_info=True)
            return 'UNHANDLED EXCEPTION', 500

        ## check cache ##
        cache_data = forecast_utils.check_prediction_cache(
            args.get('regionID'), args.get('typeID'))
        LOGGER.debug(cache_data)
        if cache_data is not None:
            LOGGER.info('returning cached forecast')
            message = forecast_reporter(cache_data, forecast_range,
                                        return_type, LOGGER)

            return message

        ## No cache, get data ##
        try:
            if args.get('typeID') in api_config.SPLIT_INFO:
                LOGGER.info('FORK: using split utility')
                data = split_utils.fetch_split_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    mode,
                    data_range=api_config.MAX_RANGE,
                    config=api_config.CONFIG,
                    logger=LOGGER)
                data.sort_values(by='date', ascending=True, inplace=True)
            else:
                data = forecast_utils.fetch_extended_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    mode=mode,
                    data_range=api_config.MAX_RANGE,
                    config=api_config.CONFIG,
                    logger=LOGGER)
            data = forecast_utils.build_forecast(data, api_config.MAX_RANGE)
        except exceptions.ValidatorException as err:
            #FIX ME: testing?
            LOGGER.warning('ERROR: unable to generate forecast' +
                           '\n\targs={0}'.format(args),
                           exc_info=True)
            return err.message, err.status
        except Exception:  #pragma: no cover
            LOGGER.error('ERROR: unable to generate forecast' +
                         '\n\targs={0}'.format(args),
                         exc_info=True)
            return 'UNHANDLED EXCEPTION', 500

        ## Update cache ##
        forecast_utils.write_prediction_cache(args.get('regionID'),
                                              args.get('typeID'),
                                              data,
                                              logger=LOGGER)
        try:
            message = forecast_reporter(data, forecast_range, return_type,
                                        LOGGER)
        except Exception:  #pragma: no cover
            LOGGER.error('invalid format requested' +
                         '\n\targs={0}'.format(args) +
                         '\n\treturn_type={0}'.format(return_type),
                         exc_info=True)
            return 'UNABLE TO GENERATE REPORT', 500
        return message
Example #17
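A version of the OHLC handler from Example #15 that logs through an instance logger and omits the explicit CREST/ESI mode switch.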
    def get(self, return_type):
        """GET data from CREST and send out OHLC info"""
        args = self.reqparse.parse_args()
        #TODO: info archive
        self.logger.info('OHLC %s Request: %s', return_type, args)

        if return_type not in return_supported_types():
            return 'INVALID RETURN FORMAT', 405
        ## Validate inputs ##
        try:
            crest_utils.validate_id(
                'map_regions',
                args.get('regionID'),
                config=api_config.CONFIG,
                logger=self.logger,
            )
            crest_utils.validate_id(
                'inventory_types',
                args.get('typeID'),
                config=api_config.CONFIG,
                logger=self.logger,
            )
        except exceptions.ValidatorException as err:
            self.logger.warning(
                'ERROR: unable to validate type/region ids\n\targs=%s',
                args,
                exc_info=True
            )
            return err.message, err.status
        except Exception: #pragma: no cover
            self.logger.error(
                'ERROR: unable to validate type/region ids\n\targs=%s',
                args,
                exc_info=True
            )
            return 'UNHANDLED EXCEPTION', 500

        ## Fetch CREST ##
        try:
            #LOGGER.info(api_config.SPLIT_INFO)
            if args.get('typeID') in api_config.SPLIT_INFO:
                self.logger.info('FORK: using split utility')
                data = split_utils.fetch_split_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    config=api_config.CONFIG,
                    logger=self.logger,
                )
            else:
                data = crest_utils.fetch_market_history(
                    args.get('regionID'),
                    args.get('typeID'),
                    config=api_config.CONFIG,
                    logger=self.logger
                )
            data = crest_utils.data_to_ohlc(data)
        except exceptions.ValidatorException as err: #pragma: no cover
            self.logger.error(
                'ERROR: unable to parse CREST data\n\targs=%s',
                args,
                exc_info=True
            )
            return err.message, err.status
        except Exception: #pragma: no cover
            self.logger.error(
                'ERROR: unhandled issue in parsing CREST data\n\targs=%s',
                args,
                exc_info=True
            )
            return 'UNHANDLED EXCEPTION', 500

        ## Format output ##
        if return_type == AcceptedDataFormat.JSON.value:
            self.logger.info('rolling json response')
            data_str = data.to_json(
                path_or_buf=None,
                orient='records'
            )
            message = json.loads(data_str)
        elif return_type == AcceptedDataFormat.CSV.value:
            self.logger.info('rolling csv response')
            data_str = data.to_csv(
                path_or_buf=None,
                header=True,
                index=False,
                columns=[
                    'date',
                    'open',
                    'high',
                    'low',
                    'close',
                    'volume'
                ]
            )
            message = output_csv(data_str, 200)
        else:   #pragma: no cover
            #TODO: CUT?
            self.logger.error(
                'invalid format requested'
                '\n\targs=%s'
                '\n\treturn_type=%s',
                args, return_type,
                exc_info=True
            )
            return 'UNSUPPORTED FORMAT', 500

        return message