def test_coordinates_converter():
    """Round-trip coordinate strings through the parser and verify invalid input raises."""
    # Each of these strings should parse and stringify back to itself unchanged.
    round_trip_cases = ("A_B_C_D", "A_B_C.E", "A_B_.E", "A_B_C_D;E.F")
    for raw in round_trip_cases:
        parsed = GsDataApi._coordinate_from_str(raw)
        assert str(parsed) == raw
    # A bare single token is not a valid coordinate.
    with pytest.raises(MqValueError, match='invalid coordinate A'):
        GsDataApi._coordinate_from_str("A")
def get_thematic_beta(self,
                      basket_identifier: str,
                      start: dt.date = DateLimit.LOW_LIMIT.value,
                      end: dt.date = None) -> pd.DataFrame:
    """
    Retrieve daily thematic betas of this asset versus a thematic basket.

    :param basket_identifier: identifier of the thematic (custom or research) basket
    :param start: start date (defaults to the minimum date limit)
    :param end: end date (defaults to today, evaluated at call time)
    :return: DataFrame of betas indexed by date with assetId/basketId/thematicBeta columns
    :raises MqValueError: if the basket cannot be resolved or is not a custom/research basket
    :raises NotImplementedError: if this entity is not an asset
    """
    # BUG FIX: the previous default `end=dt.date.today()` was evaluated once at import
    # time, so a long-running process would silently query a stale end date.
    if end is None:
        end = dt.date.today()
    if not self.positioned_entity_type == EntityType.ASSET:
        raise NotImplementedError
    response = GsAssetApi.resolve_assets(identifier=[basket_identifier],
                                         fields=['id', 'type'], limit=1)[basket_identifier]
    _id, _type = get(response, '0.id'), get(response, '0.type')
    if len(response) == 0 or _id is None:
        raise MqValueError(f'Basket could not be found using identifier {basket_identifier}.')
    if _type not in BasketType.to_list():
        raise MqValueError(f'Asset {basket_identifier} of type {_type} is not a Custom or Research Basket.')
    query = DataQuery(where={'assetId': self.id, 'basketId': _id}, start_date=start, end_date=end)
    response = GsDataApi.query_data(query=query,
                                    dataset_id=IndicesDatasets.COMPOSITE_THEMATIC_BETAS.value)
    # Project only the columns callers rely on; 'beta' is renamed to 'thematicBeta'.
    df = [{'date': r['date'], 'assetId': r['assetId'], 'basketId': r['basketId'],
           'thematicBeta': r['beta']} for r in response]
    return pd.DataFrame(df).set_index('date')
def fci(country_id: str, measure: _FCI_MEASURE = _FCI_MEASURE.FCI, *, source: str = None,
        real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:
    """
    Daily Financial Conditions Index (FCI) for each of the world's large economies and many
    smaller ones, as well as aggregate FCIs for regions.

    :param country_id: id of country/region
    :param measure: FCI metric to retrieve
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :param request_id: server request id
    :return: FCI metric value
    """
    if real_time:
        raise NotImplementedError('real-time FCI data is not available')
    query_type = QueryType(inflection.titleize(measure.value))
    # Real FCI measures live in the 'FCI' dataset and are fetched directly rather than
    # through the market-data query path.
    if measure in (_FCI_MEASURE.REAL_FCI, _FCI_MEASURE.REAL_TWI_CONTRIBUTION):
        frame = Dataset('FCI').get_data(geographyId=country_id)
        column = 'realFCI' if measure == _FCI_MEASURE.REAL_FCI else 'realTWIContribution'
        if column in frame.columns:
            series = ExtendedSeries(frame[column])
        else:
            # Column absent for this geography: return an empty float series.
            series = ExtendedSeries(dtype=float)
        series.dataset_ids = ('FCI', )
        return series
    q = GsDataApi.build_market_data_query([country_id], query_type=query_type, source=source,
                                          real_time=real_time)
    df = _market_data_timed(q, request_id)
    return _extract_series_from_df(df, query_type, True)
def test_get_coverage_api(mocker):
    """get_coverage should page via scroll ids and return the accumulated results."""
    first_page = {
        'results': [{'gsid': 'gsid1'}],
        'scrollId': 'fake-scroll-id-1',
        'totalResults': 1
    }
    # Empty second page terminates the scroll loop.
    final_page = {
        'results': [],
        'scrollId': 'fake-scroll-id-2',
        'totalResults': 1
    }
    mocker.patch.object(ContextMeta, 'current', return_value=GsSession(Environment.QA))
    get_mock = mocker.patch.object(ContextMeta.current, '_get')
    get_mock.side_effect = [first_page, final_page]
    data = GsDataApi.get_coverage('MA_RANK')
    assert [{'gsid': 'gsid1'}] == data
def swaption_vol(asset: Asset, expiration_tenor: str, termination_tenor: str, relative_strike: float,
                 *, source: str = None, real_time: bool = False) -> Series:
    """
    GS end-of-day implied normal volatility for swaption vol matrices.

    :param asset: asset object loaded from security master
    :param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
    :param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
    :param relative_strike: strike level relative to at the money e.g. 10
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: swaption implied normal volatility curve
    """
    if real_time:
        raise NotImplementedError('realtime swaption_vol not implemented')
    # Swaption vols are keyed off the currency's default benchmark rate asset.
    benchmark_mqid = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
    _logger.debug('where expiry=%s, tenor=%s, strike=%s',
                  expiration_tenor, termination_tenor, relative_strike)
    filters = FieldFilterMap(expiry=expiration_tenor, tenor=termination_tenor, strike=relative_strike)
    q = GsDataApi.build_market_data_query([benchmark_mqid], QueryType.SWAPTION_VOL, where=filters,
                                          source=source, real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    return Series() if df.empty else df['swaptionVol']
def get_portfolio_constituents(self,
                               fields: List[str] = None,
                               start_date: dt.date = None,
                               end_date: dt.date = None,
                               return_format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
    """
    Get historical portfolio constituents

    :param fields: list of fields to include in the results
    :param start_date: start date
    :param end_date: end date
    :param return_format: return format; defaults to a Pandas DataFrame, but can be manually
    set to ReturnFormat.JSON
    :return: Portfolio constituent data for each day in the requested date range
    """
    where = {'reportId': self.id}
    # Split the range into batches so each query stays under the row limit.
    date_batches = _get_ppaa_batches(self.get_asset_count(start_date, end_date), 3000000)
    queries = [DataQuery(where=where, fields=fields, start_date=dates_batch[0], end_date=dates_batch[1])
               for dates_batch in date_batches]
    # Flatten the per-batch result lists in one pass; the previous `sum(results, [])`
    # was quadratic in the total number of rows.
    results = [row
               for query in queries
               for row in GsDataApi.query_data(query=query,
                                               dataset_id=ReportDataset.PORTFOLIO_CONSTITUENTS.value)]
    return pd.DataFrame(results) if return_format == ReturnFormat.DATA_FRAME else results
def get_fundamentals(
        self,
        period: FundamentalMetricPeriod = FundamentalMetricPeriod.ONE_YEAR.value,
        direction: FundamentalMetricPeriodDirection = FundamentalMetricPeriodDirection.FORWARD.value,
        metrics: [FundamentalsMetrics] = FundamentalsMetrics.to_list(),
        start_date: dt.date = DateLimit.LOW_LIMIT.value,
        end_date: dt.date = DateLimit.TODAY.value) -> DataQueryResponse:
    """
    Query fundamentals data for this basket over a date range.

    :param period: period for the relevant metric (default 1y)
    :param direction: direction of the outlook period (default forward)
    :param metrics: fundamentals metrics to fetch (default all)
    :param start_date: start of the query range
    :param end_date: end of the query range
    :return: raw data-query response from the basket fundamentals dataset
    """
    if not self.id:
        # Cannot query without a resolved marquee id.
        self.__raise_initialization_error('retrieve fundamentals data')
    filters = dict(assetId=self.id, period=period, periodDirection=direction, metric=metrics)
    fundamentals_query = DataQuery(where=filters, start_date=start_date, end_date=end_date)
    return GsDataApi.query_data(query=fundamentals_query,
                                dataset_id=IndicesDatasets.BASKET_FUNDAMENTALS.value)
def _get_fx_vol_swap_data(asset: Asset, expiry_tenor: str, strike_type: str = None,
                          location: PricingLocation = None, source: str = None,
                          real_time: bool = False,
                          query_type: QueryType = QueryType.STRIKE_VOL) -> pd.DataFrame:
    """
    Fetch FX volatility swap market data for the given cross and expiry tenor.

    :param asset: FX cross asset loaded from security master
    :param expiry_tenor: relative expiry tenor of the vol swap, e.g. 3m
    :param strike_type: currently unused (strike filtering is disabled below)
    :param location: pricing location (defaults to NYC)
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :param query_type: market data query type (defaults to STRIKE_VOL)
    :return: DataFrame of vol swap market data
    """
    if real_time:
        # BUG FIX: message previously said 'inflation swap' — a copy-paste from another helper.
        raise NotImplementedError('realtime FX vol swap data not implemented')
    cross = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
    if cross not in FX_VOL_SWAP_DEFAULTS:
        raise NotImplementedError('Data not available for {} FX Vol Swaps'.format(cross))
    kwargs = dict(
        asset_class='FX',
        type='VolatilitySwap',
        expiry_tenor=expiry_tenor,
        asset_parameters_pair=cross,
        # asset_parameters_strike_vol=strike_type
    )
    fxv_mqid = _get_tdapi_fxo_assets_vol_swaps(**kwargs)
    # Default to NYC when no pricing location is supplied.
    pricing_location = PricingLocation.NYC if location is None else PricingLocation(location)
    where = dict(pricingLocation=pricing_location.value)
    q = GsDataApi.build_market_data_query([fxv_mqid], query_type, where=where, source=source,
                                          real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    return df
def get_fundamentals(self,
                     start: dt.date = DateLimit.LOW_LIMIT.value,
                     end: dt.date = None,
                     period: FundamentalMetricPeriod = FundamentalMetricPeriod.ONE_YEAR.value,
                     direction: FundamentalMetricPeriodDirection = FundamentalMetricPeriodDirection.FORWARD.value,
                     metrics: [FundamentalsMetrics] = FundamentalsMetrics.to_list()) -> pd.DataFrame:
    """ Retrieve fundamentals data for a basket across a date range

    :param start: start date (default minimum date value)
    :param end: end date (default is today)
    :param period: period for the relevant metric (default is 1y)
    :param direction: direction of the outlook period (default is forward)
    :param metrics: list of fundamentals metrics (default is all)
    :return: dataframe with fundamentals information

    **Usage**

    Retrieve fundamentals data for a basket across a date range

    **Examples**

    Retrieve historical dividend yield data for a basket

    >>> from gs_quant.markets.baskets import Basket
    >>> from gs_quant.markets.indices_utils import FundamentalsMetrics
    >>>
    >>> basket = Basket("GSMBXXXX")
    >>> basket.get_fundamentals(metrics=[FundamentalsMetrics.DIVIDEND_YIELD])

    **See also**

    :func:`get_corporate_actions`
    """
    # BUG FIX: `end=dt.date.today()` was evaluated once at import, freezing the default
    # end date for long-lived processes; it is now resolved per call. The docstring
    # example also previously called get_corporate_actions instead of get_fundamentals.
    if end is None:
        end = dt.date.today()
    where = dict(assetId=self.id, period=period, periodDirection=direction, metric=metrics)
    query = DataQuery(where=where, start_date=start, end_date=end)
    response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.BASKET_FUNDAMENTALS.value)
    return pd.DataFrame(response)
def result(self):
    """
    Fetch the output of a finished report job.

    :return: a Pandas DataFrame containing the results of the report job, or None for
        report types with no retrievable results
    :raises MqValueError: if the job is cancelled, errored, or not yet done
    """
    status = self.status()
    if status == ReportStatus.cancelled:
        # BUG FIX: message previously read 'This report job in status ...' (missing "is"),
        # inconsistent with the error-status message below.
        raise MqValueError('This report job is in status "cancelled". Cannot retrieve results.')
    if status == ReportStatus.error:
        raise MqValueError('This report job is in status "error". Cannot retrieve results.')
    if status != ReportStatus.done:
        raise MqValueError('This report job is not done. Cannot retrieve results.')
    if self.__report_type in [ReportType.Portfolio_Factor_Risk, ReportType.Asset_Factor_Risk]:
        results = GsReportApi.get_factor_risk_report_results(risk_report_id=self.__report_id,
                                                             start_date=self.__start_date,
                                                             end_date=self.__end_date)
        return pd.DataFrame(results)
    if self.__report_type == ReportType.Portfolio_Performance_Analytics:
        query = DataQuery(where={'reportId': self.__report_id},
                          start_date=self.__start_date,
                          end_date=self.__end_date)
        results = GsDataApi.query_data(query=query, dataset_id=ReportDataset.PPA_DATASET.value)
        return pd.DataFrame(results)
    # Other report types have no retrievable results.
    return None
def get_data_coordinate(
        self,
        measure: DataMeasure,
        dimensions: Optional[DataDimensions] = None,
        frequency: Optional[DataFrequency] = None) -> Optional[DataCoordinate]:
    """Build a DataCoordinate for this entity's *measure* at the requested *frequency*.

    Returns None when *frequency* is neither DAILY nor REAL_TIME (implicit fall-through;
    hence the Optional return annotation).
    """
    id_ = self.get_marquee_id()
    dimensions = dimensions or {}
    # NOTE(review): mutates the caller-supplied dimensions dict in place when one is
    # passed — confirm callers do not rely on it staying unchanged.
    dimensions[self.data_dimension] = id_
    # Datasets available for this measure; presumably keyed by frequency — verify against
    # GsDataApi.get_data_providers. Empty dict when the measure has no providers.
    available: Dict = GsDataApi.get_data_providers(id_).get(
        measure.value, {})

    if frequency == DataFrequency.DAILY:
        daily_dataset_id = available.get(DataFrequency.DAILY)
        return DataCoordinate(dataset_id=daily_dataset_id, measure=measure, dimensions=dimensions,
                              frequency=frequency)
    if frequency == DataFrequency.REAL_TIME:
        rt_dataset_id = available.get(DataFrequency.REAL_TIME)
        return DataCoordinate(dataset_id=rt_dataset_id, measure=measure, dimensions=dimensions,
                              frequency=frequency)
def get_corporate_actions(
        self,
        start: dt.date = DateLimit.LOW_LIMIT.value,
        end: dt.date = None,
        ca_type: List[CorporateActionType] = CorporateActionType.to_list()) -> pd.DataFrame:
    """ Retrieve corporate actions for a basket across a date range

    :param start: start date (default minimum date value)
    :param end: end date (default is 10 days from today)
    :param ca_type: list of corporate action types (default is all)
    :return: dataframe with corporate actions information

    **Usage**

    Retrieve corporate actions for a basket across a date range

    **Examples**

    Retrieve historical acquisition corporate actions for a basket

    >>> from gs_quant.markets.baskets import Basket
    >>> from gs_quant.markets.indices_utils import CorporateActionType
    >>>
    >>> basket = Basket.get("GSMBXXXX")
    >>> basket.get_corporate_actions(ca_type=[CorporateActionType.ACQUISITION])

    **See also**

    :func:`get_fundamentals`
    """
    # BUG FIX: the previous default `end=dt.date.today() + dt.timedelta(days=10)` was
    # evaluated once at import time and went stale in long-lived processes; the docstring
    # also incorrectly described it as "maximum date value".
    if end is None:
        end = dt.date.today() + dt.timedelta(days=10)
    where = dict(assetId=self.id, corporateActionType=ca_type)
    query = DataQuery(where=where, start_date=start, end_date=end)
    response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.CORPORATE_ACTIONS.value)
    return pd.DataFrame(response)
def test_get_many_defns_api(mocker):
    """get_many_definitions should GET /data/datasets with pagination params and return the results."""
    test_defn = DataSetEntity.from_dict(test_defn_dict)
    mock_response = {'results': (test_defn, ), 'totalResults': 1}
    expected_response = (test_defn, )
    expected_payload = {'limit': 100, 'enablePagination': 'true', 'scroll': '30s'}

    # mock GsSession
    mocker.patch.object(GsSession.__class__, 'default_value',
                        return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
    mocker.patch.object(GsSession.current, '_get', return_value=mock_response)

    # run test
    actual = GsDataApi.get_many_definitions()
    GsSession.current._get.assert_called_with('/data/datasets', payload=expected_payload,
                                              cls=DataSetEntity)
    assert actual == expected_response
def test_auto_scroll_on_pages(mocker):
    """get_results should follow totalPages and accumulate one row per page."""
    page = {
        "requestId": "049de678-1480000",
        "totalPages": 5,
        "data": [
            {"date": "2012-01-25", "assetId": "MADXKSGX6921CFNF", "value": 1}
        ]
    }
    mocker.patch.object(ContextMeta, 'current', return_value=GsSession(Environment.QA))
    # Every page fetch returns the same single-row payload.
    mocker.patch.object(ContextMeta.current, '_post', return_value=page)
    query = DataQuery(start_date=dt.date(2017, 1, 15),
                      end_date=dt.date(2017, 1, 18),
                      where=FieldFilterMapDataQuery(currency="GBP"))
    results = GsDataApi.get_results("test", page, query)
    # 5 pages x 1 row each.
    assert len(results) == 5
def upload_asset_coverage_data(self, date: dt.date = None):
    """ Upload to the coverage dataset for given risk model and date

    :param date: date to upload coverage data for, default date is last date from risk model
    calendar

    Posting to the coverage dataset within in the last 5 days will enable the risk model to be seen
    in the Marquee UI dropdown for users with "execute" capabilities
    """
    if not date:
        date = self.get_dates()[-1]
    # BUG FIX: the trailing 'Z' marks UTC (ISO 8601), but dt.datetime.today() returns
    # local time; take the timestamp in UTC so the suffix is truthful.
    update_time = dt.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
    gsid_list = self.get_asset_universe(date,
                                        assets=DataAssetsRequest('gsid', []),
                                        format=ReturnFormat.JSON).get(date)
    # One row per unique gsid in the universe for that date.
    request_array = [{
        'date': date.strftime('%Y-%m-%d'),
        'gsid': gsid,
        'riskModel': self.id,
        'updateTime': update_time
    } for gsid in set(gsid_list)]
    # Upload in chunks of 1000 rows to stay within request-size limits.
    for request_set in divide_request(request_array, 1000):
        print(GsDataApi.upload_data('RISK_MODEL_ASSET_COVERAGE', request_set))
def swap_rate(asset: Asset, tenor: str, benchmark_type: BenchmarkType = None, floating_index: str = None,
              *, source: str = None, real_time: bool = False) -> Series:
    """
    GS end-of-day Fixed-Floating interest rate swap (IRS) curves across major currencies.

    :param asset: asset object loaded from security master
    :param tenor: relative date representation of expiration date e.g. 1m
    :param benchmark_type: benchmark type e.g. LIBOR
    :param floating_index: floating index rate
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: swap rate curve
    """
    if real_time:
        raise NotImplementedError('realtime swap_rate not implemented')
    currency = Currency(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID))

    # Fill in the per-currency default benchmark when none was supplied.
    if benchmark_type is None:
        if currency == Currency.EUR:
            benchmark_type = BenchmarkType.EURIBOR
        elif currency == Currency.SEK:
            benchmark_type = BenchmarkType.STIBOR
        else:
            benchmark_type = BenchmarkType.LIBOR

    over_nights = [BenchmarkType.OIS]

    # Fill in the default floating index: overnight benchmarks use 1d, otherwise
    # the conventional tenor for the currency.
    if floating_index is None:
        if benchmark_type in over_nights:
            floating_index = '1d'
        elif currency in [Currency.USD]:
            floating_index = '3m'
        elif currency in [Currency.GBP, Currency.EUR, Currency.CHF, Currency.SEK]:
            floating_index = '6m'

    # Overnight identifiers are space-delimited and use the OIS token.
    mdapi_divider = " " if benchmark_type in over_nights else "-"
    mdapi_floating_index = BenchmarkType.OIS.value if benchmark_type is BenchmarkType.OIS else floating_index
    mdapi = currency.value + mdapi_divider + mdapi_floating_index
    rate_mqid = GsAssetApi.map_identifiers(GsIdType.mdapi, GsIdType.id, [mdapi])[mdapi]

    _logger.debug('where tenor=%s, floatingIndex=%s', tenor, floating_index)
    q = GsDataApi.build_market_data_query([rate_mqid], QueryType.SWAP_RATE,
                                          where=FieldFilterMap(tenor=tenor),
                                          source=source, real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    return Series() if df.empty else df['swapRate']
def from_string(cls, value: str):
    """Parse *value* (e.g. 'A_B_C_D' or 'A_B_C.D') into a coordinate via GsDataApi's parser."""
    # Local import avoids a circular dependency with gs_quant.api.gs.data at module load.
    from gs_quant.api.gs.data import GsDataApi
    return GsDataApi._coordinate_from_str(value)
def test_get_dataset_fields(mocker):
    """get_dataset_fields should POST the id filter to /data/fields/query and return results as-is."""
    # Canned server payload: two field definitions (an enum string field and a number field).
    mock_response = {
        "totalResults": 2,
        "results": [
            {
                "id": "FIVCFB4GAWBT61GT",
                "name": "strikeReference",
                "description": "Reference for strike level (enum: spot, forward).",
                "type": "string",
                "unique": False,
                "fieldJavaType": "StringField",
                "parameters": {
                    "enum": [
                        "spot",
                        "forward",
                        "normalized",
                        "delta"
                    ]
                },
                "entitlements": {
                    "view": [
                        "internal",
                        "role:DataServiceAdmin",
                        "external",
                        "guid:8b4e2021fd12429885b30f6074037087"
                    ],
                    "edit": [
                        "role:DataServiceAdmin",
                        "guid:8b4e2021fd12429885b30f6074037087"
                    ],
                    "admin": [
                        "role:DataServiceAdmin"
                    ]
                },
                "metadata": {
                    "createdById": "8b4e2021fd12429885b30f6074037087",
                    "createdTime": "2021-04-16T21:52:33.563Z",
                    "lastUpdatedById": "8b4e2021fd12429885b30f6074037087",
                    "lastUpdatedTime": "2021-04-16T21:52:33.563Z"
                }
            },
            {
                "id": "FI4YBC6DS3PRE7W9",
                "name": "price",
                "description": "Price of instrument.",
                "type": "number",
                "unique": False,
                "fieldJavaType": "DoubleField",
                "parameters": {},
                "entitlements": {
                    "view": [
                        "internal",
                        "role:DataServiceAdmin",
                        "external"
                    ],
                    "edit": [
                        "role:DataServiceAdmin"
                    ],
                    "admin": [
                        "role:DataServiceAdmin"
                    ]
                },
                "metadata": {
                    "createdById": "8b4e2021fd12429885b30f6074037087",
                    "createdTime": "2021-04-16T21:36:11.269Z",
                    "lastUpdatedById": "8b4e2021fd12429885b30f6074037087",
                    "lastUpdatedTime": "2021-04-16T22:09:03.697Z"
                }
            }
        ]
    }
    # Mock the session so no real HTTP call is made.
    mocker.patch.object(GsSession.__class__, 'default_value',
                        return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
    mocker.patch.object(GsSession.current, '_post', return_value=mock_response)
    response = GsDataApi.get_dataset_fields(ids=['FIVCFB4GAWBT61GT', 'FI4YBC6DS3PRE7W9'])
    assert len(response) == 2
    assert response == mock_response['results']
    # Exactly one POST, with the id filter and default limit.
    GsSession.current._post.assert_called_once_with(
        '/data/fields/query',
        payload={'where': {'id': ['FIVCFB4GAWBT61GT', 'FI4YBC6DS3PRE7W9']}, 'limit': 10},
        cls=DataSetFieldEntity)
def test_coordinates_data(mocker):
    """coordinates_data should return time-indexed frames (single coordinate -> one frame,
    multiple coordinates with as_multiple_dataframes -> list of frames)."""
    # Raw rows as the service returns them for a bond price coordinate.
    bond_data = [{
        'mktType': 'Prime',
        'mktAsset': '335320934',
        'mktQuotingStyle': 'price',
        'price': 1.0139,
        'time': pd.to_datetime('2019-01-20T01:03:00Z')
    }, {
        'mktType': 'Prime',
        'mktAsset': '335320934',
        'mktQuotingStyle': 'price',
        'price': 1.0141,
        'time': pd.to_datetime('2019-01-20T01:08:00Z')
    }]
    # Raw rows for a swap ATM-rate coordinate.
    swap_data = [{
        'mktType': 'IR',
        'mktAsset': 'USD',
        'mktClass': 'Swap',
        'mktPoint': ('2Y', ),
        'mktQuotingStyle': 'ATMRate',
        'ATMRate': 0.02592,
        'time': pd.to_datetime('2019-01-20T01:09:45Z')
    }]
    # Expected frames: the quoting-style column is renamed to 'value' and the frame
    # is indexed by timestamp.
    bond_expected_result = pd.DataFrame(
        data={
            'time': [
                pd.to_datetime('2019-01-20T01:03:00Z'),
                pd.to_datetime('2019-01-20T01:08:00Z')
            ],
            'mktType': ['Prime', 'Prime'],
            'mktAsset': ['335320934', '335320934'],
            'mktQuotingStyle': ['price', 'price'],
            'value': [1.0139, 1.0141]
        },
        index=pd.DatetimeIndex(['2019-01-20T01:03:00', '2019-01-20T01:08:00']),
    )
    swap_expected_result = pd.DataFrame(
        data={
            'time': [pd.to_datetime('2019-01-20T01:09:45Z')],
            'mktType': ['IR'],
            'mktAsset': ['USD'],
            'mktClass': ['Swap'],
            'mktPoint': [('2Y', )],
            'mktQuotingStyle': ['ATMRate'],
            'value': [0.02592]
        },
        index=pd.DatetimeIndex(['2019-01-20T01:09:45']),
    )

    # mock GsSession: four POSTs, one per coordinates_data call below.
    mocker.patch.object(GsSession.__class__, 'current',
                        return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
    mocker.patch.object(GsSession.current, '_post', side_effect=[{
        'responses': [{
            'data': bond_data
        }]
    }, {
        'responses': [{
            'data': swap_data
        }]
    }, {
        'responses': [{
            'data': bond_data
        }, {
            'data': swap_data
        }]
    }, {
        'responses': [{
            'data': bond_data
        }, {
            'data': swap_data
        }]
    }])

    # Single coordinate object -> single DataFrame.
    coord_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates[0],
                                                   start=dt.datetime(2019, 1, 2, 1, 0),
                                                   end=dt.datetime(2019, 1, 2, 1, 10))
    assert_frame_equal(coord_data_result, bond_expected_result)

    # Single string coordinate -> single DataFrame.
    str_coord_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates[1],
                                                       start=dt.datetime(2019, 1, 2, 1, 0),
                                                       end=dt.datetime(2019, 1, 2, 1, 10))
    assert_frame_equal(str_coord_data_result, swap_expected_result)

    # Multiple coordinate objects -> list of DataFrames.
    coords_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates,
                                                    start=dt.datetime(2019, 1, 2, 1, 0),
                                                    end=dt.datetime(2019, 1, 2, 1, 10),
                                                    as_multiple_dataframes=True)
    assert len(coords_data_result) == 2
    assert_frame_equal(coords_data_result[0], bond_expected_result)
    assert_frame_equal(coords_data_result[1], swap_expected_result)

    # Multiple string coordinates -> list of DataFrames.
    str_coords_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates,
                                                        start=dt.datetime(2019, 1, 2, 1, 0),
                                                        end=dt.datetime(2019, 1, 2, 1, 10),
                                                        as_multiple_dataframes=True)
    assert len(str_coords_data_result) == 2
    assert_frame_equal(str_coords_data_result[0], bond_expected_result)
    assert_frame_equal(str_coords_data_result[1], swap_expected_result)
    GsSession.current._post.assert_called_with('/data/coordinates/query', payload=mocker.ANY)
    assert GsSession.current._post.call_count == 4
def get_flagships_constituents(
        fields: [str] = [],
        basket_type: List[BasketType] = BasketType.to_list(),
        asset_class: List[AssetClass] = [AssetClass.Equity],
        region: List[Region] = None,
        styles: List[Union[CustomBasketStyles, ResearchBasketStyles]] = None,
        start: dt.date = None,
        end: dt.date = None,
) -> pd.DataFrame:
    """ Retrieve flagship baskets constituents

    :param fields: Fields to retrieve in addition to mqid, name, ticker, region, basket type, \
        styles, live date, and asset class
    :param basket_type: Basket type(s)
    :param asset_class: Asset class (defaults to Equity)
    :param region: Basket region(s)
    :param styles: Basket style(s)
    :param start: Start date for which to retrieve constituents (defaults to previous business day)
    :param end: End date for which to retrieve constituents (defaults to previous business day)
    :return: flagship baskets constituents

    **Usage**

    Retrieve flagship baskets constituents

    **Examples**

    Retrieve a list of flagship baskets constituents

    >>> from gs_quant.markets.indices_utils import *
    >>>
    >>> get_flagships_constituents()

    **See also**

    :func:`get_flagships_with_assets` :func:`get_flagships_performance` :func:`get_flagship_baskets`
    """
    start = start or prev_business_date()
    end = end or prev_business_date()
    # Always include the core descriptive fields alongside whatever the caller asked for.
    fields = list(set(fields).union(set(['id', 'name', 'ticker', 'region', 'type',
                                         'styles', 'liveDate', 'assetClass'])))
    query = dict(fields=fields, type=basket_type, asset_class=asset_class,
                 is_pair_basket=[False], flagship=[True])
    if region is not None:
        query.update(region=region)
    if styles is not None:
        query.update(styles=styles)
    basket_data = GsAssetApi.get_many_assets_data_scroll(**query, limit=2000, scroll='1m')
    basket_map = {get(basket, 'id'): basket for basket in basket_data}
    coverage = GsDataApi.get_coverage(dataset_id=IndicesDatasets.GSCB_FLAGSHIP.value,
                                      fields=['type', 'bbid'], include_history=True)
    cbs, rbs = [], []
    for b in coverage:
        _id = get(b, 'assetId')
        _type = get(b, 'type')
        # PERF FIX: direct dict membership instead of the previous
        # `_id in list(basket_map.keys())`, which rebuilt an O(n) list per coverage row.
        if _id in basket_map:
            # Never query before the basket's own history starts.
            start_date = dt.datetime.strptime(b['historyStartDate'], '%Y-%m-%d').date()
            start_date = start_date if start < start_date else start
            if _type == BasketType.CUSTOM_BASKET.value:
                data = GsDataApi.query_data(query=DataQuery(where=dict(assetId=_id),
                                                            startDate=start_date,
                                                            endDate=end),
                                            dataset_id=IndicesDatasets.GSBASKETCONSTITUENTS.value)
                basket_map[_id].update(constituents=data)
                cbs.append(basket_map[_id])
            elif _type == BasketType.RESEARCH_BASKET.value:
                data = GsDataApi.query_data(query=DataQuery(where=dict(assetId=_id),
                                                            startDate=start_date,
                                                            endDate=end),
                                            dataset_id=IndicesDatasets.GIRBASKETCONSTITUENTS.value)
                basket_map[_id].update(constituents=data)
                rbs.append(basket_map[_id])
    # Custom baskets first, then research baskets.
    return pd.DataFrame(cbs + rbs)
def get_flagships_performance(
        fields: [str] = [],
        basket_type: List[BasketType] = BasketType.to_list(),
        asset_class: List[AssetClass] = [AssetClass.Equity],
        region: List[Region] = None,
        styles: List[Union[CustomBasketStyles, ResearchBasketStyles]] = None,
        start: dt.date = None,
        end: dt.date = None,
) -> pd.DataFrame:
    """ Retrieve performance data for flagship baskets

    :param fields: Fields to retrieve in addition to bbid, mqid, name, region, basket type, \
        styles, live date, and asset class
    :param basket_type: Basket type(s)
    :param asset_class: Asset class (defaults to Equity)
    :param region: Basket region(s)
    :param styles: Basket style(s)
    :param start: Date for which to retrieve pricing (defaults to previous business day)
    :param end: Date for which to retrieve pricing (defaults to previous business day)
    :return: pricing data for flagship baskets

    **Usage**

    Retrieve performance data for flagship baskets

    **Examples**

    Retrieve performance data for flagship Asia custom baskets

    >>> from gs_quant.markets.indices_utils import *
    >>>
    >>> get_flagships_performance(basket_type=[BasketType.CUSTOM_BASKET], region=[Region.ASIA])

    **See also**

    :func:`get_flagships_with_assets` :func:`get_flagship_baskets` :func:`get_flagships_constituents`
    """
    start = start or prev_business_date()
    end = end or prev_business_date()
    # Always request the fields the filter below depends on.
    fields = list(set(fields).union(set(['name', 'region', 'type', 'flagship', 'isPairBasket',
                                         'styles', 'liveDate', 'assetClass'])))
    coverage = GsDataApi.get_coverage(dataset_id=IndicesDatasets.GSCB_FLAGSHIP.value, fields=fields)
    basket_regions = [] if region is None else [r.value for r in region]
    basket_styles = [] if styles is None else [s.value for s in styles]
    basket_types = [b_type.value for b_type in basket_type]
    baskets_map = {}
    for basket in coverage:
        # Keep only flagship, non-pair baskets that match every requested filter.
        excluded = (
            get(basket, 'flagship') is False or
            get(basket, 'isPairBasket') is True or
            (region is not None and get(basket, 'region') not in basket_regions) or
            get(basket, 'type') not in basket_types or
            get(basket, 'assetClass') not in [a.value for a in asset_class] or
            (styles is not None and not any(s in get(basket, 'styles', []) for s in basket_styles))
        )
        if excluded:
            continue
        baskets_map[get(basket, 'assetId')] = basket
    response = GsDataApi.query_data(
        query=DataQuery(where=dict(assetId=list(baskets_map.keys())), startDate=start, endDate=end),
        dataset_id=IndicesDatasets.GSCB_FLAGSHIP.value)
    performance = []
    for row in response:
        # Merge each pricing row onto its basket's descriptive record.
        record = baskets_map[get(row, 'assetId')]
        record.update(closePrice=get(row, 'closePrice'))
        record.update(date=get(row, 'date'))
        performance.append(record)
    return pd.DataFrame(performance)
def basis_swap_term_structure(asset: Asset, spread_benchmark_type: BenchmarkType = None, spread_tenor: str = None,
                              reference_benchmark_type: BenchmarkType = None, reference_tenor: str = None,
                              forward_tenor: Optional[GENERIC_DATE] = None, clearing_house: _ClearingHouse = None,
                              pricing_date: Optional[GENERIC_DATE] = None,
                              *, source: str = None, real_time: bool = False,
                              ) -> Series:
    """
    GS end-of-day Floating-Floating interest rate swap (IRS) term structure across major currencies.


    :param asset: asset object loaded from security master
    :param spread_benchmark_type: benchmark type of spread leg on which basis spread is added e.g. LIBOR
    :param spread_tenor: relative date representation of expiration date of spread leg e.g. 1m
    :param reference_benchmark_type: benchmark type of reference leg e.g. LIBOR
    :param reference_tenor: relative date representation of expiration date of reference leg e.g. 1m
    :param forward_tenor: absolute / relative date representation of forward starting point eg: '1y' or 'Spot' for
            spot starting swaps, 'imm1' or 'frb1'
    :param clearing_house: Example - "LCH", "EUREX", "JSCC", "CME"
    :param pricing_date: YYYY-MM-DD or relative date
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: swap rate curve
    """
    if real_time:
        raise NotImplementedError('realtime basis_swap_rate not implemented')
    currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID))
    # Basis swap data is only published for these four currencies.
    if currency.value not in ['JPY', 'EUR', 'USD', 'GBP']:
        raise NotImplementedError('Data not available for {} basis swap rates'.format(currency.value))
    clearing_house = _check_clearing_house(clearing_house)
    for benchmark_type in [spread_benchmark_type, reference_benchmark_type]:
        _check_benchmark_type(currency, benchmark_type)

    # default benchmark types
    legs_w_defaults = dict()
    legs_w_defaults['spread'] = _get_swap_leg_defaults(currency, spread_benchmark_type, spread_tenor)
    legs_w_defaults['reference'] = _get_swap_leg_defaults(currency, reference_benchmark_type, reference_tenor)

    # Both legs' floating-rate tenors must look like e.g. '3m', '6m', '1y'.
    for key, leg in legs_w_defaults.items():
        if not re.fullmatch('(\\d+)([bdwmy])', leg['floating_rate_tenor']):
            raise MqValueError('invalid floating rate tenor ' + leg['floating_rate_tenor'] +
                               ' index: ' + leg['benchmark_type'])

    forward_tenor = check_forward_tenor(forward_tenor)
    # Holidays are checked against the spread leg's pricing-location calendar.
    calendar = legs_w_defaults['spread']['pricing_location'].value
    if pricing_date is not None and pricing_date in list(GsCalendar.get(calendar).holidays):
        raise MqValueError('Specified pricing date is a holiday in {} calendar'.format(calendar))

    csaTerms = currency.value + '-1'
    # Find the asset(s) matching this exact basis-swap specification.
    kwargs = dict(type='BasisSwap',
                  asset_parameters_payer_rate_option=legs_w_defaults['spread']['benchmark_type'],
                  asset_parameters_payer_designated_maturity=legs_w_defaults['spread']['floating_rate_tenor'],
                  asset_parameters_receiver_rate_option=legs_w_defaults['reference']['benchmark_type'],
                  asset_parameters_receiver_designated_maturity=legs_w_defaults['reference']['floating_rate_tenor'],
                  asset_parameters_clearing_house=clearing_house.value,
                  asset_parameters_effective_date=forward_tenor,
                  asset_parameters_notional_currency=currency.name,
                  pricing_location=legs_w_defaults['spread']['pricing_location'].value)
    assets = GsAssetApi.get_many_assets(**kwargs)
    if len(assets) == 0:
        raise MqValueError('Specified arguments did not match any asset in the dataset')
    else:
        rate_mqids = [asset.id for asset in assets]

    asset_string = ''
    for mqid in rate_mqids:
        asset_string = asset_string + ',' + mqid
    _logger.debug('assets returned %s', asset_string)
    _logger.debug('where spread_benchmark_type=%s, spread_tenor=%s, reference_benchmark_type=%s, '
                  'reference_tenor=%s, forward_tenor=%s, pricing_location=%s ',
                  legs_w_defaults['spread']['benchmark_type'],
                  legs_w_defaults['spread']['floating_rate_tenor'],
                  legs_w_defaults['reference']['benchmark_type'],
                  legs_w_defaults['reference']['floating_rate_tenor'],
                  forward_tenor,
                  legs_w_defaults['spread']['pricing_location'].value)

    start, end = _range_from_pricing_date(calendar, pricing_date)
    # Query within a pricing-date-bounded context; the original collapsed source makes the
    # exact `with` scope ambiguous — NOTE(review): confirm only the query runs inside it.
    with DataContext(start, end):
        where = dict(csaTerms=csaTerms)
        q = GsDataApi.build_market_data_query(rate_mqids, QueryType.BASIS_SWAP_RATE, where=where,
                                              source=source, real_time=real_time)
        _logger.debug('q %s', q)
        df = _market_data_timed(q)

    if df.empty:
        series = ExtendedSeries()
    else:
        # Keep only the most recent pricing date, then re-index by expiration date.
        latest = df.index.max()
        _logger.info('selected pricing date %s', latest)
        df = df.loc[latest]
        # Adding and subtracting a business day rolls each expiration onto a business day.
        business_day = _get_custom_bd(calendar)
        df = df.assign(expirationDate=df.index + df['terminationTenor'].map(_to_offset) +
                       business_day - business_day)
        df = df.set_index('expirationDate')
        df.sort_index(inplace=True)
        # Clip the curve to the ambient data context's date range.
        df = df.loc[DataContext.current.start_date: DataContext.current.end_date]
        series = ExtendedSeries() if df.empty else ExtendedSeries(df['basisSwapRate'])
    series.dataset_ids = getattr(df, 'dataset_ids', ())
    return series
def test_coordinates_data(mocker):
    """Exercise GsDataApi.coordinates_data for one coordinate and for a list of coordinates."""
    # Raw MDAPI rows as the service returns them: the quoted value sits under the
    # field's own name ('price' / 'ATMRate').
    bond_data = [{
        'marketDataType': 'Prime',
        'marketDataAsset': '335320934',
        'field': 'price',
        'price': 1.0139,
        'time': pd.to_datetime('2019-01-20T01:03:00Z')
    }, {
        'marketDataType': 'Prime',
        'marketDataAsset': '335320934',
        'field': 'price',
        'price': 1.0141,
        'time': pd.to_datetime('2019-01-20T01:08:00Z')
    }]
    swap_data = [{
        'marketDataType': 'IR',
        'marketDataAsset': 'USD',
        'pointClass': 'Swap',
        'marketDataPoint': ('2Y', ),
        'quotingStyle': 'ATMRate',
        'ATMRate': 0.02592,
        'time': pd.to_datetime('2019-01-20T01:09:45Z')
    }]
    # Expected rows differ from the raw rows only in that the quoted field is
    # renamed to 'value' — that is the transformation under test.
    bond_expected_result = [{
        'marketDataType': 'Prime',
        'marketDataAsset': '335320934',
        'field': 'price',
        'value': 1.0139,
        'time': pd.to_datetime('2019-01-20T01:03:00Z')
    }, {
        'marketDataType': 'Prime',
        'marketDataAsset': '335320934',
        'field': 'price',
        'value': 1.0141,
        'time': pd.to_datetime('2019-01-20T01:08:00Z')
    }]
    swap_expected_result = [{
        'marketDataType': 'IR',
        'marketDataAsset': 'USD',
        'pointClass': 'Swap',
        'marketDataPoint': ('2Y', ),
        'quotingStyle': 'ATMRate',
        'value': 0.02592,
        'time': pd.to_datetime('2019-01-20T01:09:45Z')
    }]
    # Expected frames are column-ordered via sort_risk and indexed by timestamp.
    bond_expected_df = sort_risk(pd.DataFrame(bond_expected_result))
    bond_expected_df = bond_expected_df.set_index(pd.DatetimeIndex(bond_expected_df.time.values))
    swap_expected_df = sort_risk(pd.DataFrame(swap_expected_result))
    swap_expected_df = swap_expected_df.set_index(pd.DatetimeIndex(swap_expected_df.time.values))

    # mock GsSession
    mocker.patch.object(GsSession.__class__, 'current',
                        return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
    # side_effect order matters: the first response feeds the single-coordinate
    # call, the second (two datasets) feeds the multi-coordinate call below.
    mocker.patch.object(GsSession.current, '_post', side_effect=[{
        'responses': [{
            'data': bond_data
        }]
    }, {
        'responses': [{
            'data': bond_data
        }, {
            'data': swap_data
        }]
    }])

    # single coordinate -> a single DataFrame
    coord_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates[0],
                                                   start=dt.datetime(2019, 1, 2, 1, 0),
                                                   end=dt.datetime(2019, 1, 2, 1, 10))
    assert_frame_equal(coord_data_result, bond_expected_df)

    # list of coordinates with as_multiple_dataframes -> one frame per coordinate
    coords_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates,
                                                    start=dt.datetime(2019, 1, 2, 1, 0),
                                                    end=dt.datetime(2019, 1, 2, 1, 10),
                                                    as_multiple_dataframes=True)
    assert len(coords_data_result) == 2
    assert_frame_equal(coords_data_result[0], bond_expected_df)
    assert_frame_equal(coords_data_result[1], swap_expected_df)
def swap_rate_2(asset: Asset, swap_tenor: str, benchmark_type: BenchmarkType = None,
                floating_rate_tenor: str = None, forward_tenor: str = 'Spot', *,
                source: str = None, real_time: bool = False) -> Series:
    """
    GS end-of-day Fixed-Floating interest rate swap (IRS) curves across major currencies.

    :param asset: asset object loaded from security master
    :param swap_tenor: relative date representation of expiration date e.g. 1m
    :param benchmark_type: benchmark type e.g. LIBOR
    :param floating_rate_tenor: floating index rate
    :param forward_tenor: relative date representation of forward starting point eg: '1y'
            or 'Spot' for spot starting swaps
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: swap rate curve
    :raises MqValueError: on an unsupported benchmark type or a malformed tenor
    :raises NotImplementedError: for real-time requests or unsupported currencies
    """
    if real_time:
        raise NotImplementedError('realtime swap_rate not implemented')

    currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID))
    if currency.value not in ['JPY', 'EUR', 'USD', 'GBP', 'CHF', 'SEK']:
        raise NotImplementedError('Data not available for {} swap rates'.format(currency.value))

    if benchmark_type is not None and \
            benchmark_type.value not in CURRENCY_TO_SWAP_RATE_BENCHMARK[currency.value].keys():
        # format eagerly: MqValueError does not interpolate logger-style %s arguments
        raise MqValueError('{} is not supported for {}'.format(benchmark_type, currency.value))

    defaults = _get_swap_leg_defaults(currency, benchmark_type, floating_rate_tenor)

    if not re.fullmatch('(\\d+)([bdwmy])', swap_tenor):
        raise MqValueError('invalid swap tenor ' + swap_tenor)

    # validate the resolved default: the raw floating_rate_tenor parameter may be
    # None, and re.fullmatch(None) would raise TypeError instead of MqValueError
    if not re.fullmatch('(\\d+)([bdwmy])', defaults['floating_rate_tenor']):
        raise MqValueError('invalid floating rate tenor ' + defaults['floating_rate_tenor'])

    if forward_tenor is None or forward_tenor == 'Spot':
        forward_tenor = '0b'
    elif not re.fullmatch('(\\d+)([bdwmy])', forward_tenor):
        raise MqValueError('invalid forward tenor ' + forward_tenor)

    # only LCH-cleared, ATM, receive-fixed swaps are published in this dataset
    clearing_house = 'LCH'
    csaTerms = currency.value + '-1'
    fixed_rate = 'ATM'
    pay_or_receive = 'Receive'

    kwargs = dict(
        type='Swap',
        asset_parameters_termination_date=swap_tenor,
        asset_parameters_floating_rate_option=defaults['benchmark_type'],
        asset_parameters_fixed_rate=fixed_rate,
        asset_parameters_clearing_house=clearing_house,
        asset_parameters_floating_rate_designated_maturity=defaults['floating_rate_tenor'],
        asset_parameters_effective_date=forward_tenor,
        asset_parameters_pay_or_receive=pay_or_receive,
        asset_parameters_notional_currency=currency.name,
        pricing_location=defaults['pricing_location'])
    rate_mqid = _convert_asset_for_mdapi_swap_rates(**kwargs)

    _logger.debug(
        'where swap_tenor=%s, benchmark_type=%s, floating_rate_tenor=%s, forward_tenor=%s, '
        'pricing_location=%s', swap_tenor, defaults['benchmark_type'],
        defaults['floating_rate_tenor'], forward_tenor, defaults['pricing_location'])
    where = FieldFilterMap(csaTerms=csaTerms)
    q = GsDataApi.build_market_data_query([rate_mqid], QueryType.SWAP_RATE, where=where,
                                          source=source, real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    return Series() if df.empty else df['swapRate']
def _get_fxo_data(asset: Asset, expiry_tenor: str, strike: str, option_type: str = None,
                  expiration_location: str = None, location: PricingLocation = None,
                  premium_payment_date: str = None, source: str = None, real_time: bool = False,
                  query_type: QueryType = QueryType.IMPLIED_VOLATILITY) -> pd.DataFrame:
    """
    Query MDAPI data for an FX vanilla option on the given cross.

    :param asset: cross asset loaded from security master
    :param expiry_tenor: relative expiry e.g. 1m
    :param strike: relative strike price specification
    :param option_type: 'Put' or call otherwise; determines which currency is call/put
    :param expiration_location: accepted for interface compatibility but not yet used
        in the asset lookup (asset_parameters_expiration_time is disabled below)
    :param location: pricing location; defaults to NYC
    :param premium_payment_date: premium payment date; defaults per cross
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :param query_type: market data query type to run
    :return: market data frame for the resolved option asset
    """
    if real_time:
        # fixed message: previous text said 'inflation swap', a copy-paste error —
        # this helper serves FX vanilla options
        raise NotImplementedError('realtime FX option data not implemented')
    cross = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)

    if cross not in FX_DEFAULTS.keys():
        raise NotImplementedError('Data not available for {} FX Vanilla options'.format(cross))

    defaults = _get_fxo_defaults(cross)

    if not (tm_rates._is_valid_relative_date_tenor(expiry_tenor)):
        raise MqValueError('invalid expiry ' + expiry_tenor)

    # NOTE(review): expiration_location / defaults["expirationTime"] is currently
    # unused because asset_parameters_expiration_time is commented out of the
    # lookup below; re-enable both together.

    if premium_payment_date is None:
        premium_date = defaults["premiumPaymentDate"]
    else:
        premium_date = premium_payment_date

    # for a Put the 'over' currency is the call side; otherwise the 'under' is
    if option_type == "Put":
        call_ccy = defaults["over"]
        put_ccy = defaults["under"]
    else:
        call_ccy = defaults["under"]
        put_ccy = defaults["over"]

    kwargs = dict(
        asset_class='FX',
        type='Option',
        asset_parameters_call_currency=call_ccy,
        asset_parameters_put_currency=put_ccy,
        asset_parameters_expiration_date=expiry_tenor,
        # asset_parameters_expiration_time=expirationtime,
        asset_parameters_option_type=option_type,
        asset_parameters_premium_payment_date=premium_date,
        asset_parameters_strike_price_relative=strike,
    )

    rate_mqid = _get_tdapi_fxo_assets(**kwargs)

    if location is None:
        pricing_location = PricingLocation.NYC
    else:
        pricing_location = PricingLocation(location)

    where = dict(pricingLocation=pricing_location.value)

    q = GsDataApi.build_market_data_query([rate_mqid], query_type, where=where,
                                          source=source, real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    return df
def basis_swap_spread(asset: Asset, swap_tenor: str = '1y',
                      spread_benchmark_type: BenchmarkType = None, spread_tenor: str = None,
                      reference_benchmark_type: BenchmarkType = None, reference_tenor: str = None,
                      forward_tenor: str = 'Spot', *, source: str = None,
                      real_time: bool = False) -> Series:
    """
    GS end-of-day Floating-Floating interest rate swap (IRS) curves across major currencies.

    :param asset: asset object loaded from security master
    :param swap_tenor: relative date representation of expiration date e.g. 1m
    :param spread_benchmark_type: benchmark type of spread leg on which basis spread is added e.g. LIBOR
    :param spread_tenor: relative date representation of expiration date of paying leg e.g. 1m
    :param reference_benchmark_type: benchmark type of reference leg e.g. LIBOR
    :param reference_tenor: relative date representation of expiration date of reference leg e.g. 1m
    :param forward_tenor: relative date representation of forward starting point eg: '1y'
            or 'Spot' for Spot Starting swap
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: swap rate curve
    :raises MqValueError: on an unsupported benchmark type or a malformed tenor
    :raises NotImplementedError: for real-time requests or unsupported currencies
    """
    if real_time:
        raise NotImplementedError('realtime basis_swap_rate not implemented')

    currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID))
    if currency.value not in ['JPY', 'EUR', 'USD', 'GBP']:
        raise NotImplementedError('Data not available for {} basis swap rates'.format(currency.value))

    for benchmark_type in [spread_benchmark_type, reference_benchmark_type]:
        if benchmark_type is not None and \
                benchmark_type.value not in CURRENCY_TO_SWAP_RATE_BENCHMARK[currency.value].keys():
            # format eagerly: MqValueError does not interpolate logger-style %s arguments
            raise MqValueError('{} is not supported for {}'.format(benchmark_type, currency.value))

    if not re.fullmatch('(\\d+)([bdwmy])', swap_tenor):
        raise MqValueError('invalid swap tenor ' + swap_tenor)

    # resolve leg defaults first, then validate the resolved tenors: the raw
    # spread_tenor / reference_tenor parameters may legitimately be None, and
    # re.fullmatch(None) would raise TypeError instead of MqValueError
    legs_w_defaults = dict()
    legs_w_defaults['spread'] = _get_swap_leg_defaults(currency, spread_benchmark_type, spread_tenor)
    legs_w_defaults['reference'] = _get_swap_leg_defaults(currency, reference_benchmark_type, reference_tenor)
    for leg in legs_w_defaults.values():
        if not re.fullmatch('(\\d+)([bdwmy])', leg['floating_rate_tenor']):
            raise MqValueError('invalid floating rate tenor ' + leg['floating_rate_tenor'] +
                               ' index: ' + leg['benchmark_type'])

    if forward_tenor == '0b' or forward_tenor is None or forward_tenor == 'Spot':
        forward_tenor = '0d'
    elif not re.fullmatch('(\\d+)([bdwmy])', forward_tenor):
        raise MqValueError('invalid forward tenor ' + forward_tenor)

    csaTerms = currency.value + '-1'
    clearing_house = 'LCH'

    kwargs = dict(
        type='BasisSwap',
        asset_parameters_termination_date=swap_tenor,
        asset_parameters_payer_rate_option=legs_w_defaults['spread']['benchmark_type'],
        asset_parameters_payer_designated_maturity=legs_w_defaults['spread']['floating_rate_tenor'],
        asset_parameters_receiver_rate_option=legs_w_defaults['reference']['benchmark_type'],
        asset_parameters_receiver_designated_maturity=legs_w_defaults['reference']['floating_rate_tenor'],
        asset_parameters_clearing_house=clearing_house,
        asset_parameters_effective_date=forward_tenor,
        asset_parameters_notional_currency=currency.name,
        pricing_location=legs_w_defaults['spread']['pricing_location'])
    rate_mqid = _convert_asset_for_mdapi_swap_rates(**kwargs)

    _logger.debug(
        'where swap_tenor=%s, spread_benchmark_type=%s, spread_tenor=%s, reference_benchmark_type=%s, '
        'reference_tenor=%s, forward_tenor=%s, pricing_location=%s ', swap_tenor,
        legs_w_defaults['spread']['benchmark_type'],
        legs_w_defaults['spread']['floating_rate_tenor'],
        legs_w_defaults['reference']['benchmark_type'],
        legs_w_defaults['reference']['floating_rate_tenor'], forward_tenor,
        legs_w_defaults['spread']['pricing_location'])
    where = FieldFilterMap(csaTerms=csaTerms)
    q = GsDataApi.build_market_data_query([rate_mqid], QueryType.BASIS_SWAP_RATE, where=where,
                                          source=source, real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    return Series() if df.empty else df['basisSwapRate']
def test_coordinates_data(mocker):
    """Exercise GsDataApi.coordinates_data with coordinate objects and with string coordinates."""
    start = dt.datetime(2019, 1, 2, 1, 0)
    end = dt.datetime(2019, 1, 2, 1, 10)
    # mock GsSession and data response
    mocker.patch.object(GsSession.__class__, 'default_value',
                        return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
    # One entry per coordinates_data call below, consumed strictly in order.
    mocker.patch.object(GsSession.current, '_post', side_effect=[{
        'responses': [{
            'data': bond_data
        }]
    }, {
        'responses': [{
            'data': swap_data
        }]
    }, {
        'responses': [{
            'data': bond_data
        }, {
            'data': swap_data
        }]
    }, {
        'responses': [{
            'data': bond_data
        }, {
            'data': swap_data
        }]
    }])

    # single coordinate object -> a single DataFrame
    coord_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates[0], start=start, end=end)
    assert_frame_equal(coord_data_result, bond_expected_frame)

    # single string coordinate -> a single DataFrame
    str_coord_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates[1],
                                                       start=start, end=end)
    assert_frame_equal(str_coord_data_result, swap_expected_frame)

    # list of coordinate objects with as_multiple_dataframes -> one frame each
    coords_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates, start=start, end=end,
                                                    as_multiple_dataframes=True)
    assert len(coords_data_result) == 2
    assert_frame_equal(coords_data_result[0], bond_expected_frame)
    assert_frame_equal(coords_data_result[1], swap_expected_frame)

    GsSession.current._post.reset_mock()
    # list of string coordinates behaves identically ...
    str_coords_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates,
                                                        start=start, end=end,
                                                        as_multiple_dataframes=True)
    assert len(str_coords_data_result) == 2
    assert_frame_equal(str_coords_data_result[0], bond_expected_frame)
    assert_frame_equal(str_coords_data_result[1], swap_expected_frame)
    # ... and the strings must be parsed into coordinate objects and sent as one
    # MessagePack-formatted query with the msgpack Accept header.
    GsSession.current._post.assert_called_once_with('/data/coordinates/query',
                                                    payload=MDAPIDataQuery(
                                                        market_data_coordinates=test_coordinates,
                                                        start_time=start,
                                                        end_time=end,
                                                        vendor=MarketDataVendor.Goldman_Sachs,
                                                        format="MessagePack"),
                                                    request_headers={'Accept': 'application/msgpack'})
def basis_swap_term_structure(asset: Asset,
                              spread_benchmark_type: BenchmarkType = None, spread_tenor: str = None,
                              reference_benchmark_type: BenchmarkType = None, reference_tenor: str = None,
                              forward_tenor: str = 'Spot',
                              pricing_date: Optional[GENERIC_DATE] = None, *,
                              source: str = None, real_time: bool = False) -> Series:
    """
    GS end-of-day Floating-Floating interest rate swap (IRS) term structure across major currencies.

    :param asset: asset object loaded from security master
    :param spread_benchmark_type: benchmark type of spread leg on which basis spread is added e.g. LIBOR
    :param spread_tenor: relative date representation of expiration date of spread leg e.g. 1m
    :param reference_benchmark_type: benchmark type of reference leg e.g. LIBOR
    :param reference_tenor: relative date representation of expiration date of reference leg e.g. 1m
    :param forward_tenor: relative date representation of forward starting point eg: '1y'
            or 'Spot' for spot starting swaps
    :param pricing_date: YYYY-MM-DD or relative date
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: swap rate curve
    :raises MqValueError: on an unsupported benchmark type, malformed tenor, or no matching assets
    :raises NotImplementedError: for real-time requests or unsupported currencies
    """
    if real_time:
        raise NotImplementedError('realtime basis_swap_rate not implemented')

    currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID))
    if currency.value not in ['JPY', 'EUR', 'USD', 'GBP']:
        raise NotImplementedError('Data not available for {} basis swap rates'.format(currency.value))

    for benchmark_type in [spread_benchmark_type, reference_benchmark_type]:
        if benchmark_type is not None and \
                benchmark_type.value not in CURRENCY_TO_SWAP_RATE_BENCHMARK[currency.value].keys():
            # format eagerly: MqValueError does not interpolate logger-style %s arguments
            raise MqValueError('{} is not supported for {}'.format(benchmark_type, currency.value))

    # resolve leg defaults first, then validate the resolved tenors: the raw
    # spread_tenor / reference_tenor parameters may legitimately be None, and
    # re.fullmatch(None) would raise TypeError instead of MqValueError
    legs_w_defaults = dict()
    legs_w_defaults['spread'] = _get_swap_leg_defaults(currency, spread_benchmark_type, spread_tenor)
    legs_w_defaults['reference'] = _get_swap_leg_defaults(currency, reference_benchmark_type, reference_tenor)
    for leg in legs_w_defaults.values():
        if not re.fullmatch('(\\d+)([bdwmy])', leg['floating_rate_tenor']):
            raise MqValueError('invalid floating rate tenor ' + leg['floating_rate_tenor'] +
                               ' index: ' + leg['benchmark_type'])

    if forward_tenor == '0b' or forward_tenor is None or forward_tenor == 'Spot':
        forward_tenor = '0d'
    elif not re.fullmatch('(\\d+)([bdwmy])', forward_tenor):
        raise MqValueError('invalid forward tenor ' + forward_tenor)

    csaTerms = currency.value + '-1'
    clearing_house = 'LCH'

    kwargs = dict(
        type='BasisSwap',
        asset_parameters_payer_rate_option=legs_w_defaults['spread']['benchmark_type'],
        asset_parameters_payer_designated_maturity=legs_w_defaults['spread']['floating_rate_tenor'],
        asset_parameters_receiver_rate_option=legs_w_defaults['reference']['benchmark_type'],
        asset_parameters_receiver_designated_maturity=legs_w_defaults['reference']['floating_rate_tenor'],
        asset_parameters_clearing_house=clearing_house,
        asset_parameters_effective_date=forward_tenor,
        asset_parameters_notional_currency=currency.name,
        pricing_location=legs_w_defaults['spread']['pricing_location'])

    # no termination date in the lookup: the whole curve of matching assets is wanted
    assets = GsAssetApi.get_many_assets(**kwargs)
    if len(assets) == 0:
        raise MqValueError('Specified arguments did not match any asset in the dataset')
    rate_mqids = [asset.id for asset in assets]

    _logger.debug(
        'where spread_benchmark_type=%s, spread_tenor=%s, reference_benchmark_type=%s, '
        'reference_tenor=%s, forward_tenor=%s, pricing_location=%s ',
        legs_w_defaults['spread']['benchmark_type'],
        legs_w_defaults['spread']['floating_rate_tenor'],
        legs_w_defaults['reference']['benchmark_type'],
        legs_w_defaults['reference']['floating_rate_tenor'], forward_tenor,
        legs_w_defaults['spread']['pricing_location'])

    start, end = _range_from_pricing_date(assets[0].exchange, pricing_date)
    with DataContext(start, end):
        where = FieldFilterMap(csaTerms=csaTerms)
        q = GsDataApi.build_market_data_query(rate_mqids, QueryType.BASIS_SWAP_RATE, where=where,
                                              source=source, real_time=real_time)
        _logger.debug('q %s', q)
        df = _market_data_timed(q)

    if df.empty:
        return pd.Series()
    latest = df.index.max()
    _logger.info('selected pricing date %s', latest)
    df = df.loc[latest]
    business_day = _get_custom_bd(asset.exchange)
    # adding then subtracting a business day snaps each expiration to a valid business day
    df = df.assign(expirationDate=df.index + df['terminationTenor'].map(_to_offset) +
                   business_day - business_day)
    df = df.set_index('expirationDate')
    df.sort_index(inplace=True)
    df = df.loc[DataContext.current.start_date:DataContext.current.end_date]
    return df['basisSwapRate'] if not df.empty else pd.Series()
def test_coordinate_data_series(mocker):
    """Exercise GsDataApi.coordinates_data_series with coordinate objects and string coordinates."""
    start = dt.datetime(2019, 1, 2, 1, 0)
    end = dt.datetime(2019, 1, 2, 1, 10)
    # mock GsSession and data response
    mocker.patch.object(GsSession.__class__, 'current',
                        return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
    # One entry per coordinates_data_series call below, consumed strictly in order.
    mocker.patch.object(GsSession.current, '_post', side_effect=[{
        'responses': [{
            'data': bond_data
        }]
    }, {
        'responses': [{
            'data': swap_data
        }]
    }, {
        'responses': [{
            'data': bond_data
        }, {
            'data': swap_data
        }]
    }, {
        'responses': [{
            'data': bond_data
        }, {
            'data': swap_data
        }]
    }])

    # expected series carry the frames' 'value' column against the same index
    bond_expected_series = pd.Series(index=bond_expected_frame.index, data=bond_expected_frame.value.values)
    swap_expected_series = pd.Series(index=swap_expected_frame.index, data=swap_expected_frame.value.values)

    # single coordinate object -> a single Series
    coord_data_result = GsDataApi.coordinates_data_series(coordinates=test_coordinates[0],
                                                          start=start, end=end)
    assert_series_equal(coord_data_result, bond_expected_series)

    # single string coordinate -> a single Series
    str_coord_data_result = GsDataApi.coordinates_data_series(coordinates=test_str_coordinates[1],
                                                              start=start, end=end)
    assert_series_equal(str_coord_data_result, swap_expected_series)

    # list of coordinate objects -> one Series per coordinate
    coords_data_result = GsDataApi.coordinates_data_series(coordinates=test_coordinates,
                                                           start=start, end=end)
    assert len(coords_data_result) == 2
    assert_series_equal(coords_data_result[0], bond_expected_series)
    assert_series_equal(coords_data_result[1], swap_expected_series)

    GsSession.current._post.reset_mock()
    # list of string coordinates behaves identically ...
    str_coords_data_result = GsDataApi.coordinates_data_series(coordinates=test_str_coordinates,
                                                               start=start, end=end)
    assert len(str_coords_data_result) == 2
    assert_series_equal(str_coords_data_result[0], bond_expected_series)
    assert_series_equal(str_coords_data_result[1], swap_expected_series)
    # ... and the strings must be parsed into coordinate objects for the query
    GsSession.current._post.assert_called_with('/data/coordinates/query',
                                               payload=MDAPIDataQuery(
                                                   market_data_coordinates=test_coordinates,
                                                   start_time=start,
                                                   end_time=end,
                                                   vendor=MarketDataVendor.Goldman_Sachs,
                                                   format="MessagePack"))
def basis_swap_spread(asset: Asset, swap_tenor: str = '1y',
                      spread_benchmark_type: BenchmarkType = None, spread_tenor: str = None,
                      reference_benchmark_type: BenchmarkType = None, reference_tenor: str = None,
                      forward_tenor: Optional[GENERIC_DATE] = None,
                      clearing_house: _ClearingHouse = None, *,
                      source: str = None, real_time: bool = False) -> Series:
    """
    GS end-of-day Floating-Floating interest rate swap (IRS) curves across major currencies.

    :param asset: asset object loaded from security master
    :param swap_tenor: relative date representation of expiration date e.g. 1m
    :param spread_benchmark_type: benchmark type of spread leg on which basis spread is added e.g. LIBOR
    :param spread_tenor: relative date representation of expiration date of paying leg e.g. 1m
    :param reference_benchmark_type: benchmark type of reference leg e.g. LIBOR
    :param reference_tenor: relative date representation of expiration date of reference leg e.g. 1m
    :param forward_tenor: absolute / relative date representation of forward starting point eg: '1y'
            or 'Spot' for spot starting swaps, 'imm1' or 'frb1'
    :param clearing_house: Example - "LCH", "EUREX", "JSCC", "CME"
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: swap rate curve
    :raises MqValueError: on a malformed tenor or unsupported benchmark type
    :raises NotImplementedError: for real-time requests or unsupported currencies
    """
    if real_time:
        raise NotImplementedError('realtime basis_swap_rate not implemented')

    currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID))
    if currency.value not in ['JPY', 'EUR', 'USD', 'GBP']:
        raise NotImplementedError('Data not available for {} basis swap rates'.format(currency.value))

    clearing_house = _check_clearing_house(clearing_house)

    for benchmark_type in [spread_benchmark_type, reference_benchmark_type]:
        _check_benchmark_type(currency, benchmark_type)

    # guard forward_tenor against None (its default): re.fullmatch(None) would
    # raise TypeError instead of the intended MqValueError.
    # NOTE(review): matching 'frb[1-9]' against forward_tenor as an alternative to
    # a valid swap_tenor looks like it may have been meant for swap_tenor — confirm.
    if not (re.fullmatch('(\\d+)([bdwmy])', swap_tenor) or
            (forward_tenor is not None and re.fullmatch('(frb[1-9])', forward_tenor))):
        raise MqValueError('invalid swap tenor ' + swap_tenor)

    # default benchmark types
    legs_w_defaults = dict()
    legs_w_defaults['spread'] = _get_swap_leg_defaults(currency, spread_benchmark_type, spread_tenor)
    legs_w_defaults['reference'] = _get_swap_leg_defaults(currency, reference_benchmark_type, reference_tenor)
    for key, leg in legs_w_defaults.items():
        if not re.fullmatch('(\\d+)([bdwmy])', leg['floating_rate_tenor']):
            raise MqValueError('invalid floating rate tenor ' + leg['floating_rate_tenor'] +
                               ' index: ' + leg['benchmark_type'])

    forward_tenor = check_forward_tenor(forward_tenor)
    csaTerms = currency.value + '-1'
    kwargs = dict(type='BasisSwap',
                  asset_parameters_termination_date=swap_tenor,
                  asset_parameters_payer_rate_option=legs_w_defaults['spread']['benchmark_type'],
                  asset_parameters_payer_designated_maturity=legs_w_defaults['spread']['floating_rate_tenor'],
                  asset_parameters_receiver_rate_option=legs_w_defaults['reference']['benchmark_type'],
                  asset_parameters_receiver_designated_maturity=legs_w_defaults['reference']['floating_rate_tenor'],
                  asset_parameters_clearing_house=clearing_house.value,
                  asset_parameters_effective_date=forward_tenor,
                  asset_parameters_notional_currency=currency.name,
                  pricing_location=legs_w_defaults['spread']['pricing_location'].value)
    rate_mqid = _convert_asset_for_mdapi_swap_rates(**kwargs)

    _logger.debug('where asset=%s, swap_tenor=%s, spread_benchmark_type=%s, spread_tenor=%s, '
                  'reference_benchmark_type=%s, reference_tenor=%s, forward_tenor=%s, pricing_location=%s ',
                  rate_mqid, swap_tenor, legs_w_defaults['spread']['benchmark_type'],
                  legs_w_defaults['spread']['floating_rate_tenor'],
                  legs_w_defaults['reference']['benchmark_type'],
                  legs_w_defaults['reference']['floating_rate_tenor'],
                  forward_tenor, legs_w_defaults['spread']['pricing_location'].value)
    where = dict(csaTerms=csaTerms)
    q = GsDataApi.build_market_data_query([rate_mqid], QueryType.BASIS_SWAP_RATE, where=where,
                                          source=source, real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    series = ExtendedSeries() if df.empty else ExtendedSeries(df['basisSwapRate'])
    series.dataset_ids = getattr(df, 'dataset_ids', ())
    return series