def delete(self):
    """
    Deletes the DataGrid if it has been persisted.

    :return: None
    :raises MqValueError: if the DataGrid was never persisted
    """
    if not self.id_:
        raise MqValueError('DataGrid has not been persisted.')
    GsSession.current._delete(f'{API}/{self.id_}', request_headers=DATAGRID_HEADERS)
def get_definition(cls, dataset_id: str) -> DataSetEntity:
    """
    Return the definition for *dataset_id*, consulting the class-level cache first.

    :param dataset_id: id of the dataset to look up
    :return: the dataset definition entity
    :raises MqValueError: if the dataset is unknown to the service
    """
    cached = cls.__definitions.get(dataset_id)
    if cached:
        return cached
    fetched = GsSession.current._get('/data/datasets/{}'.format(dataset_id), cls=DataSetEntity)
    if not fetched:
        raise MqValueError('Unknown dataset {}'.format(dataset_id))
    cls.__definitions[dataset_id] = fetched
    return fetched
def rebalance(cls, id_: str, asset_type: AssetType,
              inputs: IndicesRebalanceInputs) -> Union[CustomBasketsResponse, ISelectResponse]:
    """
    Rebalance an index.

    :param id_: Marquee id of the index
    :param asset_type: type of the index asset
    :param inputs: rebalance inputs payload
    :return: CustomBasketsResponse for custom/research baskets, ISelectResponse otherwise
    :raises MqValueError: if the rebalance request fails
    """
    # Use a dedicated local instead of rebinding ``cls``: the original shadowed the
    # classmethod's own class reference, which is confusing and error-prone.
    response_cls = CustomBasketsResponse if asset_type in (AssetType.Custom_Basket,
                                                          AssetType.Research_Basket) else ISelectResponse
    url = f'/indices/{id_}/rebalance'
    try:
        response = GsSession.current._post(url, payload=inputs, cls=response_cls)
    except HTTPError as err:
        raise MqValueError(f'Unable to rebalance index with {err}')
    return response
def create(cls,
           inputs: Union[CustomBasketsCreateInputs, IndicesDynamicConstructInputs]) -> CustomBasketsResponse:
    """
    Create an index from *inputs*.

    :param inputs: creation inputs payload
    :return: the creation response
    :raises MqValueError: if the creation request fails
    """
    try:
        return GsSession.current._post('/indices', payload=inputs, cls=CustomBasketsResponse)
    except HTTPError as err:
        raise MqValueError(f'Unable to create index with {err}')
def save(self) -> str:
    """
    Persist the underlying DataGrid, then the visualization referencing it.

    :return: the id of the saved visualization
    :raises MqValueError: if the figure has not been initialized yet
    """
    if not self._type:
        raise MqValueError('Figure not yet initialized/created. Please create a figure before saving it.')
    grid_id = self.__datagrid.save()
    source = DataVizSource(type=DataVizSourceType.DATAGRID, id=grid_id)
    return super()._save(sources=[source])
def create(self):
    """
    Create the figure via the API and remember the assigned id.

    :return: the id of the newly created figure
    :raises MqValueError: if the figure has not been initialized yet
    """
    if not self._type:
        raise MqValueError('Figure not yet initialized or created. Please create a figure before saving it.')
    response = GsSession.current._post(f'{API}', self._parameters, request_headers=HEADERS)
    self._id = response['id']
    return response['id']
def open(self) -> None:
    """
    Opens the DataViz in the default browser.

    :return: None
    :raises MqValueError: if the visualization has not been created or saved yet
    """
    if self._id is None:
        raise MqValueError('DataViz must be created or saved before opening.')
    domain = GsSession.current.domain.replace('.web', '')
    # The raw API host is not browsable; map it to the Marquee UI host.
    # (Same handling as DataGrid.open elsewhere in this file.)
    if domain == 'https://api.gs.com':
        domain = 'https://marquee.gs.com'
    webbrowser.open(f'{domain}/s/markets/visualizations/{self._id}')
def preprocess_implied_vol_strikes_eq(strike_reference: VolReference = None, relative_strike: Real = None):
    """
    Validate and normalize a (strike_reference, relative_strike) pair for equity implied vol.

    :param strike_reference: how the strike is referenced (delta, normalized, ...)
    :param relative_strike: strike relative to the reference
    :return: tuple of (reference string, normalized relative strike)
    :raises MqValueError: for a missing relative strike or delta_neutral reference
    """
    if relative_strike is None and strike_reference != VolReference.DELTA_NEUTRAL:
        raise MqValueError('Relative strike must be provided if your strike reference is not delta_neutral')
    if strike_reference == VolReference.DELTA_NEUTRAL:
        raise MqValueError('delta_neutral strike reference is not supported for equities.')
    if strike_reference == VolReference.DELTA_PUT:
        relative_strike = abs(100 - relative_strike)
    # Normalized strikes are used as-is; everything else is a percentage.
    if strike_reference != VolReference.NORMALIZED:
        relative_strike = relative_strike / 100
    delta_refs = (VolReference.DELTA_CALL, VolReference.DELTA_PUT, VolReference.DELTA_NEUTRAL)
    ref_string = "delta" if strike_reference in delta_refs else strike_reference.value
    return ref_string, relative_strike
def open(self):
    """
    Opens the DataGrid in the default browser.

    :return: None
    :raises MqValueError: if the grid has not been created or saved yet
    """
    if self.id_ is None:
        raise MqValueError('DataGrid must be created or saved before opening.')
    domain = GsSession.current.domain.replace(".web", "")
    # The raw API host is not browsable; redirect to the Marquee UI host,
    # matching the other DataGrid.open implementation in this file.
    if domain == 'https://api.gs.com':
        domain = 'https://marquee.gs.com'
    webbrowser.open(f'{domain}/s/markets/grids/{self.id_}')
def components(self, value):
    # Setter for the row's components; enforces the 12-column grid budget.
    if len(value) > 12:
        raise MqValueError(f'{value} exceeds the max number of columns of 12.')
    width_sum = 0
    # NOTE(review): this pass sums widths of the *existing* components
    # (self.__components), not the incoming ``value`` — confirm that validating
    # the old state here is intended rather than a stale-state bug.
    for component in self.__components:
        if not isinstance(component, WorkspaceRow):
            width_sum += component.width
    if width_sum > 12:
        raise MqValueError(f'{width_sum} exceeds the max sum of widths of 12.')
    without_width_count = 0
    # NOTE(review): despite its name, this accumulates ``component.width or 1``
    # — the declared width when present, else 1 — not merely a count of
    # width-less components. Verify the name/error message against intent.
    for component in value:
        if not isinstance(component, WorkspaceRow):
            without_width_count += component.width or 1
    if width_sum + without_width_count > 12:
        raise MqValueError(
            f'Cannot fit all components in row due to given total width of {width_sum} '
            f'and {without_width_count} components without a width.')
    self.__components = value
def get_market_data(cls, query) -> pd.DataFrame:
    """
    Run *query* against the market-data endpoint and return the result as a DataFrame.

    :param query: market data query payload
    :return: DataFrame indexed by date/time (empty when there is no response)
    :raises MqValueError: when the response carries error messages
    """
    GsSession.current: GsSession
    body = GsSession.current._post('/data/markets', payload=query)
    container = body['responses'][0]['queryResponse'][0]
    if 'errorMessages' in container:
        raise MqValueError(container['errorMessages'])
    if 'response' not in container:
        return pd.DataFrame()
    frame = pd.DataFrame(container['response']['data'])
    index_col = 'date' if 'date' in frame.columns else 'time'
    frame.set_index(index_col, inplace=True)
    frame.index = pd.to_datetime(frame.index)
    return frame
def schedule(self, start_date: dt.date = None, end_date: dt.date = None, backcast: bool = None):
    """
    Schedule this report over [start_date, end_date].

    For portfolio position sources the dates may be omitted and are then derived
    from the portfolio's position history; other source types require both dates.

    :param start_date: first date to run the report for (optional for portfolios)
    :param end_date: last date to run the report for (optional for portfolios)
    :param backcast: whether the report runs backwards from the earliest position date
    :raises MqValueError: if ids are missing, required dates are absent,
        or the portfolio has no positions
    """
    if None in [self.id, self.__position_source_id]:
        raise MqValueError('Can only schedule reports with valid IDs and Position Source IDs.')
    if self.position_source_type != PositionSourceType.Portfolio and None in [start_date, end_date]:
        raise MqValueError('Must specify schedule start and end dates for report.')
    if None in [start_date, end_date]:
        # Portfolio source with missing dates: infer them from the position history.
        position_dates = GsPortfolioApi.get_position_dates(self.position_source_id)
        if len(position_dates) == 0:
            raise MqValueError('Cannot schedule reports for a portfolio with no positions.')
        if start_date is None:
            # Backcast starts one business day before a year prior to the earliest positions.
            start_date = business_day_offset(min(position_dates) - relativedelta(years=1), -1, roll='forward') \
                if backcast else min(position_dates)
        if end_date is None:
            # Backcast ends at the earliest position date; otherwise run through the prior business day.
            end_date = min(position_dates) if backcast else business_day_offset(dt.date.today(), -1, roll='forward')
    GsReportApi.schedule_report(report_id=self.id,
                                start_date=start_date,
                                end_date=end_date,
                                backcast=backcast)
def open(self):
    """
    Opens the DataGrid in the default browser.

    :return: None
    :raises MqValueError: if the grid has not been created or saved yet
    """
    if self.id_ is None:
        raise MqValueError('DataGrid must be created or saved before opening.')
    base = GsSession.current.domain.replace(".web", "")
    # The raw API host is not browsable; use the Marquee UI host instead.
    if base == 'https://api.gs.com':
        base = 'https://marquee.gs.com'
    webbrowser.open(f'{base}/s/markets/grids/{self.id_}')
def get_thematic_beta(self,
                      basket_identifier: str,
                      start: dt.date = DateLimit.LOW_LIMIT.value,
                      end: dt.date = dt.date.today()) -> pd.DataFrame:
    """
    Return this asset's thematic betas to the given basket as a date-indexed frame.

    :param basket_identifier: identifier of the custom or research basket
    :param start: start date of the beta history
    :param end: end date of the beta history
    :return: DataFrame with assetId, basketId and thematicBeta columns, indexed by date
    :raises NotImplementedError: if the entity is not an asset
    :raises MqValueError: if the basket cannot be resolved or is the wrong type
    """
    if not self.positioned_entity_type == EntityType.ASSET:
        raise NotImplementedError
    resolved = GsAssetApi.resolve_assets(identifier=[basket_identifier],
                                         fields=['id', 'type'],
                                         limit=1)[basket_identifier]
    _id, _type = get(resolved, '0.id'), get(resolved, '0.type')
    if len(resolved) == 0 or _id is None:
        raise MqValueError(f'Basket could not be found using identifier {basket_identifier}.')
    if _type not in BasketType.to_list():
        raise MqValueError(f'Asset {basket_identifier} of type {_type} is not a Custom or Research Basket.')
    query = DataQuery(where={'assetId': self.id, 'basketId': _id}, start_date=start, end_date=end)
    rows = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.COMPOSITE_THEMATIC_BETAS.value)
    records = [{'date': r['date'],
                'assetId': r['assetId'],
                'basketId': r['basketId'],
                'thematicBeta': r['beta']} for r in rows]
    return pd.DataFrame(records).set_index('date')
def _convert_pos_set_with_weights(position_set: PositionSet, currency: Currency) -> PositionSet:
    """
    Price a weight-based position set into quantities at its reference notional.

    :param position_set: set whose positions carry weights (and no quantities)
    :param currency: currency of the reference notional
    :return: a new PositionSet with priced quantities
    :raises MqValueError: for missing weights, present quantities, or pricing failures
    """
    positions_to_price = []
    for pos in position_set.positions:
        if pos.weight is None:
            raise MqValueError('If you are uploading a position set with a notional value, every position in that '
                               'set must have a weight')
        if pos.quantity is not None:
            raise MqValueError('If you are uploading a position set with a notional value, no position in that '
                               'set can have a quantity')
        positions_to_price.append({'assetId': pos.asset_id, 'weight': pos.weight})
    payload = {
        'positions': positions_to_price,
        'parameters': {
            'targetNotional': position_set.reference_notional,
            'currency': currency.value,
            'pricingDate': position_set.date.strftime('%Y-%m-%d'),
            'assetDataSetId': 'GSEOD',
            'notionalType': 'Gross'
        }
    }
    try:
        price_results = GsSession.current._post('/price/positions', payload)
    except Exception as e:
        raise MqValueError('There was an error pricing your positions. Please try uploading your positions as '
                           f'quantities instead: {e}')
    priced = [Position(identifier=p['assetId'], asset_id=p['assetId'], quantity=p['quantity'])
              for p in price_results['positions']]
    return PositionSet(date=position_set.date, positions=priced)
def _get_inflation_swap_data(asset: Asset, swap_tenor: str, index_type: str = None,
                             forward_tenor: Optional[GENERIC_DATE] = None,
                             clearing_house: tm_rates._ClearingHouse = None,
                             source: str = None, real_time: bool = False,
                             query_type: QueryType = QueryType.SWAP_RATE) -> pd.DataFrame:
    """
    Fetch inflation swap market data for the currency implied by *asset*.

    :param asset: asset object loaded from security master
    :param swap_tenor: relative date representation of the swap termination, e.g. '1y'
    :param index_type: inflation index type; defaulted per currency when omitted
    :param forward_tenor: forward starting point, e.g. '1y' or 'Spot'
    :param clearing_house: clearing house, e.g. 'LCH'
    :param source: name of function caller
    :param real_time: whether intraday data is requested (not supported)
    :param query_type: market data query type to run
    :return: DataFrame of market data for the resolved swap asset
    :raises NotImplementedError: for real-time requests or unsupported currencies
    :raises MqValueError: for an invalid swap tenor
    """
    if real_time:
        raise NotImplementedError('realtime inflation swap data not implemented')
    currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID))
    if currency.value not in CURRENCY_TO_INDEX_BENCHMARK.keys():
        raise NotImplementedError('Data not available for {} inflation swap rates'.format(currency.value))
    index_type = _check_inflation_index_type(currency, index_type)
    clearing_house = tm_rates._check_clearing_house(clearing_house)
    defaults = _get_inflation_swap_leg_defaults(currency, index_type)
    if not (tm_rates._is_valid_relative_date_tenor(swap_tenor)):
        raise MqValueError('invalid swap tenor ' + swap_tenor)
    forward_tenor = tm_rates._check_forward_tenor(forward_tenor)
    # Inflation swaps are queried at-the-money; the fixed rate is not user-selectable here.
    fixed_rate = 'ATM'
    kwargs = dict(type='InflationSwap',
                  asset_parameters_termination_date=swap_tenor,
                  asset_parameters_index=defaults['index_type'],
                  asset_parameters_fixed_rate=fixed_rate,
                  asset_parameters_clearing_house=clearing_house.value,
                  asset_parameters_effective_date=forward_tenor,
                  asset_parameters_notional_currency=currency.name)
    # Resolve the Marquee asset matching the swap parameters above.
    rate_mqid = _get_tdapi_inflation_rates_assets(**kwargs)
    _logger.debug(f'where asset= {rate_mqid}, swap_tenor={swap_tenor}, index={defaults["index_type"]}, '
                  f'forward_tenor={forward_tenor}, pricing_location={defaults["pricing_location"].value}, '
                  f'clearing_house={clearing_house.value}, notional_currency={currency.name}')
    q = GsDataApi.build_market_data_query([rate_mqid], query_type, source=source, real_time=real_time)
    _logger.debug('q %s', q)
    df = _market_data_timed(q)
    return df
def from_target(cls, report: TargetReport):
    """
    Build a FactorRiskReport from a target report object.

    :param report: the target report to convert
    :return: a FactorRiskReport mirroring the target's fields
    :raises MqValueError: if the report is not a factor risk report
    """
    if report.type not in [ReportType.Portfolio_Factor_Risk, ReportType.Asset_Factor_Risk]:
        raise MqValueError('This report is not a factor risk report.')
    params = report.parameters
    return FactorRiskReport(risk_model_id=params.risk_model,
                            fx_hedged=params.fx_hedged,
                            report_id=report.id,
                            position_source_id=report.position_source_id,
                            position_source_type=report.position_source_type,
                            report_type=report.type,
                            latest_end_date=report.latest_end_date,
                            status=report.status,
                            percentage_complete=report.percentage_complete)
def _check_tenor_type(tenor_type: _SwapTenorType) -> _SwapTenorType:
    """
    Normalize *tenor_type* to a _SwapTenorType member.

    Accepts a member, a case-insensitive member name as a string, or None
    (which defaults to FORWARD_TENOR).

    :param tenor_type: value to normalize
    :return: the corresponding _SwapTenorType member
    :raises MqValueError: for any other value
    """
    if isinstance(tenor_type, str) and tenor_type.upper() in _SwapTenorType.__members__:
        tenor_type = _SwapTenorType[tenor_type.upper()]
    if tenor_type is None:
        return _SwapTenorType.FORWARD_TENOR
    elif isinstance(tenor_type, _SwapTenorType):
        return tenor_type
    else:
        # Use an f-string so non-str inputs (e.g. ints) still raise MqValueError
        # instead of a TypeError from str concatenation.
        raise MqValueError(f'invalid tenor_type: {tenor_type} choose one among '
                           + ', '.join([ch.value for ch in _SwapTenorType]))
def __init__(self, id_: str = None, datagrid_id: str = None, datagrid: DataGrid = None, *,
             entitlements: Union[Entitlements, Entitlements_] = None, dataviz_dict: dict = None):
    """
    Initialize a DataViz backed by a DataGrid.

    The grid is identified by exactly one of: an existing viz ``id_`` (the grid
    is fetched from the viz's single DATAGRID source), an in-memory ``datagrid``,
    or a ``datagrid_id`` to fetch. When ``dataviz_dict`` supplies a pre-fetched
    viz payload, ``datagrid`` must also be passed.

    :raises MqValueError: when the viz entity cannot be fetched, the source is
        not a single DATAGRID, or no grid / grid id is supplied
    """
    super().__init__(self.__class__.__name__, id_, entitlements=entitlements, dataviz_dict=dataviz_dict)
    if id_:
        # Hydrating an existing viz: the base __init__ is expected to have
        # populated _viz_response and _sources from the service.
        if not self._viz_response:
            raise MqValueError('Unable to instantiate DataViz. Unable to fetch visualization entity.')
        if len(self._sources) == 1 and self._sources[0].type == DataVizSourceType.DATAGRID.value:
            self.__datagrid = GsDataGridApi.get_datagrid(self._sources[0].id)
        else:
            raise MqValueError('Unable to instantiate DataViz. Invalid datagrid source or multiple set.')
    elif dataviz_dict:
        # Pre-fetched viz payload: the grid object itself must be supplied.
        if datagrid:
            self.__datagrid = datagrid
        else:
            raise MqValueError('A valid Datagrid is required to initialize DataViz.')
    else:
        # Fresh viz: accept an in-memory grid or fetch one by id.
        if datagrid:
            self.__datagrid = datagrid
        elif datagrid_id:
            self.__datagrid = GsDataGridApi.get_datagrid(datagrid_id)
        else:
            raise MqValueError('There must be a valid Datagrid or DataGrid Id to create a visualization.')
def save(self) -> str:
    """
    Persist the component with its header and template sources.

    :return: the id of the saved visualization
    :raises MqValueError: if the figure is uninitialized, or header/template
        identifiers are missing
    """
    if not self.__initialized:
        raise MqValueError('Figure not yet initialized/created. Please create a figure before saving it.')
    has_header = self.__header_id or self.__header_alias
    has_template = self.__template_id or self.__template_alias
    if not has_header or not has_template:
        raise MqValueError('Header and Template ID or alias required to persist component.')
    sources = [
        DataVizSource(type=DataVizSourceType.HEADER, id=self.__header_id, alias=self.__header_alias),
        DataVizSource(type=DataVizSourceType.TEMPLATE, id=self.__template_id, alias=self.__template_alias),
    ]
    return super()._save(sources=sources)
def _save(self, sources: List[DataVizSource] = None) -> str:
    """
    Create or update the visualization and return its id.

    :param sources: when given, replaces the viz's sources before saving
    :return: the id of the persisted visualization
    :raises MqValueError: if the figure has no type yet, or the API call fails
    """
    if self._type:
        try:
            if sources:
                self._sources = sources
            figure_json = self.__as_json()
            if self._id:
                # Existing entity: update in place.
                GsSession.current._put(f'{API}/{self._id}', figure_json, request_headers=HEADERS)
            else:
                # New entity: create it and adopt the id the service assigned.
                response = GsSession.current._post(f'{API}', figure_json, request_headers=HEADERS)
                self._id = response['id']
            return self._id
        except Exception as e:
            # NOTE(review): broad catch converts any failure (including bugs in
            # __as_json) into MqValueError — confirm this is intentional.
            raise MqValueError(f'Unable to save DataViz. {e}')
    else:
        raise MqValueError('Figure not yet initialized or created. Please create a figure before saving it.')
def __init__(self, start=None, end=None, interval=None):
    """
    Initialize the context.

    :param start: range start
    :param end: range end
    :param interval: optional sampling interval like '1m', '2h', '3d'
    :raises MqTypeError: if interval is not a str
    :raises MqValueError: if interval is not of the form <1-999><letter>
    """
    super().__init__()
    self.__start = start
    self.__end = end
    if interval is None:
        self.__interval = None
        return
    if not isinstance(interval, str):
        raise MqTypeError('interval must be a str')
    # 1-3 digits (no leading zero) followed by a single lowercase unit letter.
    if re.fullmatch('[1-9]\\d{0,2}[a-z]', interval) is None:
        raise MqValueError('interval must be a valid str e.g. 1m, 2h, 3d')
    self.__interval = interval
def backtest_result_from_response(cls, response: dict) -> BacktestResult:
    """
    Convert a raw backtest response dict into a BacktestResult.

    :param response: service response with 'RiskData' and optional 'Portfolio'
    :return: BacktestResult with per-risk timeseries
    :raises MqValueError: if the response has no risk data
    """
    if 'RiskData' not in response:
        raise MqValueError('No risk data received')
    portfolio = response.get('Portfolio')
    risks = tuple(
        BacktestRisk(name=risk_name,
                     timeseries=tuple(FieldValueMap(date=point['date'], value=point['value'])
                                      for point in points))
        for risk_name, points in response['RiskData'].items())
    return BacktestResult(portfolio=portfolio, risks=risks)
def crosscurrency_swap_rate(asset: Asset, swap_tenor: str, rateoption_type: str = None,
                            forward_tenor: Optional[GENERIC_DATE] = None,
                            clearing_house: tm_rates._ClearingHouse = None,
                            location: PricingLocation = None, *,
                            source: str = None, real_time: bool = False) -> Series:
    """
    GS end-of-day Zero Coupon CrossCurrency Swap curves across major currencies.

    :param asset: asset object loaded from security master
    :param swap_tenor: relative date representation of expiration date e.g. 1m
    :param rateoption_type: benchmark type e.g. LIBOR
    :param forward_tenor: absolute / relative date representation of forward starting point
        eg: '1y' or 'Spot' for spot starting swaps, 'imm1' or 'frb1'
    :param clearing_house: Example - "LCH", "EUREX", "JSCC", "CME"
    :param location: Example - "TKO", "LDN", "NYC"
    :param source: name of function caller
    :param real_time: whether to retrieve intraday data instead of EOD
    :return: swap rate curve
    :raises MqValueError: for asset types other than Cross or Currency
    """
    if asset.get_type().value == AssetType.Cross.value:
        # A cross (e.g. 'EURUSD'): split the 6-character pair name into its
        # two 3-letter currencies and resolve each against security master.
        pair = asset.name
        [under, over] = [pair[i:i + 3] for i in range(0, 6, 3)]
        asset1 = SecurityMaster.get_asset(under, AssetIdentifier.BLOOMBERG_ID)
        asset2 = SecurityMaster.get_asset(over, AssetIdentifier.BLOOMBERG_ID)
    elif asset.get_type().value == AssetType.Currency.value:
        # A single currency is quoted against USD.
        asset1 = asset
        asset2 = SecurityMaster.get_asset("USD", AssetIdentifier.BLOOMBERG_ID)
    else:
        raise MqValueError('Asset type not supported ' + asset.get_type().value)
    df = _get_crosscurrency_swap_data(asset1=asset1, asset2=asset2, swap_tenor=swap_tenor,
                                      rateoption_type=rateoption_type, forward_tenor=forward_tenor,
                                      clearing_house=clearing_house, source=source,
                                      real_time=real_time, query_type=QueryType.XCCY_SWAP_SPREAD,
                                      location=location)
    # Empty result -> empty float series; either way, carry the dataset ids through.
    series = ExtendedSeries(dtype=float) if df.empty else ExtendedSeries(df['xccySwapSpread'])
    series.dataset_ids = getattr(df, 'dataset_ids', ())
    return series
def _get_factor_data(report_id: str, factor_name: str, query_type: QueryType) -> pd.Series:
    """
    Extract a timeseries of one factor's measure from a factor risk report.

    :param report_id: id of the factor risk report
    :param factor_name: a risk-model factor name, or one of 'Factor', 'Specific', 'Total'
    :param query_type: which report measure to extract (drives the output column name)
    :return: series of the requested measure over the current DataContext range
    :raises MqValueError: for non factor-risk reports, or named factors with
        DAILY_RISK / ANNUAL_RISK query types
    """
    # Check params
    report = RiskReport(report_id)
    if report.get_type() not in [ReportType.Portfolio_Factor_Risk, ReportType.Asset_Factor_Risk]:
        raise MqValueError('This report is not a factor risk report')
    risk_model_id = report.get_risk_model_id()
    if factor_name not in ['Factor', 'Specific', 'Total']:
        if query_type in [QueryType.DAILY_RISK, QueryType.ANNUAL_RISK]:
            raise MqValueError('Please pick a factor name from the following: ["Total", "Factor", "Specific"]')
        # Resolve the display name against the risk model's factor list.
        factor = Factor(risk_model_id, factor_name)
        factor_name = factor.name

    # Extract relevant data for each date.
    # Column name is the camelCased query type, e.g. 'Factor Exposure' -> 'factorExposure'.
    col_name = query_type.value.replace(' ', '')
    col_name = decapitalize(col_name)
    # Data key strips a leading 'factor' prefix: 'factorExposure' -> 'exposure';
    # other column names map to themselves.
    data_type = decapitalize(col_name[6:]) if col_name.startswith('factor') else col_name
    factor_data = report.get_factor_data(factor=factor_name,
                                         start_date=DataContext.current.start_time,
                                         end_date=DataContext.current.end_time)
    factor_exposures = [{'date': data['date'], col_name: data[data_type]}
                        for data in factor_data if data.get(data_type)]

    # Create and return timeseries
    df = pd.DataFrame(factor_exposures)
    if not df.empty:
        df.set_index('date', inplace=True)
        df.index = pd.to_datetime(df.index)
    return _extract_series_from_df(df, query_type)
def __init__(self, risk_model_id: str, factor_name: str):
    """
    Look up a factor by display name within a risk model.

    :param risk_model_id: id of the risk model to search
    :param factor_name: display name of the factor
    :raises MqValueError: if no factor with that name exists in the model
    """
    risk_model = FactorRiskModel(risk_model_id)
    factor_data = risk_model.get_factor_data(format=ReturnFormat.JSON)
    name_matches = [factor for factor in factor_data if factor['name'] == factor_name]
    if not name_matches:
        # Fixed error-message grammar ('does not in exist in' -> 'does not exist in').
        raise MqValueError(f'Factor with name {factor_name} does not exist in risk model {risk_model_id}')
    factor = name_matches.pop()
    self.__risk_model_id: str = risk_model_id
    self.__id = factor['identifier']
    self.__name: str = factor['name']
    self.__type: str = factor['type']
    self.__category: str = factor.get('factorCategory')
def get_market_data(cls, query) -> pd.DataFrame:
    """
    Run *query* against the market-data endpoint.

    :param query: market data query payload
    :return: MarketDataResponseFrame of results (empty when there is no response),
        with ``dataset_ids`` attached
    :raises MqValueError: when the response carries error messages
    """
    GsSession.current: GsSession
    body = GsSession.current._post('/data/markets', payload=query)
    # Responses are nested one level per request; a single query is sent here.
    container = body['responses'][0]['queryResponse'][0]
    if 'errorMessages' in container:
        raise MqValueError(f"market data request {body['requestId']} failed: {container['errorMessages']}")
    if 'response' not in container:
        df = MarketDataResponseFrame()
    else:
        df = MarketDataResponseFrame(container['response']['data'])
        # Prefer the daily 'date' column; intraday responses index by 'time'.
        df.set_index('date' if 'date' in df.columns else 'time', inplace=True)
        df.index = pd.to_datetime(df.index)
    df.dataset_ids = tuple(container.get('dataSetIds', ()))
    return df
def result(self):
    """
    :return: a Pandas DataFrame containing the results of the report job,
        or None for report types without retrievable results
    :raises MqValueError: if the job is cancelled, errored, or not yet done
    """
    status = self.status()
    if status == ReportStatus.cancelled:
        # Fixed message grammar ('job in status' -> 'job is in status'),
        # matching the adjacent error-status message.
        raise MqValueError('This report job is in status "cancelled". Cannot retrieve results.')
    if status == ReportStatus.error:
        raise MqValueError('This report job is in status "error". Cannot retrieve results.')
    if status != ReportStatus.done:
        raise MqValueError('This report job is not done. Cannot retrieve results.')
    if self.__report_type in [ReportType.Portfolio_Factor_Risk, ReportType.Asset_Factor_Risk]:
        results = GsReportApi.get_factor_risk_report_results(risk_report_id=self.__report_id,
                                                             start_date=self.__start_date,
                                                             end_date=self.__end_date)
        return pd.DataFrame(results)
    if self.__report_type == ReportType.Portfolio_Performance_Analytics:
        query = DataQuery(where={'reportId': self.__report_id},
                          start_date=self.__start_date,
                          end_date=self.__end_date)
        results = GsDataApi.query_data(query=query, dataset_id=ReportDataset.PPA_DATASET.value)
        return pd.DataFrame(results)
    return None
def __resolve_identifiers(identifiers: List[str], date: datetime.date) -> Dict:
    """
    Map each identifier to the id and name of its top resolved asset as of *date*.

    :param identifiers: identifiers to resolve
    :param date: as-of date for resolution
    :return: dict of identifier -> {'id', 'name'}
    :raises MqValueError: listing any identifiers that could not be resolved
    """
    response = GsAssetApi.resolve_assets(identifier=identifiers, fields=['name', 'id'], limit=1, as_of=date)
    try:
        id_map = {identifier: dict(id=assets[0]['id'], name=assets[0]['name'])
                  for identifier, assets in response.items()}
    except IndexError:
        # An empty match list means that identifier could not be resolved.
        unmapped_assets = {_id for _id, asset in response.items() if not asset}
        raise MqValueError(f'Error in resolving the following identifiers: {unmapped_assets}')
    return id_map
def convert_positions_to_priceables(self) -> dict:
    """
    Resolve every position set's positions into instruments.

    :return: dict mapping each position-set date to its list of priceables
    :raises MqValueError: if any position on a date cannot be resolved
    """
    position_sets = self.get_position_sets() if self.id else self.position_sets
    date_to_priceables_map = {}
    for pos_set in position_sets:
        date = pos_set.date
        priceables = GsAssetApi.get_instruments_for_positions([pos.to_target() for pos in pos_set.positions])
        if None in priceables:
            # Bug fix: the original message left its '{}' placeholder unfilled;
            # interpolate the date so the error identifies the failing set.
            raise MqValueError(f'All positions on {date} could not be successfully resolved into instruments')
        date_to_priceables_map[date] = priceables
    return date_to_priceables_map