def test_enum():
    education = Education(school="My Alma Mater", degree=Degree.MASTERS)

    # to_dict() => convert model to dictionary
    d = to_dict(education)
    assert d == {
        'school': 'My Alma Mater',
        'degree': Degree.MASTERS.value,
        'field_of_study': None,
        'from_year': None,
        'to_year': None
    }

    # now load from dictionary...
    new_education = Education(**d)
    assert to_dict(new_education) == d
    assert new_education == education

    d = to_dict(new_education)
    assert d == {
        'school': 'My Alma Mater',
        'degree': Degree.MASTERS.value,
        'field_of_study': None,
        'from_year': None,
        'to_year': None
    }
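# A minimal sketch of the enum assumed by test_enum above. The member names
# (BACHELORS, MASTERS) come from the tests; the string values are illustrative
# assumptions. The test relies on to_dict() emitting the enum member's .value.
from enum import Enum, unique


@unique
class Degree(Enum):
    BACHELORS = "Bachelors"  # assumed value, not taken from the tests
    MASTERS = "Masters"      # assumed value, not taken from the tests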
async def do_fetch(self, step_state):
    fetch = step_state.get_fetch()
    get_logger().debug("fetch request", **related.to_dict(fetch))
    kw = fetch.get_kwargs(is_aiohttp=True)

    try:
        method, url = fetch.method, fetch.url
        async with self.http.request(method, url, **kw) as context:
            response = await self.get_response(context)
            status = context.status

    except Exception as e:  # pragma: no cover
        get_logger().error(
            "do_fetch exception",
            error=e,
            **related.to_dict(fetch)
        )
        response = "Error"
        status = 500

    get_logger().debug(
        "fetch response",
        response=response,
        status=status,
        **related.to_dict(fetch)
    )

    return response, status
def test_person_with_address():
    address = Address(street="123 Main Street", city="Springfield",
                      zipcode="12345")
    assert repr(address) == "Address(street='123 Main Street', " \
                            "city='Springfield', zipcode='12345', " \
                            "street_two=None)"

    # repr = False for address
    person = Person(name="Jim", address=address)
    assert repr(person) == "Person(name='Jim', age=None)"

    # to_dict() => convert model to dictionary
    d = to_dict(person)
    assert d == {
        'address': {
            'city': 'Springfield',
            'street': '123 Main Street',
            'street_two': None,
            'zipcode': '12345'
        },
        'education': [],
        'age': None,
        'name': 'Jim'
    }

    # now load from dictionary...
    new_person = Person(**d)
    assert to_dict(new_person) == d
    assert new_person == person
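# A minimal sketch of the Address model the tests above appear to assume.
# Field names come from the asserted dictionaries; the field types and the
# use of @related.immutable are assumptions, not copied from the test suite.
import related


@related.immutable
class Address(object):
    street = related.StringField()
    city = related.StringField()
    zipcode = related.StringField()
    street_two = related.StringField(required=False)


# Round trip: to_dict() flattens the model to plain data, and to_model() (or
# the class constructor, as in the tests above) rebuilds an equal instance.
address = Address(street="123 Main Street", city="Springfield", zipcode="12345")
data = related.to_dict(address)
assert related.to_model(Address, data) == address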
def do_fetch(self, step_state):
    fetch = step_state.get_fetch()
    get_logger().debug("fetch request", **related.to_dict(fetch))
    kw = fetch.get_kwargs(is_aiohttp=False)

    context = requests.request(fetch.method, fetch.url, **kw)
    response = self.get_response(context)
    status = context.status_code

    get_logger().debug("fetch response", response=response, status=status,
                       **related.to_dict(fetch))

    return response, status
def get_headers(self, content_type=None):
    headers = {}

    # right now
    if content_type:
        headers[const.CONTENT_TYPE] = content_type

    headers.update(related.to_dict(self.suite.headers) or {})

    if self.case:
        headers.update(related.to_dict(self.case.headers) or {})

    headers.update(related.to_dict(self.request.headers) or {})

    return Namespace(headers).evaluate(self.namespace)
def _upgrade_2019_09_05_specification(specification):
    from flask_potion import fields
    from web.server.api.aqt.api_models import GranularityResource

    granularity_converter = fields.ToOne(GranularityResource).converter
    for query in list(specification['queries'].values()):
        is_advanced_query_item = query['isAdvancedQueryItem']
        settings = specification['settings'][query['settingId']]
        grouping_settings_map = settings['groupBySettings']['groupings']
        if is_advanced_query_item:
            # find the time granularity grouping (if there is one). There
            # can only be up to one.
            granularity = None
            for aqt_group in query['advancedGroups']:
                if aqt_group['type'] == 'GRANULARITY':
                    granularity = granularity_converter(aqt_group['item'])
                    break

            if granularity:
                for group_settings in list(grouping_settings_map.values()):
                    # check if a group setting has an invalid displayValueFormat
                    if (group_settings['id'] == 'timestamp'
                            and group_settings['displayValueFormat'] == 'DEFAULT'):
                        group_settings['displayValueFormat'] = granularity.id
                        break

    upgraded_specification = related.to_model(DashboardSpecification_20190911,
                                              specification)
    upgraded_specification.version = VERSION_2019_09_11
    return related.to_dict(upgraded_specification)
def test_renamed():
    obj = MyModel(
        is_for="Elise",
        criss="A",
        cross="B",
        is_not=True,
        is_list=["a", "b", "c"],
        is_dict={5: MyChild(my_int=5, my_uuid=EXAMPLE_UUID, my_float=3.14)},
        is_type=DataType.OBJECT,
        is_enum=IntEnum.a,
    )

    d = related.to_dict(obj)
    assert related.to_model(MyModel, d) == obj

    d.pop("dict")
    assert d == {
        "for": "Elise",
        "criss": "B",
        "cross": "A",
        "not": True,
        "list": ["a", "b", "c"],
        "type": "object",
        "enum": 1
    }
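# A hedged sketch of how the renamed keys in test_renamed above are presumed
# to work: related fields accept a key= argument controlling the dictionary
# key used by to_dict()/to_model(). Only two fields are shown; the class name,
# field types, and the criss/cross swap in the real MyModel are assumptions,
# not code taken from the test suite.
import related


@related.immutable
class RenamedExample(object):
    is_for = related.StringField(key="for")    # serialized under "for"
    is_not = related.BooleanField(key="not")   # serialized under "not"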
def _upgrade_2019_04_11_specification(specification):
    old_specification = DashboardSpecification_20190411(
        specification).serialize()
    upgraded_specification = related.to_model(DashboardSpecification_20190524,
                                              old_specification)
    upgraded_specification.version = VERSION_2019_05_24
    return related.to_dict(upgraded_specification)
def update_all(self, items):
    '''Modify alerts.'''
    json_obj = [to_dict(item) for item in items]
    bulk_url = '%s%s' % (self._uri, '/bulk')
    return self.patch(bulk_url, json=json_obj)
def train_detectors_for_metric_configs(metric_configs):
    exit_code = 0
    for metric_config in metric_configs:
        metric = Metric(related.to_dict(metric_config),
                        metric_config.datasource)
        try:
            updated_detectors = []
            for detector in metric.detectors:
                if detector.needs_training:
                    detector.train(data=metric.query(),
                                   metric_type=metric.config["type"])
                    updated_detector = metric._detector_client.update_detector(
                        detector)
                    updated_detectors.append(updated_detector)
                    logging.info(
                        f"Trained '{detector.type}' detector with UUID: {detector.uuid}"
                    )
                else:
                    logging.info(
                        f"Training not required for '{detector.type}' detector with UUID: {detector.uuid}"
                    )
        except AdaptiveAlertingDetectorBuildError as e:
            logging.error(
                f"Unable to train detector for metric '{metric_config.name}', {e.msg}! Skipping!"
            )
            exit_code = 0
        except Exception as e:
            logging.exception(
                f"Exception {e.__class__.__name__} while training detector(s) for metric {metric_config.name}! Skipping!"
            )
            trace = traceback.format_exc()
            logging.debug(f"Traceback: {trace}")
            exit_code = 1
    return exit_code
def test_dictionary(company):
    company_dict = to_dict(company)
    assert company_dict['name'] == 'Acme Inc.'
    assert isinstance(company_dict['uuid'], string_types)
    assert isinstance(company_dict['url'], string_types)
    assert company_dict['established'] == "01/02/1903"
    assert company_dict['closed'] == "1904-05-06"
def build_detectors_for_metric_configs(metric_configs):
    exit_code = 0
    for metric_config in metric_configs:
        metric = Metric(related.to_dict(metric_config),
                        metric_config.datasource)
        try:
            new_detectors = metric.build_detectors()
            for detector in new_detectors:
                logging.info(
                    f"New '{detector.type}' detector created with UUID: {detector.uuid}"
                )
            if not new_detectors:
                logging.info(
                    f"No detectors built for metric '{metric_config.name}'")
        except AdaptiveAlertingDetectorBuildError as e:
            logging.warning(
                f"Unable to train detector for metric '{metric_config.name}', {e.msg}! Skipping!"
            )
        except Exception as e:
            logging.exception(
                f"Exception {e.__class__.__name__} while creating detector for metric {metric_config.name}! Skipping!"
            )
            trace = traceback.format_exc()
            logging.debug(f"Traceback: {trace}")
            exit_code = 1
    return exit_code
def test_person():
    person = Person(name="Bob")
    assert repr(person) == "Person(name='Bob', age=None)"

    person = Person(name="Bill", age=40)
    assert repr(person) == "Person(name='Bill', age=40)"

    person = Person(name="John", age="50")
    assert repr(person) == "Person(name='John', age=50)"

    d = to_dict(person)
    assert d == {'address': None, 'education': [], 'age': 50, 'name': 'John'}

    new_person = Person(**d)
    assert to_dict(new_person) == d
    assert new_person == person
def test_person_with_education_sequence():
    person = Person(name="Brainy", education=[
        Education(school="School 2", degree=Degree.MASTERS),
        Education(school="School 1", degree=Degree.BACHELORS),
    ])

    # to_dict() => convert model to dictionary
    d = to_dict(person)
    assert d == {
        'address': None,
        'age': None,
        'education': [{
            'degree': Degree.MASTERS.value,
            'field_of_study': None,
            'from_year': None,
            'school': 'School 2',
            'to_year': None
        }, {
            'degree': Degree.BACHELORS.value,
            'field_of_study': None,
            'from_year': None,
            'school': 'School 1',
            'to_year': None
        }],
        'name': 'Brainy'
    }

    # now load from dictionary...
    new_person = Person(**d)
    assert new_person.education == person.education
    assert new_person == person
def save_metric_detector_mapping(self, detector_uuid, metric):
    metric_detector_mapping = build_metric_detector_mapping(detector_uuid,
                                                            metric)
    create_metric_detector_mapping = requests.post(
        f"{self._url}/api/detectorMappings",
        json=related.to_dict(metric_detector_mapping),
        timeout=30,
    )
    create_metric_detector_mapping.raise_for_status()
def diff_metric_configs(previous_metric_configs, current_metric_configs):
    diff = {"added": [], "modified": [], "deleted": []}
    previous_metric_config_keys = [i.tag_key for i in previous_metric_configs]
    previous_metric_json_configs = [
        related.to_json(i) for i in previous_metric_configs
    ]
    current_metric_config_keys = [i.tag_key for i in current_metric_configs]

    for current_metric_config in current_metric_configs:
        if current_metric_config.tag_key not in previous_metric_config_keys:
            diff["added"].append(related.to_dict(current_metric_config))
        elif related.to_json(
                current_metric_config) not in previous_metric_json_configs:
            diff["modified"].append(related.to_dict(current_metric_config))

    for previous_metric_config in previous_metric_configs:
        if previous_metric_config.tag_key not in current_metric_config_keys:
            diff["deleted"].append(related.to_dict(previous_metric_config))

    return diff
def update(self, alert_notif):
    '''Modify alert notification.'''
    notif_json = to_dict(alert_notif)
    # NOTE(toshi): $uri isn't part of schema
    notif_json.pop('$uri')
    url = '%s%s' % (self.base_uri, alert_notif.uri)
    return self.patch(url, json=notif_json)
def _upgrade_2019_08_26_specification(specification):
    specification_keys_to_keep = [
        'options',
        'dateRanges',
        'filters',
        'items',
        'queries',
        'settings',
        'version',
    ]
    query_keys_to_move = [
        'type',
        'customFields',
        'filterModalSelections',
        'frontendSelectionsFilter',
        'isAdvancedQueryItem',
        'settingId',
    ]

    # Any query object that is not linked to a layout item should be removed
    queries = {}
    for item_id in specification['items']:
        item = specification['items'][item_id]
        size = specification['sizes'][item['sizeId']]
        layout_metadata = {
            'upperX': item['upperX'],
            'upperY': item['upperY'],
            'rows': size['rows'],
            'columns': size['columns'],
            'isLocked': item['isLocked'],
        }
        query_id = item['queryId']
        new_item = {
            'id': item_id,
            'name': item['name'],
            'layoutMetadata': layout_metadata,
        }

        query = specification['queries'][query_id]
        for key in query_keys_to_move:
            query[key] = item[key]
        query['itemId'] = item_id

        specification['items'][item_id] = new_item
        queries[query_id] = query

    new_specification = {
        key: specification[key] for key in specification_keys_to_keep
    }
    new_specification['queries'] = queries
    new_specification['text_elements'] = {}

    upgraded_specification = related.to_model(DashboardSpecification_20190905,
                                              new_specification)
    upgraded_specification.version = VERSION_2019_09_05
    return related.to_dict(upgraded_specification)
def add_all(self, items):
    '''Add all alert notifications.'''
    json_obj = [to_dict(item) for item in items]
    # NOTE(toshi): We have to remove $uri
    # pylint: disable=W0106
    [item.pop('$uri') for item in json_obj]
    bulk_url = '%s%s' % (self._uri, '/bulk')
    return self.post(bulk_url, json=json_obj)
def load(cls, paths):
    """Find rigor.yml file and load it into a Config object."""
    file_path = cls.find_file_path(paths)

    if file_path and os.path.exists(file_path):
        content = open(file_path).read()
        config = cls.loads(content, file_path)
        get_logger().info("config file", file_path=file_path)
        get_logger().debug("config details", **related.to_dict(config))
    else:
        config = cls()
        get_logger().info("config file not found", paths=paths)

    return config
def format_and_upgrade_specification(dashboard_specification):
    try:
        final_specification = convert_and_upgrade_specification(
            dashboard_specification)
        return related.to_dict(final_specification)
    except ValueError as e:
        logger = g.request_logger if hasattr(g, 'request_logger') else LOG
        title = get_dashboard_title(dashboard_specification)
        message = (u'Could not load specification for Dashboard \'{title}\'. '
                   'Error was: {error}.').format(title=title, error=e)
        error = ValidationResult(message,
                                 ValidationFault.MALFORMED_SPECIFICATION)
        logger.warning(message)
        logger.warning(e)
        raise BadDashboardSpecification([error])
def add_query_to_custom_dashboard(
    raw_specification,
    view_type,
    query_selections,
    query_result_spec,
    is_advanced_query=False,
):
    # TODO(vedant) - We will need to actually have a merge function in the
    # server-side Dashboard Model. This is in the event that we end up
    # overwriting IDs in the input specification. Since ids are
    # a concatenation of UUID and type, it is EXTREMELY unlikely that we will
    # ever run into this scenario but we will need to eventually take it into
    # account.

    # First, create a brand new dashboard spec using only this query.
    # This will create any top-level dictionaries that we need (e.g. for
    # settings, sizes, items, etc.). From there we just merge these things
    # into the specification to update.
    converted_specification = convert_query_to_dashboard(
        view_type, query_selections, query_result_spec, is_advanced_query)

    # the input specification may have been from an older version, so we
    # need to make sure it's upgraded to the latest dashboard spec version
    raw_specification = upgrade_dashboard_specification(raw_specification)
    updated_specification = related.to_model(DashboardSpecification,
                                             raw_specification)

    # set the new coordinates for the newest item we just created
    (upper_x, upper_y) = get_coordinates_for_new_chart(updated_specification)
    new_item = list(converted_specification.items.values())[0]
    new_item.layout_metadata.upper_x = upper_x
    new_item.layout_metadata.upper_y = upper_y

    # get all the top-level dictionaries from the specification to update
    date_ranges = updated_specification.date_ranges
    items = updated_specification.items
    queries = updated_specification.queries
    settings = updated_specification.settings
    filters = updated_specification.filters

    # merge in all the top-level dictionaries from the specification we just
    # created using the query the user submitted
    items.update(converted_specification.items)
    queries.update(converted_specification.queries)
    settings.update(converted_specification.settings)
    date_ranges.update(converted_specification.date_ranges)
    filters.update(converted_specification.filters)

    return related.to_dict(updated_specification)
def update_detector(self, detector):
    """
    * The service requires 'type' on update, but we treat it as immutable.
    * If values for 'detector_config', 'enabled', or 'trusted' are not
      passed, the current value is used.
    """
    update_request = related.to_dict(detector)
    del update_request["training_interval"]
    del update_request["lastUpdateTimestamp"]
    del update_request["createdBy"]
    del update_request["meta"]
    response = requests.put(
        f"{self._url}/api/v2/detectors?uuid={detector.uuid}",
        json=update_request,
        timeout=30,
    )
    response.raise_for_status()
    return self.get_detector(detector.uuid)
def _upgrade_2019_09_11_specification(specification):
    specification_keys_to_keep = [
        'options',
        'dateRanges',
        'filters',
        'items',
        'queries',
        'settings',
        'version',
    ]
    new_specification = {
        key: specification[key] for key in specification_keys_to_keep
    }
    new_specification['textItems'] = {}
    upgraded_specification = related.to_model(DashboardSpecification_20190918,
                                              new_specification)
    upgraded_specification.version = VERSION_2019_09_18
    return related.to_dict(upgraded_specification)
def create(cls, step_result, scenario_result):
    if step_result is None:
        output = related.to_dict(scenario_result.scenario)
        output['__file__'] = scenario_result.case.file_path
        return cls(keyword="",
                   line=3,
                   name="Scenario Setup",
                   doc_string=DocString.section("SCENARIO", output),
                   match=Match(),
                   result=StatusResult.create(True, 0))
    else:
        return cls(
            keyword="",
            line=3,
            name=step_result.step.description,
            doc_string=DocString.create(step_result),
            match=Match.create(step_result.step),
            result=StatusResult.create(step_result.success,
                                       step_result.duration),
        )
def _upgrade_2019_08_23_specification(specification):
    from config.ui import FILTER_ORDER, DEFAULT_DATE_PICKER_TYPE, DEFAULT_FILTER_OPTIONS

    old_options = specification['options']
    new_options = {
        'columnCount': old_options['columnCount'],
        'title': old_options['title'],
        'filterPanelSettings': {
            'showDashboardFilterButton': old_options['showDashboardFilterButton'],
            'datePickerType': DEFAULT_DATE_PICKER_TYPE,
            'filterPanelComponents': DEFAULT_FILTER_OPTIONS,
            'enabledFilters': FILTER_ORDER,
        },
    }
    specification['options'] = new_options

    # NOTE(pablo): there was a bug in the 2019-05-24 upgrade where some default
    # grouping items were not created. This fixes it for any dashboards that
    # were already upgraded after 2019-06-10:
    for layout_item in list(specification['items'].values()):
        settings = specification['settings'][layout_item['settingId']]
        groupings = settings['groupBySettings']['groupings']
        has_string_dimension = False
        for grouping_obj in groupings.values():
            if grouping_obj['type'] == 'STRING':
                has_string_dimension = True
                break

        if not has_string_dimension:
            groupings['nation'] = {
                'id': 'nation',
                'type': 'STRING',
                'displayValueFormat': 'DEFAULT',
                'label': None,
            }

    upgraded_specification = related.to_model(DashboardSpecification_20190826,
                                              specification)
    upgraded_specification.version = VERSION_2019_08_26
    return related.to_dict(upgraded_specification)
def disable_detectors_for_metric_configs(metric_configs):
    exit_code = 0
    for metric_config in metric_configs:
        metric = Metric(related.to_dict(metric_config),
                        metric_config.datasource)
        try:
            disabled_detectors = metric.disable_detectors()
            for detector in disabled_detectors:
                logging.info(
                    f"Detector/Detector Mapping with UUID '{detector.uuid}' disabled."
                )
            if not disabled_detectors:
                logging.info(
                    f"No detectors to disable for metric '{metric_config.name}'"
                )
        except Exception as e:
            logging.exception(
                f"Exception {e.__class__.__name__} while disabling detectors for metric {metric_config.name}! Skipping!"
            )
            trace = traceback.format_exc()
            logging.debug(f"Traceback: {trace}")
            exit_code = 1
    return exit_code
def create_detector(self, detector):
    detector.created_by = self._user
    create_detector_request = related.to_dict(detector,
                                              suppress_empty_values=True)
    create_detector_response = requests.post(
        f"{self._url}/api/v2/detectors", json=create_detector_request, timeout=30
    )
    create_detector_response.raise_for_status()
    detector_uuid = create_detector_response.text

    # Poll until the newly created detector is readable from the model
    # service or the timeout elapses.
    create_detector_start = datetime.datetime.now()
    create_detector_elapsed = create_detector_start - datetime.datetime.now()
    detector = None
    while (
        not detector
        and create_detector_elapsed.total_seconds() < CREATE_DETECTOR_TIMEOUT
    ):
        time.sleep(1.0)
        detector = self.get_detector(detector_uuid)
        create_detector_elapsed = datetime.datetime.now() - create_detector_start

    if detector:
        return detector
    else:
        raise DetectorBuilderClientError(
            f"Timeout waiting for detector uuid '{detector_uuid}' to be available from model service."
        )
def as_dict(self):
    kwargs = related.to_dict(self)
    kwargs.pop("profiles", None)
    kwargs.pop("file_path", None)
    return kwargs
def test_yaml(company):
    company_yaml = to_yaml(to_dict(company))
    assert ("uuid: %s" % company.uuid) in company_yaml
    assert ("url: %s" % company.url.geturl()) in company_yaml