def test_query_all_aliases(self):
  """Tests query for all aliases"""
  def rhs(model, attr):  # pylint:disable=missing-docstring
    attr = getattr(model, attr, None)
    if attr is not None and hasattr(attr, "_query_clause_element"):
      # pylint:disable=protected-access
      class_name = attr._query_clause_element().type.__class__.__name__
      if class_name == "Boolean":
        return "1"
    return "1/1/2015"

  def data(model, attr, field):
    return [{
        "object_name": model.__name__,
        "fields": "all",
        "filters": {
            "expression": define_op_expr(field.lower(), "=",
                                         rhs(model, attr)),
        }
    }]

  failed = set()
  for model in set(get_exportables().values()):
    # pylint:disable=protected-access
    for attr, field in AttributeInfo(model)._aliases.items():
      if field is None:
        continue
      try:
        field = field["display_name"] if isinstance(field, dict) else field
        res = self.export_csv(data(model, attr, field))
        self.assertEqual(res.status_code, 200)
      except Exception as err:  # pylint:disable=broad-except
        failed.add((model, attr, field, err))
  self.assertEqual(sorted(failed), [])
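The test above leans on a `define_op_expr` helper that is not defined in this snippet. A minimal sketch of what it presumably returns, assuming the `left`/`op`/`right` filter-expression shape used by GGRC's query API; the real helper may differ:

def define_op_expr(left, op_name, right):
  """Hypothetical sketch of the helper assumed by the test above."""
  return {
      "left": left,              # attribute alias, lower-cased by the caller
      "op": {"name": op_name},   # comparison operator, e.g. "="
      "right": right,            # literal produced by rhs()
  }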
def test_importable_only(self):
  """Test that importable-only objects are absent from exportables."""
  result_importables = set(get_importables().keys())
  result_exportables = set(get_exportables().keys())
  importable_only = set(GGRC_IMPORTABLE_ONLY.keys())
  self.assertTrue(importable_only.issubset(result_importables))
  self.assertFalse(importable_only.issubset(result_exportables))
def __init__(self, ie_job):
  self.new_objects = defaultdict(structures.CaseInsensitiveDict)
  self.shared_state = {}
  self.response_data = []
  self.cache_manager = cache_utils.get_cache_manager()
  self.ie_job = ie_job
  self.exportable = get_exportables()
def __init__(self, query, ca_disabled=False):
  exportables = get_exportables()
  self.object_map = {o.__name__: o for o in exportables.values()}
  self.query = self._clean_query(query)
  self.ca_disabled = ca_disabled
  self._set_attr_name_map()
  self._count = 0
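For context, the `query` passed in here carries the same export-request shape that `data()` builds in the alias test above; a hedged example payload, with the object name and filter values purely illustrative:

query = [{
    "object_name": "Program",  # example model name
    "fields": "all",
    "filters": {
        "expression": {
            "left": "title",
            "op": {"name": "="},
            "right": "My program",
        },
    },
}]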
def parse_item(self):
  """Parse object type field for assessment templates."""
  exportables = converters.get_exportables()
  object_type = exportables.get(self.raw_value.strip().lower())
  if not object_type:
    self.add_error(errors.WRONG_VALUE_ERROR, column_name=self.display_name)
    return
  return object_type.__name__
def __init__(self, row_converter, key, **options):
  self.key = key
  exportable = get_exportables()
  self.attr_name = options.get("attr_name", "")
  self.mapping_object = exportable.get(self.attr_name)
  self.new_slugs = row_converter.block_converter.converter.new_objects[
      self.mapping_object]
  self.unmap = self.key.startswith(AttributeInfo.UNMAPPING_PREFIX)
  super(MappingColumnHandler, self).__init__(row_converter, key, **options)
def parse_item(self):
  """Parse object type field for assessment templates."""
  exportables = converters.get_exportables()
  object_type = exportables.get(self.raw_value.strip().lower())
  if not object_type:
    self.add_error(errors.WRONG_VALUE_ERROR, column_name=self.display_name)
    return
  return object_type.__name__
def __init__(self, **kwargs):
  self.dry_run = kwargs.get("dry_run", True)
  self.csv_data = kwargs.get("csv_data", [])
  self.ids_by_type = kwargs.get("ids_by_type", [])
  self.block_converters = []
  self.new_objects = defaultdict(structures.CaseInsensitiveDict)
  self.shared_state = {}
  self.response_data = []
  self.exportable = get_exportables()
  self.indexer = get_indexer()
def count_objects(csv_data):
  """Count objects in csv data. Collect errors info."""

  def get_info(name, rows, **error):
    """Create new info"""
    info = {
        "name": name,
        "rows": rows,
        "created": 0,
        "updated": 0,
        "ignored": 0,
        "deleted": 0,
        "deprecated": 0,
        "block_warnings": [],
        "block_errors": [],
        "row_warnings": [],
        "row_errors": [],
    }
    if error:
      if 'snapshot' in name.lower():
        info["block_errors"].append(
            errors.SNAPSHOT_IMPORT_ERROR.format(**error))
      else:
        info["block_errors"].append(
            errors.WRONG_OBJECT_TYPE.format(**error))
    return info

  exportables = get_exportables()
  offsets_and_data_blocks = split_blocks(csv_data)
  blocks_info = []
  failed = False
  counts = {}
  for offset, data, _ in offsets_and_data_blocks:
    class_name = data[1][0].strip().lower()
    object_class = exportables.get(class_name, "")
    rows = len(data) - 2
    if 'snapshot' in class_name:
      blocks_info.append(
          get_info(class_name.title(), rows,
                   line=offset + 2, object_name=class_name))
      failed = True
    elif object_class:
      object_name = object_class.__name__
      blocks_info.append(get_info(object_name, rows))
      counts[object_name] = counts.get(object_name, 0) + rows
    else:
      blocks_info.append(
          get_info("", rows, line=offset + 2, object_name=class_name))
      failed = True
  return counts, blocks_info, failed
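A hedged usage sketch of `count_objects`, assuming the importer's block layout in which row 0 is the "Object type" marker, row 1 names the object type in its first cell, and each later row is one object (hence `rows = len(data) - 2`), and assuming `split_blocks` yields this single block:

csv_data = [
    ["Object type"],
    ["Program", "Code*", "Title*"],  # row 1, first cell is the type name
    ["", "PROGRAM-1", "My program"],
]
counts, blocks_info, failed = count_objects(csv_data)
# Expected under these assumptions: counts == {"Program": 1} and
# failed is False, since "program" is exportable and not a snapshot block.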
def test_object_column_handlers(self):
  """Test column handlers on all exportable objects.

  This function makes sure that we don't get the wrong handlers when
  fetching object column definitions. If a column has a specified
  handler_key, then the appropriate handler must override the default
  handler for the column with the same name.

  Raises:
    AssertionError if any unexpected column handlers are found.
  """

  def test_single_object(obj):
    """Test column handlers for a single object.

    Args:
      obj: sqlalchemy model.

    Raises:
      AssertionError if object definition contains the wrong handler.
    """
    handlers = column_handlers.COLUMN_HANDLERS
    column_definitions = import_helper.get_object_column_definitions(obj)
    for key, value in column_definitions.items():
      if key in handlers:
        handler_key = value.get("handler_key", key)
        self.assertEqual(
            value["handler"],
            handlers[handler_key],
            "Object '{}', column '{}': expected {}, found {}".format(
                obj.__name__,
                key,
                handlers[handler_key].__name__,
                value["handler"].__name__,
            )
        )

  verification_errors = []
  for obj in set(converters.get_exportables().itervalues()):
    try:
      test_single_object(obj)
    except AssertionError as error:
      verification_errors.append(str(error))
  verification_errors.sort()
  self.assertEqual(verification_errors, [])
def count_objects(csv_data):
  """Count objects in csv data. Collect errors info."""

  def get_info(name, rows, **error):
    """Create new info"""
    info = {
        "name": name,
        "rows": rows,
        "created": 0,
        "updated": 0,
        "ignored": 0,
        "deleted": 0,
        "deprecated": 0,
        "block_warnings": [],
        "block_errors": [],
        "row_warnings": [],
        "row_errors": [],
    }
    if error:
      info["block_errors"].append(errors.WRONG_OBJECT_TYPE.format(**error))
    return info

  exportables = get_exportables()
  offsets, data_blocks = split_array(csv_data)
  blocks_info = []
  failed = False
  counts = {}
  for offset, data in zip(offsets, data_blocks):
    if len(data) < 2:
      continue  # empty block
    class_name = data[1][0].strip().lower()
    object_class = exportables.get(class_name, "")
    rows = len(data) - 2
    if object_class:
      object_name = object_class.__name__
      blocks_info.append(get_info(object_name, rows))
      counts[object_name] = counts.get(object_name, 0) + rows
    else:
      blocks_info.append(
          get_info("", rows, line=offset + 2, object_name=class_name))
      failed = True
  return counts, blocks_info, failed
def count_objects(csv_data):
  """Count objects in csv data. Collect errors info."""

  def get_info(name, rows, **error):
    """Create new info"""
    info = {
        "name": name,
        "rows": rows,
        "created": 0,
        "updated": 0,
        "ignored": 0,
        "deleted": 0,
        "deprecated": 0,
        "block_warnings": [],
        "block_errors": [],
        "row_warnings": [],
        "row_errors": [],
    }
    if error:
      info["block_errors"].append(errors.WRONG_OBJECT_TYPE.format(**error))
    return info

  exportables = get_exportables()
  offsets_and_data_blocks = split_blocks(csv_data)
  blocks_info = []
  failed = False
  counts = {}
  for offset, data, _ in offsets_and_data_blocks:
    class_name = data[1][0].strip().lower()
    object_class = exportables.get(class_name, "")
    rows = len(data) - 2
    if object_class:
      object_name = object_class.__name__
      blocks_info.append(get_info(object_name, rows))
      counts[object_name] = counts.get(object_name, 0) + rows
    else:
      blocks_info.append(
          get_info("", rows, line=offset + 2, object_name=class_name))
      failed = True
  return counts, blocks_info, failed
def __init__(self):
  self.new_objects = defaultdict(structures.CaseInsensitiveDict)
  self.shared_state = {}
  self.response_data = []
  self.exportable = get_exportables()
from logging import getLogger

from ggrc.converters import get_exportables
from ggrc.converters import import_helper
from ggrc.gdrive import file_actions as fa
from ggrc.models import all_models
from ggrc.models import background_task
from ggrc.models import exceptions as models_exceptions
from ggrc.models import import_export
from ggrc.notifications import job_emails
from ggrc.query import builder
from ggrc.query import exceptions as query_exceptions
from ggrc.utils import benchmark
from ggrc.utils import errors as app_errors

EXPORTABLES_MAP = {exportable.__name__: exportable
                   for exportable in get_exportables().values()}
IGNORE_FIELD_IN_TEMPLATE = {
    "Assessment": {"evidences_file", "end_date"},
    "Audit": {"evidences_file"},
}

# pylint: disable=invalid-name
logger = getLogger(__name__)


def check_required_headers(required_headers):
  """Check the required headers against the current request."""
  headers_errors = []
  for header, valid_values in required_headers.items():
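EXPORTABLES_MAP keys each exportable model class by its class name, which lets template-building code resolve a type name and skip its ignored fields. A short illustration of the lookup, using "Assessment" only as an example key:

# Illustrative lookup; "Assessment" resolves only if that model is
# registered as exportable.
model = EXPORTABLES_MAP.get("Assessment")
if model is not None:
  # Fields listed here are dropped when building this model's template.
  ignored_fields = IGNORE_FIELD_IN_TEMPLATE.get(model.__name__, set())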
def get_exportables():
  """Get all exportable models except Snapshot."""
  exportables = set(converters.get_exportables().values())
  exportables.discard(all_models.Snapshot)
  return exportables