def import_model(path):
    """Read model metadata from *path* and register its cubes and
    dimensions in the module-level stores (CUBES, DIMENSIONS, MODEL).

    Both the raw metadata and the registered objects are kept, because
    the model creation itself is not reproduced here.
    """
    global MODEL

    log = get_logger()
    log.setLevel("INFO")
    log.info("importing model from %s" % path)

    metadata = read_model_metadata(path)

    # Assign 1-based identifiers and index cubes by their id (as string).
    for index, cube_meta in enumerate(metadata.pop("cubes", []), start=1):
        cube_meta["id"] = index
        CUBES[str(index)] = cube_meta

    # Dimensions get the same treatment, after normalising their metadata.
    for index, dim_meta in enumerate(metadata.pop("dimensions", []), start=1):
        dim_meta = fix_dimension_metadata(dim_meta)
        dim_meta["id"] = index
        DIMENSIONS[str(index)] = dim_meta

    MODEL = metadata
def validate(show_defaults, show_warnings, model_path):
    """Validate model metadata"""
    click.echo("Reading model %s" % model_path)
    model = cubes.read_model_metadata(model_path)

    click.echo("Validating model...")
    result = cubes.providers.validate_model(model)

    # Tally of issues by severity.
    counts = {"error": 0, "warning": 0, "default": 0}

    for error in result:
        # Build a human-readable description of where the issue occurred.
        if error.scope == "model":
            scope = "model"
        elif error.object:
            scope = "%s '%s'" % (error.scope, error.object)
        else:
            scope = "unknown %s" % error.scope

        if error.property:
            scope += " property '%s'" % error.property

        # Errors are always shown; warnings and defaults only on request.
        show = True
        if error.severity == "error":
            counts["error"] += 1
        elif error.severity == "warning":
            counts["warning"] += 1
            show = show_warnings
        elif error.severity == "default":
            counts["default"] += 1
            show = show_defaults

        if show:
            print("%s in %s: %s"
                  % (error.severity.upper(), scope, error.message))

    error_count = counts["error"]
    warning_count = counts["warning"]
    default_count = counts["default"]

    # Pick the verdict for the most severe category that occurred.
    if error_count:
        verdict = "model can not be used"
    elif warning_count:
        verdict = "not recommended to use the model, " \
                  "some issues might emerge"
    elif default_count:
        verdict = "model can be used, " \
                  "make sure that the defaults reflect reality"
    else:
        verdict = "model can be used"

    print("")
    print("Defaults used %d" % default_count)
    print("Warning %d" % warning_count)
    print("Errors %d" % error_count)
    print("Summary %s" % verdict)

    if error_count > 0:
        exit(1)
def test_from_file(self):
    """Metadata loaded from a single JSON file has the expected shape."""
    metadata = cubes.read_model_metadata(self.model_path("model.json"))

    self.assertIsInstance(metadata, dict)
    self.assertIn("cubes", metadata)
    self.assertIn("dimensions", metadata)
    self.assertEqual(1, len(metadata["cubes"]))
    self.assertEqual(6, len(metadata["dimensions"]))
def test_from_file(self):
    """Metadata loaded from a single JSON file has the expected shape."""
    metadata = read_model_metadata(self.model_path("model.json"))

    self.assertIsInstance(metadata, dict)
    self.assertIn("cubes", metadata)
    self.assertIn("dimensions", metadata)
    self.assertEqual(1, len(metadata["cubes"]))
    self.assertEqual(6, len(metadata["dimensions"]))
def test_from_bundle(self):
    """Metadata loads from a .cubesmodel bundle; a plain JSON file is
    rejected by the bundle reader."""
    metadata = cubes.read_model_metadata(self.model_path("test.cubesmodel"))

    self.assertIsInstance(metadata, dict)
    self.assertIn("cubes", metadata)
    self.assertIn("dimensions", metadata)
    self.assertEqual(1, len(metadata["cubes"]))
    self.assertEqual(6, len(metadata["dimensions"]))

    # A single JSON file is not a bundle directory and must raise.
    with self.assertRaises(ArgumentError):
        cubes.read_model_metadata_bundle(self.model_path("model.json"))
def test_from_bundle(self):
    """Metadata loads from a .cubesmodel bundle; a plain JSON file is
    rejected by the bundle reader."""
    metadata = read_model_metadata(self.model_path("test.cubesmodel"))

    self.assertIsInstance(metadata, dict)
    self.assertIn("cubes", metadata)
    self.assertIn("dimensions", metadata)
    self.assertEqual(1, len(metadata["cubes"]))
    self.assertEqual(6, len(metadata["dimensions"]))

    # A single JSON file is not a bundle directory and must raise.
    with self.assertRaises(ArgumentError):
        read_model_metadata_bundle(self.model_path("model.json"))
def import_model(path):
    """Import model metadata from *path* and register its cubes,
    dimensions and joins in the module-level stores (CUBES, DIMENSIONS,
    MODEL).

    We need both the metadata and the created model, as we do not want
    to reproduce the model creation here.
    """
    global MODEL

    cube_id_sequence = count(1)
    dimension_id_sequence = count(1)

    logger = get_logger()
    logger.setLevel("INFO")
    logger.info("importing model from %s" % path)

    metadata = read_model_metadata(path)

    for cube in metadata.pop("cubes", []):
        # FIX: `iterator.next()` is Python 2 only and raises
        # AttributeError on Python 3; use the builtin next() instead.
        cube_id = next(cube_id_sequence)
        cube["id"] = cube_id
        CUBES[str(cube_id)] = cube

    for dim in metadata.pop("dimensions", []):
        dim = expand_dimension_metadata(dim)
        dim_id = next(dimension_id_sequence)
        dim["id"] = dim_id
        DIMENSIONS[str(dim_id)] = dim

    MODEL = metadata

    # Convert joins (of known types)
    # TODO: currently we assume that all JOINS are SQL joins as we have
    # no way to determine the actual store and therefore the backend
    # used for interpreting this model
    joins = metadata.pop("joins", [])
    for join in joins:
        if "detail" in join:
            join["detail"] = _fix_sql_join_value(join["detail"])
        if "master" in join:
            join["master"] = _fix_sql_join_value(join["master"])
        join["__type__"] = "sql"

    MODEL["joins"] = joins