def run_tests_from_graph(self, test_schemas, test_data):
    """Compile and run schema and/or data tests from the serialized graph.

    :param test_schemas: when truthy, include schema tests in the run
    :param test_data: when truthy, include data tests in the run
    :returns: the results returned by ``self.execute_models``
    Exits the process (``sys.exit(1)``) if the target database is unreachable.
    """
    linker = self.deserialize_graph()
    compiled_models = [
        make_compiled_model(fqn, linker.get_node(fqn))
        for fqn in linker.nodes()
    ]

    schema_name = self.target.schema

    # Use the module logger (as run_from_graph does) rather than print()
    # so this output respects dbt's logging configuration.
    logger.info("Connecting to redshift")
    try:
        self.schema.create_schema_if_not_exists(schema_name)
    except psycopg2.OperationalError as e:
        logger.info(
            "ERROR: Could not connect to the target database. Try "
            "`dbt debug` for more information"
        )
        logger.info(str(e))
        sys.exit(1)

    test_runner = TestRunner()

    # Partition the compiled models into the two test families the caller
    # asked for; an unrequested family contributes an empty list.
    if test_schemas:
        schema_tests = [
            m for m in compiled_models
            if m.is_test_type(test_runner.test_schema_type)
        ]
    else:
        schema_tests = []

    if test_data:
        data_tests = [
            m for m in compiled_models
            if m.is_test_type(test_runner.test_data_type)
        ]
    else:
        data_tests = []

    all_tests = schema_tests + data_tests

    # Compile each runnable test with a per-model copy of the base context.
    for m in all_tests:
        if m.should_execute():
            context = self.context.copy()
            context.update(m.context())
            m.compile(context)

    # Schema tests run as one dependency group, data tests as the next.
    dep_list = [schema_tests, data_tests]

    on_failure = self.on_model_failure(linker, all_tests)
    results = self.execute_models(test_runner, dep_list, on_failure)
    return results
def run_from_graph(self, runner, limit_to):
    """Compile and execute the graph's models of ``runner.run_type``.

    :param runner: runner object supplying ``run_type`` and execution hooks
    :param limit_to: optional iterable of model names to restrict the run
        to; ``None`` means run everything of the matching type
    :returns: the results returned by ``self.execute_models``
    Exits the process (``sys.exit(1)``) if the target database is unreachable.
    """
    logger.info("Loading dependency graph file")
    linker = self.deserialize_graph()
    compiled_models = [
        make_compiled_model(fqn, linker.get_node(fqn))
        for fqn in linker.nodes()
    ]
    relevant_compiled_models = [
        m for m in compiled_models if m.is_type(runner.run_type)
    ]

    # Compile each runnable model with a per-model copy of the base context.
    for m in relevant_compiled_models:
        if m.should_execute(self.args, existing=[]):
            context = self.context.copy()
            context.update(m.context())
            m.compile(context)

    schema_name = self.target.schema

    logger.info("Connecting to redshift")
    try:
        self.schema.create_schema_if_not_exists(schema_name)
    except psycopg2.OperationalError as e:
        # BUG FIX: the adjacent string literals previously concatenated to
        # "Try`dbt debug`" (missing space between the two fragments).
        logger.info("ERROR: Could not connect to the target database. Try "
                    "`dbt debug` for more information")
        logger.info(str(e))
        sys.exit(1)

    existing = self.schema.query_for_existing(schema_name)

    # Resolve any user-specified model names to their fully-qualified names.
    if limit_to is None:
        specified_models = None
    else:
        specified_models = [
            find_model_by_name(relevant_compiled_models, name).fqn
            for name in limit_to
        ]

    model_dependency_list = self.as_concurrent_dep_list(
        linker,
        relevant_compiled_models,
        existing,
        self.target,
        specified_models
    )

    on_failure = self.on_model_failure(linker, relevant_compiled_models)
    results = self.execute_models(
        runner, model_dependency_list, on_failure
    )
    return results
def get_compiled_models(self, linker, nodes, node_type):
    """Return the compiled models of ``node_type`` that should run.

    For each graph node, builds a compiled model, keeps it only if it
    matches ``node_type`` and opts into execution, compiles it against a
    per-model copy of the base context, and collects it.

    :param linker: graph object providing ``get_node(fqn)``
    :param nodes: iterable of fully-qualified node names
    :param node_type: node type to filter on
    :returns: list of compiled model objects
    """
    selected = []
    for fqn in nodes:
        model = make_compiled_model(fqn, linker.get_node(fqn))

        # Skip nodes of the wrong type, and models that opt out of
        # execution for the current args / existing models.
        if not (model.is_type(node_type)
                and model.should_execute(self.args, self.existing_models)):
            continue

        ctx = self.context.copy()
        ctx.update(model.context())
        model.compile(
            ctx, self.project.run_environment(), self.existing_models
        )
        selected.append(model)
    return selected