def _get_adapter(self, tmpdir):
    """Build, register, and return a dbt adapter for a project under *tmpdir*.

    Parses a synthetic ``compile`` invocation, validates that the requested
    profile/target/type exist in ``profiles.yml``, loads the adapter plugin,
    and registers an adapter for the resulting runtime config.

    :param tmpdir: directory containing ``profiles.yml`` and a ``project`` dir
    :raises ValueError: if the profile, its outputs, the target, or the
        target's adapter type is missing from ``profiles.yml``
    """
    project_path = os.path.join(tmpdir, 'project')
    args = parse_args([
        'compile',
        '--profile', 'dbt-pytest',
        '--target', 'default',
        '--project-dir', project_path,
        '--profiles-dir', tmpdir,
        '--vars', yaml.safe_dump(self._base_vars()),
    ])

    with open(os.path.join(args.profiles_dir, 'profiles.yml')) as fp:
        data = yaml.safe_load(fp)

    def _lookup(mapping, key, message):
        # Translate a missing key into the user-facing ValueError; the four
        # call sites below replace four copy-pasted try/except blocks.
        try:
            return mapping[key]
        except KeyError:
            raise ValueError(message)

    profile = _lookup(
        data, args.profile, f'profile {args.profile} not found')
    outputs = _lookup(
        profile, 'outputs', f'malformed profile {args.profile}')
    target = _lookup(
        outputs, args.target,
        f'target {args.target} not found in {args.profile}')
    adapter_type = _lookup(
        target, 'type',
        f'target {args.target} in {args.profile} has no type')

    _ = FACTORY.load_plugin(adapter_type)
    config = RuntimeConfig.from_args(args)
    FACTORY.register_adapter(config)
    adapter = FACTORY.lookup_adapter(config.credentials.type)
    return adapter
def invoke_dbt(parsed):
    """Load project/profile configuration for *parsed* args and build a task.

    Returns a ``(task, config)`` tuple on success, or ``None`` when the
    project or profile configuration could not be read (after logging a
    diagnostic and tracking the invalid invocation).
    """
    task = None
    cfg = None

    try:
        if parsed.which in {'deps', 'clean'}:
            # deps doesn't need a profile, so don't require one.
            cfg = Project.from_current_directory()
        elif parsed.which != 'debug':
            # for debug, we will attempt to load the various configurations as
            # part of the task, so just leave cfg=None.
            cfg = RuntimeConfig.from_args(parsed)
    except DbtProjectError as e:
        logger.info("Encountered an error while reading the project:")
        logger.info(dbt.compat.to_string(e))

        profile_names = read_profiles(parsed.profiles_dir).keys()
        if profile_names:
            logger.info("Defined profiles:")
            for name in profile_names:
                logger.info(" - {}".format(name))
        else:
            logger.info("There are no profiles defined in your "
                        "profiles.yml file")

        logger.info(PROFILES_HELP_MESSAGE)

        dbt.tracking.track_invalid_invocation(
            config=cfg, args=parsed, result_type=e.result_type)
        return None
    except DbtProfileError as e:
        logger.info("Encountered an error while reading profiles:")
        logger.info(" ERROR {}".format(str(e)))
        dbt.tracking.track_invalid_invocation(
            config=cfg, args=parsed, result_type=e.result_type)
        return None

    flags.NON_DESTRUCTIVE = getattr(parsed, 'non_destructive', False)

    if getattr(parsed, 'drop_existing', False):
        # --drop-existing is a deprecated alias for --full-refresh.
        dbt.deprecations.warn('drop-existing')
        flags.FULL_REFRESH = True
    elif getattr(parsed, 'full_refresh', False):
        flags.FULL_REFRESH = True

    logger.debug("running dbt with arguments %s", parsed)

    task = parsed.cls(args=parsed, config=cfg)
    return task, cfg
def __init__(self, args):
    """Store the cli args, load the runtime config, resolve the source and
    destination project directories, and construct the schema builder."""
    self.args = args
    self.config = RuntimeConfig.from_args(args)

    source_dir, destination_dir = self.get_project_dirs()
    self.source_project_path = source_dir
    self.destination_project_path = destination_dir

    catalog_task = GetCatalogTask((), self.config)
    self.builder = SchemaBuilder(
        self.config.source_paths[0],
        self.config.credentials.database,
        source_dir,
        destination_dir,
        catalog_task,
    )
def __init__(self, args):
    """Store the cli args, derive the lineage direction from the subcommand,
    and load the runtime config, model path, and manifest.

    :raises ValueError: if ``args.command`` is neither ``show_upstream`` nor
        ``show_downstream``
    """
    self.args = args
    if self.args.command == "show_upstream":
        self.direction = "upstream"
    elif self.args.command == "show_downstream":
        self.direction = "downstream"
    else:
        # A bare `raise` here (the original code) would surface as
        # "RuntimeError: No active exception to re-raise"; fail with an
        # explicit, descriptive error instead.
        raise ValueError(
            "unexpected command {!r}; expected 'show_upstream' or "
            "'show_downstream'".format(self.args.command))
    self.config = RuntimeConfig.from_args(args)
    self.model_path = self.config.source_paths[0]
    self.manifest = self._get_manifest()
def setUp(self):
    """Read test-target settings from the environment and set up a fresh
    adapter against the 'modeltests' profile."""
    self.database = os.getenv('DBT_MODEL_TEST_DATABASE')
    self.schema = os.getenv('DBT_MODEL_TEST_SCHEMA')
    self.identifier_prefix = os.getenv('DBT_MODEL_TEST_IDENTIFIER_PREFIX')

    # Drop any adapter state left over from a previous test.
    reset_adapters()

    config = RuntimeConfig.from_args(TestArgs({
        'profile': 'modeltests',
        'profiles_dir': 'conf/',
        'target': None,
    }))
    register_adapter(config)

    adapter = get_adapter(config)
    adapter.cleanup_connections()
    self.adapter = adapter
def built_schema(project_dir, schema, profiles_dir, test_kwargs, project_def):
    """Fixture generator: create *schema* for the project, yield, then drop it.

    :param project_dir: directory the project definition is written into
    :param schema: schema name to create before and drop after the test
    :param profiles_dir: directory containing profiles.yml
    :param test_kwargs: extra kwargs forwarded to TestArgs
    :param project_def: project definition with a ``write_to`` method
    """
    # make our args, write our project out
    args = TestArgs(profiles_dir=profiles_dir, kwargs=test_kwargs)
    project_def.write_to(project_dir)
    # build a config of our own
    os.chdir(project_dir)
    # NOTE(review): `start` is captured *after* the chdir, so the finally
    # below restores us to project_dir, not the original cwd — looks like the
    # two lines above are swapped; confirm before changing, as later code may
    # rely on cwd being project_dir.
    start = os.getcwd()
    try:
        cfg = RuntimeConfig.from_args(args)
    finally:
        os.chdir(start)
    register_adapter(cfg)
    adapter = get_adapter(cfg)
    execute(adapter, 'drop schema if exists {} cascade'.format(schema))
    execute(adapter, 'create schema {}'.format(schema))
    try:
        yield
    finally:
        # Run teardown even when the consuming test raises, so the schema
        # does not leak into subsequent runs.
        adapter = get_adapter(cfg)
        adapter.cleanup_connections()
        execute(adapter, 'drop schema if exists {} cascade'.format(schema))
def load_config(self):
    """Instantiate a fresh adapter and connection for the tests.

    The profile and project have already been written; a separate connection
    handle is used so we don't look into an incomplete transaction.
    """
    config = RuntimeConfig.from_args(TestArgs({
        "profile": None,
        "profiles_dir": self.dbt_config_dir,
        "target": None,
    }))

    adapter = get_adapter(config)
    adapter.cleanup_connections()

    connection = adapter.acquire_connection("__test")
    self.connection = connection
    self.adapter_type = connection.type
    self.adapter = adapter
    self.config = config
def load_config(self):
    """Instantiate a fresh adapter for the tests and (re)create the schemas.

    A separate adapter/connection is used so we don't look into an
    incomplete transaction from the profile/project setup.
    """
    kwargs = {
        'profile': None,
        # fixed: this key was 'profile_dir', which nothing reads — the
        # sibling load_config and setUp both spell it 'profiles_dir', so the
        # typo silently fell back to the default profiles directory.
        'profiles_dir': DBT_CONFIG_DIR,
        'target': None,
    }
    config = RuntimeConfig.from_args(TestArgs(kwargs))
    adapter = get_adapter(config)
    adapter.cleanup_connections()

    self.adapter_type = adapter.type()
    self.adapter = adapter
    self.config = config

    self._drop_schemas()
    self._create_schemas()
def __init__(self, args):
    """Store the parsed cli args and derive the dbt runtime configuration."""
    self.args = args
    self.config = RuntimeConfig.from_args(args)
def __init__(self, args):
    """Store the cli args, load the runtime config, and read the run results
    from the configured target path."""
    self.args = args
    config = RuntimeConfig.from_args(args)
    self.config = config
    self.target_path = config.target_path
    self.run_results = self._get_run_results()
def __init__(self, args):
    """Store the cli args, load the runtime config, and resolve the model
    path, target path, and manifest."""
    self.args = args
    config = RuntimeConfig.from_args(args)
    self.config = config
    self.model_path = config.source_paths[0]
    self.target_path = config.target_path
    self.manifest = self._get_manifest()