def assemble_view(
    cls,
    view_name: str,
    sql_table_name: str = None,
    derived_table: str = None,
    dimensions: List[dict] = None,
    dimension_groups: List[dict] = None,
    measures: List[dict] = None,
    sets: List[dict] = None,
    parameters: List[dict] = None,
    label: str = None,
    required_access_grants: list = None,
    extends: str = None,
    extension_is_required: bool = False,
    include_suggestions: bool = True,
):
    """Assemble a LookML view and return it serialized as an lkml string.

    Exactly one view source is required: `sql_table_name`, `derived_table`
    or `extends`; a DbteaException is raised when all three are absent.
    Optional properties and field collections are only emitted when provided.
    """
    logger.info("Creating LookML View: {}".format(view_name))

    # Validate inputs: the view must have some source to select from.
    if not (sql_table_name or derived_table or extends):
        raise DbteaException(
            name="missing-lookml-view-properties",
            title="Missing Necessary LookML View Properties",
            detail="Created LookML Views must specify either a `sql_table_name`, `derived_table` or `extends` in order "
            "to properly specify the view source",
        )

    view_body = {"name": view_name}

    # Optional view-level options, inserted in the order lkml should emit them.
    if label:
        view_body["label"] = label
    if extends:
        view_body["extends"] = extends
    if extension_is_required:
        view_body["extension"] = "required"
    if sql_table_name:
        view_body["sql_table_name"] = sql_table_name
    if derived_table:
        view_body["derived_table"] = derived_table
    if required_access_grants:
        view_body["required_access_grants"] = required_access_grants
    if not include_suggestions:
        view_body["suggestions"] = "no"

    # Field collections forming the body of the view.
    for field_key, field_value in (
        ("parameters", parameters),
        ("dimensions", dimensions),
        ("dimension_groups", dimension_groups),
        ("measures", measures),
        ("sets", sets),
    ):
        if field_value:
            view_body[field_key] = field_value

    return lkml.dump({"view": view_body})
def fetch_dbt_project_directory(custom_project_directory: str = None) -> str:
    """Return path to the base of the closest dbt project by traversing from current working directory
    backwards in order to find a dbt_project.yml file.

    If an optional custom project path is specified (which should be a full path to the base project
    path of a dbt project), return that directory instead.
    """
    # Custom directory short-circuits the upward search entirely.
    if custom_project_directory:
        if os.path.exists(assemble_path(custom_project_directory, DBT_PROJECT_FILE)):
            return custom_project_directory
        raise DbteaException(
            name="invalid-custom-dbt-project-directory",
            title="No dbt project found at supplied custom directory",
            detail="No dbt_project.yml file found at supplied custom project directory {}, confirm your "
            "custom project directory is valid".format(custom_project_directory),
        )

    # Walk from the current working directory up toward the filesystem root,
    # stopping at the first directory containing a dbt_project.yml.
    candidate = os.getcwd()
    filesystem_root = os.path.abspath(os.sep)
    while candidate != filesystem_root:
        if os.path.exists(assemble_path(candidate, DBT_PROJECT_FILE)):
            logger.info(
                "Running dbtea against dbt project at path: {}".format(candidate)
            )
            return candidate
        candidate = os.path.dirname(candidate)

    raise DbteaException(
        name="missing-dbt-project",
        title="No dbt project found",
        detail="No dbt_project.yml file found in current or any direct parent paths. You need to run dbtea "
        "from within dbt project in order to use its tooling, or supply a custom project directory",
    )
def parse_json_file(json_file_path: str) -> dict:
    """Parse JSON file to Python dictionary."""
    # Guard first so the happy path reads straight through.
    if file_exists(json_file_path):
        with open(json_file_path, "r") as json_stream:
            return json.load(json_stream)
    raise DbteaException(
        name="missing-json-file",
        title="JSON file set to parse is missing",
        detail="Attempted to parse JSON file at path {}, however this path is not a file".format(
            json_file_path
        ),
    )
def parse_yaml_file(yaml_file_path: str) -> dict:
    """Parse dbt config YAML file to Python dictionary."""
    # Guard first so the happy path reads straight through.
    if file_exists(yaml_file_path):
        with open(yaml_file_path, "r") as yaml_stream:
            # safe_load returns None for an empty document; normalize to {}.
            return yaml.safe_load(yaml_stream) or {}
    raise DbteaException(
        name="missing-yaml-file",
        title="YAML file set to parse is missing",
        detail="Attempted to parse YAML file at path {}, however this path is not a file".format(
            yaml_file_path
        ),
    )
def from_existing_config_file(cls, local_config_file_path: str):
    """Construct a config object from an existing local Dbtea config file.

    Args:
        local_config_file_path: Full path to a local Dbtea YAML config file.

    Returns:
        An instance of `cls` populated with the file name, parent directory
        and parsed YAML config data.

    Raises:
        DbteaException: If no file exists at the supplied path.
    """
    if not utils.file_exists(local_config_file_path):
        # Bug fixes vs. prior version: the exception was *returned* instead of
        # raised, the name/title values were swapped relative to every other
        # DbteaException in this module (name = kebab-case slug, title =
        # human-readable sentence), and the `{}` placeholder in `detail` was
        # never formatted with the offending path.
        raise DbteaException(
            name="invalid-dbtea-config-file-local-path",
            title="Specified Dbtea config local path does not exist",
            detail="You have specified a local config file path to a Dbtea config file which does not exist, "
            "check to confirm file at path: {} exists or any expected environment variables are specified".format(
                local_config_file_path
            ),
        )
    return cls(
        file_name=Path(local_config_file_path).name,
        config_dir=Path(local_config_file_path).parent,
        config_data=utils.parse_yaml_file(local_config_file_path),
    )
def _parse_artifact(self, artifact_file: str):
    """Load a dbt artifact JSON file from the project target path and parse it.

    Warns (but proceeds) when the file name is not a known dbt artifact;
    raises DbteaException when no file exists at the resolved path.
    """
    if artifact_file not in ARTIFACT_DATA_FILES.values():
        logger.warning(
            "You have specified an artifact file which is not in the list of known dbt artifacts"
        )

    artifact_path = utils.assemble_path(
        self.project_root, self.target_path, artifact_file
    )
    if utils.file_exists(artifact_path):
        return utils.parse_json_file(artifact_path)

    raise DbteaException(
        name="artifact-file-missing",
        title="Artifact file {} is missing".format(artifact_file),
        detail="There is no artifact {} at path {}. You may not have yet generated this artifact and "
        "need to run models, source freshness or docs generation".format(
            artifact_file, artifact_path
        ),
    )
def create_lookml_model(
    model_name: str,
    output_to: str = "stdout",
    connection: str = None,
    label: str = None,
    includes: list = None,
    explores: List[dict] = None,
    access_grants: List[dict] = None,
    tests: List[dict] = None,
    datagroups: List[dict] = None,
    map_layers: List[dict] = None,
    named_value_formats: List[dict] = None,
    fiscal_month_offset: int = None,
    persist_for: str = None,
    persist_with: str = None,
    week_start_day: str = None,
    case_sensitive: bool = True,
    output_directory: str = None,
) -> Optional[str]:
    """Assemble a LookML model and emit it to stdout or a `.model.lkml` file.

    Returns the serialized lkml string when `output_to` is "stdout";
    writes `<model_name>.model.lkml` into `output_directory` (and returns
    None) when `output_to` is "file". Raises DbteaException on an invalid
    `output_to` value or a missing output directory for file output.
    """
    logger.info("Creating LookML Model: {}".format(model_name))

    # Validate inputs before assembling anything.
    if output_to not in OUTPUT_TO_OPTIONS:
        raise DbteaException(
            name="invalid-lookml-model-properties",
            title="Invalid LookML Model Properties",
            detail="You must choose a valid output_to option from the following: {}".format(
                OUTPUT_TO_OPTIONS
            ),
        )
    if output_to == "file" and not output_directory:
        raise DbteaException(
            name="missing-output-directory",
            title="No Model Output Directory Specified",
            detail="You must include an output_directory param if outputting model to a file",
        )

    model_body = dict()

    # Optional model-level options, inserted in the order lkml should emit them.
    for option_key, option_value in (
        ("connection", connection),
        ("label", label),
        ("includes", includes),
        ("persist_for", persist_for),
        ("persist_with", persist_with),
        ("fiscal_month_offset", fiscal_month_offset),
        ("week_start_day", week_start_day),
    ):
        if option_value:
            model_body[option_key] = option_value
    if not case_sensitive:
        model_body["case_sensitive"] = "no"

    # Body of the model: datagroups, grants, explores and formatting maps.
    for body_key, body_value in (
        ("datagroups", datagroups),
        ("access_grants", access_grants),
        ("explores", explores),
        ("named_value_formats", named_value_formats),
        ("map_layers", map_layers),
        ("tests", tests),
    ):
        if body_value:
            model_body[body_key] = body_value

    if output_to == "stdout":
        return lkml.dump(model_body)

    model_file_name = utils.assemble_path(
        output_directory, model_name + ".model.lkml"
    )
    with open(model_file_name, "w") as output_stream:
        output_stream.write(lkml.dump(model_body))