def test_get_models_throws_if_project_does_not_exist(
    fc: fetcher.Fetcher, project, model
):
    """get_models() must raise NotFoundError for a nonexistent project/model."""
    with pytest.raises(exceptions.NotFoundError) as exc_info:
        fc.get_models(project=project, model=model)
    # Message text is pinned to the fetcher's actual error output.
    assert "An error occured while getting projects." in str(exc_info.value)
def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):
    """get_models() must honor the project filter, the model filter, and both."""
    expected_model = test_model["name"]

    by_project = fc.get_models(project=test_project_name)
    assert all(m.project_name == test_project_name for m in by_project)

    by_model = fc.get_models(model=expected_model)
    assert all(m.name == expected_model for m in by_model)

    by_both = fc.get_models(project=test_project_name, model=expected_model)
    for m in by_both:
        assert m.project_name == test_project_name
        assert m.name == expected_model
def _analyze_models(self, project=None, model=None, sortkey=None,
                    limit=None, timeframe=90, min_queries=0):
    """Summarize each model: explore count, unused explores, query runs.

    Args:
        project: optional project name to filter models by.
        model: optional model name to filter by.
        sortkey: column to sort the result rows on (passed to dc.sort).
        limit: maximum number of rows to return (passed to dc.limit).
        timeframe: lookback window in days for usage queries.
        min_queries: minimum query count for an explore to count as "used".

    Returns:
        A list of dicts with keys: project, model, explore_count,
        unused_explores, query_run_count.
    """
    models = fetcher.get_models(self, project=project, model=model, verbose=1)
    used_models = fetcher.get_used_models(self, timeframe, min_queries)
    info = []
    for m in models:
        unused_explores = fetcher.get_unused_explores(
            self, m['name'], timeframe, min_queries)
        info.append({
            'project': m['project_name'],
            'model': m['name'],
            'explore_count': len(m['explores']),
            'unused_explores': len(unused_explores),
            # dict.get replaces the if/else membership test.
            'query_run_count': used_models.get(m['name'], 0),
        })
    # Fix: the original read info[0] unconditionally, raising IndexError
    # when no models matched the filters.
    if not info:
        return info
    valid_values = list(info[0].keys())
    info = dc.sort(info, valid_values, sortkey)
    info = dc.limit(info, limit=limit)
    return info
def _vacuum_models(self, project=None, model=None, timeframe=90, min_queries=0):
    """Report unused explores and query-run counts per model.

    Args:
        project: optional project name used when discovering models.
        model: optional space-separated string of model names; when None,
            all models (optionally filtered by project) are inspected.
        timeframe: lookback window in days for usage queries.
        min_queries: minimum query count for an explore to count as "used".

    Returns:
        A list of dicts with keys: model, unused_explores (newline-joined
        names or 'None'), model_query_run_count.
    """
    if model is None:
        model = fetcher.get_models(self, project=project)
    else:
        model = model.split()
    # NOTE(review): unlike _analyze_models, min_queries is not forwarded to
    # get_used_models here — confirm this asymmetry is intentional.
    used_models = fetcher.get_used_models(self, timeframe)
    info = []
    for m in model:
        # Fix: removed the dead `explores` list comprehension — it issued an
        # extra get_explores API call per model and its result was never used.
        unused_explores = fetcher.get_unused_explores(
            self, m, timeframe, min_queries)
        info.append({
            'model': m,
            'unused_explores': '\n'.join(unused_explores) or 'None',
            # dict.get replaces the `m in used_models.keys()` conditional.
            'model_query_run_count': used_models.get(m, 0),
        })
    return info
def _analyze_models(self, project=None, model=None, sortkey=None,
                    limit=None, timeframe=90, min_queries=0):
    """Summarize each model with progress output: explores, usage, runs.

    Args:
        project: optional project name to filter models by.
        model: optional model name to filter by.
        sortkey: column to sort the result rows on (passed to styler.sort).
        limit: maximum number of rows to return (passed to styler.limit).
        timeframe: lookback window in days for usage queries.
        min_queries: minimum query count for an explore to count as "used".

    Returns:
        A list of dicts with keys: project, model, explore_count,
        unused_explores, query_run_count.
    """
    print('fetching all models...')
    models = fetcher.get_models(self, project=project, model=model, verbose=1)
    print('complete.')
    print('fetching used models...')
    used_models = fetcher.get_used_models(self, timeframe, min_queries)
    print('complete.')
    info = []
    # Fix: the progress total was len(used_models), but the loop iterates
    # `models` — the "Processing X of Y" message showed the wrong total.
    total = len(models)
    for complete, m in enumerate(models, start=1):
        print('Processing {} of {} models'.format(complete, total))
        unused_explores = fetcher.get_unused_explores(
            self, m['name'], timeframe, min_queries)
        info.append({
            'project': m['project_name'],
            'model': m['name'],
            'explore_count': len(m['explores']),
            'unused_explores': len(unused_explores),
            # dict.get replaces the if/else membership test.
            'query_run_count': used_models.get(m['name'], 0),
        })
    # Fix: the original read info[0] unconditionally, raising IndexError
    # when no models matched the filters.
    if not info:
        return info
    valid_values = list(info[0].keys())
    info = styler.sort(info, valid_values, sortkey)
    info = styler.limit(info, limit=limit)
    return info
def test_get_models_returns_models(fc: fetcher.Fetcher):
    """get_models() must yield a non-empty list of LookmlModel instances."""
    model_list = fc.get_models()
    assert isinstance(model_list, list)
    first = model_list[0]
    assert isinstance(first, models.LookmlModel)