def test_two_false(self):
    """Any() with two failing predicates reports both failure messages."""
    env = {'test_number': 1}
    predicate = predicates.Any(EqualsFour(), GreaterThan(3))
    expected = (
        "At least one of the following predicates must be "
        "met: Number 1 doesn't equal 4, 1 is not greater "
        "than 3"
    )
    self.eval_unmet_predicate(predicate, env, expected)
class DataSetController(BaseController):
    """Controller exposing stored DataSet records and their fetched contents."""

    @expose('etl.templates.datasets.index')
    @require(predicates.not_anonymous())
    def index(self):
        """List every dataset known to the application."""
        return dict(datasets=DBSession.query(DataSet).all())

    @expose('etl.templates.datasets.view')
    @expose(content_type="text/csv")
    @expose(content_type='application/json')
    @require(predicates.Any(predicates.not_anonymous(), is_api_authenticated()))
    @validate({'dataset': Convert(lambda v: DBSession.query(DataSet).filter_by(uid=v).one())},
              error_handler=abort(404, error_handler=True))
    def view(self, dataset, **kw):
        """Render one dataset as HTML, CSV or JSON depending on the request.

        On a fetch failure the error is logged and flashed, and an empty
        result set is rendered instead of aborting the page.
        """
        try:
            frame = dataset.fetch()
        except Exception as exc:
            log.exception('Failed to Retrieve Data')
            flash('ERROR: %s' % exc, 'error')
            return dict(dataset=dataset, columns=[], results=[], count=0)

        # Non-HTML representations are serialized straight from the dataframe.
        if request.response_type == 'text/csv':
            return dateframe_to_csv(frame)
        if request.response_type == 'application/json':
            return dateframe_to_json(frame)

        return dict(dataset=dataset,
                    columns=list(frame.columns),
                    results=list(frame.itertuples()),
                    count=len(frame),
                    py2=py_version < 3)
def test_two_mixed(self):
    """Any() is met when at least one of its two predicates passes."""
    env = {'test_number': 5}
    predicate = predicates.Any(EqualsFour(), GreaterThan(3))
    self.eval_met_predicate(predicate, env)
def test_one_false(self):
    """Any() with a single failing predicate reports that predicate's message."""
    env = {'test_number': 3}
    predicate = predicates.Any(EqualsTwo())
    expected = (
        "At least one of the following predicates must be "
        "met: Number 3 doesn't equal 2"
    )
    self.eval_unmet_predicate(predicate, env, expected)
def test_one_true(self):
    """Any() with a single met predicate is satisfied."""
    env = {'test_number': 2}
    predicate = predicates.Any(EqualsTwo())
    self.eval_met_predicate(predicate, env)
class ExtractionsController(BaseController):
    """Controller for managing extractions, their filters and rendered views."""

    filter = ExtractionFilterController()

    @expose('etl.templates.extractions.index')
    @require(predicates.not_anonymous())
    def index(self, **kw):
        """List extractions grouped by category, plus an uncategorised bucket."""
        categories = DBSession.query(app_model.Category).all()
        uncategorised = DBSession.query(Extraction).filter_by(
            category_id=None).all()
        # Synthetic pseudo-category so uncategorised extractions still render.
        categories += [Bunch(extractions=uncategorised, name="No Category")]
        return dict(categories=categories,
                    has_validation_errors=request.validation.errors,
                    new_form=CreateExtractionForm)

    @expose()
    @require(predicates.in_group('managers'))
    @validate(CreateExtractionForm, error_handler=index)
    def create(self, name, **kw):
        """Create a new, empty extraction with the given name."""
        DBSession.add(Extraction(name=name))
        flash('New Extraction successfully created', 'ok')
        return redirect('./index')

    @expose()
    # NOTE(review): group names differ from create()'s 'managers' —
    # confirm 'manager'/'admin' (singular) are the intended groups here.
    @require(predicates.in_any_group('manager', 'admin'))
    def delete(self, uid):
        """Delete the extraction identified by *uid* (404 when missing)."""
        extraction = DBSession.query(Extraction).get(uid) or abort(404)
        DBSession.delete(extraction)
        flash('Extraction correctly deleted')
        return redirect(tg.url('/extractions'))

    @expose('etl.templates.extractions.view')
    @expose(content_type="text/csv")
    @expose(content_type='application/json')
    @require(predicates.Any(predicates.not_anonymous(), is_api_authenticated()))
    @validate(
        {
            'extraction': Convert(
                lambda v: DBSession.query(Extraction).filter_by(uid=v).one())
        },
        error_handler=abort(404, error_handler=True))
    def view(self, extraction, extraction_filter=None, **kw):
        """Render an extraction (optionally filtered) as HTML, CSV or JSON.

        ``extraction_filter`` is the uid of an ExtractionFilter to apply;
        -1 means "original, unfiltered extraction". When omitted, the
        extraction's default filter (if any) is applied.
        """
        try:
            result = extraction.perform()
        except Exception as e:
            log.exception('Failed to Retrieve Data')
            flash('ERROR RETRIEVING DATA: %s' % e, 'error')
            return redirect('/error')

        e_filter = None
        try:
            if extraction_filter:
                if int(extraction_filter
                       ) != -1:  # -1 = original extraction requested by user
                    e_filter = DBSession.query(ExtractionFilter).get(
                        extraction_filter)
                    if not e_filter:
                        return abort(404)
                    result = e_filter.perform(result)
            else:
                # No explicit filter requested: apply this extraction's
                # default filter when one is configured.
                default = DBSession.query(ExtractionFilter).filter(
                    ExtractionFilter.default == True,
                    ExtractionFilter.extraction_id == extraction.uid).first()
                if default:
                    e_filter = default
                    result = default.perform(result)
        except Exception as e:
            # Best-effort: a broken filter degrades to an empty result
            # set rather than a hard failure.
            log.exception('Failed to Retrieve Data')
            flash('ERROR RETRIEVING DATA: %s' % e, 'error')
            result = DataFrame()

        if request.response_type == 'text/csv':
            return dateframe_to_csv(result)
        elif request.response_type == 'application/json':
            return dateframe_to_json(result)

        visualizations = dict(
            (name, None) for name in extraction.visualization.split('+'))
        axis = []
        if extraction.graph_axis:
            axis = [x.strip() for x in extraction.graph_axis.split(',')]
        visualizations = get_graph(result, axis, visualizations)

        if config.get("extraction.max_elements") is None:
            # Fixed: Logger.warn is a deprecated alias of Logger.warning.
            log.warning(
                "Cannot find max elements to render in config file. Using default 10000"
            )
        if len(result) * len(result.columns) > int(
                config.get("extraction.max_elements", 10000)):
            flash(
                "There are too many data to extract, please add some filters",
                "error")

        filters = DBSession.query(ExtractionFilter).filter_by(
            extraction_id=extraction.uid).all()
        return dict(extraction=extraction,
                    visualizations=visualizations,
                    columns=result.columns,
                    results=result.itertuples(),
                    count=len(result),
                    filters=filters,
                    extraction_filter=e_filter,
                    py2=py_version < 3)

    @expose()
    # NOTE(review): unlike the other actions this one has no @require —
    # confirm anonymous cache invalidation is intended.
    def reload_data(self, extraction):
        """Invalidate cached data for every dataset backing *extraction*."""
        extraction = DBSession.query(Extraction).get(extraction) or abort(404)
        for dts in extraction.datasets:
            empty_cache(dts.dataset.cache_key())
            empty_cache(dts.dataset.cache_key(DEFAULT_LIMIT_FOR_PERFORMANCE))
        flash('Data reloaded')
        return redirect('/extractions/view/' + str(extraction.uid))