Example #1
def field_polling_post(datasetname, columnkey):
    """
    post to check to verify that the column is good
    """

    #print request.get_json().get('columnval', None)
    ORcolumn = request.get_json().get('columnval', None)
    if not ORcolumn:
        return jsonify({"errors": ["could not find the column name"]})

    dataset = get_dataset(datasetname)

    if not require.dataset.update(dataset):
        return jsonify({"errors": ["Permission denied"]})

    try:
        columnsettings = api_form_data()

        #use this later if async run is necessary
        #runop = Run(columnsettings['columnval'], dataset, source)
        #db.session.add(runop)
        #db.session.commit()

        #check_column.apply_async(args=[source.id, columnkey, columnsettings['columnval'], runop.id], countdown=1)
        resultval = check_column(dataset.source.id, columnkey,
                                 columnsettings['columnval'])

        if len(resultval['errors']) == 0:
            return jsonify({"success": True})
        else:
            return jsonify(resultval)
    except Exception as e:
        print "here is my error", e
        return jsonify({"errors": ['Unknown error has occurred']})
Example #2
def save_default_model(datasetname):

    dataset = get_dataset(datasetname)

    if not dataset.mapping or not dataset.source:
        return jsonify({"errors": ["No mapping for this dataset"]})

    if not dataset.dataorg:
        return jsonify({"errors": ["Dataset has no dataorg"]})

    #get the OR instructions from the dataset's source
    ORinstructions = dataset.source.getORInstructions()

    #get the mapping from the dataset
    mapping = dataset.mapping

    dataorg = dataset.dataorg

    dataorg.ORTemplate = {"data": ORinstructions}
    dataorg.mappingTemplate = mapping

    db.session.commit()

    return jsonify({"success": True})
Example #3
def field_polling_post(datasetname, columnkey):
    """
    post to check to verify that the column is good
    """

    #print request.get_json().get('columnval', None)
    ORcolumn = request.get_json().get('columnval', None)
    if not ORcolumn:
        return jsonify({"errors":["could not find the column name"]})

    dataset = get_dataset(datasetname)

    if not require.dataset.update(dataset):
        return jsonify({"errors":["Permission denied"]})

    try:
        columnsettings = api_form_data()

        #use this later if async run is necessary
        #runop = Run(columnsettings['columnval'], dataset, source)
        #db.session.add(runop)
        #db.session.commit()

        #check_column.apply_async(args=[source.id, columnkey, columnsettings['columnval'], runop.id], countdown=1)
        resultval = check_column(dataset.source.id, columnkey, columnsettings['columnval'])

        if len(resultval['errors']) == 0:
            return jsonify({"success":True})
        else:
            return jsonify(resultval)
    except Exception as e:
        print "here is my error", e
        return jsonify({"errors": ['Unknown error has occurred']})
Example #4
def create():
    """
    This takes a json format post with label, name, description
    and creates a private dataset to put sources in
    The json_errors return a json object
    """

    # if not require.dataview.create():
    #     return jsonify({"errors":["Can not create new dataset.  Permission denied"]})

    try:
        dataview_form = api_form_data()
        #run security and validation checks here
        if dataview_form.get('urlhash'):
            dataview = Dataview.by_urlhash(dataview_form['urlhash'])
            dataview.update(dataview_form)
        else:
            dataview = Dataview(dataview_form)
            db.session.add(dataview)

        db.session.commit()
        return jsonify(dataview)
    except Exception as e:
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        return jsonify({"errors": ['Unknown error has occurred: ' + str(e)]})
Example #5
def field(datasetname):
    """
    get the column names and any existing info for them
    - add check for if source name does not exist
    """
    dataset = get_dataset(datasetname)

    if dataset.mapping:
        #we have a model.  Get the model info
        modeler = dataset.mapping['mapping']
        refineproj = dataset.source.get_or_create_ORProject()
        columns = refineproj.refineproj.columns
        return jsonify({"columns": columns, "modeler": modeler},
                       headers={'Cache-Control': 'no-cache'})
    else:
        refineproj = dataset.source.get_or_create_ORProject()
        headers = {'Cache-Control': 'no-cache'}

        basemodeler = DEFAULT_SOURCE_MAPPING

        return jsonify(
            {
                "columns": refineproj.refineproj.columns,
                'modeler': basemodeler
            },
            headers=headers)
Example #6
def ORoperations(datasetname):
    try:
        dataset = get_dataset(datasetname)

        ORinstructions = dataset.source.getORInstructions()
        return jsonify(ORinstructions, headers={'Cache-Control': 'no-cache'})
    except Exception as e:
        return jsonify({"error": "Could not fetch the ORinstructions: " + str(e)})
Example #7
def ORoperations(datasetname):
    try:
        dataset = get_dataset(datasetname)

        ORinstructions = dataset.source.getORInstructions()
        return jsonify(ORinstructions, headers={'Cache-Control': 'no-cache'})
    except Exception as e:
        return jsonify(
            {"error": "Could not fetch the ORinstructions: " + str(e)})
Example #8
def field_polling_check(datasetname, columnname):
    """
    GET to check if the run is complete
    """
    dataset = get_dataset(datasetname)

    if dataset.data:
        #we have a model.  Get the model info
        return jsonify({"error":"not yet implemented"})
    else:
        refineproj = dataset.source.get_or_create_ORProject()
        # this is awkward.  the class should be extended
        return jsonify(refineproj.refineproj.columns)
Example #9
def field_polling_check(datasetname, columnname):
    """
    GET to check if the run is complete
    """
    dataset = get_dataset(datasetname)

    if dataset.data:
        #we have a model.  Get the model info
        return jsonify({"error": "not yet implemented"})
    else:
        refineproj = dataset.source.get_or_create_ORProject()
        # this is awkward.  the class should be extended
        return jsonify(refineproj.refineproj.columns)
Example #10
def delete(datasetname):
    try:
        dataset = get_dataset(datasetname)

        db.session.delete(dataset.source)
        db.session.commit()
        clear_index_cache()

        #drop solr index
        #solr.drop_index(source.name)
        return jsonify(True)
    except Exception as e:
        return jsonify({"errors": [str(e)]})
Example #11
def delete(datasetname):
    try:
        dataset = get_dataset(datasetname)
        require.dataset.update(dataset)

        db.session.delete(dataset.source)
        db.session.commit()
        clear_index_cache()

        #drop solr index
        #solr.drop_index(source.name)
        return jsonify(True)
    except Exception as e:
        return jsonify({"errors": [str(e)]})
Example #12
def cubes_model(star_name):

    cubes_arg = request.args.get("cubes", None)

    try:
        cubes = cubes_arg.split("|")
    except AttributeError:
        raise RequestError("Parameter cubes with value '%s' should be a list "
                           "of valid cube names separated by '|'" % cubes_arg)

    if len(cubes) > 5:
        raise RequestError("You can only join 5 cubes together at one time")

    g.cube = get_complex_cube(star_name, cubes)

    hier_limits = None
    # Copy from the application context
    #g.json_record_limit = current_app.slicer.json_record_limit
    g.json_record_limit = 10000
    if "prettyprint" in request.args:
        g.prettyprint = str_to_bool(request.args.get("prettyprint"))
    else:
        g.prettyprint = current_app.slicer.prettyprint

    response = g.cube.to_dict(expand_dimensions=True,
                              with_mappings=False,
                              full_attribute_names=True,
                              create_label=True,
                              hierarchy_limits=hier_limits)

    response["features"] = workspace.cube_features(g.cube)

    return jsonify(response)
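The handler reads a pipe-separated cubes query parameter and an optional prettyprint flag; a client request might look like the sketch below (the URL path and host are assumptions, the parameter names come from the code above):

# Hypothetical request against a running slicer instance.
import requests

resp = requests.get(
    "http://localhost:5000/cubes/mystar/model",
    params={"cubes": "spending|population", "prettyprint": "true"},
)
model = resp.json()
print(model.get("features"))  # filled in from workspace.cube_features(g.cube)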
Example #13
def index():
    #page = request.args.get('page')

    q = Dataset.get_all_admin().all()

    returnset = []
    for theobj in q:
        returnset.append(theobj)

    # if len(fields) < 1 and not getsources:
    #     return jsonify(q)

    # returnset = []
    # for obj in q:
    #     tempobj = {}
    #     if len(fields) >0:
    #         for field in fields:
    #             tempobj[field] = getattr(obj, field)
    #     else:
    #         tempobj = obj.as_dict()
    #     if getsources:
    #         tempobj['sources'] = obj.sources.all()
    #     returnset.append(tempobj)

    # TODO: Facets for territories and languages
    # TODO: filters on facet dimensions
    #maybe put the pager back in
    # print q
    # pager = Pager(q)
    return jsonify(returnset, headers={'Cache-Control': 'no-cache'})
Example #14
def cubes_model(star_name):

    cubes_arg = request.args.get("cubes", None)

    try:
        cubes = cubes_arg.split("|")
    except AttributeError:
        raise RequestError("Parameter cubes with value '%s' should be a list "
                           "of valid cube names separated by '|'" % cubes_arg)

    if len(cubes) > 5:
        raise RequestError("You can only join 5 cubes together at one time")

    g.cube = get_complex_cube(star_name, cubes)

    hier_limits = None
    # Copy from the application context
    #g.json_record_limit = current_app.slicer.json_record_limit
    g.json_record_limit = 10000
    if "prettyprint" in request.args:
        g.prettyprint = str_to_bool(request.args.get("prettyprint"))
    else:
        g.prettyprint = current_app.slicer.prettyprint

    response = g.cube.to_dict(expand_dimensions=True,
                              with_mappings=False,
                              full_attribute_names=True,
                              create_label=True,
                              hierarchy_limits=hier_limits)

    response["features"] = workspace.cube_features(g.cube)

    return jsonify(response)
Example #15
def view(urlhash):
    """
    Get the dataset info to populate a form
    """

    dataview = Dataview.by_urlhash(urlhash)
    return jsonify(dataview, headers={'Cache-Control': 'no-cache'})
Example #16
def update(name):
    """
    Update a dataset with a json object and name from the dataset form
    """
    try:
        dataset = get_dataset(name)
        schema = dataset_schema(ValidationState(dataset))
        data = schema.deserialize(api_form_data())

        dataset.update(data)
        db.session.commit()
        #clear_index_cache()
        return jsonify({"success": True})
    except Exception as e:
        print e
        return jsonify({"errors": ['Unknown error has occurred']})
Example #17
def index():
    #page = request.args.get('page')

    q = Dataset.get_all_admin().all()

    returnset = []
    for theobj in q:
        returnset.append(theobj)

    # if len(fields) < 1 and not getsources:
    #     return jsonify(q)
    
    # returnset = []
    # for obj in q:
    #     tempobj = {} 
    #     if len(fields) >0:
    #         for field in fields:
    #             tempobj[field] = getattr(obj, field)
    #     else:
    #         tempobj = obj.as_dict()
    #     if getsources:
    #         tempobj['sources'] = obj.sources.all()
    #     returnset.append(tempobj)

    # TODO: Facets for territories and languages
    # TODO: filters on facet dimensions
    #maybe put the pager back in
    # print q
    # pager = Pager(q)
    return jsonify(returnset)
Example #18
def create():
    """
    This takes a json format post with label, name, description
    and creates a private dataset to put sources in
    The json_errors return a json object
    """

    if not require.dataset.create():
        return jsonify(
            {"errors": ["Can not create new dataset.  Permission denied"]})

    try:
        dataset = api_form_data()
        if not dataset.get("dataorg", None):
            return jsonify(
                {"errors": ["You must select the data source organization"]})
        model = {'data': dataset}
        schema = dataset_schema(ValidationState(model))
        data = schema.deserialize(dataset)

        #should have a better place for slugify
        if data.get('name', None):
            tempname = slugify(str(data.get('name')), max_length=50)
        else:
            tempname = slugify(str(data.get('label')), max_length=50)

        if Dataset.by_name(tempname) is not None:
            return jsonify(
                {"errors": ["A dataset with this name already exists"]})

        dataset = Dataset(data=data)
        dataset.managers.append(current_user)
        db.session.add(dataset)

        dataset_source = Source.by_source_name(dataset.name)
        if not dataset_source:
            dataset_source = Source(dataset=dataset, name=dataset.name)
            db.session.add(dataset_source)
        else:
            dataset_source.dataset = dataset
        #creating a new dataset so we have to create a source as well
        db.session.commit()
        return jsonify({"success": True, "dataset": dataset.name})
    except Exception as e:
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        return jsonify({"errors": ['Unknown error has occurred: ' + str(e)]})
Example #19
def view(name):
    """
    Get the dataset info to populate a form
    """

    dataset = get_dataset(name)
    outputdict = dataset.detailed_dict()
    return jsonify(outputdict, headers={'Cache-Control': 'no-cache'})
Example #20
def view(name):
    """
    Get the dataset info to populate a form
    """

    dataset = get_dataset(name)
    outputdict = dataset.detailed_dict()
    return jsonify(outputdict, headers={'Cache-Control': 'no-cache'})
Example #21
def update(name):
    """
    Update a dataset with a json object and name from the dataset form
    """
    try:
        dataset = get_dataset(name)
        require.dataset.update(dataset)
        schema = dataset_schema(ValidationState(dataset))
        data = schema.deserialize(api_form_data())

        dataset.update(data)
        db.session.commit()
        #clear_index_cache()
        return jsonify({"success": True})
    except Exception as e:
        print e
        return jsonify({"errors": ['Unknown error has occurred']})
Example #22
def update_model(datasetname):

    #we just got everything now let's save it
    sourcemeta = request.get_json().get("meta", None)
    sourcemodeler = request.get_json().get("modeler", None)
    #validate that we have everything here

    r = {"mapping":sourcemodeler}

    #let's handle the compounds
    for item in r['mapping'].values():
        if item['type'] in ("compound", "geometry"):
            for attitem in item['attributes'].values():
                if attitem['column'] == 'countryid':
                    pass
                attitem['column'] = item['column']

    #if not hasattr(r['mapping'], 'theid'):
    r['mapping']['theid'] = {
        "default_value": "",
        "description": "Unique ID",
        "datatype": "string",
        "key": True,
        "label": "UniqueID",
        "column": "uniqueid",
        "type": "attribute",
        "form": {
            "label": "Unique Identifier"
        }
    }

    r['mapping']['geom_time_id'] = {
        "default_value": "",
        "description": "Geometry Time ID",
        "datatype": "integer",
        "label": "Geometry Time ID",
        "column": "geom_time_id",
        "type": "geom_time_id",
        "form": {
            "label": "Geometry-Time ID"
        }
    }

    dataset = get_dataset(datasetname)
    dataset.mapping = r
    dataset.ORoperations = {'data': dataset.source.getORInstructions()}
    dataset.source.addData(r)
    db.session.commit()

    load_source(dataset.source.id)
    cache.clear()
    #add async request to load data

    return jsonify({"success":True})
Example #23
def create():
    """
    This takes a json format post with label, name, description
    and creates a private dataset to put sources in
    The json_errors return a json object
    """

    if not require.dataset.create():
        return jsonify({"errors":["Can not create new dataset.  Permission denied"]})

    try:
        dataset = api_form_data()
        if not dataset.get("dataorg", None):
            return jsonify({"errors":["You must select the data source organization"]}) 
        model = {'data': dataset}
        schema = dataset_schema(ValidationState(model))
        data = schema.deserialize(dataset)

        #should have a better place for slugify
        if data.get('name', None):
            tempname = slugify(str(data.get('name')), max_length=50)
        else:
            tempname = slugify(str(data.get('label')), max_length=50)

        if Dataset.by_name(tempname) is not None:
            return jsonify({"errors":["A dataset with this name already exists "]})

        dataset = Dataset(data=data)
        dataset.managers.append(current_user)
        db.session.add(dataset)
        
        dataset_source = Source.by_source_name(dataset.name)
        if not dataset_source:
            dataset_source = Source(dataset=dataset, name=dataset.name)
            db.session.add(dataset_source)
        else:
            dataset_source.dataset = dataset
        #creating a new dataset so we have to create a source as well
        db.session.commit()
        return jsonify({"success":True, "dataset":dataset.name})
    except Exception, e:
        ex_type, ex, tb = sys.exc_info()
        print traceback.print_tb(tb)
        return jsonify({"errors":['Unknown Error has occurred: ' + str(e)]})
Example #24
def apply_default_model(datasetname):

    dataset = get_dataset(datasetname)

    if not dataset or not dataset.dataorg:
        return jsonify({"errors": ["Invalid URL. Could not find dataorg"]})

    dataorg = dataset.dataorg

    if not dataorg.ORTemplate or not dataorg.mappingTemplate:
        return jsonify({"errors": ["Dataorg has no template"]})

    dataset.source.applyORInstructions(dataorg.ORTemplate)

    dataset.mapping = dataorg.mappingTemplate

    db.session.commit()

    return jsonify(dataset.source, headers={'Cache-Control': 'no-cache'})
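apply_default_model is the counterpart of save_default_model (Example #2): one stores a dataset's OR instructions and mapping on the dataorg as ORTemplate/mappingTemplate, the other copies them onto a sibling dataset. A hypothetical round trip, with both routes assumed:

# Hypothetical routes; only the save/apply pairing comes from the handlers.
import requests

base = "http://localhost:5000/datasets"
requests.post(base + "/source-dataset/savedefaultmodel")    # save_default_model
requests.post(base + "/target-dataset/applydefaultmodel")   # apply_default_model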
Example #25
def field(datasetname):
    """
    get the column names and any existing info for them
    - add check for if source name does not exist
    """
    dataset = get_dataset(datasetname)

    if dataset.mapping:
        #we have a model.  Get the model info
        modeler = dataset.mapping['mapping']
        refineproj = dataset.source.get_or_create_ORProject()
        columns = refineproj.refineproj.columns
        return jsonify({"columns":columns, "modeler":modeler}, headers= {'Cache-Control' : 'no-cache'})
    else:
        refineproj = dataset.source.get_or_create_ORProject()
        headers = {'Cache-Control': 'no-cache'}

        basemodeler = DEFAULT_SOURCE_MAPPING

        return jsonify({"columns": refineproj.refineproj.columns, 'modeler':basemodeler}, headers=headers)
Example #26
def update_model(datasetname):

    #we just got everything now let's save it
    sourcemeta = request.get_json().get("meta", None)
    sourcemodeler = request.get_json().get("modeler", None)
    #validate that we have everything here

    r = {"mapping": sourcemodeler}

    #let's handle the compounds
    for item in r['mapping'].values():
        if item['type'] in ("compound", "geometry"):
            for attitem in item['attributes'].values():
                if attitem['column'] == 'countryid':
                    pass
                attitem['column'] = item['column']

    #if not hasattr(r['mapping'], 'theid'):
    r['mapping']['theid'] = {
        "default_value": "",
        "description": "Unique ID",
        "datatype": "string",
        "key": True,
        "label": "UniqueID",
        "column": "uniqueid",
        "type": "attribute",
        "form": {
            "label": "Unique Identifier"
        }
    }

    r['mapping']['geom_time_id'] = {
        "default_value": "",
        "description": "Geometry Time ID",
        "datatype": "integer",
        "label": "Geometry Time ID",
        "column": "geom_time_id",
        "type": "geom_time_id",
        "form": {
            "label": "Geometry-Time ID"
        }
    }

    dataset = get_dataset(datasetname)
    dataset.mapping = r
    dataset.ORoperations = {'data': dataset.source.getORInstructions()}
    dataset.source.addData(r)
    db.session.commit()

    load_source(dataset.source.id)
    cache.clear()
    #add async request to load data

    return jsonify({"success": True})
Example #27
def permissions():
    """
    Check a user's permissions for a given dataset. This could also be
    done via request to the user, but since we're not really doing a
    RESTful service we do this via the api instead.
    """
    if 'dataset' not in request.args:
        return jsonify({'error': 'Parameter dataset missing'}, status=400)

    # Get the dataset we want to check permissions for
    dataset = Dataset.by_name(request.args['dataset'])

    # Return permissions
    return jsonify({
        'create': can.dataset.create() and dataset is None,
        'read': False if dataset is None else can.dataset.read(dataset),
        'update': False if dataset is None else can.dataset.update(dataset),
        'delete': False if dataset is None else can.dataset.delete(dataset)
    }, headers={'Cache-Control': 'no-cache'})
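Only the dataset query parameter and the four response keys are fixed by the handler; the mount point below is an assumption:

# Hypothetical permissions check for a named dataset.
import requests

resp = requests.get("http://localhost:5000/permissions",
                    params={"dataset": "my-budget-data"})
print(resp.json())  # {'create': ..., 'read': ..., 'update': ..., 'delete': ...}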
Example #28
def permissions():
    """
    Check a user's permissions for a given dataset. This could also be
    done via request to the user, but since we're not really doing a
    RESTful service we do this via the api instead.
    """
    if 'dataset' not in request.args:
        return jsonify({'error': 'Parameter dataset missing'}, status=400)

    # Get the dataset we want to check permissions for
    dataset = Dataset.by_name(request.args['dataset'])

    # Return permissions
    return jsonify(
        {
            'create': is_admin(current_user),
            'read': is_admin(current_user),
            'update': is_admin(current_user),
            'delete': is_admin(current_user)
        },
        headers={'Cache-Control': 'no-cache'})
Example #29
def feedback_post():
    """
    This takes a json format post with label, name, description
    and creates a private dataset to put sources in
    The json_errors return a json object
    """

    try:
        feedback = Feedback()
        feedback.email = request.form.get("email", None)
        feedback.name = request.form.get("name", None)
        feedback.message = request.form.get("message", None)
        feedback.url = request.form.get("url", None)

        db.session.add(feedback)
        db.session.commit()
        return jsonify({"success": True})
    except Exception as e:
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        return jsonify({"errors": ['Unknown error has occurred: ' + str(e)]})
Example #30
def feedback_post():
    """
    This takes a json format post with label, name, description
    and creates a private dataset to put sources in
    The json_errors return a json object
    """

    try:
        feedback = Feedback()
        feedback.email = request.form.get("email", None)
        feedback.name = request.form.get("name", None)
        feedback.message = request.form.get("message", None)
        feedback.url = request.form.get("url", None)

        db.session.add(feedback)
        db.session.commit()
        return jsonify({"success": True})
    except Exception as e:
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        return jsonify({"errors": ['Unknown error has occurred: ' + str(e)]})
Example #31
def apply_default_model(datasetname):

    dataset = get_dataset(datasetname)

    if not dataset or not dataset.dataorg:
        return jsonify({"errors": ["Invalid URL. Could not find dataorg"]})

    dataorg = dataset.dataorg

    if not dataorg.ORTemplate or not dataorg.mappingTemplate:
        return jsonify({"errors": ["Dataorg has no template"]})

    dataset.source.applyORInstructions(dataorg.ORTemplate)

    dataset.mapping = dataorg.mappingTemplate

    db.session.commit()

    return jsonify(dataset.source, headers={'Cache-Control': 'no-cache'})
Example #32
def save_default_model(datasetname):

    dataset = get_dataset(datasetname)

    if not dataset.mapping or not dataset.source:
        return jsonify({"errors": ["No mapping for this dataset"]})

    if not dataset.dataorg:
        return jsonify({"errors": ["Dataset has no dataorg"]})

    #get the OR instructions from the dataset's source
    ORinstructions = dataset.source.getORInstructions()

    #get the mapping from the dataset
    mapping = dataset.mapping

    dataorg = dataset.dataorg

    dataorg.ORTemplate = {"data": ORinstructions}
    dataorg.mappingTemplate = mapping

    db.session.commit()

    return jsonify({"success": True})
Example #33
def model(datasetname):
    #if not sourcename then we are saving the defaults for dataset

    dataset = get_dataset(datasetname)
    if not dataset.source:
        #then create one
        dataset_source = Source.by_source_name(dataset.name)
        if not dataset_source:
            dataset_source = Source(name=dataset.name, dataset=dataset)
            db.session.add(dataset_source)
        else:
            dataset_source.dataset = dataset
        db.session.commit()

        #figure out what they need over there?
    return jsonify(dataset.source)
Example #34
def model(datasetname):
    #if not sourcename then we are saving the defaults for dataset
    
    dataset = get_dataset(datasetname)
    if not dataset.source:
        #then create one
        dataset_source = Source.by_source_name(dataset.name)
        if not dataset_source:
            dataset_source = Source(name=dataset.name, dataset=dataset)
            db.session.add(dataset_source)
        else:
            dataset_source.dataset = dataset
        db.session.commit()

        #figure out what they need over there?
    return jsonify(dataset.source)
Example #35
def update_model_createnew(datasetname):
    #refactor to include the update

    dataset = get_dataset(datasetname)

    #source will have name and URL
    sourceapi = api_form_data()

    if not sourceapi.get('name'):
        sourceapi['name'] = dataset.name
        #return jsonify({"errors":["You must enter a data name " + str(e)]})

    #verify that name is unique and URL is real
    #model = {'source': source}
    schema = source_schema(ValidationState(sourceapi))
    try:
        data = schema.deserialize(sourceapi)
    except Invalid as e:
        #print message in the future
        return jsonify({"errors": ["Invalid field " + str(e)]})
Example #36
def update_model_createnew(datasetname):
    #refactor to include the update

    dataset = get_dataset(datasetname)

    #source will have name and URL
    sourceapi = api_form_data()

    if not sourceapi.get('name'):
        sourceapi['name'] = dataset.name
        #return jsonify({"errors":["You must enter a data name " + str(e)]})

    #verify that name is unique and URL is real
    #model = {'source': source}
    schema = source_schema(ValidationState(sourceapi))
    try:
        data = schema.deserialize(sourceapi)
    except Invalid as e:
        #print message in the future
        return jsonify({"errors": ["Invalid field " + str(e)]})
Example #37
def reference_preprocessors():
    return jsonify(sorted(AVAILABLE_FUNCTIONS, key=lambda d: d['label']))
Example #38
def dataviews():
    #page = request.args.get('page')

    q = Dataview.all().all()

    return jsonify(q, headers={'Cache-Control': 'no-cache'})
Example #39
def aggregate_cubes(star_name):

    cubes_arg = request.args.get("cubes", None)

    try:
        cubes = cubes_arg.split("|")
    except AttributeError:
        raise RequestError(
            "Parameter cubes with value '%s' should be a list of valid "
            "cube names separated by '|'" % cubes_arg)

    if len(cubes) > 5:
        raise RequestError("You can only join 5 cubes together at one time")

    g.cube = get_complex_cube(star_name, cubes)

    g.browser = current_app.cubes_workspace.browser(g.cube)

    cube = g.cube

    output_format = validated_parameter(request.args,
                                        "format",
                                        values=["json", "csv", "excel"],
                                        default="json")

    header_type = validated_parameter(request.args,
                                      "header",
                                      values=["names", "labels", "none"],
                                      default="labels")

    fields_str = request.args.get("fields")
    if fields_str:
        fields = fields_str.lower().split(',')
    else:
        fields = None

    # Aggregates
    # ----------

    aggregates = []
    for agg in request.args.getlist("aggregates") or []:
        aggregates += agg.split("|")

    drilldown = []

    ddlist = request.args.getlist("drilldown")
    if ddlist:
        for ddstring in ddlist:
            drilldown += ddstring.split("|")

    #this handles cuts with geometry__time
    prepare_cell_cubes_ext(restrict=False)

    prepare_cell("split", "split")

    result = g.browser.aggregate(g.cell,
                                 aggregates=aggregates,
                                 drilldown=drilldown,
                                 split=g.split,
                                 page=g.page,
                                 page_size=g.page_size,
                                 order=g.order)

    # Hide cuts that were generated internally (default: don't)
    if current_app.slicer.hide_private_cuts:
        result.cell = result.cell.public_cell()

    # Copy from the application context
    #g.json_record_limit = current_app.slicer.json_record_limit
    g.json_record_limit = 10000
    if "prettyprint" in request.args:
        g.prettyprint = str_to_bool(request.args.get("prettyprint"))
    else:
        g.prettyprint = current_app.slicer.prettyprint

    if output_format == "json":
        return jsonify(result)

    elif output_format not in ["csv", "excel"]:
        raise RequestError("unknown response format '%s'" % output_format)

    # csv
    if header_type == "names":
        header = result.labels
    elif header_type == "labels":
        header = []
        for l in result.labels:
            # TODO: add a little bit of polish to this
            if l == SPLIT_DIMENSION_NAME:
                header.append('Matches Filters')
            else:
                header += [
                    attr.label or attr.name
                    for attr in cube.get_attributes([l], aggregated=True)
                ]
    else:
        header = None

    fields = result.labels

    try:
        filename_output = cubes[0] + "_" + datetime.now().strftime("%Y-%m-%d")
    except (IndexError, TypeError):
        filename_output = "aggregate_" + datetime.now().strftime("%Y-%m-%d")

    if output_format == "excel":
        output_string = xls_generator(result,
                                      fields,
                                      include_header=bool(header),
                                      header=header)
        headers = {
            "Content-Disposition":
            'attachment; filename="' + filename_output + '.xlsx"'
        }
        return Response(
            output_string,
            mimetype=
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            headers=headers)
    else:

        generator = csv_generator(result,
                                  fields,
                                  include_header=bool(header),
                                  header=header)

        headers = {
            "Content-Disposition":
            'attachment; filename="' + filename_output + '.csv"'
        }
        return Response(generator, mimetype='text/csv', headers=headers)
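A CSV export request putting the handler's parameters together; the parameter names (cubes, aggregates, drilldown, format, header) come from the code above, the URL does not:

# Hypothetical aggregate-to-CSV request.
import requests

resp = requests.get(
    "http://localhost:5000/cubes/mystar/aggregate",
    params={
        "cubes": "spending|population",
        "aggregates": "amount_sum",
        "drilldown": "year|region",
        "format": "csv",
        "header": "labels",
    },
)
with open("aggregate.csv", "wb") as f:
    f.write(resp.content)  # CSV body with a Content-Disposition filename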
Example #40
def reference_data():
    dataorgs = fromModel(DataOrg.get_all().all())
    return jsonify({
        'dataTypes': sorted(DATATYPES, key=lambda d: d['label']),
        'dataorgs': sorted(dataorgs, key=lambda d: d['label'])
    })
Example #41
def reference_preprocessors():
    return jsonify(sorted(AVAILABLE_FUNCTIONS, key=lambda d: d['label']))
Example #42
        source = basesource
        source.reload_openrefine()

        #check if source exists
    if sourceapi.get('prefuncs', None):
        prefuncs = json.loads(sourceapi['prefuncs'])
        dbsave = {}
        for p in prefuncs:
            dbsave[p] = p
        dataset.prefuncs = dbsave

    #dataset.managers.append(current_user)

    db.session.commit()

    return jsonify(source)


@blueprint.route('/datasets/<datasetname>/runmodel', methods=['POST', 'PUT'])
@api_json_errors
def update_model(datasetname):

    #we just got everything now let's save it
    sourcemeta = request.get_json().get("meta", None)
    sourcemodeler = request.get_json().get("modeler", None)
    #validate that we have everything here

    r = {"mapping": sourcemodeler}

    #let's handle the compounds
    for item in r['mapping'].values():
Example #43
def dataorgs():
    #page = request.args.get('page')

    q = DataOrg.get_all_admin().all()

    return jsonify(q, headers={'Cache-Control': 'no-cache'})
Example #44
def dataorgs():
    #page = request.args.get('page')

    q = DataOrg.get_all_admin().all()

    return jsonify(q, headers={'Cache-Control': 'no-cache'})
Example #45
def reference_data():
    dataorgs = fromModel(DataOrg.get_all().all())
    return jsonify({
        'dataTypes': sorted(DATATYPES, key=lambda d: d['label']),
        'dataorgs': sorted(dataorgs, key=lambda d: d['label'])
    })
Example #46
def aggregate_cubes(star_name):

    cubes_arg = request.args.get("cubes", None)

    try:
        cubes = cubes_arg.split("|")
    except AttributeError:
        raise RequestError(
            "Parameter cubes with value '%s' should be a list of valid "
            "cube names separated by '|'" % cubes_arg)

    if len(cubes) > 5:
        raise RequestError("You can only join 5 cubes together at one time")

    g.cube = get_complex_cube(star_name, cubes)

    g.browser = current_app.cubes_workspace.browser(g.cube)

    cube = g.cube

    output_format = validated_parameter(request.args, "format",
                                        values=["json", "csv", "excel"],
                                        default="json")

    header_type = validated_parameter(request.args, "header",
                                      values=["names", "labels", "none"],
                                      default="labels")

    fields_str = request.args.get("fields")
    if fields_str:
        fields = fields_str.lower().split(',')
    else:
        fields = None

    # Aggregates
    # ----------

    aggregates = []
    for agg in request.args.getlist("aggregates") or []:
        aggregates += agg.split("|")

    drilldown = []

    ddlist = request.args.getlist("drilldown")
    if ddlist:
        for ddstring in ddlist:
            drilldown += ddstring.split("|")

    #this handles cuts with geometry__time
    prepare_cell_cubes_ext(restrict=False)

    prepare_cell("split", "split")

    result = g.browser.aggregate(g.cell,
                                 aggregates=aggregates,
                                 drilldown=drilldown,
                                 split=g.split,
                                 page=g.page,
                                 page_size=g.page_size,
                                 order=g.order)

    # Hide cuts that were generated internally (default: don't)
    if current_app.slicer.hide_private_cuts:
        result.cell = result.cell.public_cell()

    # Copy from the application context
    #g.json_record_limit = current_app.slicer.json_record_limit
    g.json_record_limit = 10000
    if "prettyprint" in request.args:
        g.prettyprint = str_to_bool(request.args.get("prettyprint"))
    else:
        g.prettyprint = current_app.slicer.prettyprint

    if output_format == "json":
        return jsonify(result)

    elif output_format not in ["csv", "excel"]:
        raise RequestError("unknown response format '%s'" % output_format)

    # csv
    if header_type == "names":
        header = result.labels
    elif header_type == "labels":
        header = []
        for l in result.labels:
            # TODO: add a little bit of polish to this
            if l == SPLIT_DIMENSION_NAME:
                header.append('Matches Filters')
            else:
                header += [attr.label or attr.name
                           for attr in cube.get_attributes([l], aggregated=True)]
    else:
        header = None

    fields = result.labels

    
    try:
        filename_output = cubes[0] + "_" + datetime.now().strftime("%Y-%m-%d")
    except (IndexError, TypeError):
        filename_output = "aggregate_" + datetime.now().strftime("%Y-%m-%d")

    if output_format == "excel":
        output_string = xls_generator(result,
                                 fields,
                                 include_header=bool(header),
                                 header=header)
        headers = {"Content-Disposition": 'attachment; filename="' + filename_output + '.xlsx"'}
        return Response(output_string,
                        mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
                        headers=headers)
    else:
        generator = csv_generator(result,
                                  fields,
                                  include_header=bool(header),
                                  header=header)

        headers = {"Content-Disposition":
                   'attachment; filename="' + filename_output + '.csv"'}
        return Response(generator,
                        mimetype='text/csv',
                        headers=headers)
Example #47

        #check if source exists
    if sourceapi.get('prefuncs', None):
        prefuncs = json.loads(sourceapi['prefuncs'])
        dbsave = {}
        for p in prefuncs:
            dbsave[p] = p 
        dataset.prefuncs = dbsave

    db.session.commit()

    # if oroperations:
    #     source.applyORInstructions({'data':oroperations})
    
    return jsonify(source)


@blueprint.route('/datasets/<datasetname>/runmodel', methods=['POST', 'PUT'])
@api_json_errors
def update_model(datasetname):

    #we just got everything now let's save it
    sourcemeta = request.get_json().get("meta", None)
    sourcemodeler = request.get_json().get("modeler", None)
    #validate that we have everything here

    r = {"mapping":sourcemodeler}