Example #1
File: views.py  Project: EzanLTD/plenario
def approve_dataset(source_url_hash):
    # Get the MetaTable row, change its approved_status, and bounce back to view-datasets.

    meta = session.query(MetaTable).get(source_url_hash)

    json_data_types = None
    if (not meta.is_socrata_source) and meta.contributed_data_types:
        json_data_types = json.loads(meta.contributed_data_types)

    add_dataset_task.delay(source_url_hash, data_types=json_data_types)

    meta.approved_status = 'true'
    session.commit()

    # Email the contributor to let them know their dataset has been approved.

    msg_body = """Hello %s,\r\n
\r\n
Your dataset has been approved and added to Plenar.io:\r\n
\r\n
%s\r\n
\r\n
It should appear on http://plenar.io within 24 hours.\r\n
\r\n
Thank you!\r\n
The Plenario Team\r\n
http://plenar.io""" % (meta.contributor_name, meta.human_name)

    send_mail(subject="Your dataset has been added to Plenar.io",
              recipient=meta.contributor_email,
              body=msg_body)
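All of these views hand the heavy lifting to add_dataset_task via Celery's .delay(). For orientation, here is a minimal sketch of how such a task could be declared; the broker URL and the task body are assumptions, not Plenario's actual ETL code:

# Hypothetical sketch of a Celery task like add_dataset_task.
# The broker URL and the ingest logic are assumed, not Plenario's real code.
from celery import Celery

worker = Celery('plenario', broker='redis://localhost:6379/0')

@worker.task
def add_dataset_task(source_url_hash, data_types=None):
    # Placeholder body: the real task looks up the MetaTable row by
    # source_url_hash and runs the ETL ingest for that dataset.
    print('ingesting %s (data_types=%r)' % (source_url_hash, data_types))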
Example #2
def submit(context):
    form = request.form
    is_shapefile = context['is_shapefile']
    is_admin = context['is_admin']

    try:  # Store the metadata
        if is_shapefile:
            if shape_already_submitted(form['dataset_name']):
                msg = 'A Shapefile with this name has already been submitted'
                raise RuntimeError(msg)
            else:
                meta = shape_meta_from_submit_form(form, is_approved=is_admin)
        else:
            meta = point_meta_from_submit_form(form, is_approved=is_admin)
    except RuntimeError as e:
        context['error_msg'] = e.message
        return render_with_context(context)
    else:
        # Successfully stored the metadata
        # Now fire ingestion task...
        if is_admin:
            if is_shapefile:
                add_shape_task.delay(meta.dataset_name)
            else:
                add_dataset_task.delay(meta.source_url_hash)
        # ...or send a thank-you email to the contributor
        else:
            return send_submission_email(meta.human_name,
                                         meta.contributor_name,
                                         meta.contributor_email)
        return view_datasets()
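submit() is a shared dispatcher keyed off two flags in the context dict. A hedged sketch of how two Flask routes might feed it; the route paths and view names here are illustrative assumptions, not Plenario's real routing:

# Hypothetical wiring only; the real routes live elsewhere in Plenario.
from flask import Flask

app = Flask(__name__)

@app.route('/contribute', methods=['GET', 'POST'])
def contribute_dataset():
    # Public point-dataset submission: not a shapefile, not an admin.
    return submit({'is_shapefile': False, 'is_admin': False})

@app.route('/admin/add-shape', methods=['GET', 'POST'])
def admin_add_shape():
    # Admin shapefile submission: approved and ingested immediately.
    return submit({'is_shapefile': True, 'is_admin': True})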
Example #3
def approve_dataset(source_url_hash):
    # Approve it
    meta = session.query(MetaTable).get(source_url_hash)
    meta.approved_status = True
    session.commit()
    # Ingest it
    add_dataset_task.delay(source_url_hash)
    send_approval_email(meta.human_name, meta.contributor_name,
                        meta.contributor_email)
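Examples #1 and #3 are the same view before and after refactoring; the inline message-building moved into a helper. A sketch of what send_approval_email could look like, reconstructed from the inline version in Example #1 (send_mail is the same helper Example #1 calls; everything else is an assumption):

# Assumed implementation, reconstructed from the inline code in Example #1.
def send_approval_email(human_name, contributor_name, contributor_email):
    msg_body = ('Hello %s,\r\n\r\n'
                'Your dataset has been approved and added to Plenar.io:\r\n\r\n'
                '%s\r\n\r\n'
                'It should appear on http://plenar.io within 24 hours.\r\n\r\n'
                'Thank you!\r\n'
                'The Plenario Team\r\n'
                'http://plenar.io') % (contributor_name, human_name)
    # send_mail is the same helper used by Example #1.
    send_mail(subject='Your dataset has been added to Plenar.io',
              recipient=contributor_email,
              body=msg_body)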
Example #4
File: views.py  Project: EzanLTD/plenario
def add_dataset():
    dataset_info = {}
    errors = []
    socrata_source = False

    url = ""
    dataset_id = None
    md = None

    if request.args.get('dataset_url'):
        url = request.args.get('dataset_url')
        (dataset_info, errors,
         socrata_source) = get_context_for_new_dataset(url)

        # populate contributor info from session
        user = session.query(User).get(flask_session['user_id'])
        dataset_info['contributor_name'] = user.name
        dataset_info['contributor_organization'] = 'Plenario Admin'
        dataset_info['contributor_email'] = user.email

        # check if dataset with the same URL has already been loaded
        dataset_id = md5(url).hexdigest()
        md = session.query(MetaTable).get(dataset_id)
        if md:
            errors.append(
                "A dataset with that URL has already been loaded: '%s'" %
                md.human_name)

    if request.method == 'POST' and not md:
        md = add_dataset_to_metatable(request,
                                      url,
                                      dataset_id,
                                      dataset_info,
                                      socrata_source,
                                      approved_status=True)

        json_data_types = None
        if (not md.is_socrata_source) and md.contributed_data_types:
            json_data_types = json.loads(md.contributed_data_types)

        add_dataset_task.delay(md.source_url_hash, data_types=json_data_types)

        flash('%s added successfully!' % md.human_name, 'success')
        return redirect(url_for('views.view_datasets'))

    context = {
        'dataset_info': dataset_info,
        'errors': errors,
        'socrata_source': socrata_source
    }
    return render_template('admin/add-dataset.html', **context)
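Both admin views key MetaTable rows on the MD5 hex digest of the source URL, which is why session.query(MetaTable).get(dataset_id) doubles as the duplicate check (Query.get looks up by primary key). A standalone illustration; note these examples are Python 2, where md5() accepts a str, while Python 3 requires bytes:

from hashlib import md5

url = 'https://data.cityofchicago.org/api/views/ijzp-q8t2/rows.csv'
dataset_id = md5(url.encode('utf-8')).hexdigest()  # md5(url) suffices in Python 2
print(dataset_id)  # a stable 32-character hex key used as the MetaTable primary key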
Example #5
def submit_dataset():
    # Slightly dumb way to make sure that POSTs are only coming from
    # originating domain for the time being
    referer = request.headers.get('Referer')
    if referer:
        referer = urlparse(referer).netloc
        req_url = urlparse(request.url).netloc
        if referer != req_url:
            abort(401)
    else:
        abort(401)
    resp = {'status': 'ok', 'message': ''}
    status_code = 200
    errors = []
    post = request.form.get('data')
    if not post:
        try:
            post = request.form.keys()[0]
        except IndexError:
            resp['status'] = 'error'
            resp['message'] = 'Unable to decode POST data'
            status_code = 400
    if status_code == 200:
        post = json.loads(post)
        if post.get('view_url'):
            if post.get('socrata'):
                source_domain = urlparse(post['view_url']).netloc
                four_by_four = re.findall(r'/([a-z0-9]{4}-[a-z0-9]{4})', post['view_url'])[-1]
                view_url = 'http://%s/api/views/%s' % (source_domain, four_by_four)
                dataset_info, errors, status_code = get_socrata_data_info(view_url)
                source_url = '%s/rows.csv?accessType=DOWNLOAD' % view_url
            else:
                dataset_info = {
                    'attribution': '',
                    'description': '',
                }
                source_url = post['view_url']
                dataset_info['name'] = urlparse(source_url).path.split('/')[-1]
            if errors:
                resp['message'] = ', '.join(errors)
                resp['status'] = 'error'
                status_code = 400
            else:
                dataset_id = md5(source_url).hexdigest()
                md = session.query(MetaTable).get(dataset_id)
                if not md:
                    d = {
                        'dataset_name': slugify(dataset_info['name'], delim=u'_'),
                        'human_name': dataset_info['name'],
                        'attribution': dataset_info['attribution'],
                        'description': dataset_info['description'],
                        'source_url': source_url,
                        'source_url_hash': dataset_id,
                        'update_freq': post['update_frequency'],
                        'business_key': post['field_definitions']['id_field'],
                        'observed_date': post['field_definitions']['date_field'],
                        'latitude': post['field_definitions'].get('latitude'),
                        'longitude': post['field_definitions'].get('longitude'),
                        'location': post['field_definitions'].get('location')
                    }
                    # Cap the generated slug at 50 characters (slicing a
                    # shorter name is a harmless no-op).
                    d['dataset_name'] = d['dataset_name'][:50]
                    md = MetaTable(**d)
                    session.add(md)
                    session.commit()
                add_dataset.delay(md.source_url_hash, data_types=post.get('data_types'))
                resp['message'] = 'Dataset %s submitted successfully' % dataset_info['name']
        else:
            resp['status'] = 'error'
            resp['message'] = 'Must provide a url where data can be downloaded'
            status_code = 400
    resp = make_response(json.dumps(resp, default=dthandler), status_code)
    resp.headers['Content-Type'] = 'application/json'
    return resp
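The Socrata branch extracts the dataset's "four-by-four" identifier from the view URL with a regex, then rebuilds the canonical API endpoint and CSV download URL. Applied to a sample URL (the URL itself is illustrative):

import re
from urlparse import urlparse  # Python 2, matching the examples above

view = 'https://data.cityofchicago.org/Public-Safety/Crimes/ijzp-q8t2'
four_by_four = re.findall(r'/([a-z0-9]{4}-[a-z0-9]{4})', view)[-1]
view_url = 'http://%s/api/views/%s' % (urlparse(view).netloc, four_by_four)
print(view_url)  # http://data.cityofchicago.org/api/views/ijzp-q8t2
source_url = '%s/rows.csv?accessType=DOWNLOAD' % view_url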